/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32]	= x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64]	= x86_THREAD_STATE64_COUNT,
	[x86_THREAD_STATE]	= x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32]	= x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64]	= x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE]	= x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32]	= x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64]	= x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE]	= x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32]	= x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64]	= x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE]	= x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32]	= x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64]	= x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE]		= x86_AVX_STATE_COUNT,
};

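/*
 * Illustrative sketch (not code from this file): callers size their state
 * buffers from the table above before a get/set, e.g.
 *
 *	mach_msg_type_number_t count = _MachineStateCount[x86_THREAD_STATE64];
 *	assert(count == x86_THREAD_STATE64_COUNT);
 */
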
zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */

/* Forward */

extern void		Thread_continue(void);
extern void		Load_context(
				thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif

/*
 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and writes.
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7d & mask1) == mask2)
				return (FALSE);

	/*
	 * If we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B".
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10;		/* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11);		/* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12);		/* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14);		/* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15);		/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */

	if (*dr7d & 0x2)
		return (FALSE);

	if (*dr7d & (0x2<<2))
		return (FALSE);

	if (*dr7d & (0x2<<4))
		return (FALSE);

	if (*dr7d & (0x2<<6))
		return (FALSE);

	return (TRUE);
}

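/*
 * Reading aid for the checks above (DR7 layout per the Intel SDM; this
 * comment is an editorial summary, not a definition from this file):
 * bits 0-7 hold the L0/G0..L3/G3 local/global enables, and bits 16-31
 * hold the R/W[i] and LEN[i] pairs for DR0-DR3.  Thus the '*dr7d & 0x2'
 * test above rejects G0, '0x2<<2' rejects G1, and so on.
 */
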
extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}

static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

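/*
 * Note on the pattern used by both setters above: the debug-state savearea
 * is zalloc()'d optimistically outside the pcb lock, installed under the
 * lock only if no other thread raced us in, and the loser's copy is freed.
 * This keeps the allocation out of the simple-locked critical section.
 */
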
static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) {
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
	}
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif

	kpc_off_cpu(old);

	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}

/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */

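#if 0	/* Illustrative sketch only, not part of this file: how a user-space
	 * caller might reach the generic flavor described above through the
	 * Mach thread_get_state() interface; the kernel fills in tsh.flavor
	 * and tsh.count according to the target task's bitness. */
#include <mach/mach.h>

static kern_return_t
example_get_thread_state(thread_act_t thread)
{
	x86_thread_state_t state;
	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

	return thread_get_state(thread, x86_THREAD_STATE,
				(thread_state_t)&state, &count);
}
#endif
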
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
					state->ss,
					state->ds,
					state->es,
					state->fs,
					state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t	*state;

		if (count != x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_FLOAT_STATE64);
		}
		if (state->ash.flavor == x86_FLOAT_STATE32 &&
		    state->ash.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);

	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		}
		else
			if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			    !thread_is_64bit(thr_act)) {
				ret = set_debug_state32(thr_act, &state->uds.ds32);
			}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}


/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{

	switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t	*state;
		kern_return_t	kret;

		if (*count < x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return(kret);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}

	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

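#if 0	/* Illustrative sketch only, not part of this file: a debugger-style
	 * user-space round trip through the get/set pair above, here setting
	 * EFLAGS.TF to single-step a 64-bit thread.  The __rflags field name
	 * follows the user-space thread_state headers and is an assumption
	 * of this example. */
#include <mach/mach.h>

static kern_return_t
example_single_step(thread_act_t thread)
{
	x86_thread_state64_t ts;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
	kern_return_t kr;

	kr = thread_get_state(thread, x86_THREAD_STATE64,
			      (thread_state_t)&ts, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	ts.__rflags |= 0x100;	/* EFLAGS.TF: trap after one instruction */

	return thread_set_state(thread, x86_THREAD_STATE64,
				(thread_state_t)&ts, x86_THREAD_STATE64_COUNT);
}
#endif
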
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}

void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	iss_zone = zinit(sizeof(x86_saved_state_t),
			thread_max * sizeof(x86_saved_state_t),
			THREAD_CHUNK * sizeof(x86_saved_state_t),
			"x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			thread_max * sizeof(x86_debug_state64_t),
			THREAD_CHUNK * sizeof(x86_debug_state64_t),
			"x86_64 debug state");

	fpu_module_init();
}

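/*
 * Reading aid for the zinit() calls above: zinit(size, max, alloc, name)
 * creates a zone of 'size'-byte elements capped at 'max' bytes total and
 * grown 'alloc' bytes at a time, so each zone here can hold one savearea
 * per thread up to thread_max.
 */
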
user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}

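/*
 * Note on the stack initialization above: k_rip/k_eip is pointed at
 * Thread_continue and k_rbx/k_ebx at the generic thread_continue(), so
 * the first switch onto this stack "returns" into Thread_continue, which
 * in turn invokes the thread's continuation.
 */
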
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t	stack;

	assert(new);
	assert(old);

	kpc_off_cpu(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);

	return;
}

struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};

void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}

void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}

void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}