apple/xnu.git: osfmk/i386/pcb.c (blob fe5d56b8e12d1f367c80ea76ad2744544e5635b7)
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_debug.h>
58 #include <mach_ldebug.h>
59
60 #include <sys/kdebug.h>
61
62 #include <mach/kern_return.h>
63 #include <mach/thread_status.h>
64 #include <mach/vm_param.h>
65
66 #include <kern/counters.h>
67 #include <kern/kalloc.h>
68 #include <kern/mach_param.h>
69 #include <kern/processor.h>
70 #include <kern/cpu_data.h>
71 #include <kern/cpu_number.h>
72 #include <kern/task.h>
73 #include <kern/thread.h>
74 #include <kern/sched_prim.h>
75 #include <kern/misc_protos.h>
76 #include <kern/assert.h>
77 #include <kern/spl.h>
78 #include <kern/machine.h>
79 #include <kern/kpc.h>
80 #include <ipc/ipc_port.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_map.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_protos.h>
85
86 #include <i386/cpu_data.h>
87 #include <i386/cpu_number.h>
88 #include <i386/eflags.h>
89 #include <i386/proc_reg.h>
90 #include <i386/fpu.h>
91 #include <i386/misc_protos.h>
92 #include <i386/mp_desc.h>
93 #include <i386/thread.h>
94 #include <i386/machine_routines.h>
95 #include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
96 #include <i386/seg.h>
97
98 #if HYPERVISOR
99 #include <kern/hv_support.h>
100 #endif
101
102 /*
103 * Maps state flavor to number of words in the state:
104 */
105 unsigned int _MachineStateCount[] = {
106 [x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
107 [x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
108 [x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
109 [x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
110 [x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
111 [x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
112 [x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
113 [x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
114 [x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
115 [x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
116 [x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
117 [x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
118 [x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
119 [x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
120 [x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
121 [x86_AVX_STATE] = x86_AVX_STATE_COUNT,
122 #if !defined(RC_HIDE_XNU_J137)
123 [x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
124 [x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
125 [x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
126 #endif /* not RC_HIDE_XNU_J137 */
127 };
128
129 zone_t iss_zone; /* zone for saved_state area */
130 zone_t ids_zone; /* zone for debug_state area */
131
132 extern int allow_64bit_proc_LDT_ops;
133
134 /* Forward */
135
136 extern void Thread_continue(void);
137 extern void Load_context(
138 thread_t thread) __attribute__((noreturn));
139
140 static void
141 get_exception_state32(thread_t thread, x86_exception_state32_t *es);
142
143 static void
144 get_exception_state64(thread_t thread, x86_exception_state64_t *es);
145
146 static void
147 get_thread_state32(thread_t thread, x86_thread_state32_t *ts);
148
149 static void
150 get_thread_state64(thread_t thread, void *ts, boolean_t full);
151
152 static int
153 set_thread_state32(thread_t thread, x86_thread_state32_t *ts);
154
155 static int
156 set_thread_state64(thread_t thread, void *ts, boolean_t full);
157
158 #if HYPERVISOR
159 static inline void
160 ml_hv_cswitch(thread_t old, thread_t new)
161 {
162 if (old->hv_thread_target) {
163 hv_callbacks.preempt(old->hv_thread_target);
164 }
165
166 if (new->hv_thread_target) {
167 hv_callbacks.dispatch(new->hv_thread_target);
168 }
169 }
170 #endif
171
172 /*
173 * Don't let an illegal value for the lower 32-bits of dr7 get set.
174 * Specifically, check for undefined settings. Setting these bit patterns
175 * results in undefined behaviour and can lead to an unexpected
176 * TRCTRAP.
177 */
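/*
 * For reference, the DR7 fields consulted below (low 32 bits): bits 0-7 are
 * the per-breakpoint local/global enables (L0/G0 .. L3/G3), bit 13 is GD,
 * and bits 16-31 hold the R/Wn condition and LENn length pairs.
 */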
178 static boolean_t
179 dr7d_is_valid(uint32_t *dr7d)
180 {
181 int i;
182 uint32_t mask1, mask2;
183
184 /*
185 * If the DE bit is set in CR4, R/W0-3 can be pattern
186 * "10B" to indicate i/o reads and writes
187 */
188 if (!(get_cr4() & CR4_DE)) {
189 for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
190 i++, mask1 <<= 4, mask2 <<= 4) {
191 if ((*dr7d & mask1) == mask2) {
192 return FALSE;
193 }
194 }
195 }
196
197 /*
198 * if we are doing an instruction execution break (indicated
199 * by r/w[x] being "00B"), then the len[x] must also be set
200 * to "00B"
201 */
202 for (i = 0; i < 4; i++) {
203 if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
204 ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
205 return FALSE;
206 }
207 }
208
209 /*
210 * Intel docs have these bits fixed.
211 */
212 *dr7d |= 0x1 << 10; /* set bit 10 to 1 */
213 *dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
214 *dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
215 *dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
216 *dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */
217
218 /*
219 * We don't allow anything to set the global breakpoints.
220 */
221
222 if (*dr7d & 0x2) {
223 return FALSE;
224 }
225
226 if (*dr7d & (0x2 << 2)) {
227 return FALSE;
228 }
229
230 if (*dr7d & (0x2 << 4)) {
231 return FALSE;
232 }
233
234 if (*dr7d & (0x2 << 6)) {
235 return FALSE;
236 }
237
238 return TRUE;
239 }
240
241 extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
242
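/*
 * Validate an incoming debug state (32-bit and 64-bit variants below):
 * the low word of DR7 must pass dr7d_is_valid(), and for 64-bit states
 * any enabled breakpoint address must lie below VM_MAX_PAGE_ADDRESS.
 */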
243 boolean_t
244 debug_state_is_valid32(x86_debug_state32_t *ds)
245 {
246 if (!dr7d_is_valid(&ds->dr7)) {
247 return FALSE;
248 }
249
250 return TRUE;
251 }
252
253 boolean_t
254 debug_state_is_valid64(x86_debug_state64_t *ds)
255 {
256 if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
257 return FALSE;
258 }
259
260 /*
261 * Don't allow the user to set debug addresses above their max
262 * value
263 */
264 if (ds->dr7 & 0x1) {
265 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
266 return FALSE;
267 }
268 }
269
270 if (ds->dr7 & (0x1 << 2)) {
271 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
272 return FALSE;
273 }
274 }
275
276 if (ds->dr7 & (0x1 << 4)) {
277 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
278 return FALSE;
279 }
280 }
281
282 if (ds->dr7 & (0x1 << 6)) {
283 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
284 return FALSE;
285 }
286 }
287
288 /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
289 ds->dr7 &= 0xffffffffULL;
290
291 return TRUE;
292 }
293
294
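/*
 * Install a validated debug state on a thread, lazily allocating the
 * per-thread ids save area (double-checked under the pcb lock so a
 * concurrent allocation is freed rather than leaked).
 */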
295 static kern_return_t
296 set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
297 {
298 x86_debug_state32_t *new_ids;
299 pcb_t pcb;
300
301 pcb = THREAD_TO_PCB(thread);
302
303 if (debug_state_is_valid32(ds) != TRUE) {
304 return KERN_INVALID_ARGUMENT;
305 }
306
307 if (pcb->ids == NULL) {
308 new_ids = zalloc(ids_zone);
309 bzero(new_ids, sizeof *new_ids);
310
311 simple_lock(&pcb->lock, LCK_GRP_NULL);
312 /* make sure it wasn't already alloc()'d elsewhere */
313 if (pcb->ids == NULL) {
314 pcb->ids = new_ids;
315 simple_unlock(&pcb->lock);
316 } else {
317 simple_unlock(&pcb->lock);
318 zfree(ids_zone, new_ids);
319 }
320 }
321
322
323 copy_debug_state32(ds, pcb->ids, FALSE);
324
325 return KERN_SUCCESS;
326 }
327
328 static kern_return_t
329 set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
330 {
331 x86_debug_state64_t *new_ids;
332 pcb_t pcb;
333
334 pcb = THREAD_TO_PCB(thread);
335
336 if (debug_state_is_valid64(ds) != TRUE) {
337 return KERN_INVALID_ARGUMENT;
338 }
339
340 if (pcb->ids == NULL) {
341 new_ids = zalloc(ids_zone);
342 bzero(new_ids, sizeof *new_ids);
343
344 #if HYPERVISOR
345 if (thread->hv_thread_target) {
346 hv_callbacks.volatile_state(thread->hv_thread_target,
347 HV_DEBUG_STATE);
348 }
349 #endif
350
351 simple_lock(&pcb->lock, LCK_GRP_NULL);
352 /* make sure it wasn't already alloc()'d elsewhere */
353 if (pcb->ids == NULL) {
354 pcb->ids = new_ids;
355 simple_unlock(&pcb->lock);
356 } else {
357 simple_unlock(&pcb->lock);
358 zfree(ids_zone, new_ids);
359 }
360 }
361
362 copy_debug_state64(ds, pcb->ids, FALSE);
363
364 return KERN_SUCCESS;
365 }
366
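/*
 * Copy a thread's debug state out to the caller; if no debug state has
 * been allocated, return zeroes.
 */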
367 static void
368 get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
369 {
370 x86_debug_state32_t *saved_state;
371
372 saved_state = thread->machine.ids;
373
374 if (saved_state) {
375 copy_debug_state32(saved_state, ds, TRUE);
376 } else {
377 bzero(ds, sizeof *ds);
378 }
379 }
380
381 static void
382 get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
383 {
384 x86_debug_state64_t *saved_state;
385
386 saved_state = (x86_debug_state64_t *)thread->machine.ids;
387
388 if (saved_state) {
389 copy_debug_state64(saved_state, ds, TRUE);
390 } else {
391 bzero(ds, sizeof *ds);
392 }
393 }
394
395 /*
396 * consider_machine_collect:
397 *
398 * Try to collect machine-dependent pages
399 */
400 void
401 consider_machine_collect(void)
402 {
403 }
404
405 void
406 consider_machine_adjust(void)
407 {
408 }
409
410 /*
411 * Switch to the first thread on a CPU.
412 */
413 void
414 machine_load_context(
415 thread_t new)
416 {
417 new->machine.specFlags |= OnProc;
418 act_machine_switch_pcb(NULL, new);
419 Load_context(new);
420 }
421
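/*
 * Switch this CPU to the incoming thread's address map when it differs
 * from the outgoing one, or when the new pmap has pagezero accessible
 * and therefore must be re-activated regardless.
 */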
422 static inline void
423 pmap_switch_context(thread_t ot, thread_t nt, int cnum)
424 {
425 pmap_assert(ml_get_interrupts_enabled() == FALSE);
426 vm_map_t nmap = nt->map, omap = ot->map;
427 if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
428 PMAP_DEACTIVATE_MAP(omap, ot, cnum);
429 PMAP_ACTIVATE_MAP(nmap, nt, cnum);
430 }
431 }
432
433 /*
434 * Switch to a new thread.
435 * Save the old thread's kernel state or continuation,
436 * and return it.
437 */
438 thread_t
439 machine_switch_context(
440 thread_t old,
441 thread_continue_t continuation,
442 thread_t new)
443 {
444 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
445
446 #if KPC
447 kpc_off_cpu(old);
448 #endif /* KPC */
449
450 /*
451 * Save FP registers if in use.
452 */
453 fpu_switch_context(old, new);
454
455 old->machine.specFlags &= ~OnProc;
456 new->machine.specFlags |= OnProc;
457
458 /*
459 * Monitor the stack depth and report new max,
460 * not worrying about races.
461 */
462 vm_offset_t depth = current_stack_depth();
463 if (depth > kernel_stack_depth_max) {
464 kernel_stack_depth_max = depth;
465 KERNEL_DEBUG_CONSTANT(
466 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
467 (long) depth, 0, 0, 0, 0);
468 }
469
470 /*
471 * Switch address maps if need be, even if not switching tasks.
472 * (A server activation may be "borrowing" a client map.)
473 */
474 pmap_switch_context(old, new, cpu_number());
475
476 /*
477 * Load the rest of the user state for the new thread
478 */
479 act_machine_switch_pcb(old, new);
480
481 #if HYPERVISOR
482 ml_hv_cswitch(old, new);
483 #endif
484
485 return Switch_context(old, continuation, new);
486 }
487
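/*
 * Quiesce this processor for shutdown: suspend VMX if configured, save the
 * FPU context, switch to the idle thread's address map, and enter the
 * shutdown context.
 */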
488 thread_t
489 machine_processor_shutdown(
490 thread_t thread,
491 void (*doshutdown)(processor_t),
492 processor_t processor)
493 {
494 #if CONFIG_VMX
495 vmx_suspend();
496 #endif
497 fpu_switch_context(thread, NULL);
498 pmap_switch_context(thread, processor->idle_thread, cpu_number());
499 return Shutdown_context(thread, doshutdown, processor);
500 }
501
502
503 /*
504 * This is where registers that are not normally specified by the mach-o
505 * file on an execve would be nullified, perhaps to avoid a covert channel.
506 */
507 kern_return_t
508 machine_thread_state_initialize(
509 thread_t thread)
510 {
511 /*
512 * If there's an fpu save area, free it.
513 * The initialized state will then be lazily faulted-in, if required.
514 * And if the target is the current thread, re-arm the no-fpu trap.
515 */
516 if (thread->machine.ifps) {
517 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
518
519 if (thread == current_thread()) {
520 clear_fpu();
521 }
522 }
523
524 if (thread->machine.ids) {
525 zfree(ids_zone, thread->machine.ids);
526 thread->machine.ids = NULL;
527 }
528
529 return KERN_SUCCESS;
530 }
531
532 uint32_t
533 get_eflags_exportmask(void)
534 {
535 return EFL_USER_SET;
536 }
537
538 /*
539 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
540 * for 32bit tasks only
541 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
542 * for 64bit tasks only
543 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
544 * for 32bit tasks only
545 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
546 * for 64bit tasks only
547 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
548 * for either 32bit or 64bit tasks
549 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
550 * for 32bit tasks only
551 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
552 * for 64bit tasks only
553 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
554 * for either 32bit or 64bit tasks
555 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
556 * for 32bit tasks only
557 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
558 * for 64bit tasks only
559 * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
560 * for either 32bit or 64bit tasks
561 */
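/*
 * Illustrative sketch only (not part of this file): a user-level caller
 * would typically ask for the generic flavor and let the kernel report
 * which variant it filled in, e.g.
 *
 *	x86_thread_state_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, x86_THREAD_STATE,
 *	    (thread_state_t)&ts, &count);
 *	// On success, ts.tsh.flavor is x86_THREAD_STATE32 or x86_THREAD_STATE64
 *	// and ts.uts holds the matching register set.
 */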
562
563
564 static void
565 get_exception_state64(thread_t thread, x86_exception_state64_t *es)
566 {
567 x86_saved_state64_t *saved_state;
568
569 saved_state = USER_REGS64(thread);
570
571 es->trapno = saved_state->isf.trapno;
572 es->cpu = saved_state->isf.cpu;
573 es->err = (typeof(es->err))saved_state->isf.err;
574 es->faultvaddr = saved_state->cr2;
575 }
576
577 static void
578 get_exception_state32(thread_t thread, x86_exception_state32_t *es)
579 {
580 x86_saved_state32_t *saved_state;
581
582 saved_state = USER_REGS32(thread);
583
584 es->trapno = saved_state->trapno;
585 es->cpu = saved_state->cpu;
586 es->err = saved_state->err;
587 es->faultvaddr = saved_state->cr2;
588 }
589
590
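/*
 * Install user-visible 32-bit register state: segment selectors are
 * scrubbed and validated, and eflags is sanitized, before being copied
 * into the thread's save area.
 */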
591 static int
592 set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
593 {
594 x86_saved_state32_t *saved_state;
595
596 pal_register_cache_state(thread, DIRTY);
597
598 saved_state = USER_REGS32(thread);
599
600 /*
601 * Scrub segment selector values:
602 */
603 ts->cs = USER_CS;
604 /*
605 * On a 64 bit kernel, we always override the data segments,
606 * as the actual selector numbers have changed. This also
607 * means that we don't support setting the data segments
608 * manually any more.
609 */
610 ts->ss = USER_DS;
611 ts->ds = USER_DS;
612 ts->es = USER_DS;
613
614 /* Set GS to CTHREAD only if it's been established */
615 ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;
616
617 /* Check segment selectors are safe */
618 if (!valid_user_segment_selectors(ts->cs,
619 ts->ss,
620 ts->ds,
621 ts->es,
622 ts->fs,
623 ts->gs)) {
624 return KERN_INVALID_ARGUMENT;
625 }
626
627 saved_state->eax = ts->eax;
628 saved_state->ebx = ts->ebx;
629 saved_state->ecx = ts->ecx;
630 saved_state->edx = ts->edx;
631 saved_state->edi = ts->edi;
632 saved_state->esi = ts->esi;
633 saved_state->ebp = ts->ebp;
634 saved_state->uesp = ts->esp;
635 saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
636 saved_state->eip = ts->eip;
637 saved_state->cs = ts->cs;
638 saved_state->ss = ts->ss;
639 saved_state->ds = ts->ds;
640 saved_state->es = ts->es;
641 saved_state->fs = ts->fs;
642 saved_state->gs = ts->gs;
643
644 /*
645 * If the trace trap bit is being set,
646 * ensure that the user returns via iret
647 * - which is signaled thusly:
648 */
649 if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
650 saved_state->cs = SYSENTER_TF_CS;
651 }
652
653 return KERN_SUCCESS;
654 }
655
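/*
 * Install user-visible 64-bit register state; 'full' selects the
 * x86_THREAD_FULL_STATE64 layout, which additionally carries cs/ss/ds/es
 * and a gsbase for threads using LDT-based code segments.
 */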
656 static int
657 set_thread_state64(thread_t thread, void *state, int full)
658 {
659 x86_thread_state64_t *ts;
660 x86_saved_state64_t *saved_state;
661
662 if (full == TRUE) {
663 ts = &((x86_thread_full_state64_t *)state)->ss64;
664 } else {
665 ts = (x86_thread_state64_t *)state;
666 }
667
668 pal_register_cache_state(thread, DIRTY);
669
670 saved_state = USER_REGS64(thread);
671
672 if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
673 !IS_USERADDR64_CANONICAL(ts->rip)) {
674 return KERN_INVALID_ARGUMENT;
675 }
676
677 saved_state->r8 = ts->r8;
678 saved_state->r9 = ts->r9;
679 saved_state->r10 = ts->r10;
680 saved_state->r11 = ts->r11;
681 saved_state->r12 = ts->r12;
682 saved_state->r13 = ts->r13;
683 saved_state->r14 = ts->r14;
684 saved_state->r15 = ts->r15;
685 saved_state->rax = ts->rax;
686 saved_state->rbx = ts->rbx;
687 saved_state->rcx = ts->rcx;
688 saved_state->rdx = ts->rdx;
689 saved_state->rdi = ts->rdi;
690 saved_state->rsi = ts->rsi;
691 saved_state->rbp = ts->rbp;
692 saved_state->isf.rsp = ts->rsp;
693 saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
694 saved_state->isf.rip = ts->rip;
695
696 if (full == FALSE) {
697 saved_state->isf.cs = USER64_CS;
698 } else {
699 saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
700 saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
701 saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
702 saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
703 machine_thread_set_tsd_base(thread,
704 ((x86_thread_full_state64_t *)ts)->gsbase);
705 }
706
707 saved_state->fs = (uint32_t)ts->fs;
708 saved_state->gs = (uint32_t)ts->gs;
709
710 return KERN_SUCCESS;
711 }
712
713
714
715 static void
716 get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
717 {
718 x86_saved_state32_t *saved_state;
719
720 pal_register_cache_state(thread, VALID);
721
722 saved_state = USER_REGS32(thread);
723
724 ts->eax = saved_state->eax;
725 ts->ebx = saved_state->ebx;
726 ts->ecx = saved_state->ecx;
727 ts->edx = saved_state->edx;
728 ts->edi = saved_state->edi;
729 ts->esi = saved_state->esi;
730 ts->ebp = saved_state->ebp;
731 ts->esp = saved_state->uesp;
732 ts->eflags = saved_state->efl;
733 ts->eip = saved_state->eip;
734 ts->cs = saved_state->cs;
735 ts->ss = saved_state->ss;
736 ts->ds = saved_state->ds;
737 ts->es = saved_state->es;
738 ts->fs = saved_state->fs;
739 ts->gs = saved_state->gs;
740 }
741
742
743 static void
744 get_thread_state64(thread_t thread, void *state, boolean_t full)
745 {
746 x86_thread_state64_t *ts;
747 x86_saved_state64_t *saved_state;
748
749 if (full == TRUE) {
750 ts = &((x86_thread_full_state64_t *)state)->ss64;
751 } else {
752 ts = (x86_thread_state64_t *)state;
753 }
754
755 pal_register_cache_state(thread, VALID);
756
757 saved_state = USER_REGS64(thread);
758
759 ts->r8 = saved_state->r8;
760 ts->r9 = saved_state->r9;
761 ts->r10 = saved_state->r10;
762 ts->r11 = saved_state->r11;
763 ts->r12 = saved_state->r12;
764 ts->r13 = saved_state->r13;
765 ts->r14 = saved_state->r14;
766 ts->r15 = saved_state->r15;
767 ts->rax = saved_state->rax;
768 ts->rbx = saved_state->rbx;
769 ts->rcx = saved_state->rcx;
770 ts->rdx = saved_state->rdx;
771 ts->rdi = saved_state->rdi;
772 ts->rsi = saved_state->rsi;
773 ts->rbp = saved_state->rbp;
774 ts->rsp = saved_state->isf.rsp;
775 ts->rflags = saved_state->isf.rflags;
776 ts->rip = saved_state->isf.rip;
777 ts->cs = saved_state->isf.cs;
778
779 if (full == TRUE) {
780 ((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
781 ((x86_thread_full_state64_t *)state)->es = saved_state->es;
782 ((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
783 ((x86_thread_full_state64_t *)state)->gsbase =
784 thread->machine.cthread_self;
785 }
786
787 ts->fs = saved_state->fs;
788 ts->gs = saved_state->gs;
789 }
790
791 kern_return_t
792 machine_thread_state_convert_to_user(
793 __unused thread_t thread,
794 __unused thread_flavor_t flavor,
795 __unused thread_state_t tstate,
796 __unused mach_msg_type_number_t *count)
797 {
798 // No conversion to userspace representation on this platform
799 return KERN_SUCCESS;
800 }
801
802 kern_return_t
803 machine_thread_state_convert_from_user(
804 __unused thread_t thread,
805 __unused thread_flavor_t flavor,
806 __unused thread_state_t tstate,
807 __unused mach_msg_type_number_t count)
808 {
809 // No conversion from userspace representation on this platform
810 return KERN_SUCCESS;
811 }
812
813 kern_return_t
814 machine_thread_siguctx_pointer_convert_to_user(
815 __unused thread_t thread,
816 __unused user_addr_t *uctxp)
817 {
818 // No conversion to userspace representation on this platform
819 return KERN_SUCCESS;
820 }
821
822 kern_return_t
823 machine_thread_function_pointers_convert_from_user(
824 __unused thread_t thread,
825 __unused user_addr_t *fptrs,
826 __unused uint32_t count)
827 {
828 // No conversion from userspace representation on this platform
829 return KERN_SUCCESS;
830 }
831
832 /*
833 * act_machine_set_state:
834 *
835 * Set the status of the specified thread.
836 */
837
838 kern_return_t
839 machine_thread_set_state(
840 thread_t thr_act,
841 thread_flavor_t flavor,
842 thread_state_t tstate,
843 mach_msg_type_number_t count)
844 {
845 switch (flavor) {
846 case x86_SAVED_STATE32:
847 {
848 x86_saved_state32_t *state;
849 x86_saved_state32_t *saved_state;
850
851 if (count < x86_SAVED_STATE32_COUNT) {
852 return KERN_INVALID_ARGUMENT;
853 }
854
855 state = (x86_saved_state32_t *) tstate;
856
857 /*
858 * Allow a thread in a 64-bit process to set
859 * 32-bit state iff the code segment originates
860 * in the LDT (the implication is that only
861 * 32-bit code segments are allowed there, so
862 * setting 32-bit state implies a switch to
863 * compatibility mode on resume-to-user).
864 */
865 if (thread_is_64bit_addr(thr_act) &&
866 thr_act->task->i386_ldt == 0) {
867 return KERN_INVALID_ARGUMENT;
868 }
869
870 /* Check segment selectors are safe */
871 if (!valid_user_segment_selectors(state->cs,
872 state->ss,
873 state->ds,
874 state->es,
875 state->fs,
876 state->gs)) {
877 return KERN_INVALID_ARGUMENT;
878 }
879
880 pal_register_cache_state(thr_act, DIRTY);
881
882 saved_state = USER_REGS32(thr_act);
883
884 /*
885 * General registers
886 */
887 saved_state->edi = state->edi;
888 saved_state->esi = state->esi;
889 saved_state->ebp = state->ebp;
890 saved_state->uesp = state->uesp;
891 saved_state->ebx = state->ebx;
892 saved_state->edx = state->edx;
893 saved_state->ecx = state->ecx;
894 saved_state->eax = state->eax;
895 saved_state->eip = state->eip;
896
897 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
898
899 /*
900 * If the trace trap bit is being set,
901 * ensure that the user returns via iret
902 * - which is signaled thusly:
903 */
904 if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
905 state->cs = SYSENTER_TF_CS;
906 }
907
908 /*
909 * User setting segment registers.
910 * Code and stack selectors have already been
911 * checked. Others will be reset by 'iret'
912 * if they are not valid.
913 */
914 saved_state->cs = state->cs;
915 saved_state->ss = state->ss;
916 saved_state->ds = state->ds;
917 saved_state->es = state->es;
918 saved_state->fs = state->fs;
919 saved_state->gs = state->gs;
920
921 break;
922 }
923
924 case x86_SAVED_STATE64:
925 {
926 x86_saved_state64_t *state;
927 x86_saved_state64_t *saved_state;
928
929 if (count < x86_SAVED_STATE64_COUNT) {
930 return KERN_INVALID_ARGUMENT;
931 }
932
933 if (!thread_is_64bit_addr(thr_act)) {
934 return KERN_INVALID_ARGUMENT;
935 }
936
937 state = (x86_saved_state64_t *) tstate;
938
939 /* Verify that the supplied code segment selector is
940 * valid. In 64-bit mode, the FS and GS segment overrides
941 * use the FS.base and GS.base MSRs to calculate
942 * base addresses, and the trampolines don't directly
943 * restore the segment registers--hence they are no
944 * longer relevant for validation.
945 */
946 if (!valid_user_code_selector(state->isf.cs)) {
947 return KERN_INVALID_ARGUMENT;
948 }
949
950 /* Check pc and stack are canonical addresses */
951 if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
952 !IS_USERADDR64_CANONICAL(state->isf.rip)) {
953 return KERN_INVALID_ARGUMENT;
954 }
955
956 pal_register_cache_state(thr_act, DIRTY);
957
958 saved_state = USER_REGS64(thr_act);
959
960 /*
961 * General registers
962 */
963 saved_state->r8 = state->r8;
964 saved_state->r9 = state->r9;
965 saved_state->r10 = state->r10;
966 saved_state->r11 = state->r11;
967 saved_state->r12 = state->r12;
968 saved_state->r13 = state->r13;
969 saved_state->r14 = state->r14;
970 saved_state->r15 = state->r15;
971 saved_state->rdi = state->rdi;
972 saved_state->rsi = state->rsi;
973 saved_state->rbp = state->rbp;
974 saved_state->rbx = state->rbx;
975 saved_state->rdx = state->rdx;
976 saved_state->rcx = state->rcx;
977 saved_state->rax = state->rax;
978 saved_state->isf.rsp = state->isf.rsp;
979 saved_state->isf.rip = state->isf.rip;
980
981 saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
982
983 /*
984 * User setting segment registers.
985 * Code and stack selectors have already been
986 * checked. Others will be reset by 'sys'
987 * if they are not valid.
988 */
989 saved_state->isf.cs = state->isf.cs;
990 saved_state->isf.ss = state->isf.ss;
991 saved_state->fs = state->fs;
992 saved_state->gs = state->gs;
993
994 break;
995 }
996
997 case x86_FLOAT_STATE32:
998 case x86_AVX_STATE32:
999 #if !defined(RC_HIDE_XNU_J137)
1000 case x86_AVX512_STATE32:
1001 #endif /* not RC_HIDE_XNU_J137 */
1002 {
1003 if (count != _MachineStateCount[flavor]) {
1004 return KERN_INVALID_ARGUMENT;
1005 }
1006
1007 if (thread_is_64bit_addr(thr_act)) {
1008 return KERN_INVALID_ARGUMENT;
1009 }
1010
1011 return fpu_set_fxstate(thr_act, tstate, flavor);
1012 }
1013
1014 case x86_FLOAT_STATE64:
1015 case x86_AVX_STATE64:
1016 #if !defined(RC_HIDE_XNU_J137)
1017 case x86_AVX512_STATE64:
1018 #endif /* not RC_HIDE_XNU_J137 */
1019 {
1020 if (count != _MachineStateCount[flavor]) {
1021 return KERN_INVALID_ARGUMENT;
1022 }
1023
1024 if (!thread_is_64bit_addr(thr_act)) {
1025 return KERN_INVALID_ARGUMENT;
1026 }
1027
1028 return fpu_set_fxstate(thr_act, tstate, flavor);
1029 }
1030
1031 case x86_FLOAT_STATE:
1032 {
1033 x86_float_state_t *state;
1034
1035 if (count != x86_FLOAT_STATE_COUNT) {
1036 return KERN_INVALID_ARGUMENT;
1037 }
1038
1039 state = (x86_float_state_t *)tstate;
1040 if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
1041 thread_is_64bit_addr(thr_act)) {
1042 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1043 }
1044 if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
1045 !thread_is_64bit_addr(thr_act)) {
1046 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1047 }
1048 return KERN_INVALID_ARGUMENT;
1049 }
1050
1051 case x86_AVX_STATE:
1052 #if !defined(RC_HIDE_XNU_J137)
1053 case x86_AVX512_STATE:
1054 #endif
1055 {
1056 x86_avx_state_t *state;
1057
1058 if (count != _MachineStateCount[flavor]) {
1059 return KERN_INVALID_ARGUMENT;
1060 }
1061
1062 state = (x86_avx_state_t *)tstate;
1063 /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
1064 /* 64-bit flavor? */
1065 if (state->ash.flavor == (flavor - 1) &&
1066 state->ash.count == _MachineStateCount[flavor - 1] &&
1067 thread_is_64bit_addr(thr_act)) {
1068 return fpu_set_fxstate(thr_act,
1069 (thread_state_t)&state->ufs.as64,
1070 flavor - 1);
1071 }
1072 /* 32-bit flavor? */
1073 if (state->ash.flavor == (flavor - 2) &&
1074 state->ash.count == _MachineStateCount[flavor - 2] &&
1075 !thread_is_64bit_addr(thr_act)) {
1076 return fpu_set_fxstate(thr_act,
1077 (thread_state_t)&state->ufs.as32,
1078 flavor - 2);
1079 }
1080 return KERN_INVALID_ARGUMENT;
1081 }
1082
1083 case x86_THREAD_STATE32:
1084 {
1085 if (count != x86_THREAD_STATE32_COUNT) {
1086 return KERN_INVALID_ARGUMENT;
1087 }
1088
1089 if (thread_is_64bit_addr(thr_act)) {
1090 return KERN_INVALID_ARGUMENT;
1091 }
1092
1093 return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1094 }
1095
1096 case x86_THREAD_STATE64:
1097 {
1098 if (count != x86_THREAD_STATE64_COUNT) {
1099 return KERN_INVALID_ARGUMENT;
1100 }
1101
1102 if (!thread_is_64bit_addr(thr_act)) {
1103 return KERN_INVALID_ARGUMENT;
1104 }
1105
1106 return set_thread_state64(thr_act, tstate, FALSE);
1107 }
1108
1109 case x86_THREAD_FULL_STATE64:
1110 {
1111 if (!allow_64bit_proc_LDT_ops) {
1112 return KERN_INVALID_ARGUMENT;
1113 }
1114
1115 if (count != x86_THREAD_FULL_STATE64_COUNT) {
1116 return KERN_INVALID_ARGUMENT;
1117 }
1118
1119 if (!thread_is_64bit_addr(thr_act)) {
1120 return KERN_INVALID_ARGUMENT;
1121 }
1122
1123 return set_thread_state64(thr_act, tstate, TRUE);
1124 }
1125
1126 case x86_THREAD_STATE:
1127 {
1128 x86_thread_state_t *state;
1129
1130 if (count != x86_THREAD_STATE_COUNT) {
1131 return KERN_INVALID_ARGUMENT;
1132 }
1133
1134 state = (x86_thread_state_t *)tstate;
1135
1136 if (state->tsh.flavor == x86_THREAD_STATE64 &&
1137 state->tsh.count == x86_THREAD_STATE64_COUNT &&
1138 thread_is_64bit_addr(thr_act)) {
1139 return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
1140 } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
1141 state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
1142 thread_is_64bit_addr(thr_act)) {
1143 return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
1144 } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
1145 state->tsh.count == x86_THREAD_STATE32_COUNT &&
1146 !thread_is_64bit_addr(thr_act)) {
1147 return set_thread_state32(thr_act, &state->uts.ts32);
1148 } else {
1149 return KERN_INVALID_ARGUMENT;
1150 }
1151 }
1152 case x86_DEBUG_STATE32:
1153 {
1154 x86_debug_state32_t *state;
1155 kern_return_t ret;
1156
1157 if (thread_is_64bit_addr(thr_act)) {
1158 return KERN_INVALID_ARGUMENT;
1159 }
1160
1161 state = (x86_debug_state32_t *)tstate;
1162
1163 ret = set_debug_state32(thr_act, state);
1164
1165 return ret;
1166 }
1167 case x86_DEBUG_STATE64:
1168 {
1169 x86_debug_state64_t *state;
1170 kern_return_t ret;
1171
1172 if (!thread_is_64bit_addr(thr_act)) {
1173 return KERN_INVALID_ARGUMENT;
1174 }
1175
1176 state = (x86_debug_state64_t *)tstate;
1177
1178 ret = set_debug_state64(thr_act, state);
1179
1180 return ret;
1181 }
1182 case x86_DEBUG_STATE:
1183 {
1184 x86_debug_state_t *state;
1185 kern_return_t ret = KERN_INVALID_ARGUMENT;
1186
1187 if (count != x86_DEBUG_STATE_COUNT) {
1188 return KERN_INVALID_ARGUMENT;
1189 }
1190
1191 state = (x86_debug_state_t *)tstate;
1192 if (state->dsh.flavor == x86_DEBUG_STATE64 &&
1193 state->dsh.count == x86_DEBUG_STATE64_COUNT &&
1194 thread_is_64bit_addr(thr_act)) {
1195 ret = set_debug_state64(thr_act, &state->uds.ds64);
1196 } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
1197 state->dsh.count == x86_DEBUG_STATE32_COUNT &&
1198 !thread_is_64bit_addr(thr_act)) {
1199 ret = set_debug_state32(thr_act, &state->uds.ds32);
1200 }
1201 return ret;
1202 }
1203 default:
1204 return KERN_INVALID_ARGUMENT;
1205 }
1206
1207 return KERN_SUCCESS;
1208 }
1209
1210
1211
1212 /*
1213 * thread_getstatus:
1214 *
1215 * Get the status of the specified thread.
1216 */
1217
1218 kern_return_t
1219 machine_thread_get_state(
1220 thread_t thr_act,
1221 thread_flavor_t flavor,
1222 thread_state_t tstate,
1223 mach_msg_type_number_t *count)
1224 {
1225 switch (flavor) {
1226 case THREAD_STATE_FLAVOR_LIST:
1227 {
1228 if (*count < 3) {
1229 return KERN_INVALID_ARGUMENT;
1230 }
1231
1232 tstate[0] = i386_THREAD_STATE;
1233 tstate[1] = i386_FLOAT_STATE;
1234 tstate[2] = i386_EXCEPTION_STATE;
1235
1236 *count = 3;
1237 break;
1238 }
1239
1240 case THREAD_STATE_FLAVOR_LIST_NEW:
1241 {
1242 if (*count < 4) {
1243 return KERN_INVALID_ARGUMENT;
1244 }
1245
1246 tstate[0] = x86_THREAD_STATE;
1247 tstate[1] = x86_FLOAT_STATE;
1248 tstate[2] = x86_EXCEPTION_STATE;
1249 tstate[3] = x86_DEBUG_STATE;
1250
1251 *count = 4;
1252 break;
1253 }
1254
1255 case THREAD_STATE_FLAVOR_LIST_10_9:
1256 {
1257 if (*count < 5) {
1258 return KERN_INVALID_ARGUMENT;
1259 }
1260
1261 tstate[0] = x86_THREAD_STATE;
1262 tstate[1] = x86_FLOAT_STATE;
1263 tstate[2] = x86_EXCEPTION_STATE;
1264 tstate[3] = x86_DEBUG_STATE;
1265 tstate[4] = x86_AVX_STATE;
1266
1267 *count = 5;
1268 break;
1269 }
1270
1271 #if !defined(RC_HIDE_XNU_J137)
1272 case THREAD_STATE_FLAVOR_LIST_10_13:
1273 {
1274 if (*count < 6) {
1275 return KERN_INVALID_ARGUMENT;
1276 }
1277
1278 tstate[0] = x86_THREAD_STATE;
1279 tstate[1] = x86_FLOAT_STATE;
1280 tstate[2] = x86_EXCEPTION_STATE;
1281 tstate[3] = x86_DEBUG_STATE;
1282 tstate[4] = x86_AVX_STATE;
1283 tstate[5] = x86_AVX512_STATE;
1284
1285 *count = 6;
1286 break;
1287 }
1288
1289 #endif
1290 case x86_SAVED_STATE32:
1291 {
1292 x86_saved_state32_t *state;
1293 x86_saved_state32_t *saved_state;
1294
1295 if (*count < x86_SAVED_STATE32_COUNT) {
1296 return KERN_INVALID_ARGUMENT;
1297 }
1298
1299 if (thread_is_64bit_addr(thr_act)) {
1300 return KERN_INVALID_ARGUMENT;
1301 }
1302
1303 state = (x86_saved_state32_t *) tstate;
1304 saved_state = USER_REGS32(thr_act);
1305
1306 /*
1307 * First, copy everything:
1308 */
1309 *state = *saved_state;
1310 state->ds = saved_state->ds & 0xffff;
1311 state->es = saved_state->es & 0xffff;
1312 state->fs = saved_state->fs & 0xffff;
1313 state->gs = saved_state->gs & 0xffff;
1314
1315 *count = x86_SAVED_STATE32_COUNT;
1316 break;
1317 }
1318
1319 case x86_SAVED_STATE64:
1320 {
1321 x86_saved_state64_t *state;
1322 x86_saved_state64_t *saved_state;
1323
1324 if (*count < x86_SAVED_STATE64_COUNT) {
1325 return KERN_INVALID_ARGUMENT;
1326 }
1327
1328 if (!thread_is_64bit_addr(thr_act)) {
1329 return KERN_INVALID_ARGUMENT;
1330 }
1331
1332 state = (x86_saved_state64_t *)tstate;
1333 saved_state = USER_REGS64(thr_act);
1334
1335 /*
1336 * First, copy everything:
1337 */
1338 *state = *saved_state;
1339 state->ds = saved_state->ds & 0xffff;
1340 state->es = saved_state->es & 0xffff;
1341 state->fs = saved_state->fs & 0xffff;
1342 state->gs = saved_state->gs & 0xffff;
1343
1344 *count = x86_SAVED_STATE64_COUNT;
1345 break;
1346 }
1347
1348 case x86_FLOAT_STATE32:
1349 {
1350 if (*count < x86_FLOAT_STATE32_COUNT) {
1351 return KERN_INVALID_ARGUMENT;
1352 }
1353
1354 if (thread_is_64bit_addr(thr_act)) {
1355 return KERN_INVALID_ARGUMENT;
1356 }
1357
1358 *count = x86_FLOAT_STATE32_COUNT;
1359
1360 return fpu_get_fxstate(thr_act, tstate, flavor);
1361 }
1362
1363 case x86_FLOAT_STATE64:
1364 {
1365 if (*count < x86_FLOAT_STATE64_COUNT) {
1366 return KERN_INVALID_ARGUMENT;
1367 }
1368
1369 if (!thread_is_64bit_addr(thr_act)) {
1370 return KERN_INVALID_ARGUMENT;
1371 }
1372
1373 *count = x86_FLOAT_STATE64_COUNT;
1374
1375 return fpu_get_fxstate(thr_act, tstate, flavor);
1376 }
1377
1378 case x86_FLOAT_STATE:
1379 {
1380 x86_float_state_t *state;
1381 kern_return_t kret;
1382
1383 if (*count < x86_FLOAT_STATE_COUNT) {
1384 return KERN_INVALID_ARGUMENT;
1385 }
1386
1387 state = (x86_float_state_t *)tstate;
1388
1389 /*
1390 * no need to bzero... currently
1391 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
1392 */
1393 if (thread_is_64bit_addr(thr_act)) {
1394 state->fsh.flavor = x86_FLOAT_STATE64;
1395 state->fsh.count = x86_FLOAT_STATE64_COUNT;
1396
1397 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1398 } else {
1399 state->fsh.flavor = x86_FLOAT_STATE32;
1400 state->fsh.count = x86_FLOAT_STATE32_COUNT;
1401
1402 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1403 }
1404 *count = x86_FLOAT_STATE_COUNT;
1405
1406 return kret;
1407 }
1408
1409 case x86_AVX_STATE32:
1410 #if !defined(RC_HIDE_XNU_J137)
1411 case x86_AVX512_STATE32:
1412 #endif
1413 {
1414 if (*count != _MachineStateCount[flavor]) {
1415 return KERN_INVALID_ARGUMENT;
1416 }
1417
1418 if (thread_is_64bit_addr(thr_act)) {
1419 return KERN_INVALID_ARGUMENT;
1420 }
1421
1422 *count = _MachineStateCount[flavor];
1423
1424 return fpu_get_fxstate(thr_act, tstate, flavor);
1425 }
1426
1427 case x86_AVX_STATE64:
1428 #if !defined(RC_HIDE_XNU_J137)
1429 case x86_AVX512_STATE64:
1430 #endif
1431 {
1432 if (*count != _MachineStateCount[flavor]) {
1433 return KERN_INVALID_ARGUMENT;
1434 }
1435
1436 if (!thread_is_64bit_addr(thr_act)) {
1437 return KERN_INVALID_ARGUMENT;
1438 }
1439
1440 *count = _MachineStateCount[flavor];
1441
1442 return fpu_get_fxstate(thr_act, tstate, flavor);
1443 }
1444
1445 case x86_AVX_STATE:
1446 #if !defined(RC_HIDE_XNU_J137)
1447 case x86_AVX512_STATE:
1448 #endif
1449 {
1450 x86_avx_state_t *state;
1451 thread_state_t fstate;
1452
1453 if (*count < _MachineStateCount[flavor]) {
1454 return KERN_INVALID_ARGUMENT;
1455 }
1456
1457 *count = _MachineStateCount[flavor];
1458 state = (x86_avx_state_t *)tstate;
1459
1460 bzero((char *)state, *count * sizeof(int));
1461
1462 if (thread_is_64bit_addr(thr_act)) {
1463 flavor -= 1; /* 64-bit flavor */
1464 fstate = (thread_state_t) &state->ufs.as64;
1465 } else {
1466 flavor -= 2; /* 32-bit flavor */
1467 fstate = (thread_state_t) &state->ufs.as32;
1468 }
1469 state->ash.flavor = flavor;
1470 state->ash.count = _MachineStateCount[flavor];
1471
1472 return fpu_get_fxstate(thr_act, fstate, flavor);
1473 }
1474
1475 case x86_THREAD_STATE32:
1476 {
1477 if (*count < x86_THREAD_STATE32_COUNT) {
1478 return KERN_INVALID_ARGUMENT;
1479 }
1480
1481 if (thread_is_64bit_addr(thr_act)) {
1482 return KERN_INVALID_ARGUMENT;
1483 }
1484
1485 *count = x86_THREAD_STATE32_COUNT;
1486
1487 get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1488 break;
1489 }
1490
1491 case x86_THREAD_STATE64:
1492 {
1493 if (*count < x86_THREAD_STATE64_COUNT) {
1494 return KERN_INVALID_ARGUMENT;
1495 }
1496
1497 if (!thread_is_64bit_addr(thr_act)) {
1498 return KERN_INVALID_ARGUMENT;
1499 }
1500
1501 *count = x86_THREAD_STATE64_COUNT;
1502
1503 get_thread_state64(thr_act, tstate, FALSE);
1504 break;
1505 }
1506
1507 case x86_THREAD_FULL_STATE64:
1508 {
1509 if (!allow_64bit_proc_LDT_ops) {
1510 return KERN_INVALID_ARGUMENT;
1511 }
1512
1513 if (*count < x86_THREAD_FULL_STATE64_COUNT) {
1514 return KERN_INVALID_ARGUMENT;
1515 }
1516
1517 if (!thread_is_64bit_addr(thr_act)) {
1518 return KERN_INVALID_ARGUMENT;
1519 }
1520
1521 *count = x86_THREAD_FULL_STATE64_COUNT;
1522
1523 get_thread_state64(thr_act, tstate, TRUE);
1524 break;
1525 }
1526
1527 case x86_THREAD_STATE:
1528 {
1529 x86_thread_state_t *state;
1530
1531 if (*count < x86_THREAD_STATE_COUNT) {
1532 return KERN_INVALID_ARGUMENT;
1533 }
1534
1535 state = (x86_thread_state_t *)tstate;
1536
1537 bzero((char *)state, sizeof(x86_thread_state_t));
1538
1539 if (thread_is_64bit_addr(thr_act)) {
1540 state->tsh.flavor = x86_THREAD_STATE64;
1541 state->tsh.count = x86_THREAD_STATE64_COUNT;
1542
1543 get_thread_state64(thr_act, &state->uts.ts64, FALSE);
1544 } else {
1545 state->tsh.flavor = x86_THREAD_STATE32;
1546 state->tsh.count = x86_THREAD_STATE32_COUNT;
1547
1548 get_thread_state32(thr_act, &state->uts.ts32);
1549 }
1550 *count = x86_THREAD_STATE_COUNT;
1551
1552 break;
1553 }
1554
1555
1556 case x86_EXCEPTION_STATE32:
1557 {
1558 if (*count < x86_EXCEPTION_STATE32_COUNT) {
1559 return KERN_INVALID_ARGUMENT;
1560 }
1561
1562 if (thread_is_64bit_addr(thr_act)) {
1563 return KERN_INVALID_ARGUMENT;
1564 }
1565
1566 *count = x86_EXCEPTION_STATE32_COUNT;
1567
1568 get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
1569 /*
1570 * Suppress the cpu number for binary compatibility
1571 * of this deprecated state.
1572 */
1573 ((x86_exception_state32_t *)tstate)->cpu = 0;
1574 break;
1575 }
1576
1577 case x86_EXCEPTION_STATE64:
1578 {
1579 if (*count < x86_EXCEPTION_STATE64_COUNT) {
1580 return KERN_INVALID_ARGUMENT;
1581 }
1582
1583 if (!thread_is_64bit_addr(thr_act)) {
1584 return KERN_INVALID_ARGUMENT;
1585 }
1586
1587 *count = x86_EXCEPTION_STATE64_COUNT;
1588
1589 get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
1590 /*
1591 * Suppress the cpu number for binary compatibility
1592 * of this deprecated state.
1593 */
1594 ((x86_exception_state64_t *)tstate)->cpu = 0;
1595 break;
1596 }
1597
1598 case x86_EXCEPTION_STATE:
1599 {
1600 x86_exception_state_t *state;
1601
1602 if (*count < x86_EXCEPTION_STATE_COUNT) {
1603 return KERN_INVALID_ARGUMENT;
1604 }
1605
1606 state = (x86_exception_state_t *)tstate;
1607
1608 bzero((char *)state, sizeof(x86_exception_state_t));
1609
1610 if (thread_is_64bit_addr(thr_act)) {
1611 state->esh.flavor = x86_EXCEPTION_STATE64;
1612 state->esh.count = x86_EXCEPTION_STATE64_COUNT;
1613
1614 get_exception_state64(thr_act, &state->ues.es64);
1615 } else {
1616 state->esh.flavor = x86_EXCEPTION_STATE32;
1617 state->esh.count = x86_EXCEPTION_STATE32_COUNT;
1618
1619 get_exception_state32(thr_act, &state->ues.es32);
1620 }
1621 *count = x86_EXCEPTION_STATE_COUNT;
1622
1623 break;
1624 }
1625 case x86_DEBUG_STATE32:
1626 {
1627 if (*count < x86_DEBUG_STATE32_COUNT) {
1628 return KERN_INVALID_ARGUMENT;
1629 }
1630
1631 if (thread_is_64bit_addr(thr_act)) {
1632 return KERN_INVALID_ARGUMENT;
1633 }
1634
1635 get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
1636
1637 *count = x86_DEBUG_STATE32_COUNT;
1638
1639 break;
1640 }
1641 case x86_DEBUG_STATE64:
1642 {
1643 if (*count < x86_DEBUG_STATE64_COUNT) {
1644 return KERN_INVALID_ARGUMENT;
1645 }
1646
1647 if (!thread_is_64bit_addr(thr_act)) {
1648 return KERN_INVALID_ARGUMENT;
1649 }
1650
1651 get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
1652
1653 *count = x86_DEBUG_STATE64_COUNT;
1654
1655 break;
1656 }
1657 case x86_DEBUG_STATE:
1658 {
1659 x86_debug_state_t *state;
1660
1661 if (*count < x86_DEBUG_STATE_COUNT) {
1662 return KERN_INVALID_ARGUMENT;
1663 }
1664
1665 state = (x86_debug_state_t *)tstate;
1666
1667 bzero(state, sizeof *state);
1668
1669 if (thread_is_64bit_addr(thr_act)) {
1670 state->dsh.flavor = x86_DEBUG_STATE64;
1671 state->dsh.count = x86_DEBUG_STATE64_COUNT;
1672
1673 get_debug_state64(thr_act, &state->uds.ds64);
1674 } else {
1675 state->dsh.flavor = x86_DEBUG_STATE32;
1676 state->dsh.count = x86_DEBUG_STATE32_COUNT;
1677
1678 get_debug_state32(thr_act, &state->uds.ds32);
1679 }
1680 *count = x86_DEBUG_STATE_COUNT;
1681 break;
1682 }
1683 default:
1684 return KERN_INVALID_ARGUMENT;
1685 }
1686
1687 return KERN_SUCCESS;
1688 }
1689
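/*
 * Return the register state of an interrupted kernel thread, taken from
 * this CPU's interrupt save area; only valid for the current thread.
 */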
1690 kern_return_t
1691 machine_thread_get_kern_state(
1692 thread_t thread,
1693 thread_flavor_t flavor,
1694 thread_state_t tstate,
1695 mach_msg_type_number_t *count)
1696 {
1697 x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;
1698
1699 /*
1700 * This works only for an interrupted kernel thread
1701 */
1702 if (thread != current_thread() || int_state == NULL) {
1703 return KERN_FAILURE;
1704 }
1705
1706 switch (flavor) {
1707 case x86_THREAD_STATE32: {
1708 x86_thread_state32_t *state;
1709 x86_saved_state32_t *saved_state;
1710
1711 if (!is_saved_state32(int_state) ||
1712 *count < x86_THREAD_STATE32_COUNT) {
1713 return KERN_INVALID_ARGUMENT;
1714 }
1715
1716 state = (x86_thread_state32_t *) tstate;
1717
1718 saved_state = saved_state32(int_state);
1719 /*
1720 * General registers.
1721 */
1722 state->eax = saved_state->eax;
1723 state->ebx = saved_state->ebx;
1724 state->ecx = saved_state->ecx;
1725 state->edx = saved_state->edx;
1726 state->edi = saved_state->edi;
1727 state->esi = saved_state->esi;
1728 state->ebp = saved_state->ebp;
1729 state->esp = saved_state->uesp;
1730 state->eflags = saved_state->efl;
1731 state->eip = saved_state->eip;
1732 state->cs = saved_state->cs;
1733 state->ss = saved_state->ss;
1734 state->ds = saved_state->ds & 0xffff;
1735 state->es = saved_state->es & 0xffff;
1736 state->fs = saved_state->fs & 0xffff;
1737 state->gs = saved_state->gs & 0xffff;
1738
1739 *count = x86_THREAD_STATE32_COUNT;
1740
1741 return KERN_SUCCESS;
1742 }
1743
1744 case x86_THREAD_STATE64: {
1745 x86_thread_state64_t *state;
1746 x86_saved_state64_t *saved_state;
1747
1748 if (!is_saved_state64(int_state) ||
1749 *count < x86_THREAD_STATE64_COUNT) {
1750 return KERN_INVALID_ARGUMENT;
1751 }
1752
1753 state = (x86_thread_state64_t *) tstate;
1754
1755 saved_state = saved_state64(int_state);
1756 /*
1757 * General registers.
1758 */
1759 state->rax = saved_state->rax;
1760 state->rbx = saved_state->rbx;
1761 state->rcx = saved_state->rcx;
1762 state->rdx = saved_state->rdx;
1763 state->rdi = saved_state->rdi;
1764 state->rsi = saved_state->rsi;
1765 state->rbp = saved_state->rbp;
1766 state->rsp = saved_state->isf.rsp;
1767 state->r8 = saved_state->r8;
1768 state->r9 = saved_state->r9;
1769 state->r10 = saved_state->r10;
1770 state->r11 = saved_state->r11;
1771 state->r12 = saved_state->r12;
1772 state->r13 = saved_state->r13;
1773 state->r14 = saved_state->r14;
1774 state->r15 = saved_state->r15;
1775
1776 state->rip = saved_state->isf.rip;
1777 state->rflags = saved_state->isf.rflags;
1778 state->cs = saved_state->isf.cs;
1779 state->fs = saved_state->fs & 0xffff;
1780 state->gs = saved_state->gs & 0xffff;
1781 *count = x86_THREAD_STATE64_COUNT;
1782
1783 return KERN_SUCCESS;
1784 }
1785
1786 case x86_THREAD_STATE: {
1787 x86_thread_state_t *state = NULL;
1788
1789 if (*count < x86_THREAD_STATE_COUNT) {
1790 return KERN_INVALID_ARGUMENT;
1791 }
1792
1793 state = (x86_thread_state_t *) tstate;
1794
1795 if (is_saved_state32(int_state)) {
1796 x86_saved_state32_t *saved_state = saved_state32(int_state);
1797
1798 state->tsh.flavor = x86_THREAD_STATE32;
1799 state->tsh.count = x86_THREAD_STATE32_COUNT;
1800
1801 /*
1802 * General registers.
1803 */
1804 state->uts.ts32.eax = saved_state->eax;
1805 state->uts.ts32.ebx = saved_state->ebx;
1806 state->uts.ts32.ecx = saved_state->ecx;
1807 state->uts.ts32.edx = saved_state->edx;
1808 state->uts.ts32.edi = saved_state->edi;
1809 state->uts.ts32.esi = saved_state->esi;
1810 state->uts.ts32.ebp = saved_state->ebp;
1811 state->uts.ts32.esp = saved_state->uesp;
1812 state->uts.ts32.eflags = saved_state->efl;
1813 state->uts.ts32.eip = saved_state->eip;
1814 state->uts.ts32.cs = saved_state->cs;
1815 state->uts.ts32.ss = saved_state->ss;
1816 state->uts.ts32.ds = saved_state->ds & 0xffff;
1817 state->uts.ts32.es = saved_state->es & 0xffff;
1818 state->uts.ts32.fs = saved_state->fs & 0xffff;
1819 state->uts.ts32.gs = saved_state->gs & 0xffff;
1820 } else if (is_saved_state64(int_state)) {
1821 x86_saved_state64_t *saved_state = saved_state64(int_state);
1822
1823 state->tsh.flavor = x86_THREAD_STATE64;
1824 state->tsh.count = x86_THREAD_STATE64_COUNT;
1825
1826 /*
1827 * General registers.
1828 */
1829 state->uts.ts64.rax = saved_state->rax;
1830 state->uts.ts64.rbx = saved_state->rbx;
1831 state->uts.ts64.rcx = saved_state->rcx;
1832 state->uts.ts64.rdx = saved_state->rdx;
1833 state->uts.ts64.rdi = saved_state->rdi;
1834 state->uts.ts64.rsi = saved_state->rsi;
1835 state->uts.ts64.rbp = saved_state->rbp;
1836 state->uts.ts64.rsp = saved_state->isf.rsp;
1837 state->uts.ts64.r8 = saved_state->r8;
1838 state->uts.ts64.r9 = saved_state->r9;
1839 state->uts.ts64.r10 = saved_state->r10;
1840 state->uts.ts64.r11 = saved_state->r11;
1841 state->uts.ts64.r12 = saved_state->r12;
1842 state->uts.ts64.r13 = saved_state->r13;
1843 state->uts.ts64.r14 = saved_state->r14;
1844 state->uts.ts64.r15 = saved_state->r15;
1845
1846 state->uts.ts64.rip = saved_state->isf.rip;
1847 state->uts.ts64.rflags = saved_state->isf.rflags;
1848 state->uts.ts64.cs = saved_state->isf.cs;
1849 state->uts.ts64.fs = saved_state->fs & 0xffff;
1850 state->uts.ts64.gs = saved_state->gs & 0xffff;
1851 } else {
1852 panic("unknown thread state");
1853 }
1854
1855 *count = x86_THREAD_STATE_COUNT;
1856 return KERN_SUCCESS;
1857 }
1858 }
1859 return KERN_FAILURE;
1860 }
1861
1862
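/*
 * Switch a thread between 32-bit and 64-bit address modes: recreate its
 * state save areas, adjust the FPU save area, and reload the PCB if the
 * thread being switched is the current one.
 */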
1863 void
1864 machine_thread_switch_addrmode(thread_t thread)
1865 {
1866 /*
1867 * We don't want to be preempted until we're done
1868 * - particularly if we're switching the current thread
1869 */
1870 disable_preemption();
1871
1872 /*
1873 * Reset the state saveareas. As we're resetting, we anticipate no
1874 * Reset the state save areas. As we're resetting, we anticipate no
1875 */
1876 machine_thread_create(thread, thread->task);
1877
1878 /* Adjust FPU state */
1879 fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));
1880
1881 /* If we're switching ourselves, reset the pcb addresses etc. */
1882 if (thread == current_thread()) {
1883 boolean_t istate = ml_set_interrupts_enabled(FALSE);
1884 act_machine_switch_pcb(NULL, thread);
1885 ml_set_interrupts_enabled(istate);
1886 }
1887 enable_preemption();
1888 }
1889
1890
1891
1892 /*
1893 * This is used to set the current thr_act/thread
1894 * when starting up a new processor
1895 */
1896 void
1897 machine_set_current_thread(thread_t thread)
1898 {
1899 current_cpu_datap()->cpu_active_thread = thread;
1900 }
1901
1902
1903 /*
1904 * Perform machine-dependent per-thread initializations
1905 */
1906 void
1907 machine_thread_init(void)
1908 {
1909 iss_zone = zinit(sizeof(x86_saved_state_t),
1910 thread_max * sizeof(x86_saved_state_t),
1911 THREAD_CHUNK * sizeof(x86_saved_state_t),
1912 "x86_64 saved state");
1913
1914 ids_zone = zinit(sizeof(x86_debug_state64_t),
1915 thread_max * sizeof(x86_debug_state64_t),
1916 THREAD_CHUNK * sizeof(x86_debug_state64_t),
1917 "x86_64 debug state");
1918
1919 fpu_module_init();
1920 }
1921
1922
1923
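/*
 * Return the user-mode instruction pointer of the current thread.
 */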
1924 user_addr_t
1925 get_useraddr(void)
1926 {
1927 thread_t thr_act = current_thread();
1928
1929 if (thread_is_64bit_addr(thr_act)) {
1930 x86_saved_state64_t *iss64;
1931
1932 iss64 = USER_REGS64(thr_act);
1933
1934 return iss64->isf.rip;
1935 } else {
1936 x86_saved_state32_t *iss32;
1937
1938 iss32 = USER_REGS32(thr_act);
1939
1940 return iss32->eip;
1941 }
1942 }
1943
1944 /*
1945 * detach and return a kernel stack from a thread
1946 */
1947
1948 vm_offset_t
1949 machine_stack_detach(thread_t thread)
1950 {
1951 vm_offset_t stack;
1952
1953 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
1954 (uintptr_t)thread_tid(thread), thread->priority,
1955 thread->sched_pri, 0,
1956 0);
1957
1958 stack = thread->kernel_stack;
1959 thread->kernel_stack = 0;
1960
1961 return stack;
1962 }
1963
1964 /*
1965 * attach a kernel stack to a thread and initialize it
1966 */
1967
1968 void
1969 machine_stack_attach(
1970 thread_t thread,
1971 vm_offset_t stack)
1972 {
1973 struct x86_kernel_state *statep;
1974
1975 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
1976 (uintptr_t)thread_tid(thread), thread->priority,
1977 thread->sched_pri, 0, 0);
1978
1979 assert(stack);
1980 thread->kernel_stack = stack;
1981 thread_initialize_kernel_state(thread);
1982
1983 statep = STACK_IKS(stack);
1984 #if defined(__x86_64__)
1985 statep->k_rip = (unsigned long) Thread_continue;
1986 statep->k_rbx = (unsigned long) thread_continue;
1987 statep->k_rsp = (unsigned long) STACK_IKS(stack);
1988 #else
1989 statep->k_eip = (unsigned long) Thread_continue;
1990 statep->k_ebx = (unsigned long) thread_continue;
1991 statep->k_esp = (unsigned long) STACK_IKS(stack);
1992 #endif
1993
1994 return;
1995 }
1996
1997 /*
1998 * move a stack from old to new thread
1999 */
2000
2001 void
2002 machine_stack_handoff(thread_t old,
2003 thread_t new)
2004 {
2005 vm_offset_t stack;
2006
2007 assert(new);
2008 assert(old);
2009
2010 kpc_off_cpu(old);
2011
2012 stack = old->kernel_stack;
2013 if (stack == old->reserved_stack) {
2014 assert(new->reserved_stack);
2015 old->reserved_stack = new->reserved_stack;
2016 new->reserved_stack = stack;
2017 }
2018 old->kernel_stack = 0;
2019 /*
2020 * A full call to machine_stack_attach() is unnecessary
2021 * because the old stack is already initialized.
2022 */
2023 new->kernel_stack = stack;
2024
2025 fpu_switch_context(old, new);
2026
2027 old->machine.specFlags &= ~OnProc;
2028 new->machine.specFlags |= OnProc;
2029
2030 pmap_switch_context(old, new, cpu_number());
2031 act_machine_switch_pcb(old, new);
2032
2033 #if HYPERVISOR
2034 ml_hv_cswitch(old, new);
2035 #endif
2036
2037 machine_set_current_thread(new);
2038 thread_initialize_kernel_state(new);
2039
2040 return;
2041 }
2042
2043
2044
2045
2046 struct x86_act_context32 {
2047 x86_saved_state32_t ss;
2048 x86_float_state32_t fs;
2049 x86_debug_state32_t ds;
2050 };
2051
2052 struct x86_act_context64 {
2053 x86_saved_state64_t ss;
2054 x86_float_state64_t fs;
2055 x86_debug_state64_t ds;
2056 };
2057
2058
2059
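/*
 * Snapshot the current thread's saved general, float and debug state into
 * a kalloc'd context; act_thread_catt() restores it and frees the context.
 */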
2060 void *
2061 act_thread_csave(void)
2062 {
2063 kern_return_t kret;
2064 mach_msg_type_number_t val;
2065 thread_t thr_act = current_thread();
2066
2067 if (thread_is_64bit_addr(thr_act)) {
2068 struct x86_act_context64 *ic64;
2069
2070 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
2071
2072 if (ic64 == (struct x86_act_context64 *)NULL) {
2073 return (void *)0;
2074 }
2075
2076 val = x86_SAVED_STATE64_COUNT;
2077 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
2078 (thread_state_t) &ic64->ss, &val);
2079 if (kret != KERN_SUCCESS) {
2080 kfree(ic64, sizeof(struct x86_act_context64));
2081 return (void *)0;
2082 }
2083 val = x86_FLOAT_STATE64_COUNT;
2084 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
2085 (thread_state_t) &ic64->fs, &val);
2086 if (kret != KERN_SUCCESS) {
2087 kfree(ic64, sizeof(struct x86_act_context64));
2088 return (void *)0;
2089 }
2090
2091 val = x86_DEBUG_STATE64_COUNT;
2092 kret = machine_thread_get_state(thr_act,
2093 x86_DEBUG_STATE64,
2094 (thread_state_t)&ic64->ds,
2095 &val);
2096 if (kret != KERN_SUCCESS) {
2097 kfree(ic64, sizeof(struct x86_act_context64));
2098 return (void *)0;
2099 }
2100 return ic64;
2101 } else {
2102 struct x86_act_context32 *ic32;
2103
2104 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
2105
2106 if (ic32 == (struct x86_act_context32 *)NULL) {
2107 return (void *)0;
2108 }
2109
2110 val = x86_SAVED_STATE32_COUNT;
2111 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
2112 (thread_state_t) &ic32->ss, &val);
2113 if (kret != KERN_SUCCESS) {
2114 kfree(ic32, sizeof(struct x86_act_context32));
2115 return (void *)0;
2116 }
2117 val = x86_FLOAT_STATE32_COUNT;
2118 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
2119 (thread_state_t) &ic32->fs, &val);
2120 if (kret != KERN_SUCCESS) {
2121 kfree(ic32, sizeof(struct x86_act_context32));
2122 return (void *)0;
2123 }
2124
2125 val = x86_DEBUG_STATE32_COUNT;
2126 kret = machine_thread_get_state(thr_act,
2127 x86_DEBUG_STATE32,
2128 (thread_state_t)&ic32->ds,
2129 &val);
2130 if (kret != KERN_SUCCESS) {
2131 kfree(ic32, sizeof(struct x86_act_context32));
2132 return (void *)0;
2133 }
2134 return ic32;
2135 }
2136 }
2137
2138
2139 void
2140 act_thread_catt(void *ctx)
2141 {
2142 thread_t thr_act = current_thread();
2143 kern_return_t kret;
2144
2145 if (ctx == (void *)NULL) {
2146 return;
2147 }
2148
2149 if (thread_is_64bit_addr(thr_act)) {
2150 struct x86_act_context64 *ic64;
2151
2152 ic64 = (struct x86_act_context64 *)ctx;
2153
2154 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
2155 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
2156 if (kret == KERN_SUCCESS) {
2157 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
2158 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
2159 }
2160 kfree(ic64, sizeof(struct x86_act_context64));
2161 } else {
2162 struct x86_act_context32 *ic32;
2163
2164 ic32 = (struct x86_act_context32 *)ctx;
2165
2166 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
2167 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
2168 if (kret == KERN_SUCCESS) {
2169 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
2170 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
2171 }
2172 kfree(ic32, sizeof(struct x86_act_context32));
2173 }
2174 }
2175
2176
2177 void
2178 act_thread_cfree(__unused void *ctx)
2179 {
2180 /* XXX - Unused */
2181 }
2182
2183 /*
2184 * Duplicate one x86_debug_state32_t to another. "all" parameter
2185 * chooses whether dr4 and dr5 are copied (they are never meant
2186 * to be installed when we do machine_task_set_state() or
2187 * machine_thread_set_state()).
2188 */
2189 void
2190 copy_debug_state32(
2191 x86_debug_state32_t *src,
2192 x86_debug_state32_t *target,
2193 boolean_t all)
2194 {
2195 if (all) {
2196 target->dr4 = src->dr4;
2197 target->dr5 = src->dr5;
2198 }
2199
2200 target->dr0 = src->dr0;
2201 target->dr1 = src->dr1;
2202 target->dr2 = src->dr2;
2203 target->dr3 = src->dr3;
2204 target->dr6 = src->dr6;
2205 target->dr7 = src->dr7;
2206 }
2207
2208 /*
2209 * Duplicate one x86_debug_state64_t to another. "all" parameter
2210 * chooses whether dr4 and dr5 are copied (they are never meant
2211 * to be installed when we do machine_task_set_state() or
2212 * machine_thread_set_state()).
2213 */
2214 void
2215 copy_debug_state64(
2216 x86_debug_state64_t *src,
2217 x86_debug_state64_t *target,
2218 boolean_t all)
2219 {
2220 if (all) {
2221 target->dr4 = src->dr4;
2222 target->dr5 = src->dr5;
2223 }
2224
2225 target->dr0 = src->dr0;
2226 target->dr1 = src->dr1;
2227 target->dr2 = src->dr2;
2228 target->dr3 = src->dr3;
2229 target->dr6 = src->dr6;
2230 target->dr7 = src->dr7;
2231 }