1/*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57#include <mach_debug.h>
58#include <mach_ldebug.h>
59
60#include <sys/kdebug.h>
61
62#include <mach/kern_return.h>
63#include <mach/thread_status.h>
64#include <mach/vm_param.h>
65
66#include <kern/kalloc.h>
67#include <kern/mach_param.h>
68#include <kern/processor.h>
69#include <kern/cpu_data.h>
70#include <kern/cpu_number.h>
71#include <kern/task.h>
72#include <kern/thread.h>
73#include <kern/sched_prim.h>
74#include <kern/misc_protos.h>
75#include <kern/assert.h>
76#include <kern/spl.h>
77#include <kern/machine.h>
78#include <kern/kpc.h>
79#include <ipc/ipc_port.h>
80#include <vm/vm_kern.h>
81#include <vm/vm_map.h>
82#include <vm/pmap.h>
83#include <vm/vm_protos.h>
84
85#include <i386/cpu_data.h>
86#include <i386/cpu_number.h>
87#include <i386/eflags.h>
88#include <i386/proc_reg.h>
89#include <i386/fpu.h>
90#include <i386/misc_protos.h>
91#include <i386/mp_desc.h>
92#include <i386/thread.h>
93#include <i386/machine_routines.h>
94#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
95#include <i386/seg.h>
96
97#if HYPERVISOR
98#include <kern/hv_support.h>
99#endif
100
101/*
102 * Maps state flavor to number of words in the state:
103 */
104unsigned int _MachineStateCount[] = {
105 [x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
106 [x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
107 [x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
108 [x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
109 [x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
110 [x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
111 [x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
112 [x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
113 [x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
114 [x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
115 [x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
116 [x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
117 [x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
118 [x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
119 [x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
120 [x86_AVX_STATE] = x86_AVX_STATE_COUNT,
121 [x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
122 [x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
123 [x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
124 [x86_PAGEIN_STATE] = x86_PAGEIN_STATE_COUNT
125};
126
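/*
 * Illustrative sketch (not part of the build): assuming the standard Mach
 * definition of the *_COUNT constants (sizeof(state struct) / sizeof(int)),
 * the table above expresses each state size in 32-bit words, which is how
 * callers of thread_get_state()/thread_set_state() size their buffers.
 */
#if 0 /* example only */
static void
example_state_buffer_size(void)
{
	/* A buffer of x86_THREAD_STATE64_COUNT words holds one x86_thread_state64_t. */
	natural_t buffer[x86_THREAD_STATE64_COUNT];
	_Static_assert(sizeof(buffer) == sizeof(x86_thread_state64_t),
	    "thread state counts are expressed in 32-bit words");
	(void)buffer;
}
#endif
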
127ZONE_DECLARE(iss_zone, "x86_64 saved state",
128 sizeof(x86_saved_state_t), ZC_NONE);
129
130ZONE_DECLARE(ids_zone, "x86_64 debug state",
131 sizeof(x86_debug_state64_t), ZC_NONE);
132
133/* Forward */
134
135extern void Thread_continue(void);
136extern void Load_context(
137 thread_t thread) __attribute__((noreturn));
138
139static void
140get_exception_state32(thread_t thread, x86_exception_state32_t *es);
141
142static void
143get_exception_state64(thread_t thread, x86_exception_state64_t *es);
144
145static void
146get_thread_state32(thread_t thread, x86_thread_state32_t *ts);
147
148static void
149get_thread_state64(thread_t thread, void *ts, boolean_t full);
150
151static int
152set_thread_state32(thread_t thread, x86_thread_state32_t *ts);
153
154static int
155set_thread_state64(thread_t thread, void *ts, boolean_t full);
156
157#if HYPERVISOR
158static inline void
159ml_hv_cswitch(thread_t old, thread_t new)
160{
161 if (old->hv_thread_target) {
162 hv_callbacks.preempt(old->hv_thread_target);
163 }
164
165 if (new->hv_thread_target) {
166 hv_callbacks.dispatch(new->hv_thread_target);
167 }
168}
169#endif
170
171/*
 172 * Don't let an illegal value for the lower 32 bits of dr7 get set.
 173 * Specifically, check for undefined settings. Setting these bit patterns
 174 * results in undefined behaviour and can lead to an unexpected
 175 * TRCTRAP.
176 */
177static boolean_t
178dr7d_is_valid(uint32_t *dr7d)
179{
180 int i;
181 uint32_t mask1, mask2;
182
183 /*
 184 * If the DE bit is set in CR4, R/W0-3 can be the pattern
 185 * "10B" to indicate i/o reads and writes
186 */
187 if (!(get_cr4() & CR4_DE)) {
188 for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
189 i++, mask1 <<= 4, mask2 <<= 4) {
190 if ((*dr7d & mask1) == mask2) {
191 return FALSE;
192 }
193 }
194 }
195
196 /*
 197 * If we are doing an instruction-execution break (indicated
 198 * by r/w[x] being "00B"), then len[x] must also be set
 199 * to "00B".
200 */
201 for (i = 0; i < 4; i++) {
202 if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
203 ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
204 return FALSE;
205 }
206 }
207
208 /*
209 * Intel docs have these bits fixed.
210 */
211 *dr7d |= 0x1 << 10; /* set bit 10 to 1 */
212 *dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
213 *dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
214 *dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
215 *dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */
216
217 /*
218 * We don't allow anything to set the global breakpoints.
219 */
220
221 if (*dr7d & 0x2) {
222 return FALSE;
223 }
224
225 if (*dr7d & (0x2 << 2)) {
226 return FALSE;
227 }
228
229 if (*dr7d & (0x2 << 4)) {
230 return FALSE;
231 }
232
233 if (*dr7d & (0x2 << 6)) {
234 return FALSE;
235 }
236
237 return TRUE;
238}
239
240extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
241
242boolean_t
243debug_state_is_valid32(x86_debug_state32_t *ds)
244{
245 if (!dr7d_is_valid(&ds->dr7)) {
246 return FALSE;
247 }
248
249 return TRUE;
250}
251
252boolean_t
253debug_state_is_valid64(x86_debug_state64_t *ds)
254{
255 if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
256 return FALSE;
257 }
258
259 /*
260 * Don't allow the user to set debug addresses above their max
261 * value
262 */
263 if (ds->dr7 & 0x1) {
264 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
265 return FALSE;
266 }
267 }
268
269 if (ds->dr7 & (0x1 << 2)) {
270 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
271 return FALSE;
272 }
273 }
274
275 if (ds->dr7 & (0x1 << 4)) {
276 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
277 return FALSE;
278 }
279 }
280
281 if (ds->dr7 & (0x1 << 6)) {
282 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
283 return FALSE;
284 }
285 }
286
 287 /* For x86-64, we must ensure the upper 32 bits of DR7 are clear */
288 ds->dr7 &= 0xffffffffULL;
289
290 return TRUE;
291}
292
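/*
 * Illustrative sketch (not part of the build): one debug state that the
 * validation above accepts - a local, 4-byte write watchpoint in DR0.
 * The DR7 encoding (L0 in bit 0, reserved bit 10 set, R/W0 in bits 16-17,
 * LEN0 in bits 18-19) follows the Intel SDM; the address is a placeholder.
 */
#if 0 /* example only */
static void
example_write_watchpoint(x86_debug_state64_t *ds)
{
	bzero(ds, sizeof(*ds));
	ds->dr0 = 0x7fff00001000ULL;   /* hypothetical user address, below VM_MAX_PAGE_ADDRESS */
	ds->dr7 = (1ULL << 0)          /* L0: locally enable DR0 */
	    | (1ULL << 10)             /* reserved, must read as 1 */
	    | (0x1ULL << 16)           /* R/W0 = 01B: break on data writes */
	    | (0x3ULL << 18);          /* LEN0 = 11B: 4-byte wide */
	assert(debug_state_is_valid64(ds));
}
#endif
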
293
294static kern_return_t
295set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
296{
297 x86_debug_state32_t *new_ids;
298 pcb_t pcb;
299
300 pcb = THREAD_TO_PCB(thread);
301
302 if (debug_state_is_valid32(ds) != TRUE) {
303 return KERN_INVALID_ARGUMENT;
304 }
305
306 if (pcb->ids == NULL) {
307 new_ids = zalloc(ids_zone);
308 bzero(new_ids, sizeof *new_ids);
309
310 simple_lock(&pcb->lock, LCK_GRP_NULL);
311 /* make sure it wasn't already alloc()'d elsewhere */
312 if (pcb->ids == NULL) {
313 pcb->ids = new_ids;
314 simple_unlock(&pcb->lock);
315 } else {
316 simple_unlock(&pcb->lock);
317 zfree(ids_zone, new_ids);
318 }
319 }
320
321
322 copy_debug_state32(ds, pcb->ids, FALSE);
323
324 return KERN_SUCCESS;
325}
326
327static kern_return_t
328set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
329{
330 x86_debug_state64_t *new_ids;
331 pcb_t pcb;
332
333 pcb = THREAD_TO_PCB(thread);
334
335 if (debug_state_is_valid64(ds) != TRUE) {
336 return KERN_INVALID_ARGUMENT;
337 }
338
339 if (pcb->ids == NULL) {
340 new_ids = zalloc(ids_zone);
341 bzero(new_ids, sizeof *new_ids);
342
343#if HYPERVISOR
344 if (thread->hv_thread_target) {
345 hv_callbacks.volatile_state(thread->hv_thread_target,
346 HV_DEBUG_STATE);
347 }
348#endif
349
350 simple_lock(&pcb->lock, LCK_GRP_NULL);
351 /* make sure it wasn't already alloc()'d elsewhere */
352 if (pcb->ids == NULL) {
353 pcb->ids = new_ids;
354 simple_unlock(&pcb->lock);
355 } else {
356 simple_unlock(&pcb->lock);
357 zfree(ids_zone, new_ids);
358 }
359 }
360
361 copy_debug_state64(ds, pcb->ids, FALSE);
362
363 return KERN_SUCCESS;
364}
365
366static void
367get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
368{
369 x86_debug_state32_t *saved_state;
370
371 saved_state = thread->machine.ids;
372
373 if (saved_state) {
374 copy_debug_state32(saved_state, ds, TRUE);
375 } else {
376 bzero(ds, sizeof *ds);
377 }
378}
379
380static void
381get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
382{
383 x86_debug_state64_t *saved_state;
384
385 saved_state = (x86_debug_state64_t *)thread->machine.ids;
386
387 if (saved_state) {
388 copy_debug_state64(saved_state, ds, TRUE);
389 } else {
390 bzero(ds, sizeof *ds);
391 }
392}
393
394/*
395 * consider_machine_collect:
396 *
397 * Try to collect machine-dependent pages
398 */
399void
400consider_machine_collect(void)
401{
402}
403
404void
405consider_machine_adjust(void)
406{
407}
408
409/*
410 * Switch to the first thread on a CPU.
411 */
412void
413machine_load_context(
414 thread_t new)
415{
416 new->machine.specFlags |= OnProc;
417 act_machine_switch_pcb(NULL, new);
418 Load_context(new);
419}
420
421static inline void
422pmap_switch_context(thread_t ot, thread_t nt, int cnum)
423{
424 pmap_assert(ml_get_interrupts_enabled() == FALSE);
425 vm_map_t nmap = nt->map, omap = ot->map;
426 if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
427 PMAP_DEACTIVATE_MAP(omap, ot, cnum);
428 PMAP_ACTIVATE_MAP(nmap, nt, cnum);
429 }
430}
431
432/*
433 * Switch to a new thread.
 434 * Save the old thread's kernel state or continuation,
435 * and return it.
436 */
437thread_t
438machine_switch_context(
439 thread_t old,
440 thread_continue_t continuation,
441 thread_t new)
442{
443 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
444
445#if KPC
446 kpc_off_cpu(old);
447#endif /* KPC */
448
449 /*
450 * Save FP registers if in use.
451 */
452 fpu_switch_context(old, new);
453
454 old->machine.specFlags &= ~OnProc;
455 new->machine.specFlags |= OnProc;
456
457 /*
458 * Monitor the stack depth and report new max,
459 * not worrying about races.
460 */
461 vm_offset_t depth = current_stack_depth();
462 if (depth > kernel_stack_depth_max) {
463 kernel_stack_depth_max = depth;
464 KERNEL_DEBUG_CONSTANT(
465 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
466 (long) depth, 0, 0, 0, 0);
467 }
468
469 /*
470 * Switch address maps if need be, even if not switching tasks.
471 * (A server activation may be "borrowing" a client map.)
472 */
473 pmap_switch_context(old, new, cpu_number());
474
475 /*
476 * Load the rest of the user state for the new thread
477 */
478 act_machine_switch_pcb(old, new);
479
480#if HYPERVISOR
481 ml_hv_cswitch(old, new);
482#endif
483
484 return Switch_context(old, continuation, new);
485}
486
487boolean_t
488machine_thread_on_core(thread_t thread)
489{
490 return thread->machine.specFlags & OnProc;
491}
492
493thread_t
494machine_processor_shutdown(
495 thread_t thread,
496 void (*doshutdown)(processor_t),
497 processor_t processor)
498{
499#if CONFIG_VMX
500 vmx_suspend();
501#endif
502 fpu_switch_context(thread, NULL);
503 pmap_switch_context(thread, processor->idle_thread, cpu_number());
504 return Shutdown_context(thread, doshutdown, processor);
505}
506
507
508/*
509 * This is where registers that are not normally specified by the mach-o
510 * file on an execve would be nullified, perhaps to avoid a covert channel.
511 */
512kern_return_t
513machine_thread_state_initialize(
514 thread_t thread)
515{
516 /*
517 * If there's an fpu save area, free it.
518 * The initialized state will then be lazily faulted-in, if required.
 519 * And if we're the target, re-arm the no-fpu trap.
520 */
521 if (thread->machine.ifps) {
522 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
523
524 if (thread == current_thread()) {
525 clear_fpu();
526 }
527 }
528
529 if (thread->machine.ids) {
530 zfree(ids_zone, thread->machine.ids);
531 thread->machine.ids = NULL;
532 }
533
534 return KERN_SUCCESS;
535}
536
537uint32_t
538get_eflags_exportmask(void)
539{
540 return EFL_USER_SET;
541}
542
543/*
544 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
545 * for 32bit tasks only
546 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
547 * for 64bit tasks only
548 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
549 * for 32bit tasks only
550 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
551 * for 64bit tasks only
552 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
553 * for either 32bit or 64bit tasks
554 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
555 * for 32bit tasks only
556 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
557 * for 64bit tasks only
558 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
559 * for either 32bit or 64bit tasks
560 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
561 * for 32bit tasks only
562 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
563 * for 64bit tasks only
 564 * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
565 * for either 32bit or 64bit tasks
566 */
567
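/*
 * Illustrative sketch (not part of the build): how a 64-bit user process
 * would fetch the external x86_THREAD_STATE64 flavor described above
 * through the Mach thread_get_state() interface. Userspace headers prefix
 * the structure fields with "__"; error handling is reduced to a bail-out.
 */
#if 0 /* example only - userspace code */
#include <mach/mach.h>

static uint64_t
example_read_own_rip(void)
{
	x86_thread_state64_t ts;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;

	/* count is in/out: on entry it is the buffer size in 32-bit words */
	if (thread_get_state(mach_thread_self(), x86_THREAD_STATE64,
	    (thread_state_t)&ts, &count) != KERN_SUCCESS) {
		return 0;
	}
	return ts.__rip;
}
#endif
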
568
569static void
570get_exception_state64(thread_t thread, x86_exception_state64_t *es)
571{
572 x86_saved_state64_t *saved_state;
573
574 saved_state = USER_REGS64(thread);
575
576 es->trapno = saved_state->isf.trapno;
577 es->cpu = saved_state->isf.cpu;
578 es->err = (typeof(es->err))saved_state->isf.err;
579 es->faultvaddr = saved_state->cr2;
580}
581
582static void
583get_exception_state32(thread_t thread, x86_exception_state32_t *es)
584{
585 x86_saved_state32_t *saved_state;
586
587 saved_state = USER_REGS32(thread);
588
589 es->trapno = saved_state->trapno;
590 es->cpu = saved_state->cpu;
591 es->err = saved_state->err;
592 es->faultvaddr = saved_state->cr2;
593}
594
595
596static int
597set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
598{
599 x86_saved_state32_t *saved_state;
600
601 pal_register_cache_state(thread, DIRTY);
602
603 saved_state = USER_REGS32(thread);
604
605 /*
606 * Scrub segment selector values:
607 */
608 ts->cs = USER_CS;
609 /*
610 * On a 64 bit kernel, we always override the data segments,
611 * as the actual selector numbers have changed. This also
612 * means that we don't support setting the data segments
613 * manually any more.
614 */
615 ts->ss = USER_DS;
616 ts->ds = USER_DS;
617 ts->es = USER_DS;
618
 619 /* Set GS to CTHREAD only if it's been established */
620 ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;
621
622 /* Check segment selectors are safe */
623 if (!valid_user_segment_selectors(ts->cs,
624 ts->ss,
625 ts->ds,
626 ts->es,
627 ts->fs,
628 ts->gs)) {
629 return KERN_INVALID_ARGUMENT;
630 }
631
632 saved_state->eax = ts->eax;
633 saved_state->ebx = ts->ebx;
634 saved_state->ecx = ts->ecx;
635 saved_state->edx = ts->edx;
636 saved_state->edi = ts->edi;
637 saved_state->esi = ts->esi;
638 saved_state->ebp = ts->ebp;
639 saved_state->uesp = ts->esp;
640 saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
641 saved_state->eip = ts->eip;
642 saved_state->cs = ts->cs;
643 saved_state->ss = ts->ss;
644 saved_state->ds = ts->ds;
645 saved_state->es = ts->es;
646 saved_state->fs = ts->fs;
647 saved_state->gs = ts->gs;
648
649 /*
650 * If the trace trap bit is being set,
651 * ensure that the user returns via iret
652 * - which is signaled thusly:
653 */
654 if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
655 saved_state->cs = SYSENTER_TF_CS;
656 }
657
658 return KERN_SUCCESS;
659}
660
661static int
662set_thread_state64(thread_t thread, void *state, int full)
663{
664 x86_thread_state64_t *ts;
665 x86_saved_state64_t *saved_state;
666
667 if (full == TRUE) {
668 ts = &((x86_thread_full_state64_t *)state)->ss64;
669 if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) {
670 return KERN_INVALID_ARGUMENT;
671 }
672 } else {
673 ts = (x86_thread_state64_t *)state;
674 // In this case, ts->cs exists but is ignored, and
 675 // CS is always set to USER64_CS below instead.
676 }
677
678 pal_register_cache_state(thread, DIRTY);
679
680 saved_state = USER_REGS64(thread);
681
682 if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
683 !IS_USERADDR64_CANONICAL(ts->rip)) {
684 return KERN_INVALID_ARGUMENT;
685 }
686
687 saved_state->r8 = ts->r8;
688 saved_state->r9 = ts->r9;
689 saved_state->r10 = ts->r10;
690 saved_state->r11 = ts->r11;
691 saved_state->r12 = ts->r12;
692 saved_state->r13 = ts->r13;
693 saved_state->r14 = ts->r14;
694 saved_state->r15 = ts->r15;
695 saved_state->rax = ts->rax;
696 saved_state->rbx = ts->rbx;
697 saved_state->rcx = ts->rcx;
698 saved_state->rdx = ts->rdx;
699 saved_state->rdi = ts->rdi;
700 saved_state->rsi = ts->rsi;
701 saved_state->rbp = ts->rbp;
702 saved_state->isf.rsp = ts->rsp;
703 saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
704 saved_state->isf.rip = ts->rip;
705
706 if (full == FALSE) {
707 saved_state->isf.cs = USER64_CS;
708 } else {
709 saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
710 saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
711 saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
712 saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
713 machine_thread_set_tsd_base(thread,
714 ((x86_thread_full_state64_t *)ts)->gsbase);
715 }
716
717 saved_state->fs = (uint32_t)ts->fs;
718 saved_state->gs = (uint32_t)ts->gs;
719
720 return KERN_SUCCESS;
721}
722
723
724
725static void
726get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
727{
728 x86_saved_state32_t *saved_state;
729
730 pal_register_cache_state(thread, VALID);
731
732 saved_state = USER_REGS32(thread);
733
734 ts->eax = saved_state->eax;
735 ts->ebx = saved_state->ebx;
736 ts->ecx = saved_state->ecx;
737 ts->edx = saved_state->edx;
738 ts->edi = saved_state->edi;
739 ts->esi = saved_state->esi;
740 ts->ebp = saved_state->ebp;
741 ts->esp = saved_state->uesp;
742 ts->eflags = saved_state->efl;
743 ts->eip = saved_state->eip;
744 ts->cs = saved_state->cs;
745 ts->ss = saved_state->ss;
746 ts->ds = saved_state->ds;
747 ts->es = saved_state->es;
748 ts->fs = saved_state->fs;
749 ts->gs = saved_state->gs;
750}
751
752
753static void
754get_thread_state64(thread_t thread, void *state, boolean_t full)
755{
756 x86_thread_state64_t *ts;
757 x86_saved_state64_t *saved_state;
758
759 if (full == TRUE) {
760 ts = &((x86_thread_full_state64_t *)state)->ss64;
761 } else {
762 ts = (x86_thread_state64_t *)state;
763 }
764
765 pal_register_cache_state(thread, VALID);
766
767 saved_state = USER_REGS64(thread);
768
769 ts->r8 = saved_state->r8;
770 ts->r9 = saved_state->r9;
771 ts->r10 = saved_state->r10;
772 ts->r11 = saved_state->r11;
773 ts->r12 = saved_state->r12;
774 ts->r13 = saved_state->r13;
775 ts->r14 = saved_state->r14;
776 ts->r15 = saved_state->r15;
777 ts->rax = saved_state->rax;
778 ts->rbx = saved_state->rbx;
779 ts->rcx = saved_state->rcx;
780 ts->rdx = saved_state->rdx;
781 ts->rdi = saved_state->rdi;
782 ts->rsi = saved_state->rsi;
783 ts->rbp = saved_state->rbp;
784 ts->rsp = saved_state->isf.rsp;
785 ts->rflags = saved_state->isf.rflags;
786 ts->rip = saved_state->isf.rip;
787 ts->cs = saved_state->isf.cs;
788
789 if (full == TRUE) {
790 ((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
791 ((x86_thread_full_state64_t *)state)->es = saved_state->es;
792 ((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
793 ((x86_thread_full_state64_t *)state)->gsbase =
794 thread->machine.cthread_self;
795 }
796
797 ts->fs = saved_state->fs;
798 ts->gs = saved_state->gs;
799}
800
801kern_return_t
802machine_thread_state_convert_to_user(
803 __unused thread_t thread,
804 __unused thread_flavor_t flavor,
805 __unused thread_state_t tstate,
806 __unused mach_msg_type_number_t *count)
807{
808 // No conversion to userspace representation on this platform
809 return KERN_SUCCESS;
810}
811
812kern_return_t
813machine_thread_state_convert_from_user(
814 __unused thread_t thread,
815 __unused thread_flavor_t flavor,
816 __unused thread_state_t tstate,
817 __unused mach_msg_type_number_t count)
818{
819 // No conversion from userspace representation on this platform
820 return KERN_SUCCESS;
821}
822
823kern_return_t
824machine_thread_siguctx_pointer_convert_to_user(
825 __unused thread_t thread,
826 __unused user_addr_t *uctxp)
827{
828 // No conversion to userspace representation on this platform
829 return KERN_SUCCESS;
830}
831
832kern_return_t
833machine_thread_function_pointers_convert_from_user(
834 __unused thread_t thread,
835 __unused user_addr_t *fptrs,
836 __unused uint32_t count)
837{
838 // No conversion from userspace representation on this platform
839 return KERN_SUCCESS;
840}
841
842/*
843 * act_machine_set_state:
844 *
845 * Set the status of the specified thread.
846 */
847
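/*
 * Illustrative sketch (not part of the build): a debugger-style user of the
 * interface below, single-stepping a suspended 64-bit thread by setting the
 * trace flag (TF, bit 8 of RFLAGS) via thread_get_state()/thread_set_state().
 * The thread port is assumed to come from elsewhere (e.g. task_threads()).
 */
#if 0 /* example only - userspace code */
#include <mach/mach.h>

static kern_return_t
example_single_step(thread_act_t target)
{
	x86_thread_state64_t ts;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
	kern_return_t kr;

	kr = thread_suspend(target);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kr = thread_get_state(target, x86_THREAD_STATE64,
	    (thread_state_t)&ts, &count);
	if (kr == KERN_SUCCESS) {
		ts.__rflags |= 0x100;   /* EFL_TF: trap after the next instruction */
		kr = thread_set_state(target, x86_THREAD_STATE64,
		    (thread_state_t)&ts, count);
	}
	(void) thread_resume(target);
	return kr;
}
#endif
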
848kern_return_t
849machine_thread_set_state(
850 thread_t thr_act,
851 thread_flavor_t flavor,
852 thread_state_t tstate,
853 mach_msg_type_number_t count)
854{
855 switch (flavor) {
856 case x86_SAVED_STATE32:
857 {
858 x86_saved_state32_t *state;
859 x86_saved_state32_t *saved_state;
860
861 if (count < x86_SAVED_STATE32_COUNT) {
862 return KERN_INVALID_ARGUMENT;
863 }
864
865 state = (x86_saved_state32_t *) tstate;
866
867 /*
868 * Refuse to allow 64-bit processes to set
869 * 32-bit state.
870 */
871 if (thread_is_64bit_addr(thr_act)) {
872 return KERN_INVALID_ARGUMENT;
873 }
874
875 /* Check segment selectors are safe */
876 if (!valid_user_segment_selectors(state->cs,
877 state->ss,
878 state->ds,
879 state->es,
880 state->fs,
881 state->gs)) {
882 return KERN_INVALID_ARGUMENT;
883 }
884
885 pal_register_cache_state(thr_act, DIRTY);
886
887 saved_state = USER_REGS32(thr_act);
888
889 /*
890 * General registers
891 */
892 saved_state->edi = state->edi;
893 saved_state->esi = state->esi;
894 saved_state->ebp = state->ebp;
895 saved_state->uesp = state->uesp;
896 saved_state->ebx = state->ebx;
897 saved_state->edx = state->edx;
898 saved_state->ecx = state->ecx;
899 saved_state->eax = state->eax;
900 saved_state->eip = state->eip;
901
902 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
903
904 /*
905 * If the trace trap bit is being set,
906 * ensure that the user returns via iret
907 * - which is signaled thusly:
908 */
909 if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
910 state->cs = SYSENTER_TF_CS;
911 }
912
913 /*
914 * User setting segment registers.
915 * Code and stack selectors have already been
916 * checked. Others will be reset by 'iret'
917 * if they are not valid.
918 */
919 saved_state->cs = state->cs;
920 saved_state->ss = state->ss;
921 saved_state->ds = state->ds;
922 saved_state->es = state->es;
923 saved_state->fs = state->fs;
924 saved_state->gs = state->gs;
925
926 break;
927 }
928
929 case x86_SAVED_STATE64:
930 {
931 x86_saved_state64_t *state;
932 x86_saved_state64_t *saved_state;
933
934 if (count < x86_SAVED_STATE64_COUNT) {
935 return KERN_INVALID_ARGUMENT;
936 }
937
938 if (!thread_is_64bit_addr(thr_act)) {
939 return KERN_INVALID_ARGUMENT;
940 }
941
942 state = (x86_saved_state64_t *) tstate;
943
944 /* Verify that the supplied code segment selector is
945 * valid. In 64-bit mode, the FS and GS segment overrides
946 * use the FS.base and GS.base MSRs to calculate
947 * base addresses, and the trampolines don't directly
948 * restore the segment registers--hence they are no
949 * longer relevant for validation.
950 */
951 if (!valid_user_code_selector(state->isf.cs)) {
952 return KERN_INVALID_ARGUMENT;
953 }
954
955 /* Check pc and stack are canonical addresses */
956 if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
957 !IS_USERADDR64_CANONICAL(state->isf.rip)) {
958 return KERN_INVALID_ARGUMENT;
959 }
960
961 pal_register_cache_state(thr_act, DIRTY);
962
963 saved_state = USER_REGS64(thr_act);
964
965 /*
966 * General registers
967 */
968 saved_state->r8 = state->r8;
969 saved_state->r9 = state->r9;
970 saved_state->r10 = state->r10;
971 saved_state->r11 = state->r11;
972 saved_state->r12 = state->r12;
973 saved_state->r13 = state->r13;
974 saved_state->r14 = state->r14;
975 saved_state->r15 = state->r15;
976 saved_state->rdi = state->rdi;
977 saved_state->rsi = state->rsi;
978 saved_state->rbp = state->rbp;
979 saved_state->rbx = state->rbx;
980 saved_state->rdx = state->rdx;
981 saved_state->rcx = state->rcx;
982 saved_state->rax = state->rax;
983 saved_state->isf.rsp = state->isf.rsp;
984 saved_state->isf.rip = state->isf.rip;
985
986 saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
987
988 /*
989 * User setting segment registers.
990 * Code and stack selectors have already been
991 * checked. Others will be reset by 'sys'
992 * if they are not valid.
993 */
994 saved_state->isf.cs = state->isf.cs;
995 saved_state->isf.ss = state->isf.ss;
996 saved_state->fs = state->fs;
997 saved_state->gs = state->gs;
998
999 break;
1000 }
1001
1002 case x86_FLOAT_STATE32:
1003 case x86_AVX_STATE32:
1004 case x86_AVX512_STATE32:
1005 {
1006 if (count != _MachineStateCount[flavor]) {
1007 return KERN_INVALID_ARGUMENT;
1008 }
1009
1010 if (thread_is_64bit_addr(thr_act)) {
1011 return KERN_INVALID_ARGUMENT;
1012 }
1013
1014 return fpu_set_fxstate(thr_act, tstate, flavor);
1015 }
1016
1017 case x86_FLOAT_STATE64:
1018 case x86_AVX_STATE64:
1019 case x86_AVX512_STATE64:
1020 {
1021 if (count != _MachineStateCount[flavor]) {
1022 return KERN_INVALID_ARGUMENT;
1023 }
1024
1025 if (!thread_is_64bit_addr(thr_act)) {
1026 return KERN_INVALID_ARGUMENT;
1027 }
1028
1029 return fpu_set_fxstate(thr_act, tstate, flavor);
1030 }
1031
1032 case x86_FLOAT_STATE:
1033 {
1034 x86_float_state_t *state;
1035
1036 if (count != x86_FLOAT_STATE_COUNT) {
1037 return KERN_INVALID_ARGUMENT;
1038 }
1039
1040 state = (x86_float_state_t *)tstate;
1041 if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
1042 thread_is_64bit_addr(thr_act)) {
1043 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1044 }
1045 if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
1046 !thread_is_64bit_addr(thr_act)) {
1047 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1048 }
1049 return KERN_INVALID_ARGUMENT;
1050 }
1051
1052 case x86_AVX_STATE:
1053 case x86_AVX512_STATE:
1054 {
1055 x86_avx_state_t *state;
1056
1057 if (count != _MachineStateCount[flavor]) {
1058 return KERN_INVALID_ARGUMENT;
1059 }
1060
1061 state = (x86_avx_state_t *)tstate;
1062 /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
1063 /* 64-bit flavor? */
1064 if (state->ash.flavor == (flavor - 1) &&
1065 state->ash.count == _MachineStateCount[flavor - 1] &&
1066 thread_is_64bit_addr(thr_act)) {
1067 return fpu_set_fxstate(thr_act,
1068 (thread_state_t)&state->ufs.as64,
1069 flavor - 1);
1070 }
1071 /* 32-bit flavor? */
1072 if (state->ash.flavor == (flavor - 2) &&
1073 state->ash.count == _MachineStateCount[flavor - 2] &&
1074 !thread_is_64bit_addr(thr_act)) {
1075 return fpu_set_fxstate(thr_act,
1076 (thread_state_t)&state->ufs.as32,
1077 flavor - 2);
1078 }
1079 return KERN_INVALID_ARGUMENT;
1080 }
1081
1082 case x86_THREAD_STATE32:
1083 {
1084 if (count != x86_THREAD_STATE32_COUNT) {
1085 return KERN_INVALID_ARGUMENT;
1086 }
1087
1088 if (thread_is_64bit_addr(thr_act)) {
1089 return KERN_INVALID_ARGUMENT;
1090 }
1091
1092 return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1093 }
1094
1095 case x86_THREAD_STATE64:
1096 {
1097 if (count != x86_THREAD_STATE64_COUNT) {
1098 return KERN_INVALID_ARGUMENT;
1099 }
1100
1101 if (!thread_is_64bit_addr(thr_act)) {
1102 return KERN_INVALID_ARGUMENT;
1103 }
1104
1105 return set_thread_state64(thr_act, tstate, FALSE);
1106 }
1107
1108 case x86_THREAD_FULL_STATE64:
1109 {
1110 if (count != x86_THREAD_FULL_STATE64_COUNT) {
1111 return KERN_INVALID_ARGUMENT;
1112 }
1113
1114 if (!thread_is_64bit_addr(thr_act)) {
1115 return KERN_INVALID_ARGUMENT;
1116 }
1117
1118 /* If this process does not have a custom LDT, return failure */
1119 if (thr_act->task->i386_ldt == 0) {
1120 return KERN_INVALID_ARGUMENT;
1121 }
1122
1123 return set_thread_state64(thr_act, tstate, TRUE);
1124 }
1125
1126 case x86_THREAD_STATE:
1127 {
1128 x86_thread_state_t *state;
1129
1130 if (count != x86_THREAD_STATE_COUNT) {
1131 return KERN_INVALID_ARGUMENT;
1132 }
1133
1134 state = (x86_thread_state_t *)tstate;
1135
1136 if (state->tsh.flavor == x86_THREAD_STATE64 &&
1137 state->tsh.count == x86_THREAD_STATE64_COUNT &&
1138 thread_is_64bit_addr(thr_act)) {
1139 return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
1140 } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
1141 state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
1142 thread_is_64bit_addr(thr_act) && thr_act->task->i386_ldt != 0) {
1143 return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
1144 } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
1145 state->tsh.count == x86_THREAD_STATE32_COUNT &&
1146 !thread_is_64bit_addr(thr_act)) {
1147 return set_thread_state32(thr_act, &state->uts.ts32);
1148 } else {
1149 return KERN_INVALID_ARGUMENT;
1150 }
1151 }
1152 case x86_DEBUG_STATE32:
1153 {
1154 x86_debug_state32_t *state;
1155 kern_return_t ret;
1156
1157 if (thread_is_64bit_addr(thr_act)) {
1158 return KERN_INVALID_ARGUMENT;
1159 }
1160
1161 state = (x86_debug_state32_t *)tstate;
1162
1163 ret = set_debug_state32(thr_act, state);
1164
1165 return ret;
1166 }
1167 case x86_DEBUG_STATE64:
1168 {
1169 x86_debug_state64_t *state;
1170 kern_return_t ret;
1171
1172 if (!thread_is_64bit_addr(thr_act)) {
1173 return KERN_INVALID_ARGUMENT;
1174 }
1175
1176 state = (x86_debug_state64_t *)tstate;
1177
1178 ret = set_debug_state64(thr_act, state);
1179
1180 return ret;
1181 }
1182 case x86_DEBUG_STATE:
1183 {
1184 x86_debug_state_t *state;
1185 kern_return_t ret = KERN_INVALID_ARGUMENT;
1186
1187 if (count != x86_DEBUG_STATE_COUNT) {
1188 return KERN_INVALID_ARGUMENT;
1189 }
1190
1191 state = (x86_debug_state_t *)tstate;
1192 if (state->dsh.flavor == x86_DEBUG_STATE64 &&
1193 state->dsh.count == x86_DEBUG_STATE64_COUNT &&
1194 thread_is_64bit_addr(thr_act)) {
1195 ret = set_debug_state64(thr_act, &state->uds.ds64);
1196 } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
1197 state->dsh.count == x86_DEBUG_STATE32_COUNT &&
1198 !thread_is_64bit_addr(thr_act)) {
1199 ret = set_debug_state32(thr_act, &state->uds.ds32);
1200 }
1201 return ret;
1202 }
1203 default:
1204 return KERN_INVALID_ARGUMENT;
1205 }
1206
1207 return KERN_SUCCESS;
1208}
1209
1210mach_vm_address_t
1211machine_thread_pc(thread_t thr_act)
1212{
1213 if (thread_is_64bit_addr(thr_act)) {
1214 return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
1215 } else {
1216 return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
1217 }
1218}
1219
1220void
1221machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
1222{
1223 pal_register_cache_state(thr_act, DIRTY);
1224
1225 if (thread_is_64bit_addr(thr_act)) {
1226 if (!IS_USERADDR64_CANONICAL(pc)) {
1227 pc = 0;
1228 }
1229 USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
1230 } else {
1231 USER_REGS32(thr_act)->eip = (uint32_t)pc;
1232 }
1233}
1234
1235
1236/*
1237 * thread_getstatus:
1238 *
1239 * Get the status of the specified thread.
1240 */
1241
1242kern_return_t
1243machine_thread_get_state(
1244 thread_t thr_act,
1245 thread_flavor_t flavor,
1246 thread_state_t tstate,
1247 mach_msg_type_number_t *count)
1248{
1249 switch (flavor) {
1250 case THREAD_STATE_FLAVOR_LIST:
1251 {
1252 if (*count < 3) {
1253 return KERN_INVALID_ARGUMENT;
1254 }
1255
1256 tstate[0] = i386_THREAD_STATE;
1257 tstate[1] = i386_FLOAT_STATE;
1258 tstate[2] = i386_EXCEPTION_STATE;
1259
1260 *count = 3;
1261 break;
1262 }
1263
1264 case THREAD_STATE_FLAVOR_LIST_NEW:
1265 {
1266 if (*count < 4) {
1267 return KERN_INVALID_ARGUMENT;
1268 }
1269
1270 tstate[0] = x86_THREAD_STATE;
1271 tstate[1] = x86_FLOAT_STATE;
1272 tstate[2] = x86_EXCEPTION_STATE;
1273 tstate[3] = x86_DEBUG_STATE;
1274
1275 *count = 4;
1276 break;
1277 }
1278
1279 case THREAD_STATE_FLAVOR_LIST_10_9:
1280 {
1281 if (*count < 5) {
1282 return KERN_INVALID_ARGUMENT;
1283 }
1284
1285 tstate[0] = x86_THREAD_STATE;
1286 tstate[1] = x86_FLOAT_STATE;
1287 tstate[2] = x86_EXCEPTION_STATE;
1288 tstate[3] = x86_DEBUG_STATE;
1289 tstate[4] = x86_AVX_STATE;
1290
1291 *count = 5;
1292 break;
1293 }
1294
1295 case THREAD_STATE_FLAVOR_LIST_10_13:
1296 {
1297 if (*count < 6) {
1298 return KERN_INVALID_ARGUMENT;
1299 }
1300
1301 tstate[0] = x86_THREAD_STATE;
1302 tstate[1] = x86_FLOAT_STATE;
1303 tstate[2] = x86_EXCEPTION_STATE;
1304 tstate[3] = x86_DEBUG_STATE;
1305 tstate[4] = x86_AVX_STATE;
1306 tstate[5] = x86_AVX512_STATE;
1307
1308 *count = 6;
1309 break;
1310 }
1311
1312 case THREAD_STATE_FLAVOR_LIST_10_15:
1313 {
1314 if (*count < 7) {
1315 return KERN_INVALID_ARGUMENT;
1316 }
1317
1318 tstate[0] = x86_THREAD_STATE;
1319 tstate[1] = x86_FLOAT_STATE;
1320 tstate[2] = x86_EXCEPTION_STATE;
1321 tstate[3] = x86_DEBUG_STATE;
1322 tstate[4] = x86_AVX_STATE;
1323 tstate[5] = x86_AVX512_STATE;
1324 tstate[6] = x86_PAGEIN_STATE;
1325
1326 *count = 7;
1327 break;
1328 }
1329
1330 case x86_SAVED_STATE32:
1331 {
1332 x86_saved_state32_t *state;
1333 x86_saved_state32_t *saved_state;
1334
1335 if (*count < x86_SAVED_STATE32_COUNT) {
1336 return KERN_INVALID_ARGUMENT;
1337 }
1338
1339 if (thread_is_64bit_addr(thr_act)) {
1340 return KERN_INVALID_ARGUMENT;
1341 }
1342
1343 state = (x86_saved_state32_t *) tstate;
1344 saved_state = USER_REGS32(thr_act);
1345
1346 /*
1347 * First, copy everything:
1348 */
1349 *state = *saved_state;
1350 state->ds = saved_state->ds & 0xffff;
1351 state->es = saved_state->es & 0xffff;
1352 state->fs = saved_state->fs & 0xffff;
1353 state->gs = saved_state->gs & 0xffff;
1354
1355 *count = x86_SAVED_STATE32_COUNT;
1356 break;
1357 }
1358
1359 case x86_SAVED_STATE64:
1360 {
1361 x86_saved_state64_t *state;
1362 x86_saved_state64_t *saved_state;
1363
1364 if (*count < x86_SAVED_STATE64_COUNT) {
1365 return KERN_INVALID_ARGUMENT;
1366 }
1367
1368 if (!thread_is_64bit_addr(thr_act)) {
1369 return KERN_INVALID_ARGUMENT;
1370 }
1371
1372 state = (x86_saved_state64_t *)tstate;
1373 saved_state = USER_REGS64(thr_act);
1374
1375 /*
1376 * First, copy everything:
1377 */
1378 *state = *saved_state;
1379 state->ds = saved_state->ds & 0xffff;
1380 state->es = saved_state->es & 0xffff;
1381 state->fs = saved_state->fs & 0xffff;
1382 state->gs = saved_state->gs & 0xffff;
1383
1384 *count = x86_SAVED_STATE64_COUNT;
1385 break;
1386 }
1387
1388 case x86_FLOAT_STATE32:
1389 {
1390 if (*count < x86_FLOAT_STATE32_COUNT) {
1391 return KERN_INVALID_ARGUMENT;
1392 }
1393
1394 if (thread_is_64bit_addr(thr_act)) {
1395 return KERN_INVALID_ARGUMENT;
1396 }
1397
1398 *count = x86_FLOAT_STATE32_COUNT;
1399
1400 return fpu_get_fxstate(thr_act, tstate, flavor);
1401 }
1402
1403 case x86_FLOAT_STATE64:
1404 {
1405 if (*count < x86_FLOAT_STATE64_COUNT) {
1406 return KERN_INVALID_ARGUMENT;
1407 }
1408
1409 if (!thread_is_64bit_addr(thr_act)) {
1410 return KERN_INVALID_ARGUMENT;
1411 }
1412
1413 *count = x86_FLOAT_STATE64_COUNT;
1414
1415 return fpu_get_fxstate(thr_act, tstate, flavor);
1416 }
1417
1418 case x86_FLOAT_STATE:
1419 {
1420 x86_float_state_t *state;
1421 kern_return_t kret;
1422
1423 if (*count < x86_FLOAT_STATE_COUNT) {
1424 return KERN_INVALID_ARGUMENT;
1425 }
1426
1427 state = (x86_float_state_t *)tstate;
1428
1429 /*
1430 * no need to bzero... currently
1431 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
1432 */
1433 if (thread_is_64bit_addr(thr_act)) {
1434 state->fsh.flavor = x86_FLOAT_STATE64;
1435 state->fsh.count = x86_FLOAT_STATE64_COUNT;
1436
1437 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1438 } else {
1439 state->fsh.flavor = x86_FLOAT_STATE32;
1440 state->fsh.count = x86_FLOAT_STATE32_COUNT;
1441
1442 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1443 }
1444 *count = x86_FLOAT_STATE_COUNT;
1445
1446 return kret;
1447 }
1448
1449 case x86_AVX_STATE32:
1450 case x86_AVX512_STATE32:
1451 {
1452 if (*count != _MachineStateCount[flavor]) {
1453 return KERN_INVALID_ARGUMENT;
1454 }
1455
1456 if (thread_is_64bit_addr(thr_act)) {
1457 return KERN_INVALID_ARGUMENT;
1458 }
1459
1460 *count = _MachineStateCount[flavor];
1461
1462 return fpu_get_fxstate(thr_act, tstate, flavor);
1463 }
1464
1465 case x86_AVX_STATE64:
1466 case x86_AVX512_STATE64:
1467 {
1468 if (*count != _MachineStateCount[flavor]) {
1469 return KERN_INVALID_ARGUMENT;
1470 }
1471
1472 if (!thread_is_64bit_addr(thr_act)) {
1473 return KERN_INVALID_ARGUMENT;
1474 }
1475
1476 *count = _MachineStateCount[flavor];
1477
1478 return fpu_get_fxstate(thr_act, tstate, flavor);
1479 }
1480
1481 case x86_AVX_STATE:
1482 case x86_AVX512_STATE:
1483 {
1484 x86_avx_state_t *state;
1485 thread_state_t fstate;
1486
1487 if (*count < _MachineStateCount[flavor]) {
1488 return KERN_INVALID_ARGUMENT;
1489 }
1490
1491 *count = _MachineStateCount[flavor];
1492 state = (x86_avx_state_t *)tstate;
1493
1494 bzero((char *)state, *count * sizeof(int));
1495
1496 if (thread_is_64bit_addr(thr_act)) {
1497 flavor -= 1; /* 64-bit flavor */
1498 fstate = (thread_state_t) &state->ufs.as64;
1499 } else {
1500 flavor -= 2; /* 32-bit flavor */
1501 fstate = (thread_state_t) &state->ufs.as32;
1502 }
1503 state->ash.flavor = flavor;
1504 state->ash.count = _MachineStateCount[flavor];
1505
1506 return fpu_get_fxstate(thr_act, fstate, flavor);
1507 }
1508
1509 case x86_THREAD_STATE32:
1510 {
1511 if (*count < x86_THREAD_STATE32_COUNT) {
1512 return KERN_INVALID_ARGUMENT;
1513 }
1514
1515 if (thread_is_64bit_addr(thr_act)) {
1516 return KERN_INVALID_ARGUMENT;
1517 }
1518
1519 *count = x86_THREAD_STATE32_COUNT;
1520
1521 get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1522 break;
1523 }
1524
1525 case x86_THREAD_STATE64:
1526 {
1527 if (*count < x86_THREAD_STATE64_COUNT) {
1528 return KERN_INVALID_ARGUMENT;
1529 }
1530
1531 if (!thread_is_64bit_addr(thr_act)) {
1532 return KERN_INVALID_ARGUMENT;
1533 }
1534
1535 *count = x86_THREAD_STATE64_COUNT;
1536
1537 get_thread_state64(thr_act, tstate, FALSE);
1538 break;
1539 }
1540
1541 case x86_THREAD_FULL_STATE64:
1542 {
1543 if (*count < x86_THREAD_FULL_STATE64_COUNT) {
1544 return KERN_INVALID_ARGUMENT;
1545 }
1546
1547 if (!thread_is_64bit_addr(thr_act)) {
1548 return KERN_INVALID_ARGUMENT;
1549 }
1550
1551 /* If this process does not have a custom LDT, return failure */
1552 if (thr_act->task->i386_ldt == 0) {
1553 return KERN_INVALID_ARGUMENT;
1554 }
1555
1556 *count = x86_THREAD_FULL_STATE64_COUNT;
1557
1558 get_thread_state64(thr_act, tstate, TRUE);
1559 break;
1560 }
1561
1562 case x86_THREAD_STATE:
1563 {
1564 x86_thread_state_t *state;
1565
1566 if (*count < x86_THREAD_STATE_COUNT) {
1567 return KERN_INVALID_ARGUMENT;
1568 }
1569
1570 state = (x86_thread_state_t *)tstate;
1571
1572 bzero((char *)state, sizeof(x86_thread_state_t));
1573
1574 if (thread_is_64bit_addr(thr_act)) {
1575 state->tsh.flavor = x86_THREAD_STATE64;
1576 state->tsh.count = x86_THREAD_STATE64_COUNT;
1577
1578 get_thread_state64(thr_act, &state->uts.ts64, FALSE);
1579 } else {
1580 state->tsh.flavor = x86_THREAD_STATE32;
1581 state->tsh.count = x86_THREAD_STATE32_COUNT;
1582
1583 get_thread_state32(thr_act, &state->uts.ts32);
1584 }
1585 *count = x86_THREAD_STATE_COUNT;
1586
1587 break;
1588 }
1589
1590
1591 case x86_EXCEPTION_STATE32:
1592 {
1593 if (*count < x86_EXCEPTION_STATE32_COUNT) {
1594 return KERN_INVALID_ARGUMENT;
1595 }
1596
1597 if (thread_is_64bit_addr(thr_act)) {
1598 return KERN_INVALID_ARGUMENT;
1599 }
1600
1601 *count = x86_EXCEPTION_STATE32_COUNT;
1602
1603 get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
1604 /*
1605 * Suppress the cpu number for binary compatibility
1606 * of this deprecated state.
1607 */
1608 ((x86_exception_state32_t *)tstate)->cpu = 0;
1609 break;
1610 }
1611
1612 case x86_EXCEPTION_STATE64:
1613 {
1614 if (*count < x86_EXCEPTION_STATE64_COUNT) {
1615 return KERN_INVALID_ARGUMENT;
1616 }
1617
1618 if (!thread_is_64bit_addr(thr_act)) {
1619 return KERN_INVALID_ARGUMENT;
1620 }
1621
1622 *count = x86_EXCEPTION_STATE64_COUNT;
1623
1624 get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
1625 /*
1626 * Suppress the cpu number for binary compatibility
1627 * of this deprecated state.
1628 */
1629 ((x86_exception_state64_t *)tstate)->cpu = 0;
1630 break;
1631 }
1632
1633 case x86_EXCEPTION_STATE:
1634 {
1635 x86_exception_state_t *state;
1636
1637 if (*count < x86_EXCEPTION_STATE_COUNT) {
1638 return KERN_INVALID_ARGUMENT;
1639 }
1640
1641 state = (x86_exception_state_t *)tstate;
1642
1643 bzero((char *)state, sizeof(x86_exception_state_t));
1644
1645 if (thread_is_64bit_addr(thr_act)) {
1646 state->esh.flavor = x86_EXCEPTION_STATE64;
1647 state->esh.count = x86_EXCEPTION_STATE64_COUNT;
1648
1649 get_exception_state64(thr_act, &state->ues.es64);
1650 } else {
1651 state->esh.flavor = x86_EXCEPTION_STATE32;
1652 state->esh.count = x86_EXCEPTION_STATE32_COUNT;
1653
1654 get_exception_state32(thr_act, &state->ues.es32);
1655 }
1656 *count = x86_EXCEPTION_STATE_COUNT;
1657
1658 break;
1659 }
1660 case x86_DEBUG_STATE32:
1661 {
1662 if (*count < x86_DEBUG_STATE32_COUNT) {
1663 return KERN_INVALID_ARGUMENT;
1664 }
1665
1666 if (thread_is_64bit_addr(thr_act)) {
1667 return KERN_INVALID_ARGUMENT;
1668 }
1669
1670 get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
1671
1672 *count = x86_DEBUG_STATE32_COUNT;
1673
1674 break;
1675 }
1676 case x86_DEBUG_STATE64:
1677 {
1678 if (*count < x86_DEBUG_STATE64_COUNT) {
1679 return KERN_INVALID_ARGUMENT;
1680 }
1681
1682 if (!thread_is_64bit_addr(thr_act)) {
1683 return KERN_INVALID_ARGUMENT;
1684 }
1685
1686 get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
1687
1688 *count = x86_DEBUG_STATE64_COUNT;
1689
1690 break;
1691 }
1692 case x86_DEBUG_STATE:
1693 {
1694 x86_debug_state_t *state;
1695
1696 if (*count < x86_DEBUG_STATE_COUNT) {
1697 return KERN_INVALID_ARGUMENT;
1698 }
1699
1700 state = (x86_debug_state_t *)tstate;
1701
1702 bzero(state, sizeof *state);
1703
1704 if (thread_is_64bit_addr(thr_act)) {
1705 state->dsh.flavor = x86_DEBUG_STATE64;
1706 state->dsh.count = x86_DEBUG_STATE64_COUNT;
1707
1708 get_debug_state64(thr_act, &state->uds.ds64);
1709 } else {
1710 state->dsh.flavor = x86_DEBUG_STATE32;
1711 state->dsh.count = x86_DEBUG_STATE32_COUNT;
1712
1713 get_debug_state32(thr_act, &state->uds.ds32);
1714 }
1715 *count = x86_DEBUG_STATE_COUNT;
1716 break;
1717 }
1718
1719 case x86_PAGEIN_STATE:
1720 {
1721 if (*count < x86_PAGEIN_STATE_COUNT) {
1722 return KERN_INVALID_ARGUMENT;
1723 }
1724
1725 x86_pagein_state_t *state = (void *)tstate;
1726
1727 state->__pagein_error = thr_act->t_pagein_error;
1728
1729 *count = x86_PAGEIN_STATE_COUNT;
1730 break;
1731 }
1732
1733 case x86_INSTRUCTION_STATE:
1734 {
1735 if (*count < x86_INSTRUCTION_STATE_COUNT) {
1736 return KERN_INVALID_ARGUMENT;
1737 }
1738
1739 x86_instruction_state_t *state = (void *)tstate;
1740 x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state;
1741
1742 if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) {
1743#if DEVELOPMENT || DEBUG
1744 extern int insnstream_force_cacheline_mismatch;
1745#endif
1746 size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES)
1747 ? x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes;
1748 if (byte_count > 0) {
1749 bcopy(src_state->insn_bytes, state->insn_bytes, byte_count);
1750 }
1751 state->insn_offset = src_state->insn_offset;
1752 state->insn_stream_valid_bytes = byte_count;
1753#if DEVELOPMENT || DEBUG
1754 state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch;
1755 insnstream_force_cacheline_mismatch = 0; /* One-shot, reset after use */
1756
1757 if (state->out_of_synch) {
1758 bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0],
1759 x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1760 } else {
1761 bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1762 }
1763#else
1764 state->out_of_synch = src_state->out_of_synch;
1765#endif
1766 *count = x86_INSTRUCTION_STATE_COUNT;
1767 } else {
1768 *count = 0;
1769 }
1770 break;
1771 }
1772
1773 case x86_LAST_BRANCH_STATE:
1774 {
1775 boolean_t istate;
1776
1777 if (!last_branch_support_enabled || *count < x86_LAST_BRANCH_STATE_COUNT) {
1778 return KERN_INVALID_ARGUMENT;
1779 }
1780
1781 istate = ml_set_interrupts_enabled(FALSE);
1782 /* If the current thread is asking for its own LBR data, synch the LBRs first */
1783 if (thr_act == current_thread()) {
1784 i386_lbr_synch(thr_act);
1785 }
1786 ml_set_interrupts_enabled(istate);
1787
1788 if (i386_lbr_native_state_to_mach_thread_state(THREAD_TO_PCB(thr_act), (last_branch_state_t *)tstate) < 0) {
1789 *count = 0;
1790 return KERN_INVALID_ARGUMENT;
1791 }
1792
1793 *count = x86_LAST_BRANCH_STATE_COUNT;
1794 break;
1795 }
1796
1797 default:
1798 return KERN_INVALID_ARGUMENT;
1799 }
1800
1801 return KERN_SUCCESS;
1802}
1803
1804kern_return_t
1805machine_thread_get_kern_state(
1806 thread_t thread,
1807 thread_flavor_t flavor,
1808 thread_state_t tstate,
1809 mach_msg_type_number_t *count)
1810{
1811 x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;
1812
1813 /*
1814 * This works only for an interrupted kernel thread
1815 */
1816 if (thread != current_thread() || int_state == NULL) {
1817 return KERN_FAILURE;
1818 }
1819
1820 switch (flavor) {
1821 case x86_THREAD_STATE32: {
1822 x86_thread_state32_t *state;
1823 x86_saved_state32_t *saved_state;
1824
1825 if (!is_saved_state32(int_state) ||
1826 *count < x86_THREAD_STATE32_COUNT) {
1827 return KERN_INVALID_ARGUMENT;
1828 }
1829
1830 state = (x86_thread_state32_t *) tstate;
1831
1832 saved_state = saved_state32(int_state);
1833 /*
1834 * General registers.
1835 */
1836 state->eax = saved_state->eax;
1837 state->ebx = saved_state->ebx;
1838 state->ecx = saved_state->ecx;
1839 state->edx = saved_state->edx;
1840 state->edi = saved_state->edi;
1841 state->esi = saved_state->esi;
1842 state->ebp = saved_state->ebp;
1843 state->esp = saved_state->uesp;
1844 state->eflags = saved_state->efl;
1845 state->eip = saved_state->eip;
1846 state->cs = saved_state->cs;
1847 state->ss = saved_state->ss;
1848 state->ds = saved_state->ds & 0xffff;
1849 state->es = saved_state->es & 0xffff;
1850 state->fs = saved_state->fs & 0xffff;
1851 state->gs = saved_state->gs & 0xffff;
1852
1853 *count = x86_THREAD_STATE32_COUNT;
1854
1855 return KERN_SUCCESS;
1856 }
1857
1858 case x86_THREAD_STATE64: {
1859 x86_thread_state64_t *state;
1860 x86_saved_state64_t *saved_state;
1861
1862 if (!is_saved_state64(int_state) ||
1863 *count < x86_THREAD_STATE64_COUNT) {
1864 return KERN_INVALID_ARGUMENT;
1865 }
1866
1867 state = (x86_thread_state64_t *) tstate;
1868
1869 saved_state = saved_state64(int_state);
1870 /*
1871 * General registers.
1872 */
1873 state->rax = saved_state->rax;
1874 state->rbx = saved_state->rbx;
1875 state->rcx = saved_state->rcx;
1876 state->rdx = saved_state->rdx;
1877 state->rdi = saved_state->rdi;
1878 state->rsi = saved_state->rsi;
1879 state->rbp = saved_state->rbp;
1880 state->rsp = saved_state->isf.rsp;
1881 state->r8 = saved_state->r8;
1882 state->r9 = saved_state->r9;
1883 state->r10 = saved_state->r10;
1884 state->r11 = saved_state->r11;
1885 state->r12 = saved_state->r12;
1886 state->r13 = saved_state->r13;
1887 state->r14 = saved_state->r14;
1888 state->r15 = saved_state->r15;
1889
1890 state->rip = saved_state->isf.rip;
1891 state->rflags = saved_state->isf.rflags;
1892 state->cs = saved_state->isf.cs;
1893 state->fs = saved_state->fs & 0xffff;
1894 state->gs = saved_state->gs & 0xffff;
1895 *count = x86_THREAD_STATE64_COUNT;
1896
1897 return KERN_SUCCESS;
1898 }
1899
1900 case x86_THREAD_STATE: {
1901 x86_thread_state_t *state = NULL;
1902
1903 if (*count < x86_THREAD_STATE_COUNT) {
1904 return KERN_INVALID_ARGUMENT;
1905 }
1906
1907 state = (x86_thread_state_t *) tstate;
1908
1909 if (is_saved_state32(int_state)) {
1910 x86_saved_state32_t *saved_state = saved_state32(int_state);
1911
1912 state->tsh.flavor = x86_THREAD_STATE32;
1913 state->tsh.count = x86_THREAD_STATE32_COUNT;
1914
1915 /*
1916 * General registers.
1917 */
1918 state->uts.ts32.eax = saved_state->eax;
1919 state->uts.ts32.ebx = saved_state->ebx;
1920 state->uts.ts32.ecx = saved_state->ecx;
1921 state->uts.ts32.edx = saved_state->edx;
1922 state->uts.ts32.edi = saved_state->edi;
1923 state->uts.ts32.esi = saved_state->esi;
1924 state->uts.ts32.ebp = saved_state->ebp;
1925 state->uts.ts32.esp = saved_state->uesp;
1926 state->uts.ts32.eflags = saved_state->efl;
1927 state->uts.ts32.eip = saved_state->eip;
1928 state->uts.ts32.cs = saved_state->cs;
1929 state->uts.ts32.ss = saved_state->ss;
1930 state->uts.ts32.ds = saved_state->ds & 0xffff;
1931 state->uts.ts32.es = saved_state->es & 0xffff;
1932 state->uts.ts32.fs = saved_state->fs & 0xffff;
1933 state->uts.ts32.gs = saved_state->gs & 0xffff;
1934 } else if (is_saved_state64(int_state)) {
1935 x86_saved_state64_t *saved_state = saved_state64(int_state);
1936
1937 state->tsh.flavor = x86_THREAD_STATE64;
1938 state->tsh.count = x86_THREAD_STATE64_COUNT;
1939
1940 /*
1941 * General registers.
1942 */
1943 state->uts.ts64.rax = saved_state->rax;
1944 state->uts.ts64.rbx = saved_state->rbx;
1945 state->uts.ts64.rcx = saved_state->rcx;
1946 state->uts.ts64.rdx = saved_state->rdx;
1947 state->uts.ts64.rdi = saved_state->rdi;
1948 state->uts.ts64.rsi = saved_state->rsi;
1949 state->uts.ts64.rbp = saved_state->rbp;
1950 state->uts.ts64.rsp = saved_state->isf.rsp;
1951 state->uts.ts64.r8 = saved_state->r8;
1952 state->uts.ts64.r9 = saved_state->r9;
1953 state->uts.ts64.r10 = saved_state->r10;
1954 state->uts.ts64.r11 = saved_state->r11;
1955 state->uts.ts64.r12 = saved_state->r12;
1956 state->uts.ts64.r13 = saved_state->r13;
1957 state->uts.ts64.r14 = saved_state->r14;
1958 state->uts.ts64.r15 = saved_state->r15;
1959
1960 state->uts.ts64.rip = saved_state->isf.rip;
1961 state->uts.ts64.rflags = saved_state->isf.rflags;
1962 state->uts.ts64.cs = saved_state->isf.cs;
1963 state->uts.ts64.fs = saved_state->fs & 0xffff;
1964 state->uts.ts64.gs = saved_state->gs & 0xffff;
1965 } else {
1966 panic("unknown thread state");
1967 }
1968
1969 *count = x86_THREAD_STATE_COUNT;
1970 return KERN_SUCCESS;
1971 }
1972 }
1973 return KERN_FAILURE;
1974}
1975
1976
1977void
1978machine_thread_switch_addrmode(thread_t thread)
1979{
1980 /*
1981 * We don't want to be preempted until we're done
1982 * - particularly if we're switching the current thread
1983 */
1984 disable_preemption();
1985
1986 /*
1987 * Reset the state saveareas. As we're resetting, we anticipate no
1988 * memory allocations in this path.
1989 */
1990 machine_thread_create(thread, thread->task);
1991
1992 /* Adjust FPU state */
1993 fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));
1994
1995 /* If we're switching ourselves, reset the pcb addresses etc. */
1996 if (thread == current_thread()) {
1997 boolean_t istate = ml_set_interrupts_enabled(FALSE);
1998 act_machine_switch_pcb(NULL, thread);
1999 ml_set_interrupts_enabled(istate);
2000 }
2001 enable_preemption();
2002}
2003
2004
2005
2006/*
2007 * This is used to set the current thr_act/thread
2008 * when starting up a new processor
2009 */
2010void
2011machine_set_current_thread(thread_t thread)
2012{
2013 current_cpu_datap()->cpu_active_thread = thread;
2014}
2015
2016
2017/*
2018 * Perform machine-dependent per-thread initializations
2019 */
2020void
2021machine_thread_init(void)
2022{
2023 fpu_module_init();
2024}
2025
2026/*
2027 * machine_thread_template_init: Initialize machine-specific portion of
2028 * the thread template.
2029 */
2030void
2031machine_thread_template_init(thread_t thr_template)
2032{
2033 assert(fpu_default != UNDEFINED);
2034
2035 THREAD_TO_PCB(thr_template)->xstate = fpu_default;
2036}
2037
2038user_addr_t
2039get_useraddr(void)
2040{
2041 thread_t thr_act = current_thread();
2042
2043 if (thread_is_64bit_addr(thr_act)) {
2044 x86_saved_state64_t *iss64;
2045
2046 iss64 = USER_REGS64(thr_act);
2047
2048 return iss64->isf.rip;
2049 } else {
2050 x86_saved_state32_t *iss32;
2051
2052 iss32 = USER_REGS32(thr_act);
2053
2054 return iss32->eip;
2055 }
2056}
2057
2058/*
2059 * detach and return a kernel stack from a thread
2060 */
2061
2062vm_offset_t
2063machine_stack_detach(thread_t thread)
2064{
2065 vm_offset_t stack;
2066
2067 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
2068 (uintptr_t)thread_tid(thread), thread->priority,
2069 thread->sched_pri, 0,
2070 0);
2071
2072 stack = thread->kernel_stack;
2073 thread->kernel_stack = 0;
2074
2075 return stack;
2076}
2077
2078/*
2079 * attach a kernel stack to a thread and initialize it
2080 */
2081
2082void
2083machine_stack_attach(
2084 thread_t thread,
2085 vm_offset_t stack)
2086{
2087 struct x86_kernel_state *statep;
2088
2089 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
2090 (uintptr_t)thread_tid(thread), thread->priority,
2091 thread->sched_pri, 0, 0);
2092
2093 assert(stack);
2094 thread->kernel_stack = stack;
2095 thread_initialize_kernel_state(thread);
2096
2097 statep = STACK_IKS(stack);
2098
2099 /*
2100 * Reset the state of the thread to resume from a continuation,
2101 * including resetting the stack and frame pointer to avoid backtracers
2102 * seeing this temporary state and attempting to walk the defunct stack.
2103 */
2104 statep->k_rbp = (uint64_t) 0;
2105 statep->k_rip = (uint64_t) Thread_continue;
2106 statep->k_rbx = (uint64_t) thread_continue;
2107 statep->k_rsp = (uint64_t) STACK_IKS(stack);
2108
2109 return;
2110}
2111
2112/*
2113 * move a stack from old to new thread
2114 */
2115
2116void
2117machine_stack_handoff(thread_t old,
2118 thread_t new)
2119{
2120 vm_offset_t stack;
2121
2122 assert(new);
2123 assert(old);
2124
2125 kpc_off_cpu(old);
2126
2127 stack = old->kernel_stack;
2128 if (stack == old->reserved_stack) {
2129 assert(new->reserved_stack);
2130 old->reserved_stack = new->reserved_stack;
2131 new->reserved_stack = stack;
2132 }
2133 old->kernel_stack = 0;
2134 /*
2135 * A full call to machine_stack_attach() is unnecessary
2136 * because the old stack is already initialized.
2137 */
2138 new->kernel_stack = stack;
2139
2140 fpu_switch_context(old, new);
2141
2142 old->machine.specFlags &= ~OnProc;
2143 new->machine.specFlags |= OnProc;
2144
2145 pmap_switch_context(old, new, cpu_number());
2146 act_machine_switch_pcb(old, new);
2147
2148#if HYPERVISOR
2149 ml_hv_cswitch(old, new);
2150#endif
2151
2152 machine_set_current_thread(new);
2153 thread_initialize_kernel_state(new);
2154
2155 return;
2156}
2157
2158
2159
2160
2161struct x86_act_context32 {
2162 x86_saved_state32_t ss;
2163 x86_float_state32_t fs;
2164 x86_debug_state32_t ds;
2165};
2166
2167struct x86_act_context64 {
2168 x86_saved_state64_t ss;
2169 x86_float_state64_t fs;
2170 x86_debug_state64_t ds;
2171};
2172
2173
2174
2175void *
2176act_thread_csave(void)
2177{
2178 kern_return_t kret;
2179 mach_msg_type_number_t val;
2180 thread_t thr_act = current_thread();
2181
2182 if (thread_is_64bit_addr(thr_act)) {
2183 struct x86_act_context64 *ic64;
2184
2185 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
2186
2187 if (ic64 == (struct x86_act_context64 *)NULL) {
2188 return (void *)0;
2189 }
2190
2191 val = x86_SAVED_STATE64_COUNT;
2192 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
2193 (thread_state_t) &ic64->ss, &val);
2194 if (kret != KERN_SUCCESS) {
2195 kfree(ic64, sizeof(struct x86_act_context64));
2196 return (void *)0;
2197 }
2198 val = x86_FLOAT_STATE64_COUNT;
2199 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
2200 (thread_state_t) &ic64->fs, &val);
2201 if (kret != KERN_SUCCESS) {
2202 kfree(ic64, sizeof(struct x86_act_context64));
2203 return (void *)0;
2204 }
2205
2206 val = x86_DEBUG_STATE64_COUNT;
2207 kret = machine_thread_get_state(thr_act,
2208 x86_DEBUG_STATE64,
2209 (thread_state_t)&ic64->ds,
2210 &val);
2211 if (kret != KERN_SUCCESS) {
2212 kfree(ic64, sizeof(struct x86_act_context64));
2213 return (void *)0;
2214 }
2215 return ic64;
2216 } else {
2217 struct x86_act_context32 *ic32;
2218
2219 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
2220
2221 if (ic32 == (struct x86_act_context32 *)NULL) {
2222 return (void *)0;
2223 }
2224
2225 val = x86_SAVED_STATE32_COUNT;
2226 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
2227 (thread_state_t) &ic32->ss, &val);
2228 if (kret != KERN_SUCCESS) {
2229 kfree(ic32, sizeof(struct x86_act_context32));
2230 return (void *)0;
2231 }
2232 val = x86_FLOAT_STATE32_COUNT;
2233 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
2234 (thread_state_t) &ic32->fs, &val);
2235 if (kret != KERN_SUCCESS) {
2236 kfree(ic32, sizeof(struct x86_act_context32));
2237 return (void *)0;
2238 }
2239
2240 val = x86_DEBUG_STATE32_COUNT;
2241 kret = machine_thread_get_state(thr_act,
2242 x86_DEBUG_STATE32,
2243 (thread_state_t)&ic32->ds,
2244 &val);
2245 if (kret != KERN_SUCCESS) {
2246 kfree(ic32, sizeof(struct x86_act_context32));
2247 return (void *)0;
2248 }
2249 return ic32;
2250 }
2251}
2252
2253
2254void
2255act_thread_catt(void *ctx)
2256{
2257 thread_t thr_act = current_thread();
2258 kern_return_t kret;
2259
2260 if (ctx == (void *)NULL) {
2261 return;
2262 }
2263
2264 if (thread_is_64bit_addr(thr_act)) {
2265 struct x86_act_context64 *ic64;
2266
2267 ic64 = (struct x86_act_context64 *)ctx;
2268
2269 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
2270 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
2271 if (kret == KERN_SUCCESS) {
2272 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
2273 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
2274 }
2275 kfree(ic64, sizeof(struct x86_act_context64));
2276 } else {
2277 struct x86_act_context32 *ic32;
2278
2279 ic32 = (struct x86_act_context32 *)ctx;
2280
2281 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
2282 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
2283 if (kret == KERN_SUCCESS) {
2284 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
2285 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
2286 }
2287 kfree(ic32, sizeof(struct x86_act_context32));
2288 }
2289}
2290
2291
2292void
2293act_thread_cfree(__unused void *ctx)
2294{
2295 /* XXX - Unused */
2296}
2297
2298/*
2299 * Duplicate one x86_debug_state32_t to another. The "all" parameter
2300 * chooses whether dr4 and dr5 are copied (they are never meant
2301 * to be installed when we do machine_task_set_state() or
2302 * machine_thread_set_state()).
2303 */
2304void
2305copy_debug_state32(
2306 x86_debug_state32_t *src,
2307 x86_debug_state32_t *target,
2308 boolean_t all)
2309{
2310 if (all) {
2311 target->dr4 = src->dr4;
2312 target->dr5 = src->dr5;
2313 }
2314
2315 target->dr0 = src->dr0;
2316 target->dr1 = src->dr1;
2317 target->dr2 = src->dr2;
2318 target->dr3 = src->dr3;
2319 target->dr6 = src->dr6;
2320 target->dr7 = src->dr7;
2321}
2322
2323/*
2324 * Duplicate one x86_debug_state64_t to another. The "all" parameter
2325 * chooses whether dr4 and dr5 are copied (they are never meant
2326 * to be installed when we do machine_task_set_state() or
2327 * machine_thread_set_state()).
2328 */
2329void
2330copy_debug_state64(
2331 x86_debug_state64_t *src,
2332 x86_debug_state64_t *target,
2333 boolean_t all)
2334{
2335 if (all) {
2336 target->dr4 = src->dr4;
2337 target->dr5 = src->dr5;
2338 }
2339
2340 target->dr0 = src->dr0;
2341 target->dr1 = src->dr1;
2342 target->dr2 = src->dr2;
2343 target->dr3 = src->dr3;
2344 target->dr6 = src->dr6;
2345 target->dr7 = src->dr7;
2346}