[apple/xnu.git] / osfmk / i386 / pcb.c (xnu-7195.50.7.100.1)
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_debug.h>
58 #include <mach_ldebug.h>
59
60 #include <sys/kdebug.h>
61
62 #include <mach/kern_return.h>
63 #include <mach/thread_status.h>
64 #include <mach/vm_param.h>
65
66 #include <kern/counters.h>
67 #include <kern/kalloc.h>
68 #include <kern/mach_param.h>
69 #include <kern/processor.h>
70 #include <kern/cpu_data.h>
71 #include <kern/cpu_number.h>
72 #include <kern/task.h>
73 #include <kern/thread.h>
74 #include <kern/sched_prim.h>
75 #include <kern/misc_protos.h>
76 #include <kern/assert.h>
77 #include <kern/spl.h>
78 #include <kern/machine.h>
79 #include <kern/kpc.h>
80 #include <ipc/ipc_port.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_map.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_protos.h>
85
86 #include <i386/cpu_data.h>
87 #include <i386/cpu_number.h>
88 #include <i386/eflags.h>
89 #include <i386/proc_reg.h>
90 #include <i386/fpu.h>
91 #include <i386/misc_protos.h>
92 #include <i386/mp_desc.h>
93 #include <i386/thread.h>
94 #include <i386/machine_routines.h>
95 #include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
96 #include <i386/seg.h>
97
98 #if HYPERVISOR
99 #include <kern/hv_support.h>
100 #endif
101
102 /*
103 * Maps state flavor to number of words in the state:
104 */
105 unsigned int _MachineStateCount[] = {
106 [x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT,
107 [x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT,
108 [x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT,
109 [x86_THREAD_STATE] = x86_THREAD_STATE_COUNT,
110 [x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT,
111 [x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT,
112 [x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT,
113 [x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT,
114 [x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT,
115 [x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT,
116 [x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT,
117 [x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT,
118 [x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT,
119 [x86_AVX_STATE32] = x86_AVX_STATE32_COUNT,
120 [x86_AVX_STATE64] = x86_AVX_STATE64_COUNT,
121 [x86_AVX_STATE] = x86_AVX_STATE_COUNT,
122 [x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT,
123 [x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT,
124 [x86_AVX512_STATE] = x86_AVX512_STATE_COUNT,
125 [x86_PAGEIN_STATE] = x86_PAGEIN_STATE_COUNT
126 };
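/*
 * Illustrative sketch of how the table above is consulted (the same
 * pattern appears throughout machine_thread_set_state() and
 * machine_thread_get_state() below): a caller-supplied count is checked
 * against the flavor's expected size before the buffer is trusted, e.g.
 *
 *	if (count != _MachineStateCount[x86_AVX_STATE64])
 *		return KERN_INVALID_ARGUMENT;
 */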
127
128 ZONE_DECLARE(iss_zone, "x86_64 saved state",
129 sizeof(x86_saved_state_t), ZC_NONE);
130
131 ZONE_DECLARE(ids_zone, "x86_64 debug state",
132 sizeof(x86_debug_state64_t), ZC_NONE);
133
134 /* Forward */
135
136 extern void Thread_continue(void);
137 extern void Load_context(
138 thread_t thread) __attribute__((noreturn));
139
140 static void
141 get_exception_state32(thread_t thread, x86_exception_state32_t *es);
142
143 static void
144 get_exception_state64(thread_t thread, x86_exception_state64_t *es);
145
146 static void
147 get_thread_state32(thread_t thread, x86_thread_state32_t *ts);
148
149 static void
150 get_thread_state64(thread_t thread, void *ts, boolean_t full);
151
152 static int
153 set_thread_state32(thread_t thread, x86_thread_state32_t *ts);
154
155 static int
156 set_thread_state64(thread_t thread, void *ts, boolean_t full);
157
158 #if HYPERVISOR
159 static inline void
160 ml_hv_cswitch(thread_t old, thread_t new)
161 {
162 if (old->hv_thread_target) {
163 hv_callbacks.preempt(old->hv_thread_target);
164 }
165
166 if (new->hv_thread_target) {
167 hv_callbacks.dispatch(new->hv_thread_target);
168 }
169 }
170 #endif
171
172 /*
173 * Don't let an illegal value for the lower 32-bits of dr7 get set.
174 * Specifically, check for undefined settings. Setting these bit patterns
175  * results in undefined behaviour and can lead to an unexpected
176 * TRCTRAP.
177 */
178 static boolean_t
179 dr7d_is_valid(uint32_t *dr7d)
180 {
181 int i;
182 uint32_t mask1, mask2;
183
184 /*
185          * If the DE bit is set in CR4, R/W0-3 can have the pattern
186          * "10B" to indicate I/O reads and writes
187 */
188 if (!(get_cr4() & CR4_DE)) {
189 for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4;
190 i++, mask1 <<= 4, mask2 <<= 4) {
191 if ((*dr7d & mask1) == mask2) {
192 return FALSE;
193 }
194 }
195 }
196
197 /*
198          * If we are doing an instruction-execution break (indicated
199          * by R/W[x] being "00B"), then LEN[x] must also be set
200          * to "00B"
201 */
202 for (i = 0; i < 4; i++) {
203 if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) &&
204 ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) {
205 return FALSE;
206 }
207 }
208
209 /*
210 * Intel docs have these bits fixed.
211 */
212 *dr7d |= 0x1 << 10; /* set bit 10 to 1 */
213 *dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
214 *dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
215 *dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
216 *dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */
217
218 /*
219 * We don't allow anything to set the global breakpoints.
220 */
221
222 if (*dr7d & 0x2) {
223 return FALSE;
224 }
225
226 if (*dr7d & (0x2 << 2)) {
227 return FALSE;
228 }
229
230 if (*dr7d & (0x2 << 4)) {
231 return FALSE;
232 }
233
234 if (*dr7d & (0x2 << 6)) {
235 return FALSE;
236 }
237
238 return TRUE;
239 }
240
241 extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
242
243 boolean_t
244 debug_state_is_valid32(x86_debug_state32_t *ds)
245 {
246 if (!dr7d_is_valid(&ds->dr7)) {
247 return FALSE;
248 }
249
250 return TRUE;
251 }
252
253 boolean_t
254 debug_state_is_valid64(x86_debug_state64_t *ds)
255 {
256 if (!dr7d_is_valid((uint32_t *)&ds->dr7)) {
257 return FALSE;
258 }
259
260 /*
261 * Don't allow the user to set debug addresses above their max
262 * value
263 */
264 if (ds->dr7 & 0x1) {
265 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) {
266 return FALSE;
267 }
268 }
269
270 if (ds->dr7 & (0x1 << 2)) {
271 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) {
272 return FALSE;
273 }
274 }
275
276 if (ds->dr7 & (0x1 << 4)) {
277 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) {
278 return FALSE;
279 }
280 }
281
282 if (ds->dr7 & (0x1 << 6)) {
283 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) {
284 return FALSE;
285 }
286 }
287
288 /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
289 ds->dr7 &= 0xffffffffULL;
290
291 return TRUE;
292 }
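
/*
 * Illustrative user-space sketch (not part of this file): how a debugger
 * might install a 4-byte write watchpoint that passes the dr7d_is_valid()
 * and debug_state_is_valid64() checks above.  The thread port `target' and
 * the watched address `addr' are assumptions for the example; field names
 * use the user-space (double-underscore) spelling of x86_debug_state64_t.
 */
#if 0   /* example only -- not compiled into the kernel */
#include <mach/mach.h>
#include <mach/thread_status.h>

static kern_return_t
example_set_write_watchpoint(thread_act_t target, uint64_t addr)
{
	x86_debug_state64_t ds = { 0 };

	ds.__dr0 = addr;                /* must lie below VM_MAX_PAGE_ADDRESS */
	ds.__dr7 |= 0x1 << 0;           /* L0: local enable for DR0 (G0 must stay clear) */
	ds.__dr7 |= 0x1 << 16;          /* R/W0 = 01B: break on data writes */
	ds.__dr7 |= 0x3 << 18;          /* LEN0 = 11B: 4-byte wide watchpoint */

	/* Routed to set_debug_state64() below via machine_thread_set_state() */
	return thread_set_state(target, x86_DEBUG_STATE64,
	    (thread_state_t)&ds, x86_DEBUG_STATE64_COUNT);
}
#endif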
293
294
295 static kern_return_t
296 set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
297 {
298 x86_debug_state32_t *new_ids;
299 pcb_t pcb;
300
301 pcb = THREAD_TO_PCB(thread);
302
303 if (debug_state_is_valid32(ds) != TRUE) {
304 return KERN_INVALID_ARGUMENT;
305 }
306
307 if (pcb->ids == NULL) {
308 new_ids = zalloc(ids_zone);
309 bzero(new_ids, sizeof *new_ids);
310
311 simple_lock(&pcb->lock, LCK_GRP_NULL);
312 /* make sure it wasn't already alloc()'d elsewhere */
313 if (pcb->ids == NULL) {
314 pcb->ids = new_ids;
315 simple_unlock(&pcb->lock);
316 } else {
317 simple_unlock(&pcb->lock);
318 zfree(ids_zone, new_ids);
319 }
320 }
321
322
323 copy_debug_state32(ds, pcb->ids, FALSE);
324
325 return KERN_SUCCESS;
326 }
327
328 static kern_return_t
329 set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
330 {
331 x86_debug_state64_t *new_ids;
332 pcb_t pcb;
333
334 pcb = THREAD_TO_PCB(thread);
335
336 if (debug_state_is_valid64(ds) != TRUE) {
337 return KERN_INVALID_ARGUMENT;
338 }
339
340 if (pcb->ids == NULL) {
341 new_ids = zalloc(ids_zone);
342 bzero(new_ids, sizeof *new_ids);
343
344 #if HYPERVISOR
345 if (thread->hv_thread_target) {
346 hv_callbacks.volatile_state(thread->hv_thread_target,
347 HV_DEBUG_STATE);
348 }
349 #endif
350
351 simple_lock(&pcb->lock, LCK_GRP_NULL);
352 /* make sure it wasn't already alloc()'d elsewhere */
353 if (pcb->ids == NULL) {
354 pcb->ids = new_ids;
355 simple_unlock(&pcb->lock);
356 } else {
357 simple_unlock(&pcb->lock);
358 zfree(ids_zone, new_ids);
359 }
360 }
361
362 copy_debug_state64(ds, pcb->ids, FALSE);
363
364 return KERN_SUCCESS;
365 }
366
367 static void
368 get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
369 {
370 x86_debug_state32_t *saved_state;
371
372 saved_state = thread->machine.ids;
373
374 if (saved_state) {
375 copy_debug_state32(saved_state, ds, TRUE);
376 } else {
377 bzero(ds, sizeof *ds);
378 }
379 }
380
381 static void
382 get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
383 {
384 x86_debug_state64_t *saved_state;
385
386 saved_state = (x86_debug_state64_t *)thread->machine.ids;
387
388 if (saved_state) {
389 copy_debug_state64(saved_state, ds, TRUE);
390 } else {
391 bzero(ds, sizeof *ds);
392 }
393 }
394
395 /*
396 * consider_machine_collect:
397 *
398 * Try to collect machine-dependent pages
399 */
400 void
401 consider_machine_collect(void)
402 {
403 }
404
405 void
406 consider_machine_adjust(void)
407 {
408 }
409
410 /*
411 * Switch to the first thread on a CPU.
412 */
413 void
414 machine_load_context(
415 thread_t new)
416 {
417 new->machine.specFlags |= OnProc;
418 act_machine_switch_pcb(NULL, new);
419 Load_context(new);
420 }
421
422 static inline void
423 pmap_switch_context(thread_t ot, thread_t nt, int cnum)
424 {
425 pmap_assert(ml_get_interrupts_enabled() == FALSE);
426 vm_map_t nmap = nt->map, omap = ot->map;
427 if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
428 PMAP_DEACTIVATE_MAP(omap, ot, cnum);
429 PMAP_ACTIVATE_MAP(nmap, nt, cnum);
430 }
431 }
432
433 /*
434 * Switch to a new thread.
435  * Save the old thread's kernel state or continuation,
436 * and return it.
437 */
438 thread_t
439 machine_switch_context(
440 thread_t old,
441 thread_continue_t continuation,
442 thread_t new)
443 {
444 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
445
446 #if KPC
447 kpc_off_cpu(old);
448 #endif /* KPC */
449
450 /*
451 * Save FP registers if in use.
452 */
453 fpu_switch_context(old, new);
454
455 old->machine.specFlags &= ~OnProc;
456 new->machine.specFlags |= OnProc;
457
458 /*
459 * Monitor the stack depth and report new max,
460 * not worrying about races.
461 */
462 vm_offset_t depth = current_stack_depth();
463 if (depth > kernel_stack_depth_max) {
464 kernel_stack_depth_max = depth;
465 KERNEL_DEBUG_CONSTANT(
466 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
467 (long) depth, 0, 0, 0, 0);
468 }
469
470 /*
471 * Switch address maps if need be, even if not switching tasks.
472 * (A server activation may be "borrowing" a client map.)
473 */
474 pmap_switch_context(old, new, cpu_number());
475
476 /*
477 * Load the rest of the user state for the new thread
478 */
479 act_machine_switch_pcb(old, new);
480
481 #if HYPERVISOR
482 ml_hv_cswitch(old, new);
483 #endif
484
485 return Switch_context(old, continuation, new);
486 }
487
488 boolean_t
489 machine_thread_on_core(thread_t thread)
490 {
491 return thread->machine.specFlags & OnProc;
492 }
493
494 thread_t
495 machine_processor_shutdown(
496 thread_t thread,
497 void (*doshutdown)(processor_t),
498 processor_t processor)
499 {
500 #if CONFIG_VMX
501 vmx_suspend();
502 #endif
503 fpu_switch_context(thread, NULL);
504 pmap_switch_context(thread, processor->idle_thread, cpu_number());
505 return Shutdown_context(thread, doshutdown, processor);
506 }
507
508
509 /*
510 * This is where registers that are not normally specified by the mach-o
511 * file on an execve would be nullified, perhaps to avoid a covert channel.
512 */
513 kern_return_t
514 machine_thread_state_initialize(
515 thread_t thread)
516 {
517 /*
518 * If there's an fpu save area, free it.
519 * The initialized state will then be lazily faulted-in, if required.
520          * And if we're the target thread, re-arm the no-fpu trap.
521 */
522 if (thread->machine.ifps) {
523 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
524
525 if (thread == current_thread()) {
526 clear_fpu();
527 }
528 }
529
530 if (thread->machine.ids) {
531 zfree(ids_zone, thread->machine.ids);
532 thread->machine.ids = NULL;
533 }
534
535 return KERN_SUCCESS;
536 }
537
538 uint32_t
539 get_eflags_exportmask(void)
540 {
541 return EFL_USER_SET;
542 }
543
544 /*
545 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
546 * for 32bit tasks only
547 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
548 * for 64bit tasks only
549 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
550 * for 32bit tasks only
551 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
552 * for 64bit tasks only
553 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
554 * for either 32bit or 64bit tasks
555 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
556 * for 32bit tasks only
557 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
558 * for 64bit tasks only
559 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
560 * for either 32bit or 64bit tasks
561 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
562 * for 32bit tasks only
563 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
564 * for 64bit tasks only
565  * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
566 * for either 32bit or 64bit tasks
567 */
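
/*
 * Minimal user-space sketch (not part of this file) of how the external
 * flavors above are driven through the Mach thread interfaces.  `target' is
 * assumed to be a send right to a thread in a 64-bit task; user-space
 * headers spell the structure fields with leading double underscores.
 */
#if 0   /* example only -- not compiled into the kernel */
#include <mach/mach.h>
#include <mach/thread_status.h>

static uint64_t
example_read_rip(thread_act_t target)
{
	x86_thread_state64_t ts;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;

	/* Serviced by machine_thread_get_state() later in this file */
	if (thread_get_state(target, x86_THREAD_STATE64,
	    (thread_state_t)&ts, &count) != KERN_SUCCESS) {
		return 0;
	}
	return ts.__rip;
}
#endif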
568
569
570 static void
571 get_exception_state64(thread_t thread, x86_exception_state64_t *es)
572 {
573 x86_saved_state64_t *saved_state;
574
575 saved_state = USER_REGS64(thread);
576
577 es->trapno = saved_state->isf.trapno;
578 es->cpu = saved_state->isf.cpu;
579 es->err = (typeof(es->err))saved_state->isf.err;
580 es->faultvaddr = saved_state->cr2;
581 }
582
583 static void
584 get_exception_state32(thread_t thread, x86_exception_state32_t *es)
585 {
586 x86_saved_state32_t *saved_state;
587
588 saved_state = USER_REGS32(thread);
589
590 es->trapno = saved_state->trapno;
591 es->cpu = saved_state->cpu;
592 es->err = saved_state->err;
593 es->faultvaddr = saved_state->cr2;
594 }
595
596
597 static int
598 set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
599 {
600 x86_saved_state32_t *saved_state;
601
602 pal_register_cache_state(thread, DIRTY);
603
604 saved_state = USER_REGS32(thread);
605
606 /*
607 * Scrub segment selector values:
608 */
609 ts->cs = USER_CS;
610 /*
611 * On a 64 bit kernel, we always override the data segments,
612 * as the actual selector numbers have changed. This also
613 * means that we don't support setting the data segments
614 * manually any more.
615 */
616 ts->ss = USER_DS;
617 ts->ds = USER_DS;
618 ts->es = USER_DS;
619
620         /* Set GS to CTHREAD only if it's been established */
621 ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;
622
623 /* Check segment selectors are safe */
624 if (!valid_user_segment_selectors(ts->cs,
625 ts->ss,
626 ts->ds,
627 ts->es,
628 ts->fs,
629 ts->gs)) {
630 return KERN_INVALID_ARGUMENT;
631 }
632
633 saved_state->eax = ts->eax;
634 saved_state->ebx = ts->ebx;
635 saved_state->ecx = ts->ecx;
636 saved_state->edx = ts->edx;
637 saved_state->edi = ts->edi;
638 saved_state->esi = ts->esi;
639 saved_state->ebp = ts->ebp;
640 saved_state->uesp = ts->esp;
641 saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
642 saved_state->eip = ts->eip;
643 saved_state->cs = ts->cs;
644 saved_state->ss = ts->ss;
645 saved_state->ds = ts->ds;
646 saved_state->es = ts->es;
647 saved_state->fs = ts->fs;
648 saved_state->gs = ts->gs;
649
650 /*
651 * If the trace trap bit is being set,
652 * ensure that the user returns via iret
653 * - which is signaled thusly:
654 */
655 if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) {
656 saved_state->cs = SYSENTER_TF_CS;
657 }
658
659 return KERN_SUCCESS;
660 }
661
662 static int
663 set_thread_state64(thread_t thread, void *state, int full)
664 {
665 x86_thread_state64_t *ts;
666 x86_saved_state64_t *saved_state;
667
668 if (full == TRUE) {
669 ts = &((x86_thread_full_state64_t *)state)->ss64;
670 if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) {
671 return KERN_INVALID_ARGUMENT;
672 }
673 } else {
674 ts = (x86_thread_state64_t *)state;
675 // In this case, ts->cs exists but is ignored, and
676                 // CS is always set to USER64_CS below instead.
677 }
678
679 pal_register_cache_state(thread, DIRTY);
680
681 saved_state = USER_REGS64(thread);
682
683 if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
684 !IS_USERADDR64_CANONICAL(ts->rip)) {
685 return KERN_INVALID_ARGUMENT;
686 }
687
688 saved_state->r8 = ts->r8;
689 saved_state->r9 = ts->r9;
690 saved_state->r10 = ts->r10;
691 saved_state->r11 = ts->r11;
692 saved_state->r12 = ts->r12;
693 saved_state->r13 = ts->r13;
694 saved_state->r14 = ts->r14;
695 saved_state->r15 = ts->r15;
696 saved_state->rax = ts->rax;
697 saved_state->rbx = ts->rbx;
698 saved_state->rcx = ts->rcx;
699 saved_state->rdx = ts->rdx;
700 saved_state->rdi = ts->rdi;
701 saved_state->rsi = ts->rsi;
702 saved_state->rbp = ts->rbp;
703 saved_state->isf.rsp = ts->rsp;
704 saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
705 saved_state->isf.rip = ts->rip;
706
707 if (full == FALSE) {
708 saved_state->isf.cs = USER64_CS;
709 } else {
710 saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs;
711 saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss;
712 saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds;
713 saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es;
714 machine_thread_set_tsd_base(thread,
715 ((x86_thread_full_state64_t *)ts)->gsbase);
716 }
717
718 saved_state->fs = (uint32_t)ts->fs;
719 saved_state->gs = (uint32_t)ts->gs;
720
721 return KERN_SUCCESS;
722 }
723
724
725
726 static void
727 get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
728 {
729 x86_saved_state32_t *saved_state;
730
731 pal_register_cache_state(thread, VALID);
732
733 saved_state = USER_REGS32(thread);
734
735 ts->eax = saved_state->eax;
736 ts->ebx = saved_state->ebx;
737 ts->ecx = saved_state->ecx;
738 ts->edx = saved_state->edx;
739 ts->edi = saved_state->edi;
740 ts->esi = saved_state->esi;
741 ts->ebp = saved_state->ebp;
742 ts->esp = saved_state->uesp;
743 ts->eflags = saved_state->efl;
744 ts->eip = saved_state->eip;
745 ts->cs = saved_state->cs;
746 ts->ss = saved_state->ss;
747 ts->ds = saved_state->ds;
748 ts->es = saved_state->es;
749 ts->fs = saved_state->fs;
750 ts->gs = saved_state->gs;
751 }
752
753
754 static void
755 get_thread_state64(thread_t thread, void *state, boolean_t full)
756 {
757 x86_thread_state64_t *ts;
758 x86_saved_state64_t *saved_state;
759
760 if (full == TRUE) {
761 ts = &((x86_thread_full_state64_t *)state)->ss64;
762 } else {
763 ts = (x86_thread_state64_t *)state;
764 }
765
766 pal_register_cache_state(thread, VALID);
767
768 saved_state = USER_REGS64(thread);
769
770 ts->r8 = saved_state->r8;
771 ts->r9 = saved_state->r9;
772 ts->r10 = saved_state->r10;
773 ts->r11 = saved_state->r11;
774 ts->r12 = saved_state->r12;
775 ts->r13 = saved_state->r13;
776 ts->r14 = saved_state->r14;
777 ts->r15 = saved_state->r15;
778 ts->rax = saved_state->rax;
779 ts->rbx = saved_state->rbx;
780 ts->rcx = saved_state->rcx;
781 ts->rdx = saved_state->rdx;
782 ts->rdi = saved_state->rdi;
783 ts->rsi = saved_state->rsi;
784 ts->rbp = saved_state->rbp;
785 ts->rsp = saved_state->isf.rsp;
786 ts->rflags = saved_state->isf.rflags;
787 ts->rip = saved_state->isf.rip;
788 ts->cs = saved_state->isf.cs;
789
790 if (full == TRUE) {
791 ((x86_thread_full_state64_t *)state)->ds = saved_state->ds;
792 ((x86_thread_full_state64_t *)state)->es = saved_state->es;
793 ((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss;
794 ((x86_thread_full_state64_t *)state)->gsbase =
795 thread->machine.cthread_self;
796 }
797
798 ts->fs = saved_state->fs;
799 ts->gs = saved_state->gs;
800 }
801
802 kern_return_t
803 machine_thread_state_convert_to_user(
804 __unused thread_t thread,
805 __unused thread_flavor_t flavor,
806 __unused thread_state_t tstate,
807 __unused mach_msg_type_number_t *count)
808 {
809 // No conversion to userspace representation on this platform
810 return KERN_SUCCESS;
811 }
812
813 kern_return_t
814 machine_thread_state_convert_from_user(
815 __unused thread_t thread,
816 __unused thread_flavor_t flavor,
817 __unused thread_state_t tstate,
818 __unused mach_msg_type_number_t count)
819 {
820 // No conversion from userspace representation on this platform
821 return KERN_SUCCESS;
822 }
823
824 kern_return_t
825 machine_thread_siguctx_pointer_convert_to_user(
826 __unused thread_t thread,
827 __unused user_addr_t *uctxp)
828 {
829 // No conversion to userspace representation on this platform
830 return KERN_SUCCESS;
831 }
832
833 kern_return_t
834 machine_thread_function_pointers_convert_from_user(
835 __unused thread_t thread,
836 __unused user_addr_t *fptrs,
837 __unused uint32_t count)
838 {
839 // No conversion from userspace representation on this platform
840 return KERN_SUCCESS;
841 }
842
843 /*
844 * act_machine_set_state:
845 *
846 * Set the status of the specified thread.
847 */
848
849 kern_return_t
850 machine_thread_set_state(
851 thread_t thr_act,
852 thread_flavor_t flavor,
853 thread_state_t tstate,
854 mach_msg_type_number_t count)
855 {
856 switch (flavor) {
857 case x86_SAVED_STATE32:
858 {
859 x86_saved_state32_t *state;
860 x86_saved_state32_t *saved_state;
861
862 if (count < x86_SAVED_STATE32_COUNT) {
863 return KERN_INVALID_ARGUMENT;
864 }
865
866 state = (x86_saved_state32_t *) tstate;
867
868 /*
869 * Refuse to allow 64-bit processes to set
870 * 32-bit state.
871 */
872 if (thread_is_64bit_addr(thr_act)) {
873 return KERN_INVALID_ARGUMENT;
874 }
875
876 /* Check segment selectors are safe */
877 if (!valid_user_segment_selectors(state->cs,
878 state->ss,
879 state->ds,
880 state->es,
881 state->fs,
882 state->gs)) {
883 return KERN_INVALID_ARGUMENT;
884 }
885
886 pal_register_cache_state(thr_act, DIRTY);
887
888 saved_state = USER_REGS32(thr_act);
889
890 /*
891 * General registers
892 */
893 saved_state->edi = state->edi;
894 saved_state->esi = state->esi;
895 saved_state->ebp = state->ebp;
896 saved_state->uesp = state->uesp;
897 saved_state->ebx = state->ebx;
898 saved_state->edx = state->edx;
899 saved_state->ecx = state->ecx;
900 saved_state->eax = state->eax;
901 saved_state->eip = state->eip;
902
903 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
904
905 /*
906 * If the trace trap bit is being set,
907 * ensure that the user returns via iret
908 * - which is signaled thusly:
909 */
910 if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) {
911 state->cs = SYSENTER_TF_CS;
912 }
913
914 /*
915 * User setting segment registers.
916 * Code and stack selectors have already been
917 * checked. Others will be reset by 'iret'
918 * if they are not valid.
919 */
920 saved_state->cs = state->cs;
921 saved_state->ss = state->ss;
922 saved_state->ds = state->ds;
923 saved_state->es = state->es;
924 saved_state->fs = state->fs;
925 saved_state->gs = state->gs;
926
927 break;
928 }
929
930 case x86_SAVED_STATE64:
931 {
932 x86_saved_state64_t *state;
933 x86_saved_state64_t *saved_state;
934
935 if (count < x86_SAVED_STATE64_COUNT) {
936 return KERN_INVALID_ARGUMENT;
937 }
938
939 if (!thread_is_64bit_addr(thr_act)) {
940 return KERN_INVALID_ARGUMENT;
941 }
942
943 state = (x86_saved_state64_t *) tstate;
944
945 /* Verify that the supplied code segment selector is
946 * valid. In 64-bit mode, the FS and GS segment overrides
947 * use the FS.base and GS.base MSRs to calculate
948 * base addresses, and the trampolines don't directly
949 * restore the segment registers--hence they are no
950 * longer relevant for validation.
951 */
952 if (!valid_user_code_selector(state->isf.cs)) {
953 return KERN_INVALID_ARGUMENT;
954 }
955
956 /* Check pc and stack are canonical addresses */
957 if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
958 !IS_USERADDR64_CANONICAL(state->isf.rip)) {
959 return KERN_INVALID_ARGUMENT;
960 }
961
962 pal_register_cache_state(thr_act, DIRTY);
963
964 saved_state = USER_REGS64(thr_act);
965
966 /*
967 * General registers
968 */
969 saved_state->r8 = state->r8;
970 saved_state->r9 = state->r9;
971 saved_state->r10 = state->r10;
972 saved_state->r11 = state->r11;
973 saved_state->r12 = state->r12;
974 saved_state->r13 = state->r13;
975 saved_state->r14 = state->r14;
976 saved_state->r15 = state->r15;
977 saved_state->rdi = state->rdi;
978 saved_state->rsi = state->rsi;
979 saved_state->rbp = state->rbp;
980 saved_state->rbx = state->rbx;
981 saved_state->rdx = state->rdx;
982 saved_state->rcx = state->rcx;
983 saved_state->rax = state->rax;
984 saved_state->isf.rsp = state->isf.rsp;
985 saved_state->isf.rip = state->isf.rip;
986
987 saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
988
989 /*
990 * User setting segment registers.
991 * Code and stack selectors have already been
992 * checked. Others will be reset by 'sys'
993 * if they are not valid.
994 */
995 saved_state->isf.cs = state->isf.cs;
996 saved_state->isf.ss = state->isf.ss;
997 saved_state->fs = state->fs;
998 saved_state->gs = state->gs;
999
1000 break;
1001 }
1002
1003 case x86_FLOAT_STATE32:
1004 case x86_AVX_STATE32:
1005 case x86_AVX512_STATE32:
1006 {
1007 if (count != _MachineStateCount[flavor]) {
1008 return KERN_INVALID_ARGUMENT;
1009 }
1010
1011 if (thread_is_64bit_addr(thr_act)) {
1012 return KERN_INVALID_ARGUMENT;
1013 }
1014
1015 return fpu_set_fxstate(thr_act, tstate, flavor);
1016 }
1017
1018 case x86_FLOAT_STATE64:
1019 case x86_AVX_STATE64:
1020 case x86_AVX512_STATE64:
1021 {
1022 if (count != _MachineStateCount[flavor]) {
1023 return KERN_INVALID_ARGUMENT;
1024 }
1025
1026 if (!thread_is_64bit_addr(thr_act)) {
1027 return KERN_INVALID_ARGUMENT;
1028 }
1029
1030 return fpu_set_fxstate(thr_act, tstate, flavor);
1031 }
1032
1033 case x86_FLOAT_STATE:
1034 {
1035 x86_float_state_t *state;
1036
1037 if (count != x86_FLOAT_STATE_COUNT) {
1038 return KERN_INVALID_ARGUMENT;
1039 }
1040
1041 state = (x86_float_state_t *)tstate;
1042 if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
1043 thread_is_64bit_addr(thr_act)) {
1044 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1045 }
1046 if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
1047 !thread_is_64bit_addr(thr_act)) {
1048 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1049 }
1050 return KERN_INVALID_ARGUMENT;
1051 }
1052
1053 case x86_AVX_STATE:
1054 case x86_AVX512_STATE:
1055 {
1056 x86_avx_state_t *state;
1057
1058 if (count != _MachineStateCount[flavor]) {
1059 return KERN_INVALID_ARGUMENT;
1060 }
1061
1062 state = (x86_avx_state_t *)tstate;
1063 /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
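		/* e.g. for flavor == x86_AVX_STATE, (flavor - 1) is x86_AVX_STATE64 and (flavor - 2) is x86_AVX_STATE32 */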
1064 /* 64-bit flavor? */
1065 if (state->ash.flavor == (flavor - 1) &&
1066 state->ash.count == _MachineStateCount[flavor - 1] &&
1067 thread_is_64bit_addr(thr_act)) {
1068 return fpu_set_fxstate(thr_act,
1069 (thread_state_t)&state->ufs.as64,
1070 flavor - 1);
1071 }
1072 /* 32-bit flavor? */
1073 if (state->ash.flavor == (flavor - 2) &&
1074 state->ash.count == _MachineStateCount[flavor - 2] &&
1075 !thread_is_64bit_addr(thr_act)) {
1076 return fpu_set_fxstate(thr_act,
1077 (thread_state_t)&state->ufs.as32,
1078 flavor - 2);
1079 }
1080 return KERN_INVALID_ARGUMENT;
1081 }
1082
1083 case x86_THREAD_STATE32:
1084 {
1085 if (count != x86_THREAD_STATE32_COUNT) {
1086 return KERN_INVALID_ARGUMENT;
1087 }
1088
1089 if (thread_is_64bit_addr(thr_act)) {
1090 return KERN_INVALID_ARGUMENT;
1091 }
1092
1093 return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1094 }
1095
1096 case x86_THREAD_STATE64:
1097 {
1098 if (count != x86_THREAD_STATE64_COUNT) {
1099 return KERN_INVALID_ARGUMENT;
1100 }
1101
1102 if (!thread_is_64bit_addr(thr_act)) {
1103 return KERN_INVALID_ARGUMENT;
1104 }
1105
1106 return set_thread_state64(thr_act, tstate, FALSE);
1107 }
1108
1109 case x86_THREAD_FULL_STATE64:
1110 {
1111 if (count != x86_THREAD_FULL_STATE64_COUNT) {
1112 return KERN_INVALID_ARGUMENT;
1113 }
1114
1115 if (!thread_is_64bit_addr(thr_act)) {
1116 return KERN_INVALID_ARGUMENT;
1117 }
1118
1119 /* If this process does not have a custom LDT, return failure */
1120 if (thr_act->task->i386_ldt == 0) {
1121 return KERN_INVALID_ARGUMENT;
1122 }
1123
1124 return set_thread_state64(thr_act, tstate, TRUE);
1125 }
1126
1127 case x86_THREAD_STATE:
1128 {
1129 x86_thread_state_t *state;
1130
1131 if (count != x86_THREAD_STATE_COUNT) {
1132 return KERN_INVALID_ARGUMENT;
1133 }
1134
1135 state = (x86_thread_state_t *)tstate;
1136
1137 if (state->tsh.flavor == x86_THREAD_STATE64 &&
1138 state->tsh.count == x86_THREAD_STATE64_COUNT &&
1139 thread_is_64bit_addr(thr_act)) {
1140 return set_thread_state64(thr_act, &state->uts.ts64, FALSE);
1141 } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 &&
1142 state->tsh.count == x86_THREAD_FULL_STATE64_COUNT &&
1143 thread_is_64bit_addr(thr_act) && thr_act->task->i386_ldt != 0) {
1144 return set_thread_state64(thr_act, &state->uts.ts64, TRUE);
1145 } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
1146 state->tsh.count == x86_THREAD_STATE32_COUNT &&
1147 !thread_is_64bit_addr(thr_act)) {
1148 return set_thread_state32(thr_act, &state->uts.ts32);
1149 } else {
1150 return KERN_INVALID_ARGUMENT;
1151 }
1152 }
1153 case x86_DEBUG_STATE32:
1154 {
1155 x86_debug_state32_t *state;
1156 kern_return_t ret;
1157
1158 if (thread_is_64bit_addr(thr_act)) {
1159 return KERN_INVALID_ARGUMENT;
1160 }
1161
1162 state = (x86_debug_state32_t *)tstate;
1163
1164 ret = set_debug_state32(thr_act, state);
1165
1166 return ret;
1167 }
1168 case x86_DEBUG_STATE64:
1169 {
1170 x86_debug_state64_t *state;
1171 kern_return_t ret;
1172
1173 if (!thread_is_64bit_addr(thr_act)) {
1174 return KERN_INVALID_ARGUMENT;
1175 }
1176
1177 state = (x86_debug_state64_t *)tstate;
1178
1179 ret = set_debug_state64(thr_act, state);
1180
1181 return ret;
1182 }
1183 case x86_DEBUG_STATE:
1184 {
1185 x86_debug_state_t *state;
1186 kern_return_t ret = KERN_INVALID_ARGUMENT;
1187
1188 if (count != x86_DEBUG_STATE_COUNT) {
1189 return KERN_INVALID_ARGUMENT;
1190 }
1191
1192 state = (x86_debug_state_t *)tstate;
1193 if (state->dsh.flavor == x86_DEBUG_STATE64 &&
1194 state->dsh.count == x86_DEBUG_STATE64_COUNT &&
1195 thread_is_64bit_addr(thr_act)) {
1196 ret = set_debug_state64(thr_act, &state->uds.ds64);
1197 } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
1198 state->dsh.count == x86_DEBUG_STATE32_COUNT &&
1199 !thread_is_64bit_addr(thr_act)) {
1200 ret = set_debug_state32(thr_act, &state->uds.ds32);
1201 }
1202 return ret;
1203 }
1204 default:
1205 return KERN_INVALID_ARGUMENT;
1206 }
1207
1208 return KERN_SUCCESS;
1209 }
1210
1211 mach_vm_address_t
1212 machine_thread_pc(thread_t thr_act)
1213 {
1214 if (thread_is_64bit_addr(thr_act)) {
1215 return (mach_vm_address_t)USER_REGS64(thr_act)->isf.rip;
1216 } else {
1217 return (mach_vm_address_t)USER_REGS32(thr_act)->eip;
1218 }
1219 }
1220
1221 void
1222 machine_thread_reset_pc(thread_t thr_act, mach_vm_address_t pc)
1223 {
1224 pal_register_cache_state(thr_act, DIRTY);
1225
1226 if (thread_is_64bit_addr(thr_act)) {
1227 if (!IS_USERADDR64_CANONICAL(pc)) {
1228 pc = 0;
1229 }
1230 USER_REGS64(thr_act)->isf.rip = (uint64_t)pc;
1231 } else {
1232 USER_REGS32(thr_act)->eip = (uint32_t)pc;
1233 }
1234 }
1235
1236
1237 /*
1238 * thread_getstatus:
1239 *
1240 * Get the status of the specified thread.
1241 */
1242
1243 kern_return_t
1244 machine_thread_get_state(
1245 thread_t thr_act,
1246 thread_flavor_t flavor,
1247 thread_state_t tstate,
1248 mach_msg_type_number_t *count)
1249 {
1250 switch (flavor) {
1251 case THREAD_STATE_FLAVOR_LIST:
1252 {
1253 if (*count < 3) {
1254 return KERN_INVALID_ARGUMENT;
1255 }
1256
1257 tstate[0] = i386_THREAD_STATE;
1258 tstate[1] = i386_FLOAT_STATE;
1259 tstate[2] = i386_EXCEPTION_STATE;
1260
1261 *count = 3;
1262 break;
1263 }
1264
1265 case THREAD_STATE_FLAVOR_LIST_NEW:
1266 {
1267 if (*count < 4) {
1268 return KERN_INVALID_ARGUMENT;
1269 }
1270
1271 tstate[0] = x86_THREAD_STATE;
1272 tstate[1] = x86_FLOAT_STATE;
1273 tstate[2] = x86_EXCEPTION_STATE;
1274 tstate[3] = x86_DEBUG_STATE;
1275
1276 *count = 4;
1277 break;
1278 }
1279
1280 case THREAD_STATE_FLAVOR_LIST_10_9:
1281 {
1282 if (*count < 5) {
1283 return KERN_INVALID_ARGUMENT;
1284 }
1285
1286 tstate[0] = x86_THREAD_STATE;
1287 tstate[1] = x86_FLOAT_STATE;
1288 tstate[2] = x86_EXCEPTION_STATE;
1289 tstate[3] = x86_DEBUG_STATE;
1290 tstate[4] = x86_AVX_STATE;
1291
1292 *count = 5;
1293 break;
1294 }
1295
1296 case THREAD_STATE_FLAVOR_LIST_10_13:
1297 {
1298 if (*count < 6) {
1299 return KERN_INVALID_ARGUMENT;
1300 }
1301
1302 tstate[0] = x86_THREAD_STATE;
1303 tstate[1] = x86_FLOAT_STATE;
1304 tstate[2] = x86_EXCEPTION_STATE;
1305 tstate[3] = x86_DEBUG_STATE;
1306 tstate[4] = x86_AVX_STATE;
1307 tstate[5] = x86_AVX512_STATE;
1308
1309 *count = 6;
1310 break;
1311 }
1312
1313 case THREAD_STATE_FLAVOR_LIST_10_15:
1314 {
1315 if (*count < 7) {
1316 return KERN_INVALID_ARGUMENT;
1317 }
1318
1319 tstate[0] = x86_THREAD_STATE;
1320 tstate[1] = x86_FLOAT_STATE;
1321 tstate[2] = x86_EXCEPTION_STATE;
1322 tstate[3] = x86_DEBUG_STATE;
1323 tstate[4] = x86_AVX_STATE;
1324 tstate[5] = x86_AVX512_STATE;
1325 tstate[6] = x86_PAGEIN_STATE;
1326
1327 *count = 7;
1328 break;
1329 }
1330
1331 case x86_SAVED_STATE32:
1332 {
1333 x86_saved_state32_t *state;
1334 x86_saved_state32_t *saved_state;
1335
1336 if (*count < x86_SAVED_STATE32_COUNT) {
1337 return KERN_INVALID_ARGUMENT;
1338 }
1339
1340 if (thread_is_64bit_addr(thr_act)) {
1341 return KERN_INVALID_ARGUMENT;
1342 }
1343
1344 state = (x86_saved_state32_t *) tstate;
1345 saved_state = USER_REGS32(thr_act);
1346
1347 /*
1348 * First, copy everything:
1349 */
1350 *state = *saved_state;
1351 state->ds = saved_state->ds & 0xffff;
1352 state->es = saved_state->es & 0xffff;
1353 state->fs = saved_state->fs & 0xffff;
1354 state->gs = saved_state->gs & 0xffff;
1355
1356 *count = x86_SAVED_STATE32_COUNT;
1357 break;
1358 }
1359
1360 case x86_SAVED_STATE64:
1361 {
1362 x86_saved_state64_t *state;
1363 x86_saved_state64_t *saved_state;
1364
1365 if (*count < x86_SAVED_STATE64_COUNT) {
1366 return KERN_INVALID_ARGUMENT;
1367 }
1368
1369 if (!thread_is_64bit_addr(thr_act)) {
1370 return KERN_INVALID_ARGUMENT;
1371 }
1372
1373 state = (x86_saved_state64_t *)tstate;
1374 saved_state = USER_REGS64(thr_act);
1375
1376 /*
1377 * First, copy everything:
1378 */
1379 *state = *saved_state;
1380 state->ds = saved_state->ds & 0xffff;
1381 state->es = saved_state->es & 0xffff;
1382 state->fs = saved_state->fs & 0xffff;
1383 state->gs = saved_state->gs & 0xffff;
1384
1385 *count = x86_SAVED_STATE64_COUNT;
1386 break;
1387 }
1388
1389 case x86_FLOAT_STATE32:
1390 {
1391 if (*count < x86_FLOAT_STATE32_COUNT) {
1392 return KERN_INVALID_ARGUMENT;
1393 }
1394
1395 if (thread_is_64bit_addr(thr_act)) {
1396 return KERN_INVALID_ARGUMENT;
1397 }
1398
1399 *count = x86_FLOAT_STATE32_COUNT;
1400
1401 return fpu_get_fxstate(thr_act, tstate, flavor);
1402 }
1403
1404 case x86_FLOAT_STATE64:
1405 {
1406 if (*count < x86_FLOAT_STATE64_COUNT) {
1407 return KERN_INVALID_ARGUMENT;
1408 }
1409
1410 if (!thread_is_64bit_addr(thr_act)) {
1411 return KERN_INVALID_ARGUMENT;
1412 }
1413
1414 *count = x86_FLOAT_STATE64_COUNT;
1415
1416 return fpu_get_fxstate(thr_act, tstate, flavor);
1417 }
1418
1419 case x86_FLOAT_STATE:
1420 {
1421 x86_float_state_t *state;
1422 kern_return_t kret;
1423
1424 if (*count < x86_FLOAT_STATE_COUNT) {
1425 return KERN_INVALID_ARGUMENT;
1426 }
1427
1428 state = (x86_float_state_t *)tstate;
1429
1430 /*
1431 * no need to bzero... currently
1432 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
1433 */
1434 if (thread_is_64bit_addr(thr_act)) {
1435 state->fsh.flavor = x86_FLOAT_STATE64;
1436 state->fsh.count = x86_FLOAT_STATE64_COUNT;
1437
1438 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1439 } else {
1440 state->fsh.flavor = x86_FLOAT_STATE32;
1441 state->fsh.count = x86_FLOAT_STATE32_COUNT;
1442
1443 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1444 }
1445 *count = x86_FLOAT_STATE_COUNT;
1446
1447 return kret;
1448 }
1449
1450 case x86_AVX_STATE32:
1451 case x86_AVX512_STATE32:
1452 {
1453 if (*count != _MachineStateCount[flavor]) {
1454 return KERN_INVALID_ARGUMENT;
1455 }
1456
1457 if (thread_is_64bit_addr(thr_act)) {
1458 return KERN_INVALID_ARGUMENT;
1459 }
1460
1461 *count = _MachineStateCount[flavor];
1462
1463 return fpu_get_fxstate(thr_act, tstate, flavor);
1464 }
1465
1466 case x86_AVX_STATE64:
1467 case x86_AVX512_STATE64:
1468 {
1469 if (*count != _MachineStateCount[flavor]) {
1470 return KERN_INVALID_ARGUMENT;
1471 }
1472
1473 if (!thread_is_64bit_addr(thr_act)) {
1474 return KERN_INVALID_ARGUMENT;
1475 }
1476
1477 *count = _MachineStateCount[flavor];
1478
1479 return fpu_get_fxstate(thr_act, tstate, flavor);
1480 }
1481
1482 case x86_AVX_STATE:
1483 case x86_AVX512_STATE:
1484 {
1485 x86_avx_state_t *state;
1486 thread_state_t fstate;
1487
1488 if (*count < _MachineStateCount[flavor]) {
1489 return KERN_INVALID_ARGUMENT;
1490 }
1491
1492 *count = _MachineStateCount[flavor];
1493 state = (x86_avx_state_t *)tstate;
1494
1495 bzero((char *)state, *count * sizeof(int));
1496
1497 if (thread_is_64bit_addr(thr_act)) {
1498 flavor -= 1; /* 64-bit flavor */
1499 fstate = (thread_state_t) &state->ufs.as64;
1500 } else {
1501 flavor -= 2; /* 32-bit flavor */
1502 fstate = (thread_state_t) &state->ufs.as32;
1503 }
1504 state->ash.flavor = flavor;
1505 state->ash.count = _MachineStateCount[flavor];
1506
1507 return fpu_get_fxstate(thr_act, fstate, flavor);
1508 }
1509
1510 case x86_THREAD_STATE32:
1511 {
1512 if (*count < x86_THREAD_STATE32_COUNT) {
1513 return KERN_INVALID_ARGUMENT;
1514 }
1515
1516 if (thread_is_64bit_addr(thr_act)) {
1517 return KERN_INVALID_ARGUMENT;
1518 }
1519
1520 *count = x86_THREAD_STATE32_COUNT;
1521
1522 get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1523 break;
1524 }
1525
1526 case x86_THREAD_STATE64:
1527 {
1528 if (*count < x86_THREAD_STATE64_COUNT) {
1529 return KERN_INVALID_ARGUMENT;
1530 }
1531
1532 if (!thread_is_64bit_addr(thr_act)) {
1533 return KERN_INVALID_ARGUMENT;
1534 }
1535
1536 *count = x86_THREAD_STATE64_COUNT;
1537
1538 get_thread_state64(thr_act, tstate, FALSE);
1539 break;
1540 }
1541
1542 case x86_THREAD_FULL_STATE64:
1543 {
1544 if (*count < x86_THREAD_FULL_STATE64_COUNT) {
1545 return KERN_INVALID_ARGUMENT;
1546 }
1547
1548 if (!thread_is_64bit_addr(thr_act)) {
1549 return KERN_INVALID_ARGUMENT;
1550 }
1551
1552 /* If this process does not have a custom LDT, return failure */
1553 if (thr_act->task->i386_ldt == 0) {
1554 return KERN_INVALID_ARGUMENT;
1555 }
1556
1557 *count = x86_THREAD_FULL_STATE64_COUNT;
1558
1559 get_thread_state64(thr_act, tstate, TRUE);
1560 break;
1561 }
1562
1563 case x86_THREAD_STATE:
1564 {
1565 x86_thread_state_t *state;
1566
1567 if (*count < x86_THREAD_STATE_COUNT) {
1568 return KERN_INVALID_ARGUMENT;
1569 }
1570
1571 state = (x86_thread_state_t *)tstate;
1572
1573 bzero((char *)state, sizeof(x86_thread_state_t));
1574
1575 if (thread_is_64bit_addr(thr_act)) {
1576 state->tsh.flavor = x86_THREAD_STATE64;
1577 state->tsh.count = x86_THREAD_STATE64_COUNT;
1578
1579 get_thread_state64(thr_act, &state->uts.ts64, FALSE);
1580 } else {
1581 state->tsh.flavor = x86_THREAD_STATE32;
1582 state->tsh.count = x86_THREAD_STATE32_COUNT;
1583
1584 get_thread_state32(thr_act, &state->uts.ts32);
1585 }
1586 *count = x86_THREAD_STATE_COUNT;
1587
1588 break;
1589 }
1590
1591
1592 case x86_EXCEPTION_STATE32:
1593 {
1594 if (*count < x86_EXCEPTION_STATE32_COUNT) {
1595 return KERN_INVALID_ARGUMENT;
1596 }
1597
1598 if (thread_is_64bit_addr(thr_act)) {
1599 return KERN_INVALID_ARGUMENT;
1600 }
1601
1602 *count = x86_EXCEPTION_STATE32_COUNT;
1603
1604 get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
1605 /*
1606 * Suppress the cpu number for binary compatibility
1607 * of this deprecated state.
1608 */
1609 ((x86_exception_state32_t *)tstate)->cpu = 0;
1610 break;
1611 }
1612
1613 case x86_EXCEPTION_STATE64:
1614 {
1615 if (*count < x86_EXCEPTION_STATE64_COUNT) {
1616 return KERN_INVALID_ARGUMENT;
1617 }
1618
1619 if (!thread_is_64bit_addr(thr_act)) {
1620 return KERN_INVALID_ARGUMENT;
1621 }
1622
1623 *count = x86_EXCEPTION_STATE64_COUNT;
1624
1625 get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
1626 /*
1627 * Suppress the cpu number for binary compatibility
1628 * of this deprecated state.
1629 */
1630 ((x86_exception_state64_t *)tstate)->cpu = 0;
1631 break;
1632 }
1633
1634 case x86_EXCEPTION_STATE:
1635 {
1636 x86_exception_state_t *state;
1637
1638 if (*count < x86_EXCEPTION_STATE_COUNT) {
1639 return KERN_INVALID_ARGUMENT;
1640 }
1641
1642 state = (x86_exception_state_t *)tstate;
1643
1644 bzero((char *)state, sizeof(x86_exception_state_t));
1645
1646 if (thread_is_64bit_addr(thr_act)) {
1647 state->esh.flavor = x86_EXCEPTION_STATE64;
1648 state->esh.count = x86_EXCEPTION_STATE64_COUNT;
1649
1650 get_exception_state64(thr_act, &state->ues.es64);
1651 } else {
1652 state->esh.flavor = x86_EXCEPTION_STATE32;
1653 state->esh.count = x86_EXCEPTION_STATE32_COUNT;
1654
1655 get_exception_state32(thr_act, &state->ues.es32);
1656 }
1657 *count = x86_EXCEPTION_STATE_COUNT;
1658
1659 break;
1660 }
1661 case x86_DEBUG_STATE32:
1662 {
1663 if (*count < x86_DEBUG_STATE32_COUNT) {
1664 return KERN_INVALID_ARGUMENT;
1665 }
1666
1667 if (thread_is_64bit_addr(thr_act)) {
1668 return KERN_INVALID_ARGUMENT;
1669 }
1670
1671 get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
1672
1673 *count = x86_DEBUG_STATE32_COUNT;
1674
1675 break;
1676 }
1677 case x86_DEBUG_STATE64:
1678 {
1679 if (*count < x86_DEBUG_STATE64_COUNT) {
1680 return KERN_INVALID_ARGUMENT;
1681 }
1682
1683 if (!thread_is_64bit_addr(thr_act)) {
1684 return KERN_INVALID_ARGUMENT;
1685 }
1686
1687 get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
1688
1689 *count = x86_DEBUG_STATE64_COUNT;
1690
1691 break;
1692 }
1693 case x86_DEBUG_STATE:
1694 {
1695 x86_debug_state_t *state;
1696
1697 if (*count < x86_DEBUG_STATE_COUNT) {
1698 return KERN_INVALID_ARGUMENT;
1699 }
1700
1701 state = (x86_debug_state_t *)tstate;
1702
1703 bzero(state, sizeof *state);
1704
1705 if (thread_is_64bit_addr(thr_act)) {
1706 state->dsh.flavor = x86_DEBUG_STATE64;
1707 state->dsh.count = x86_DEBUG_STATE64_COUNT;
1708
1709 get_debug_state64(thr_act, &state->uds.ds64);
1710 } else {
1711 state->dsh.flavor = x86_DEBUG_STATE32;
1712 state->dsh.count = x86_DEBUG_STATE32_COUNT;
1713
1714 get_debug_state32(thr_act, &state->uds.ds32);
1715 }
1716 *count = x86_DEBUG_STATE_COUNT;
1717 break;
1718 }
1719
1720 case x86_PAGEIN_STATE:
1721 {
1722 if (*count < x86_PAGEIN_STATE_COUNT) {
1723 return KERN_INVALID_ARGUMENT;
1724 }
1725
1726 x86_pagein_state_t *state = (void *)tstate;
1727
1728 state->__pagein_error = thr_act->t_pagein_error;
1729
1730 *count = x86_PAGEIN_STATE_COUNT;
1731 break;
1732 }
1733
1734 case x86_INSTRUCTION_STATE:
1735 {
1736 if (*count < x86_INSTRUCTION_STATE_COUNT) {
1737 return KERN_INVALID_ARGUMENT;
1738 }
1739
1740 x86_instruction_state_t *state = (void *)tstate;
1741 x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state;
1742
1743 if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) {
1744 #if DEVELOPMENT || DEBUG
1745 extern int insnstream_force_cacheline_mismatch;
1746 #endif
1747 size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES)
1748 ? x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes;
1749 if (byte_count > 0) {
1750 bcopy(src_state->insn_bytes, state->insn_bytes, byte_count);
1751 }
1752 state->insn_offset = src_state->insn_offset;
1753 state->insn_stream_valid_bytes = byte_count;
1754 #if DEVELOPMENT || DEBUG
1755 state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch;
1756 insnstream_force_cacheline_mismatch = 0; /* One-shot, reset after use */
1757
1758 if (state->out_of_synch) {
1759 bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0],
1760 x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1761 } else {
1762 bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE);
1763 }
1764 #else
1765 state->out_of_synch = src_state->out_of_synch;
1766 #endif
1767 *count = x86_INSTRUCTION_STATE_COUNT;
1768 } else {
1769 *count = 0;
1770 }
1771 break;
1772 }
1773
1774 case x86_LAST_BRANCH_STATE:
1775 {
1776 boolean_t istate;
1777
1778 if (!last_branch_support_enabled || *count < x86_LAST_BRANCH_STATE_COUNT) {
1779 return KERN_INVALID_ARGUMENT;
1780 }
1781
1782 istate = ml_set_interrupts_enabled(FALSE);
1783 /* If the current thread is asking for its own LBR data, synch the LBRs first */
1784 if (thr_act == current_thread()) {
1785 i386_lbr_synch(thr_act);
1786 }
1787 ml_set_interrupts_enabled(istate);
1788
1789 if (i386_lbr_native_state_to_mach_thread_state(THREAD_TO_PCB(thr_act), (last_branch_state_t *)tstate) < 0) {
1790 *count = 0;
1791 return KERN_INVALID_ARGUMENT;
1792 }
1793
1794 *count = x86_LAST_BRANCH_STATE_COUNT;
1795 break;
1796 }
1797
1798 default:
1799 return KERN_INVALID_ARGUMENT;
1800 }
1801
1802 return KERN_SUCCESS;
1803 }
1804
1805 kern_return_t
1806 machine_thread_get_kern_state(
1807 thread_t thread,
1808 thread_flavor_t flavor,
1809 thread_state_t tstate,
1810 mach_msg_type_number_t *count)
1811 {
1812 x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;
1813
1814 /*
1815 * This works only for an interrupted kernel thread
1816 */
1817 if (thread != current_thread() || int_state == NULL) {
1818 return KERN_FAILURE;
1819 }
1820
1821 switch (flavor) {
1822 case x86_THREAD_STATE32: {
1823 x86_thread_state32_t *state;
1824 x86_saved_state32_t *saved_state;
1825
1826 if (!is_saved_state32(int_state) ||
1827 *count < x86_THREAD_STATE32_COUNT) {
1828 return KERN_INVALID_ARGUMENT;
1829 }
1830
1831 state = (x86_thread_state32_t *) tstate;
1832
1833 saved_state = saved_state32(int_state);
1834 /*
1835 * General registers.
1836 */
1837 state->eax = saved_state->eax;
1838 state->ebx = saved_state->ebx;
1839 state->ecx = saved_state->ecx;
1840 state->edx = saved_state->edx;
1841 state->edi = saved_state->edi;
1842 state->esi = saved_state->esi;
1843 state->ebp = saved_state->ebp;
1844 state->esp = saved_state->uesp;
1845 state->eflags = saved_state->efl;
1846 state->eip = saved_state->eip;
1847 state->cs = saved_state->cs;
1848 state->ss = saved_state->ss;
1849 state->ds = saved_state->ds & 0xffff;
1850 state->es = saved_state->es & 0xffff;
1851 state->fs = saved_state->fs & 0xffff;
1852 state->gs = saved_state->gs & 0xffff;
1853
1854 *count = x86_THREAD_STATE32_COUNT;
1855
1856 return KERN_SUCCESS;
1857 }
1858
1859 case x86_THREAD_STATE64: {
1860 x86_thread_state64_t *state;
1861 x86_saved_state64_t *saved_state;
1862
1863 if (!is_saved_state64(int_state) ||
1864 *count < x86_THREAD_STATE64_COUNT) {
1865 return KERN_INVALID_ARGUMENT;
1866 }
1867
1868 state = (x86_thread_state64_t *) tstate;
1869
1870 saved_state = saved_state64(int_state);
1871 /*
1872 * General registers.
1873 */
1874 state->rax = saved_state->rax;
1875 state->rbx = saved_state->rbx;
1876 state->rcx = saved_state->rcx;
1877 state->rdx = saved_state->rdx;
1878 state->rdi = saved_state->rdi;
1879 state->rsi = saved_state->rsi;
1880 state->rbp = saved_state->rbp;
1881 state->rsp = saved_state->isf.rsp;
1882 state->r8 = saved_state->r8;
1883 state->r9 = saved_state->r9;
1884 state->r10 = saved_state->r10;
1885 state->r11 = saved_state->r11;
1886 state->r12 = saved_state->r12;
1887 state->r13 = saved_state->r13;
1888 state->r14 = saved_state->r14;
1889 state->r15 = saved_state->r15;
1890
1891 state->rip = saved_state->isf.rip;
1892 state->rflags = saved_state->isf.rflags;
1893 state->cs = saved_state->isf.cs;
1894 state->fs = saved_state->fs & 0xffff;
1895 state->gs = saved_state->gs & 0xffff;
1896 *count = x86_THREAD_STATE64_COUNT;
1897
1898 return KERN_SUCCESS;
1899 }
1900
1901 case x86_THREAD_STATE: {
1902 x86_thread_state_t *state = NULL;
1903
1904 if (*count < x86_THREAD_STATE_COUNT) {
1905 return KERN_INVALID_ARGUMENT;
1906 }
1907
1908 state = (x86_thread_state_t *) tstate;
1909
1910 if (is_saved_state32(int_state)) {
1911 x86_saved_state32_t *saved_state = saved_state32(int_state);
1912
1913 state->tsh.flavor = x86_THREAD_STATE32;
1914 state->tsh.count = x86_THREAD_STATE32_COUNT;
1915
1916 /*
1917 * General registers.
1918 */
1919 state->uts.ts32.eax = saved_state->eax;
1920 state->uts.ts32.ebx = saved_state->ebx;
1921 state->uts.ts32.ecx = saved_state->ecx;
1922 state->uts.ts32.edx = saved_state->edx;
1923 state->uts.ts32.edi = saved_state->edi;
1924 state->uts.ts32.esi = saved_state->esi;
1925 state->uts.ts32.ebp = saved_state->ebp;
1926 state->uts.ts32.esp = saved_state->uesp;
1927 state->uts.ts32.eflags = saved_state->efl;
1928 state->uts.ts32.eip = saved_state->eip;
1929 state->uts.ts32.cs = saved_state->cs;
1930 state->uts.ts32.ss = saved_state->ss;
1931 state->uts.ts32.ds = saved_state->ds & 0xffff;
1932 state->uts.ts32.es = saved_state->es & 0xffff;
1933 state->uts.ts32.fs = saved_state->fs & 0xffff;
1934 state->uts.ts32.gs = saved_state->gs & 0xffff;
1935 } else if (is_saved_state64(int_state)) {
1936 x86_saved_state64_t *saved_state = saved_state64(int_state);
1937
1938 state->tsh.flavor = x86_THREAD_STATE64;
1939 state->tsh.count = x86_THREAD_STATE64_COUNT;
1940
1941 /*
1942 * General registers.
1943 */
1944 state->uts.ts64.rax = saved_state->rax;
1945 state->uts.ts64.rbx = saved_state->rbx;
1946 state->uts.ts64.rcx = saved_state->rcx;
1947 state->uts.ts64.rdx = saved_state->rdx;
1948 state->uts.ts64.rdi = saved_state->rdi;
1949 state->uts.ts64.rsi = saved_state->rsi;
1950 state->uts.ts64.rbp = saved_state->rbp;
1951 state->uts.ts64.rsp = saved_state->isf.rsp;
1952 state->uts.ts64.r8 = saved_state->r8;
1953 state->uts.ts64.r9 = saved_state->r9;
1954 state->uts.ts64.r10 = saved_state->r10;
1955 state->uts.ts64.r11 = saved_state->r11;
1956 state->uts.ts64.r12 = saved_state->r12;
1957 state->uts.ts64.r13 = saved_state->r13;
1958 state->uts.ts64.r14 = saved_state->r14;
1959 state->uts.ts64.r15 = saved_state->r15;
1960
1961 state->uts.ts64.rip = saved_state->isf.rip;
1962 state->uts.ts64.rflags = saved_state->isf.rflags;
1963 state->uts.ts64.cs = saved_state->isf.cs;
1964 state->uts.ts64.fs = saved_state->fs & 0xffff;
1965 state->uts.ts64.gs = saved_state->gs & 0xffff;
1966 } else {
1967 panic("unknown thread state");
1968 }
1969
1970 *count = x86_THREAD_STATE_COUNT;
1971 return KERN_SUCCESS;
1972 }
1973 }
1974 return KERN_FAILURE;
1975 }
1976
1977
1978 void
1979 machine_thread_switch_addrmode(thread_t thread)
1980 {
1981 /*
1982 * We don't want to be preempted until we're done
1983 * - particularly if we're switching the current thread
1984 */
1985 disable_preemption();
1986
1987 /*
1988  * Reset the state save areas. As we're resetting, we anticipate no
1989 * memory allocations in this path.
1990 */
1991 machine_thread_create(thread, thread->task);
1992
1993 /* Adjust FPU state */
1994 fpu_switch_addrmode(thread, task_has_64Bit_addr(thread->task));
1995
1996 /* If we're switching ourselves, reset the pcb addresses etc. */
1997 if (thread == current_thread()) {
1998 boolean_t istate = ml_set_interrupts_enabled(FALSE);
1999 act_machine_switch_pcb(NULL, thread);
2000 ml_set_interrupts_enabled(istate);
2001 }
2002 enable_preemption();
2003 }
2004
2005
2006
2007 /*
2008 * This is used to set the current thr_act/thread
2009 * when starting up a new processor
2010 */
2011 void
2012 machine_set_current_thread(thread_t thread)
2013 {
2014 current_cpu_datap()->cpu_active_thread = thread;
2015 }
2016
2017
2018 /*
2019 * Perform machine-dependent per-thread initializations
2020 */
2021 void
2022 machine_thread_init(void)
2023 {
2024 fpu_module_init();
2025 }
2026
2027 /*
2028 * machine_thread_template_init: Initialize machine-specific portion of
2029 * the thread template.
2030 */
2031 void
2032 machine_thread_template_init(thread_t thr_template)
2033 {
2034 assert(fpu_default != UNDEFINED);
2035
2036 THREAD_TO_PCB(thr_template)->xstate = fpu_default;
2037 }
2038
2039 user_addr_t
2040 get_useraddr(void)
2041 {
2042 thread_t thr_act = current_thread();
2043
2044 if (thread_is_64bit_addr(thr_act)) {
2045 x86_saved_state64_t *iss64;
2046
2047 iss64 = USER_REGS64(thr_act);
2048
2049 return iss64->isf.rip;
2050 } else {
2051 x86_saved_state32_t *iss32;
2052
2053 iss32 = USER_REGS32(thr_act);
2054
2055 return iss32->eip;
2056 }
2057 }
2058
2059 /*
2060 * detach and return a kernel stack from a thread
2061 */
2062
2063 vm_offset_t
2064 machine_stack_detach(thread_t thread)
2065 {
2066 vm_offset_t stack;
2067
2068 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
2069 (uintptr_t)thread_tid(thread), thread->priority,
2070 thread->sched_pri, 0,
2071 0);
2072
2073 stack = thread->kernel_stack;
2074 thread->kernel_stack = 0;
2075
2076 return stack;
2077 }
2078
2079 /*
2080 * attach a kernel stack to a thread and initialize it
2081 */
2082
2083 void
2084 machine_stack_attach(
2085 thread_t thread,
2086 vm_offset_t stack)
2087 {
2088 struct x86_kernel_state *statep;
2089
2090 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
2091 (uintptr_t)thread_tid(thread), thread->priority,
2092 thread->sched_pri, 0, 0);
2093
2094 assert(stack);
2095 thread->kernel_stack = stack;
2096 thread_initialize_kernel_state(thread);
2097
2098 statep = STACK_IKS(stack);
2099
2100 /*
2101 * Reset the state of the thread to resume from a continuation,
2102 * including resetting the stack and frame pointer to avoid backtracers
2103 * seeing this temporary state and attempting to walk the defunct stack.
2104 */
2105 statep->k_rbp = (uint64_t) 0;
2106 statep->k_rip = (uint64_t) Thread_continue;
2107 statep->k_rbx = (uint64_t) thread_continue;
2108 statep->k_rsp = (uint64_t) STACK_IKS(stack);
2109
2110 return;
2111 }
2112
2113 /*
2114 * move a stack from old to new thread
2115 */
2116
2117 void
2118 machine_stack_handoff(thread_t old,
2119 thread_t new)
2120 {
2121 vm_offset_t stack;
2122
2123 assert(new);
2124 assert(old);
2125
2126 kpc_off_cpu(old);
2127
2128 stack = old->kernel_stack;
2129 if (stack == old->reserved_stack) {
2130 assert(new->reserved_stack);
2131 old->reserved_stack = new->reserved_stack;
2132 new->reserved_stack = stack;
2133 }
2134 old->kernel_stack = 0;
2135 /*
2136  * A full call to machine_stack_attach() is unnecessary
2137  * because the old stack is already initialized.
2138 */
2139 new->kernel_stack = stack;
2140
2141 fpu_switch_context(old, new);
2142
2143 old->machine.specFlags &= ~OnProc;
2144 new->machine.specFlags |= OnProc;
2145
2146 pmap_switch_context(old, new, cpu_number());
2147 act_machine_switch_pcb(old, new);
2148
2149 #if HYPERVISOR
2150 ml_hv_cswitch(old, new);
2151 #endif
2152
2153 machine_set_current_thread(new);
2154 thread_initialize_kernel_state(new);
2155
2156 return;
2157 }
2158
2159
2160
2161
2162 struct x86_act_context32 {
2163 x86_saved_state32_t ss;
2164 x86_float_state32_t fs;
2165 x86_debug_state32_t ds;
2166 };
2167
2168 struct x86_act_context64 {
2169 x86_saved_state64_t ss;
2170 x86_float_state64_t fs;
2171 x86_debug_state64_t ds;
2172 };
2173
2174
2175
2176 void *
2177 act_thread_csave(void)
2178 {
2179 kern_return_t kret;
2180 mach_msg_type_number_t val;
2181 thread_t thr_act = current_thread();
2182
2183 if (thread_is_64bit_addr(thr_act)) {
2184 struct x86_act_context64 *ic64;
2185
2186 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
2187
2188 if (ic64 == (struct x86_act_context64 *)NULL) {
2189 return (void *)0;
2190 }
2191
2192 val = x86_SAVED_STATE64_COUNT;
2193 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
2194 (thread_state_t) &ic64->ss, &val);
2195 if (kret != KERN_SUCCESS) {
2196 kfree(ic64, sizeof(struct x86_act_context64));
2197 return (void *)0;
2198 }
2199 val = x86_FLOAT_STATE64_COUNT;
2200 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
2201 (thread_state_t) &ic64->fs, &val);
2202 if (kret != KERN_SUCCESS) {
2203 kfree(ic64, sizeof(struct x86_act_context64));
2204 return (void *)0;
2205 }
2206
2207 val = x86_DEBUG_STATE64_COUNT;
2208 kret = machine_thread_get_state(thr_act,
2209 x86_DEBUG_STATE64,
2210 (thread_state_t)&ic64->ds,
2211 &val);
2212 if (kret != KERN_SUCCESS) {
2213 kfree(ic64, sizeof(struct x86_act_context64));
2214 return (void *)0;
2215 }
2216 return ic64;
2217 } else {
2218 struct x86_act_context32 *ic32;
2219
2220 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
2221
2222 if (ic32 == (struct x86_act_context32 *)NULL) {
2223 return (void *)0;
2224 }
2225
2226 val = x86_SAVED_STATE32_COUNT;
2227 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
2228 (thread_state_t) &ic32->ss, &val);
2229 if (kret != KERN_SUCCESS) {
2230 kfree(ic32, sizeof(struct x86_act_context32));
2231 return (void *)0;
2232 }
2233 val = x86_FLOAT_STATE32_COUNT;
2234 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
2235 (thread_state_t) &ic32->fs, &val);
2236 if (kret != KERN_SUCCESS) {
2237 kfree(ic32, sizeof(struct x86_act_context32));
2238 return (void *)0;
2239 }
2240
2241 val = x86_DEBUG_STATE32_COUNT;
2242 kret = machine_thread_get_state(thr_act,
2243 x86_DEBUG_STATE32,
2244 (thread_state_t)&ic32->ds,
2245 &val);
2246 if (kret != KERN_SUCCESS) {
2247 kfree(ic32, sizeof(struct x86_act_context32));
2248 return (void *)0;
2249 }
2250 return ic32;
2251 }
2252 }
2253
2254
2255 void
2256 act_thread_catt(void *ctx)
2257 {
2258 thread_t thr_act = current_thread();
2259 kern_return_t kret;
2260
2261 if (ctx == (void *)NULL) {
2262 return;
2263 }
2264
2265 if (thread_is_64bit_addr(thr_act)) {
2266 struct x86_act_context64 *ic64;
2267
2268 ic64 = (struct x86_act_context64 *)ctx;
2269
2270 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
2271 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
2272 if (kret == KERN_SUCCESS) {
2273 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
2274 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
2275 }
2276 kfree(ic64, sizeof(struct x86_act_context64));
2277 } else {
2278 struct x86_act_context32 *ic32;
2279
2280 ic32 = (struct x86_act_context32 *)ctx;
2281
2282 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
2283 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
2284 if (kret == KERN_SUCCESS) {
2285 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
2286 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
2287 }
2288 kfree(ic32, sizeof(struct x86_act_context32));
2289 }
2290 }
2291
2292
2293 void
2294 act_thread_cfree(__unused void *ctx)
2295 {
2296 /* XXX - Unused */
2297 }
2298
2299 /*
2300 * Duplicate one x86_debug_state32_t to another. "all" parameter
2301 * chooses whether dr4 and dr5 are copied (they are never meant
2302 * to be installed when we do machine_task_set_state() or
2303 * machine_thread_set_state()).
2304 */
2305 void
2306 copy_debug_state32(
2307 x86_debug_state32_t *src,
2308 x86_debug_state32_t *target,
2309 boolean_t all)
2310 {
2311 if (all) {
2312 target->dr4 = src->dr4;
2313 target->dr5 = src->dr5;
2314 }
2315
2316 target->dr0 = src->dr0;
2317 target->dr1 = src->dr1;
2318 target->dr2 = src->dr2;
2319 target->dr3 = src->dr3;
2320 target->dr6 = src->dr6;
2321 target->dr7 = src->dr7;
2322 }
2323
2324 /*
2325 * Duplicate one x86_debug_state64_t to another. "all" parameter
2326 * chooses whether dr4 and dr5 are copied (they are never meant
2327 * to be installed when we do machine_task_set_state() or
2328 * machine_thread_set_state()).
2329 */
2330 void
2331 copy_debug_state64(
2332 x86_debug_state64_t *src,
2333 x86_debug_state64_t *target,
2334 boolean_t all)
2335 {
2336 if (all) {
2337 target->dr4 = src->dr4;
2338 target->dr5 = src->dr5;
2339 }
2340
2341 target->dr0 = src->dr0;
2342 target->dr1 = src->dr1;
2343 target->dr2 = src->dr2;
2344 target->dr3 = src->dr3;
2345 target->dr6 = src->dr6;
2346 target->dr7 = src->dr7;
2347 }