/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32]	= x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64]	= x86_THREAD_STATE64_COUNT,
	[x86_THREAD_STATE]	= x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32]	= x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64]	= x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE]	= x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32]	= x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64]	= x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE]	= x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32]	= x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64]	= x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE]	= x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32]	= x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64]	= x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE]		= x86_AVX_STATE_COUNT,
#if !defined(RC_HIDE_XNU_J137)
	[x86_AVX512_STATE32]	= x86_AVX512_STATE32_COUNT,
	[x86_AVX512_STATE64]	= x86_AVX512_STATE64_COUNT,
	[x86_AVX512_STATE]	= x86_AVX512_STATE_COUNT,
#endif /* not RC_HIDE_XNU_J137 */
};

zone_t		iss_zone;	/* zone for saved_state area */
zone_t		ids_zone;	/* zone for debug_state area */

/* Forward */

extern void		Thread_continue(void);
extern void		Load_context(
				thread_t	thread) __attribute__((noreturn));

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
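
/*
 * Hypervisor support: around a context switch, preempt the outgoing
 * thread's hv target and dispatch the incoming one's, so the plugin
 * (see kern/hv_support.h) can save and restore per-thread vCPU state.
 * (Descriptive note; the callback semantics are inferred from the
 * callback names used below.)
 */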
#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif

/*
 * Don't let an illegal value for the lower 32-bits of dr7 get set.
 * Specifically, check for undefined settings.  Setting these bit
 * patterns results in undefined behaviour and can lead to an
 * unexpected TRCTRAP.
 */
static boolean_t
dr7d_is_valid(uint32_t *dr7d)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and writes
	 */
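	/*
	 * DR7 layout note: for breakpoint n (0..3), the R/W field occupies
	 * bits 16+4n..17+4n and the LEN field bits 18+4n..19+4n, so the
	 * masks below step by 4 bits per breakpoint.  R/W == 10B (an i/o
	 * breakpoint) is defined only when CR4.DE is set.
	 */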
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7d & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7d >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7d |= 0x1 << 10; /* set bit 10 to 1 */
	*dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
	*dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
	*dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
	*dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7d & 0x2)
		return (FALSE);

	if (*dr7d & (0x2<<2))
		return (FALSE);

	if (*dr7d & (0x2<<4))
		return (FALSE);

	if (*dr7d & (0x2<<6))
		return (FALSE);

	return (TRUE);
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7d_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7d_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	/* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
	ds->dr7 &= 0xffffffffULL;

	return TRUE;
}

static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}
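
/*
 * Switch address spaces between threads.  Note that the maps are
 * (de)activated not only when they differ, but also whenever the
 * incoming pmap leaves page zero accessible.  (Interpretive note:
 * the authoritative condition is the test in the code below.)
 */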
static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) {
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
	}
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if KPC
	kpc_off_cpu(old);
#endif /* KPC */

	/*
	 * Save FP registers if in use.
	 */
	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_switch_context(thread, NULL);
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}

/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
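
/*
 * Note on the AVX/AVX512 flavors: they are defined with sequential
 * values (32-bit, 64-bit, then width-generic), so the handlers below
 * can derive the width-specific flavor arithmetically -- e.g. for
 * x86_AVX_STATE, flavor - 1 is the 64-bit flavor and flavor - 2 the
 * 32-bit one, with word counts looked up via _MachineStateCount[].
 */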

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
					state->ss,
					state->ds,
					state->es,
					state->fs,
					state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked. Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	case x86_AVX_STATE32:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE32:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	case x86_AVX_STATE64:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE64:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE:
#endif
	{
		x86_avx_state_t	*state;

		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		/* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
		/* 64-bit flavor? */
		if (state->ash.flavor == (flavor - 1) &&
		    state->ash.count  == _MachineStateCount[flavor - 1] &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       flavor - 1);
		}
		/* 32-bit flavor? */
		if (state->ash.flavor == (flavor - 2) &&
		    state->ash.count  == _MachineStateCount[flavor - 2] &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       flavor - 2);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else
		if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

#if !defined(RC_HIDE_XNU_J137)
	case THREAD_STATE_FLAVOR_LIST_10_13:
	{
		if (*count < 6)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;

		*count = 6;
		break;
	}
#endif

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE32:
#endif
	{
		if (*count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE64:
#endif
	{
		if (*count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE:
#endif
	{
		x86_avx_state_t		*state;
		thread_state_t		fstate;

		if (*count < _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];
		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, *count * sizeof(int));

		if (thread_is_64bit(thr_act)) {
			flavor -= 1;	/* 64-bit flavor */
			fstate = (thread_state_t) &state->ufs.as64;
		} else {
			flavor -= 2;	/* 32-bit flavor */
			fstate = (thread_state_t) &state->ufs.as32;
		}
		state->ash.flavor = flavor;
		state->ash.count  = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, fstate, flavor);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}

	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}

void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* Adjust FPU state */
	fpu_switch_addrmode(thread, task_has_64BitAddr(thread->task));

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}

/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
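	/* zinit() arguments: element size, maximum pool size,
	 * allocation chunk size, and zone name. */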
	iss_zone = zinit(sizeof(x86_saved_state_t),
			thread_max * sizeof(x86_saved_state_t),
			THREAD_CHUNK * sizeof(x86_saved_state_t),
			"x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			thread_max * sizeof(x86_debug_state64_t),
			THREAD_CHUNK * sizeof(x86_debug_state64_t),
			"x86_64 debug state");

	fpu_module_init();
}

user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */
void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;
	thread_initialize_kernel_state(thread);
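
	/*
	 * Seed the saved kernel state below so that the first switch to
	 * this stack "returns" into Thread_continue, which then calls the
	 * continuation routine stashed in k_rbx/k_ebx (thread_continue);
	 * see the Thread_continue trampoline in the context-switch assembly.
	 */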
	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) STACK_IKS(stack);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IKS(stack);
#endif

	return;
}

/*
 * move a stack from old to new thread
 */
void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

	kpc_off_cpu(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}

struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};
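
/*
 * act_thread_csave() snapshots the current thread's general-register,
 * float and debug state into an opaque heap-allocated blob, and
 * act_thread_catt() reapplies such a blob to the current thread.
 * (Interpretive note: callers outside this file use the pair to save
 * and restore machine context around operations such as exec.)
 */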
void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}

void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}

void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}