]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/pcb.c
xnu-4570.71.2.tar.gz
[apple/xnu.git] / osfmk / i386 / pcb.c
CommitLineData
1c79356b 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
1c79356b
A
57#include <mach_debug.h>
58#include <mach_ldebug.h>
59
60#include <sys/kdebug.h>
61
62#include <mach/kern_return.h>
63#include <mach/thread_status.h>
64#include <mach/vm_param.h>
1c79356b
A
65
66#include <kern/counters.h>
91447636 67#include <kern/kalloc.h>
1c79356b 68#include <kern/mach_param.h>
91447636
A
69#include <kern/processor.h>
70#include <kern/cpu_data.h>
71#include <kern/cpu_number.h>
1c79356b
A
72#include <kern/task.h>
73#include <kern/thread.h>
1c79356b
A
74#include <kern/sched_prim.h>
75#include <kern/misc_protos.h>
76#include <kern/assert.h>
77#include <kern/spl.h>
91447636 78#include <kern/machine.h>
39037602 79#include <kern/kpc.h>
1c79356b
A
80#include <ipc/ipc_port.h>
81#include <vm/vm_kern.h>
91447636 82#include <vm/vm_map.h>
1c79356b 83#include <vm/pmap.h>
91447636 84#include <vm/vm_protos.h>
1c79356b 85
b0d623f7
A
86#include <i386/cpu_data.h>
87#include <i386/cpu_number.h>
1c79356b
A
88#include <i386/eflags.h>
89#include <i386/proc_reg.h>
1c79356b 90#include <i386/fpu.h>
2d21ac55 91#include <i386/misc_protos.h>
6d2010ae 92#include <i386/mp_desc.h>
b0d623f7 93#include <i386/thread.h>
0c530ab8 94#include <i386/machine_routines.h>
b0d623f7 95#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
55e303ae 96
fe8ab488
A
97#if HYPERVISOR
98#include <kern/hv_support.h>
99#endif
100
1c79356b
A
/*
 * Maps state flavor to number of words in the state:
 * Indexed by thread-state flavor constant (designated initializers),
 * so any flavor not listed maps to 0.  Used by the set/get paths to
 * validate user-supplied state counts.
 */
unsigned int _MachineStateCount[] = {
	[x86_THREAD_STATE32]	= x86_THREAD_STATE32_COUNT,
	[x86_THREAD_STATE64]	= x86_THREAD_STATE64_COUNT,
	[x86_THREAD_STATE]	= x86_THREAD_STATE_COUNT,
	[x86_FLOAT_STATE32]	= x86_FLOAT_STATE32_COUNT,
	[x86_FLOAT_STATE64]	= x86_FLOAT_STATE64_COUNT,
	[x86_FLOAT_STATE]	= x86_FLOAT_STATE_COUNT,
	[x86_EXCEPTION_STATE32]	= x86_EXCEPTION_STATE32_COUNT,
	[x86_EXCEPTION_STATE64]	= x86_EXCEPTION_STATE64_COUNT,
	[x86_EXCEPTION_STATE]	= x86_EXCEPTION_STATE_COUNT,
	[x86_DEBUG_STATE32]	= x86_DEBUG_STATE32_COUNT,
	[x86_DEBUG_STATE64]	= x86_DEBUG_STATE64_COUNT,
	[x86_DEBUG_STATE]	= x86_DEBUG_STATE_COUNT,
	[x86_AVX_STATE32]	= x86_AVX_STATE32_COUNT,
	[x86_AVX_STATE64]	= x86_AVX_STATE64_COUNT,
	[x86_AVX_STATE]		= x86_AVX_STATE_COUNT,
#if !defined(RC_HIDE_XNU_J137)
	[x86_AVX512_STATE32]	= x86_AVX512_STATE32_COUNT,
	[x86_AVX512_STATE64]	= x86_AVX512_STATE64_COUNT,
	[x86_AVX512_STATE]	= x86_AVX512_STATE_COUNT,
#endif /* not RC_HIDE_XNU_J137 */
};
126
2d21ac55
A
zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */

/* Forward */

extern void		Thread_continue(void);
/* Load_context() hands the CPU to 'thread' and never returns. */
extern void		Load_context(
				thread_t			thread) __attribute__((noreturn));

/*
 * Width-specific helpers for exporting/importing user-visible thread
 * state; the 32/64-bit variants operate on the matching saved-state
 * layout for the thread.
 */
static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
153
fe8ab488
A
#if HYPERVISOR
/*
 * Notify the hypervisor framework of a context switch: preempt the
 * outgoing thread's hv target (if it has one), then dispatch the
 * incoming thread's hv target (if it has one).
 */
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif
165
0c530ab8 166/*
3e170ce0
A
167 * Don't let an illegal value for the lower 32-bits of dr7 get set.
168 * Specifically, check for undefined settings. Setting these bit patterns
0c530ab8
A
169 * result in undefined behaviour and can lead to an unexpected
170 * TRCTRAP.
171 */
172static boolean_t
3e170ce0 173dr7d_is_valid(uint32_t *dr7d)
0c530ab8
A
174{
175 int i;
176 uint32_t mask1, mask2;
177
178 /*
179 * If the DE bit is set in CR4, R/W0-3 can be pattern
180 * "10B" to indicate i/o reads and write
181 */
182 if (!(get_cr4() & CR4_DE))
183 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
184 i++, mask1 <<= 4, mask2 <<= 4)
3e170ce0 185 if ((*dr7d & mask1) == mask2)
0c530ab8
A
186 return (FALSE);
187
0c530ab8
A
188 /*
189 * if we are doing an instruction execution break (indicated
190 * by r/w[x] being "00B"), then the len[x] must also be set
191 * to "00B"
192 */
193 for (i = 0; i < 4; i++)
3e170ce0
A
194 if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) &&
195 ((((*dr7d >> (18 + i*4))) & 0x3) != 0))
0c530ab8
A
196 return (FALSE);
197
198 /*
199 * Intel docs have these bits fixed.
200 */
3e170ce0
A
201 *dr7d |= 0x1 << 10; /* set bit 10 to 1 */
202 *dr7d &= ~(0x1 << 11); /* set bit 11 to 0 */
203 *dr7d &= ~(0x1 << 12); /* set bit 12 to 0 */
204 *dr7d &= ~(0x1 << 14); /* set bit 14 to 0 */
205 *dr7d &= ~(0x1 << 15); /* set bit 15 to 0 */
0c530ab8
A
206
207 /*
208 * We don't allow anything to set the global breakpoints.
209 */
210
3e170ce0 211 if (*dr7d & 0x2)
0c530ab8
A
212 return (FALSE);
213
3e170ce0 214 if (*dr7d & (0x2<<2))
0c530ab8
A
215 return (FALSE);
216
3e170ce0 217 if (*dr7d & (0x2<<4))
0c530ab8
A
218 return (FALSE);
219
3e170ce0 220 if (*dr7d & (0x2<<6))
0c530ab8
A
221 return (FALSE);
222
223 return (TRUE);
224}
225
0c530ab8
A
226extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
227
b0d623f7
A
228boolean_t
229debug_state_is_valid32(x86_debug_state32_t *ds)
230{
3e170ce0 231 if (!dr7d_is_valid(&ds->dr7))
b0d623f7
A
232 return FALSE;
233
b0d623f7
A
234 return TRUE;
235}
236
237boolean_t
238debug_state_is_valid64(x86_debug_state64_t *ds)
239{
3e170ce0 240 if (!dr7d_is_valid((uint32_t *)&ds->dr7))
b0d623f7
A
241 return FALSE;
242
243 /*
244 * Don't allow the user to set debug addresses above their max
245 * value
246 */
247 if (ds->dr7 & 0x1)
248 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
249 return FALSE;
250
251 if (ds->dr7 & (0x1<<2))
252 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
253 return FALSE;
254
255 if (ds->dr7 & (0x1<<4))
256 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
257 return FALSE;
258
259 if (ds->dr7 & (0x1<<6))
260 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
261 return FALSE;
262
3e170ce0
A
263 /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */
264 ds->dr7 &= 0xffffffffULL;
265
b0d623f7
A
266 return TRUE;
267}
268
269
0c530ab8
A
/*
 * Install a validated 32-bit debug-register state on 'thread'.
 * The per-thread debug save area (pcb->ids) is allocated lazily on
 * first use; a lock-protected re-check handles a racing allocator.
 */
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	/* Reject undefined/forbidden dr7 settings up front. */
	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		/* Allocate outside the lock; zero so unused regs read as 0. */
		new_ids = zalloc(ids_zone);
		bzero(new_ids, sizeof *new_ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			/* Lost the race: discard our allocation. */
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state32(ds, pcb->ids, FALSE);

	return (KERN_SUCCESS);
}
302
/*
 * Install a validated 64-bit debug-register state on 'thread'.
 * Same lazy-allocation/race protocol as set_debug_state32(); when a
 * hypervisor target is attached, it is additionally notified that the
 * debug state became volatile.
 */
static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *new_ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);

	/* Reject undefined dr7 settings / non-canonical addresses. */
	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (pcb->ids == NULL) {
		/* Allocate outside the lock; zero so unused regs read as 0. */
		new_ids = zalloc(ids_zone);
		bzero(new_ids, sizeof *new_ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = new_ids;
			simple_unlock(&pcb->lock);
		} else {
			/* Lost the race: discard our allocation. */
			simple_unlock(&pcb->lock);
			zfree(ids_zone, new_ids);
		}
	}

	copy_debug_state64(ds, pcb->ids, FALSE);

	return (KERN_SUCCESS);
}
341
342static void
343get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
344{
345 x86_debug_state32_t *saved_state;
346
6d2010ae 347 saved_state = thread->machine.ids;
0c530ab8
A
348
349 if (saved_state) {
b0d623f7 350 copy_debug_state32(saved_state, ds, TRUE);
0c530ab8
A
351 } else
352 bzero(ds, sizeof *ds);
353}
354
355static void
356get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
357{
358 x86_debug_state64_t *saved_state;
359
6d2010ae 360 saved_state = (x86_debug_state64_t *)thread->machine.ids;
0c530ab8
A
361
362 if (saved_state) {
b0d623f7 363 copy_debug_state64(saved_state, ds, TRUE);
0c530ab8
A
364 } else
365 bzero(ds, sizeof *ds);
366}
367
1c79356b
A
/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
	/* No machine-dependent pages to reclaim on this architecture. */
}
377
/* No machine-specific adjustment is required on this architecture. */
void
consider_machine_adjust(void)
{
}
1c79356b 382
1c79356b
A
/*
 * Switch to the first thread on a CPU.
 * Marks the thread on-processor, loads its PCB (no outgoing thread),
 * and jumps to it via Load_context() — does not return.
 */
void
machine_load_context(
	thread_t		new)
{
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);	/* NULL: no old thread to save */
	Load_context(new);			/* noreturn */
}
394
39037602
A
/*
 * Switch the address-space mapping from thread 'ot' to thread 'nt'
 * on cpu 'cnum'.  The deactivate/activate pair is skipped only when
 * both threads share the same map AND the new map's pmap does not
 * have page zero accessible.  Must run with interrupts disabled.
 */
static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) {
	pmap_assert(ml_get_interrupts_enabled() == FALSE);
	vm_map_t nmap = nt->map, omap = ot->map;
	if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) {
		PMAP_DEACTIVATE_MAP(omap, ot, cnum);
		PMAP_ACTIVATE_MAP(nmap, nt, cnum);
	}
}
403
1c79356b
A
/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 *
 * Sequence: KPC bookkeeping, FPU context, on-proc flags, stack-depth
 * telemetry, address-space switch, PCB switch, hypervisor notification,
 * then the actual register switch in Switch_context().
 */
thread_t
machine_switch_context(
	thread_t			old,
	thread_continue_t	continuation,
	thread_t			new)
{
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);

#if KPC
	kpc_off_cpu(old);
#endif /* KPC */

	/*
	 * Save FP registers if in use.
	 */
	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	pmap_switch_context(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}
458
b0d623f7
A
/*
 * Prepare this processor for shutdown: suspend VMX if configured,
 * save the current thread's FPU context, switch to the processor's
 * idle-thread address space, then hand off to Shutdown_context().
 */
thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_switch_context(thread, NULL);	/* NULL: no successor thread */
	pmap_switch_context(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}
472
91447636
A
473
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 *
 * Resets the thread's machine state to a pristine baseline: drops any
 * FPU save area and any debug-register save area.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	/* Release any lazily-allocated debug state. */
	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}
0c530ab8
A
501
/*
 * Return the EFLAGS bits forced set in user thread state
 * (the same EFL_USER_SET applied by the set_thread_state paths).
 */
uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}
507
508/*
509 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
510 * for 32bit tasks only
511 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
512 * for 64bit tasks only
513 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
514 * for 32bit tasks only
515 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
516 * for 64bit tasks only
517 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
518 * for either 32bit or 64bit tasks
519 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
520 * for 32bit tasks only
521 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
522 * for 64bit tasks only
523 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
524 * for either 32bit or 64bit tasks
525 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
526 * for 32bit tasks only
527 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
528 * for 64bit tasks only
 529 * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
530 * for either 32bit or 64bit tasks
531 */
532
2d21ac55 533
0c530ab8
A
534static void
535get_exception_state64(thread_t thread, x86_exception_state64_t *es)
536{
537 x86_saved_state64_t *saved_state;
538
539 saved_state = USER_REGS64(thread);
540
541 es->trapno = saved_state->isf.trapno;
6d2010ae 542 es->cpu = saved_state->isf.cpu;
b0d623f7 543 es->err = (typeof(es->err))saved_state->isf.err;
0c530ab8
A
544 es->faultvaddr = saved_state->cr2;
545}
546
547static void
548get_exception_state32(thread_t thread, x86_exception_state32_t *es)
549{
550 x86_saved_state32_t *saved_state;
551
552 saved_state = USER_REGS32(thread);
553
554 es->trapno = saved_state->trapno;
6d2010ae 555 es->cpu = saved_state->cpu;
0c530ab8
A
556 es->err = saved_state->err;
557 es->faultvaddr = saved_state->cr2;
558}
559
560
/*
 * Import a user-supplied 32-bit general-register state into the
 * thread's saved state.  Segment selectors are scrubbed before the
 * validity check: cs/ss/ds/es are forced to the kernel's user
 * selectors and gs is tied to the cthread selector, so only fs (and
 * the forced values) reach valid_user_segment_selectors().
 */
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	/* Never let user-supplied flags set privileged bits. */
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}
623
/*
 * Import a user-supplied 64-bit general-register state into the
 * thread's saved state.  rsp/rip must be canonical user addresses;
 * cs is forced to USER64_CS and rflags is masked so user-supplied
 * flags can never set privileged bits.
 */
static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	/* Reject non-canonical pc/stack addresses. */
	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;	/* always the 64-bit user code selector */
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}
661
662
663
664static void
665get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
666{
667 x86_saved_state32_t *saved_state;
668
6d2010ae 669 pal_register_cache_state(thread, VALID);
b0d623f7 670
0c530ab8
A
671 saved_state = USER_REGS32(thread);
672
673 ts->eax = saved_state->eax;
674 ts->ebx = saved_state->ebx;
675 ts->ecx = saved_state->ecx;
676 ts->edx = saved_state->edx;
677 ts->edi = saved_state->edi;
678 ts->esi = saved_state->esi;
679 ts->ebp = saved_state->ebp;
680 ts->esp = saved_state->uesp;
681 ts->eflags = saved_state->efl;
682 ts->eip = saved_state->eip;
683 ts->cs = saved_state->cs;
684 ts->ss = saved_state->ss;
685 ts->ds = saved_state->ds;
686 ts->es = saved_state->es;
687 ts->fs = saved_state->fs;
688 ts->gs = saved_state->gs;
689}
690
691
692static void
693get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
694{
695 x86_saved_state64_t *saved_state;
696
6d2010ae 697 pal_register_cache_state(thread, VALID);
b0d623f7 698
0c530ab8
A
699 saved_state = USER_REGS64(thread);
700
701 ts->r8 = saved_state->r8;
702 ts->r9 = saved_state->r9;
703 ts->r10 = saved_state->r10;
704 ts->r11 = saved_state->r11;
705 ts->r12 = saved_state->r12;
706 ts->r13 = saved_state->r13;
707 ts->r14 = saved_state->r14;
708 ts->r15 = saved_state->r15;
709 ts->rax = saved_state->rax;
710 ts->rbx = saved_state->rbx;
711 ts->rcx = saved_state->rcx;
712 ts->rdx = saved_state->rdx;
713 ts->rdi = saved_state->rdi;
714 ts->rsi = saved_state->rsi;
715 ts->rbp = saved_state->rbp;
716 ts->rsp = saved_state->isf.rsp;
717 ts->rflags = saved_state->isf.rflags;
718 ts->rip = saved_state->isf.rip;
719 ts->cs = saved_state->isf.cs;
720 ts->fs = saved_state->fs;
721 ts->gs = saved_state->gs;
722}
723
724
1c79356b
A
/*
 * act_machine_set_state:
 *
 *	Set the status of the specified thread.
 *
 * Dispatches on 'flavor' to the matching width-specific importer.
 * Every case validates the caller-supplied count and the thread's
 * 32/64-bit personality before touching any state; the combined
 * flavors (x86_*_STATE) select the sub-flavor from the embedded
 * state header.
 */
kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
					state->ss,
					state->ds,
					state->es,
					state->fs,
					state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		/* Mask user flags: privileged bits can never be set this way. */
		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		/* Mask user flags: privileged bits can never be set this way. */
		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked. Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	case x86_AVX_STATE32:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE32:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	case x86_AVX_STATE64:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE64:
#endif /* not RC_HIDE_XNU_J137 */
	{
		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t       *state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		/* Sub-flavor in the header must match the thread's width. */
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE:
#endif
	{
		x86_avx_state_t       *state;

		if (count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		/* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */
		/* 64-bit flavor? */
		if (state->ash.flavor == (flavor - 1) &&
		    state->ash.count  == _MachineStateCount[flavor - 1] &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       flavor - 1);
		}
		/* 32-bit flavor? */
		if (state->ash.flavor == (flavor - 2) &&
		    state->ash.count  == _MachineStateCount[flavor - 2] &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       flavor - 2);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);

	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t      *state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		/* Sub-flavor in the header must match the thread's width. */
		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		}
		else
			if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			    state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			    !thread_is_64bit(thr_act)) {
				ret = set_debug_state32(thr_act, &state->uds.ds32);
			}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
4452a7af 1051
0c530ab8
A
1052
1053
6601e61a
A
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 *
 * Copies the requested register-state "flavor" of thr_act into the
 * caller-supplied buffer tstate.  On entry *count is the capacity of
 * tstate in natural_t units; on success it is updated to the number
 * of units actually written.  Returns KERN_INVALID_ARGUMENT when the
 * buffer is too small or the flavor does not match the thread's
 * 32/64-bit mode; flavors serviced by the FPU module return whatever
 * fpu_get_fxstate() returns.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{

	switch (flavor) {

	/*
	 * The FLAVOR_LIST pseudo-flavors enumerate which state flavors
	 * this machine supports; newer lists supersede older ones.
	 */
	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

#if !defined(RC_HIDE_XNU_J137)
	case THREAD_STATE_FLAVOR_LIST_10_13:
	{
		if (*count < 6)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;
		tstate[5] = x86_AVX512_STATE;

		*count = 6;
		break;
	}

#endif
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		/* Raw 32-bit save area is only valid for a 32-bit thread. */
		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		/* Then expose only the low 16 bits of the segment selectors. */
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		/* Mask segment selectors to 16 bits, as above. */
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		/* FP/SSE state is owned by the FPU module. */
		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		/* Combined flavor: pick 32/64 sub-flavor from thread mode. */
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE32:
#endif
	{
		/* AVX flavors require an exact count match, not a minimum. */
		if (*count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE64:
#endif
	{
		if (*count != _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
#if !defined(RC_HIDE_XNU_J137)
	case x86_AVX512_STATE:
#endif
	{
		x86_avx_state_t		*state;
		thread_state_t		fstate;

		if (*count < _MachineStateCount[flavor])
			return(KERN_INVALID_ARGUMENT);

		*count = _MachineStateCount[flavor];
		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, *count * sizeof(int));

		/*
		 * Derive the mode-specific sub-flavor arithmetically.
		 * NOTE(review): this relies on the flavor enum ordering,
		 * i.e. the combined flavor == 64-bit variant + 1 ==
		 * 32-bit variant + 2 — confirm against the flavor
		 * definitions if those enums ever change.
		 */
		if (thread_is_64bit(thr_act)) {
			flavor -= 1;	/* 64-bit flavor */
			fstate = (thread_state_t) &state->ufs.as64;
		} else {
			flavor -= 2;	/* 32-bit flavor */
			fstate = (thread_state_t) &state->ufs.as32;
		}
		state->ash.flavor = flavor;
		state->ash.count = _MachineStateCount[flavor];

		return fpu_get_fxstate(thr_act, fstate, flavor);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		/* Combined flavor: header records which layout was filled. */
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}


	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if ( !thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
1478
0c530ab8
A
/*
 * machine_thread_get_kern_state:
 *
 * Return the kernel-mode register state of an interrupted kernel
 * thread.  Only valid for the current thread and only while an
 * interrupt save area (cpu_int_state) exists for this CPU; otherwise
 * returns KERN_FAILURE.  The registers are copied field-by-field out
 * of the saved interrupt frame rather than the thread's user PCB.
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t *state;
		x86_saved_state32_t *saved_state;

		/* Flavor must match the layout of the saved frame. */
		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		/* Segment selectors are 16 bits; mask off any garbage. */
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		/* rsp/rip/rflags/cs live in the interrupt stack frame. */
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		/* Combined flavor: choose layout from the saved frame type. */
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			/* The save area must be one of the two known layouts. */
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}
1646
1647
/*
 * machine_thread_switch_addrmode:
 *
 * Switch a thread between 32-bit and 64-bit address modes by
 * rebuilding its machine-dependent state (save areas and FPU state).
 * Runs with preemption disabled for the entire sequence; if the
 * thread being switched is the current thread, the PCB is reloaded
 * with interrupts off as well.
 */
void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* Adjust FPU state */
	fpu_switch_addrmode(thread, task_has_64BitAddr(thread->task));

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		/* Interrupts off while the live PCB is being swapped. */
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}
1674
0c530ab8
A
1675
1676
1c79356b
A
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	/* Record the active thread in this CPU's per-cpu data. */
	current_cpu_datap()->cpu_active_thread = thread;
}
1686
1c79356b 1687
1c79356b
A
/*
 * Perform machine-dependent per-thread initializations
 *
 * Creates the zones used for per-thread save areas (iss_zone) and
 * lazily-allocated debug-register state (ids_zone), then initializes
 * the FPU module.  Called once during startup.
 */
void
machine_thread_init(void)
{
	/* Zone for per-thread interrupt/syscall save areas. */
	iss_zone = zinit(sizeof(x86_saved_state_t),
			thread_max * sizeof(x86_saved_state_t),
			THREAD_CHUNK * sizeof(x86_saved_state_t),
			"x86_64 saved state");

	/* Zone for per-thread debug-register state (allocated on demand). */
	ids_zone = zinit(sizeof(x86_debug_state64_t),
			 thread_max * sizeof(x86_debug_state64_t),
			 THREAD_CHUNK * sizeof(x86_debug_state64_t),
			 "x86_64 debug state");

	fpu_module_init();
}
1706
1c79356b 1707
91447636
A
1708
1709user_addr_t
1710get_useraddr(void)
1c79356b 1711{
91447636 1712 thread_t thr_act = current_thread();
1c79356b 1713
0c530ab8
A
1714 if (thread_is_64bit(thr_act)) {
1715 x86_saved_state64_t *iss64;
1716
1717 iss64 = USER_REGS64(thr_act);
1718
1719 return(iss64->isf.rip);
1720 } else {
1721 x86_saved_state32_t *iss32;
4452a7af 1722
0c530ab8
A
1723 iss32 = USER_REGS32(thr_act);
1724
1725 return(iss32->eip);
1726 }
1c79356b
A
1727}
1728
1c79356b
A
1729/*
1730 * detach and return a kernel stack from a thread
1731 */
1732
1733vm_offset_t
55e303ae 1734machine_stack_detach(thread_t thread)
1c79356b 1735{
0c530ab8 1736 vm_offset_t stack;
1c79356b 1737
0c530ab8 1738 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
b0d623f7 1739 (uintptr_t)thread_tid(thread), thread->priority,
0c530ab8
A
1740 thread->sched_pri, 0,
1741 0);
1c79356b 1742
0c530ab8
A
1743 stack = thread->kernel_stack;
1744 thread->kernel_stack = 0;
1745
1746 return (stack);
1c79356b
A
1747}
1748
/*
 * attach a kernel stack to a thread and initialize it
 *
 * Makes `stack` the thread's kernel stack and seeds its saved kernel
 * register state so that the next context switch into this thread
 * resumes in the Thread_continue trampoline, which in turn calls the
 * generic thread_continue routine.
 */

void
machine_stack_attach(
	thread_t		thread,
	vm_offset_t		stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;
	thread_initialize_kernel_state(thread);

	/* Saved kernel register state lives at a fixed spot in the stack. */
	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	/* Resume at the assembly trampoline, with the C continuation in rbx. */
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) STACK_IKS(stack);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) STACK_IKS(stack);
#endif

	return;
}
1781
/*
 * move a stack from old to new thread
 *
 * Fast context-switch path: hands the kernel stack of `old` directly
 * to `new` (swapping reserved stacks if `old` was running on its
 * reserved stack), switches FPU context, address space and PCB, and
 * makes `new` the current thread.  The ordering of these steps is
 * significant.
 */

void
machine_stack_handoff(thread_t old,
	      thread_t new)
{
	vm_offset_t     stack;

	assert(new);
	assert(old);

	kpc_off_cpu(old);

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		/* Keep each thread holding exactly one reserved stack. */
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_switch_context(old, new);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	pmap_switch_context(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}
0b4e3aa0 1827
0c530ab8
A
1828
1829
1830
/*
 * Per-thread user context snapshots used by act_thread_csave() /
 * act_thread_catt(): the general-purpose, floating-point and debug
 * register state, in the 32-bit or 64-bit layout matching the thread.
 */
struct x86_act_context32 {
	x86_saved_state32_t ss;		/* general-purpose registers */
	x86_float_state32_t fs;		/* FP/SSE state */
	x86_debug_state32_t ds;		/* debug registers */
};

struct x86_act_context64 {
	x86_saved_state64_t ss;		/* general-purpose registers */
	x86_float_state64_t fs;		/* FP/SSE state */
	x86_debug_state64_t ds;		/* debug registers */
};
1842
0c530ab8
A
1843
1844
0b4e3aa0
A
1845void *
1846act_thread_csave(void)
1847{
2d21ac55 1848 kern_return_t kret;
0c530ab8 1849 mach_msg_type_number_t val;
2d21ac55 1850 thread_t thr_act = current_thread();
0c530ab8 1851
2d21ac55
A
1852 if (thread_is_64bit(thr_act)) {
1853 struct x86_act_context64 *ic64;
0b4e3aa0 1854
2d21ac55 1855 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
0c530ab8
A
1856
1857 if (ic64 == (struct x86_act_context64 *)NULL)
2d21ac55 1858 return((void *)0);
0c530ab8
A
1859
1860 val = x86_SAVED_STATE64_COUNT;
1861 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
2d21ac55 1862 (thread_state_t) &ic64->ss, &val);
0c530ab8 1863 if (kret != KERN_SUCCESS) {
2d21ac55 1864 kfree(ic64, sizeof(struct x86_act_context64));
0c530ab8
A
1865 return((void *)0);
1866 }
1867 val = x86_FLOAT_STATE64_COUNT;
1868 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
2d21ac55 1869 (thread_state_t) &ic64->fs, &val);
0c530ab8 1870 if (kret != KERN_SUCCESS) {
2d21ac55 1871 kfree(ic64, sizeof(struct x86_act_context64));
0c530ab8
A
1872 return((void *)0);
1873 }
0b4e3aa0 1874
0c530ab8
A
1875 val = x86_DEBUG_STATE64_COUNT;
1876 kret = machine_thread_get_state(thr_act,
1877 x86_DEBUG_STATE64,
1878 (thread_state_t)&ic64->ds,
55e303ae 1879 &val);
0b4e3aa0 1880 if (kret != KERN_SUCCESS) {
0c530ab8
A
1881 kfree(ic64, sizeof(struct x86_act_context64));
1882 return((void *)0);
1883 }
1884 return(ic64);
1885
1886 } else {
2d21ac55 1887 struct x86_act_context32 *ic32;
0c530ab8 1888
2d21ac55 1889 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
0c530ab8
A
1890
1891 if (ic32 == (struct x86_act_context32 *)NULL)
2d21ac55 1892 return((void *)0);
0c530ab8
A
1893
1894 val = x86_SAVED_STATE32_COUNT;
1895 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
2d21ac55 1896 (thread_state_t) &ic32->ss, &val);
0c530ab8 1897 if (kret != KERN_SUCCESS) {
2d21ac55 1898 kfree(ic32, sizeof(struct x86_act_context32));
0c530ab8 1899 return((void *)0);
0b4e3aa0 1900 }
0c530ab8
A
1901 val = x86_FLOAT_STATE32_COUNT;
1902 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
2d21ac55 1903 (thread_state_t) &ic32->fs, &val);
0c530ab8 1904 if (kret != KERN_SUCCESS) {
2d21ac55 1905 kfree(ic32, sizeof(struct x86_act_context32));
0c530ab8
A
1906 return((void *)0);
1907 }
1908
1909 val = x86_DEBUG_STATE32_COUNT;
1910 kret = machine_thread_get_state(thr_act,
1911 x86_DEBUG_STATE32,
1912 (thread_state_t)&ic32->ds,
55e303ae 1913 &val);
0b4e3aa0 1914 if (kret != KERN_SUCCESS) {
0c530ab8
A
1915 kfree(ic32, sizeof(struct x86_act_context32));
1916 return((void *)0);
0b4e3aa0 1917 }
0c530ab8
A
1918 return(ic32);
1919 }
0b4e3aa0 1920}
0c530ab8
A
1921
1922
0b4e3aa0
A
1923void
1924act_thread_catt(void *ctx)
1925{
0c530ab8
A
1926 thread_t thr_act = current_thread();
1927 kern_return_t kret;
1928
1929 if (ctx == (void *)NULL)
2d21ac55 1930 return;
0c530ab8
A
1931
1932 if (thread_is_64bit(thr_act)) {
1933 struct x86_act_context64 *ic64;
1934
1935 ic64 = (struct x86_act_context64 *)ctx;
1936
1937 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
1938 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
1939 if (kret == KERN_SUCCESS) {
2d21ac55
A
1940 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
1941 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
0c530ab8
A
1942 }
1943 kfree(ic64, sizeof(struct x86_act_context64));
1944 } else {
1945 struct x86_act_context32 *ic32;
1946
1947 ic32 = (struct x86_act_context32 *)ctx;
1948
1949 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
1950 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
1951 if (kret == KERN_SUCCESS) {
060df5ea 1952 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
0c530ab8 1953 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
0c530ab8
A
1954 }
1955 kfree(ic32, sizeof(struct x86_act_context32));
1956 }
0b4e3aa0
A
1957}
1958
0c530ab8
A
1959
/*
 * act_thread_cfree:
 *
 * Release a context returned by act_thread_csave() without restoring
 * it.  On this architecture act_thread_catt() already frees the
 * context, so there is nothing left to do here.
 */
void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}
b0d623f7
A
1964
1965/*
1966 * Duplicate one x86_debug_state32_t to another. "all" parameter
1967 * chooses whether dr4 and dr5 are copied (they are never meant
1968 * to be installed when we do machine_task_set_state() or
1969 * machine_thread_set_state()).
1970 */
1971void
1972copy_debug_state32(
1973 x86_debug_state32_t *src,
1974 x86_debug_state32_t *target,
1975 boolean_t all)
1976{
1977 if (all) {
1978 target->dr4 = src->dr4;
1979 target->dr5 = src->dr5;
1980 }
1981
1982 target->dr0 = src->dr0;
1983 target->dr1 = src->dr1;
1984 target->dr2 = src->dr2;
1985 target->dr3 = src->dr3;
1986 target->dr6 = src->dr6;
1987 target->dr7 = src->dr7;
1988}
1989
1990/*
1991 * Duplicate one x86_debug_state64_t to another. "all" parameter
1992 * chooses whether dr4 and dr5 are copied (they are never meant
1993 * to be installed when we do machine_task_set_state() or
1994 * machine_thread_set_state()).
1995 */
1996void
1997copy_debug_state64(
1998 x86_debug_state64_t *src,
1999 x86_debug_state64_t *target,
2000 boolean_t all)
2001{
2002 if (all) {
2003 target->dr4 = src->dr4;
2004 target->dr5 = src->dr5;
2005 }
2006
2007 target->dr0 = src->dr0;
2008 target->dr1 = src->dr1;
2009 target->dr2 = src->dr2;
2010 target->dr3 = src->dr3;
2011 target->dr6 = src->dr6;
2012 target->dr7 = src->dr7;
2013}