[apple/xnu.git] / osfmk / i386 / pcb.c (xnu-1699.22.73)
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_rt.h>
58 #include <mach_debug.h>
59 #include <mach_ldebug.h>
60
61 #include <sys/kdebug.h>
62
63 #include <mach/kern_return.h>
64 #include <mach/thread_status.h>
65 #include <mach/vm_param.h>
66
67 #include <kern/counters.h>
68 #include <kern/kalloc.h>
69 #include <kern/mach_param.h>
70 #include <kern/processor.h>
71 #include <kern/cpu_data.h>
72 #include <kern/cpu_number.h>
73 #include <kern/task.h>
74 #include <kern/thread.h>
75 #include <kern/sched_prim.h>
76 #include <kern/misc_protos.h>
77 #include <kern/assert.h>
78 #include <kern/spl.h>
79 #include <kern/machine.h>
80 #include <ipc/ipc_port.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_map.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_protos.h>
85
86 #include <i386/cpu_data.h>
87 #include <i386/cpu_number.h>
88 #include <i386/eflags.h>
89 #include <i386/proc_reg.h>
90 #include <i386/fpu.h>
91 #include <i386/misc_protos.h>
92 #include <i386/mp_desc.h>
93 #include <i386/thread.h>
94 #if defined(__i386__)
95 #include <i386/fpu.h>
96 #endif
97 #include <i386/machine_routines.h>
98 #include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
99
100 #if CONFIG_COUNTERS
101 #include <pmc/pmc.h>
102 #endif /* CONFIG_COUNTERS */
103
104 /*
105 * Maps state flavor to number of words in the state:
106 */
107 unsigned int _MachineStateCount[] = {
108 /* FLAVOR_LIST */
109 0,
110 x86_THREAD_STATE32_COUNT,
111 x86_FLOAT_STATE32_COUNT,
112 x86_EXCEPTION_STATE32_COUNT,
113 x86_THREAD_STATE64_COUNT,
114 x86_FLOAT_STATE64_COUNT,
115 x86_EXCEPTION_STATE64_COUNT,
116 x86_THREAD_STATE_COUNT,
117 x86_FLOAT_STATE_COUNT,
118 x86_EXCEPTION_STATE_COUNT,
119 0,
120 x86_SAVED_STATE32_COUNT,
121 x86_SAVED_STATE64_COUNT,
122 x86_DEBUG_STATE32_COUNT,
123 x86_DEBUG_STATE64_COUNT,
124 x86_DEBUG_STATE_COUNT
125 };
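/*
 * The table above is indexed by thread-state flavor.  Purely as an
 * illustration (the actual consumers live outside this file), a caller
 * could bound-check a user-supplied count against it before copying state:
 *
 *	if (flavor < sizeof(_MachineStateCount)/sizeof(_MachineStateCount[0]) &&
 *	    count == _MachineStateCount[flavor])
 *		... proceed to copy `count` natural_t-sized words ...
 */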
126
127 zone_t iss_zone; /* zone for saved_state area */
128 zone_t ids_zone; /* zone for debug_state area */
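/*
 * Note: these zones are created in machine_thread_init() below via
 * zinit(element_size, max_bytes, alloc_chunk_bytes, name); debug-state
 * records are carved from ids_zone with zalloc() in set_debug_state32/64()
 * and returned with zfree() when a thread or task is torn down.
 */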
129
130 /* Forward */
131
132 extern void Thread_continue(void);
133 extern void Load_context(
134 thread_t thread);
135
136 static void
137 get_exception_state32(thread_t thread, x86_exception_state32_t *es);
138
139 static void
140 get_exception_state64(thread_t thread, x86_exception_state64_t *es);
141
142 static void
143 get_thread_state32(thread_t thread, x86_thread_state32_t *ts);
144
145 static void
146 get_thread_state64(thread_t thread, x86_thread_state64_t *ts);
147
148 static int
149 set_thread_state32(thread_t thread, x86_thread_state32_t *ts);
150
151 static int
152 set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
153
154 #if CONFIG_COUNTERS
155 static inline void
156 machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);
157
158 static inline boolean_t
159 machine_thread_pmc_eligible(thread_t);
160
161 static inline void
162 pmc_swi(thread_t /* old */, thread_t /*new */);
163
164 static inline boolean_t
165 machine_thread_pmc_eligible(thread_t t) {
166 /*
167 * NOTE: Task-level reservations are propagated to child threads via
168 * thread_create_internal. Any mutation of task reservations forces a
169 * recalculation of t_chud (for the pmc flag) for all threads in that task.
170 * Consequently, we can simply check the current thread's flag against
171 * THREAD_PMC_FLAG. If the result is non-zero, we SWI for a PMC switch.
172 */
173 return (t != NULL) ? ((t->t_chud & THREAD_PMC_FLAG) ? TRUE : FALSE) : FALSE;
174 }
175
176 static inline void
177 pmc_swi(thread_t old, thread_t new) {
178 current_cpu_datap()->csw_old_thread = old;
179 current_cpu_datap()->csw_new_thread = new;
180 pal_pmc_swi();
181 }
182
183 static inline void
184 machine_pmc_cswitch(thread_t old, thread_t new) {
185 if (machine_thread_pmc_eligible(old) || machine_thread_pmc_eligible(new)) {
186 pmc_swi(old, new);
187 }
188 }
189
190 void ml_get_csw_threads(thread_t *old, thread_t *new) {
191 *old = current_cpu_datap()->csw_old_thread;
192 *new = current_cpu_datap()->csw_new_thread;
193 }
194
195 #endif /* CONFIG_COUNTERS */
196
197 /*
198 * Don't let an illegal value for dr7 get set. Specifically,
199 * check for undefined settings. Setting these bit patterns
200 * results in undefined behaviour and can lead to an unexpected
201 * TRCTRAP.
202 */
203 static boolean_t
204 dr7_is_valid(uint32_t *dr7)
205 {
206 int i;
207 uint32_t mask1, mask2;
208
209 /*
210 * If the DE bit is set in CR4, R/W0-3 can be pattern
211 * "10B" to indicate i/o reads and write
212 */
213 if (!(get_cr4() & CR4_DE))
214 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
215 i++, mask1 <<= 4, mask2 <<= 4)
216 if ((*dr7 & mask1) == mask2)
217 return (FALSE);
218
219 /*
220 * len0-3 pattern "10B" is ok for len on Merom and newer processors
221 * (it signifies an 8-byte wide region). We use the 64bit capability
222 * of the processor in lieu of the more laborious model/family checks
223 * as all 64-bit capable processors so far support this.
224 * Reject an attempt to use this on 64-bit incapable processors.
225 */
226 if (current_cpu_datap()->cpu_is64bit == FALSE)
227 for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
228 i++, mask1 <<= 4, mask2 <<= 4)
229 if ((*dr7 & mask1) == mask2)
230 return (FALSE);
231
232 /*
233 * if we are doing an instruction execution break (indicated
234 * by r/w[x] being "00B"), then the len[x] must also be set
235 * to "00B"
236 */
237 for (i = 0; i < 4; i++)
238 if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
239 ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
240 return (FALSE);
241
242 /*
243 * Intel docs have these bits fixed.
244 */
245 *dr7 |= 0x1 << 10; /* set bit 10 to 1 */
246 *dr7 &= ~(0x1 << 11); /* set bit 11 to 0 */
247 *dr7 &= ~(0x1 << 12); /* set bit 12 to 0 */
248 *dr7 &= ~(0x1 << 14); /* set bit 14 to 0 */
249 *dr7 &= ~(0x1 << 15); /* set bit 15 to 0 */
250
251 /*
252 * We don't allow anything to set the global breakpoints.
253 */
254
255 if (*dr7 & 0x2)
256 return (FALSE);
257
258 if (*dr7 & (0x2<<2))
259 return (FALSE);
260
261 if (*dr7 & (0x2<<4))
262 return (FALSE);
263
264 if (*dr7 & (0x2<<6))
265 return (FALSE);
266
267 return (TRUE);
268 }
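/*
 * Illustrative DR7 encoding accepted by dr7_is_valid() above (layout per the
 * Intel SDM; a hedged example, not code from this file): to arm a local,
 * 4-byte, write-only watchpoint on the address held in DR0, user code would
 * pass
 *
 *	dr7 = 0x1		(L0: local enable for breakpoint 0)
 *	    | (0x1 << 16)	(R/W0 = 01B: break on data writes)
 *	    | (0x3 << 18)	(LEN0 = 11B: 4-byte wide)
 *	    = 0x000D0001
 *
 * dr7_is_valid() then forces bit 10 on (giving 0x000D0401) and rejects any of
 * the global-enable bits G0-G3 (bits 1, 3, 5 and 7).
 */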
269
270 static inline void
271 set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
272 {
273 __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
274 __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
275 __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
276 __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
277 if (cpu_mode_is64bit())
278 cdp->cpu_dr7 = ds->dr7;
279 }
280
281 extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
282
283 static inline void
284 set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
285 {
286 /*
287 * We need to enter 64-bit mode in order to set the full
288 * width of these registers
289 */
290 set_64bit_debug_regs(ds);
291 cdp->cpu_dr7 = ds->dr7;
292 }
293
294 boolean_t
295 debug_state_is_valid32(x86_debug_state32_t *ds)
296 {
297 if (!dr7_is_valid(&ds->dr7))
298 return FALSE;
299
300 #if defined(__i386__)
301 /*
302 * Only allow local breakpoints and make sure they are not
303 * in the trampoline code.
304 */
305 if (ds->dr7 & 0x1)
306 if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
307 return FALSE;
308
309 if (ds->dr7 & (0x1<<2))
310 if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
311 return FALSE;
312
313 if (ds->dr7 & (0x1<<4))
314 if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
315 return FALSE;
316
317 if (ds->dr7 & (0x1<<6))
318 if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
319 return FALSE;
320 #endif
321
322 return TRUE;
323 }
324
325 boolean_t
326 debug_state_is_valid64(x86_debug_state64_t *ds)
327 {
328 if (!dr7_is_valid((uint32_t *)&ds->dr7))
329 return FALSE;
330
331 /*
332 * Don't allow the user to set debug addresses above their max
333 * value
334 */
335 if (ds->dr7 & 0x1)
336 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
337 return FALSE;
338
339 if (ds->dr7 & (0x1<<2))
340 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
341 return FALSE;
342
343 if (ds->dr7 & (0x1<<4))
344 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
345 return FALSE;
346
347 if (ds->dr7 & (0x1<<6))
348 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
349 return FALSE;
350
351 return TRUE;
352 }
353
354
355 static kern_return_t
356 set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
357 {
358 x86_debug_state32_t *ids;
359 pcb_t pcb;
360
361 pcb = THREAD_TO_PCB(thread);
362 ids = pcb->ids;
363
364 if (debug_state_is_valid32(ds) != TRUE) {
365 return KERN_INVALID_ARGUMENT;
366 }
367
368 if (ids == NULL) {
369 ids = zalloc(ids_zone);
370 bzero(ids, sizeof *ids);
371
372 simple_lock(&pcb->lock);
373 /* make sure it wasn't already alloc()'d elsewhere */
374 if (pcb->ids == NULL) {
375 pcb->ids = ids;
376 simple_unlock(&pcb->lock);
377 } else {
378 simple_unlock(&pcb->lock);
379 zfree(ids_zone, ids);
380 }
381 }
382
383
384 copy_debug_state32(ds, ids, FALSE);
385
386 return (KERN_SUCCESS);
387 }
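/*
 * The allocate-then-recheck sequence in set_debug_state32() above (and in
 * set_debug_state64() below) deliberately calls zalloc() before taking the
 * pcb simple lock, since the allocation may block; if another thread won the
 * race and installed pcb->ids first, the loser simply frees its spare record.
 */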
388
389 static kern_return_t
390 set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
391 {
392 x86_debug_state64_t *ids;
393 pcb_t pcb;
394
395 pcb = THREAD_TO_PCB(thread);
396 ids = pcb->ids;
397
398 if (debug_state_is_valid64(ds) != TRUE) {
399 return KERN_INVALID_ARGUMENT;
400 }
401
402 if (ids == NULL) {
403 ids = zalloc(ids_zone);
404 bzero(ids, sizeof *ids);
405
406 simple_lock(&pcb->lock);
407 /* make sure it wasn't already alloc()'d elsewhere */
408 if (pcb->ids == NULL) {
409 pcb->ids = ids;
410 simple_unlock(&pcb->lock);
411 } else {
412 simple_unlock(&pcb->lock);
413 zfree(ids_zone, ids);
414 }
415 }
416
417 copy_debug_state64(ds, ids, FALSE);
418
419 return (KERN_SUCCESS);
420 }
421
422 static void
423 get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
424 {
425 x86_debug_state32_t *saved_state;
426
427 saved_state = thread->machine.ids;
428
429 if (saved_state) {
430 copy_debug_state32(saved_state, ds, TRUE);
431 } else
432 bzero(ds, sizeof *ds);
433 }
434
435 static void
436 get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
437 {
438 x86_debug_state64_t *saved_state;
439
440 saved_state = (x86_debug_state64_t *)thread->machine.ids;
441
442 if (saved_state) {
443 copy_debug_state64(saved_state, ds, TRUE);
444 } else
445 bzero(ds, sizeof *ds);
446 }
447
448 /*
449 * consider_machine_collect:
450 *
451 * Try to collect machine-dependent pages
452 */
453 void
454 consider_machine_collect(void)
455 {
456 }
457
458 void
459 consider_machine_adjust(void)
460 {
461 }
462
463 /*
464 * Switch to the first thread on a CPU.
465 */
466 void
467 machine_load_context(
468 thread_t new)
469 {
470 #if CONFIG_COUNTERS
471 machine_pmc_cswitch(NULL, new);
472 #endif
473 new->machine.specFlags |= OnProc;
474 act_machine_switch_pcb(NULL, new);
475 Load_context(new);
476 }
477
478 /*
479 * Switch to a new thread.
480 * Save the old thread`s kernel state or continuation,
481 * and return it.
482 */
483 thread_t
484 machine_switch_context(
485 thread_t old,
486 thread_continue_t continuation,
487 thread_t new)
488 {
489 #if MACH_RT
490 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
491 #endif
492 #if CONFIG_COUNTERS
493 machine_pmc_cswitch(old, new);
494 #endif
495 /*
496 * Save FP registers if in use.
497 */
498 fpu_save_context(old);
499
500 old->machine.specFlags &= ~OnProc;
501 new->machine.specFlags |= OnProc;
502
503 /*
504 * Monitor the stack depth and report new max,
505 * not worrying about races.
506 */
507 vm_offset_t depth = current_stack_depth();
508 if (depth > kernel_stack_depth_max) {
509 kernel_stack_depth_max = depth;
510 KERNEL_DEBUG_CONSTANT(
511 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
512 (long) depth, 0, 0, 0, 0);
513 }
514
515 /*
516 * Switch address maps if need be, even if not switching tasks.
517 * (A server activation may be "borrowing" a client map.)
518 */
519 PMAP_SWITCH_CONTEXT(old, new, cpu_number());
520
521 /*
522 * Load the rest of the user state for the new thread
523 */
524 act_machine_switch_pcb(old, new);
525
526 return(Switch_context(old, continuation, new));
527 }
528
529 thread_t
530 machine_processor_shutdown(
531 thread_t thread,
532 void (*doshutdown)(processor_t),
533 processor_t processor)
534 {
535 #if CONFIG_VMX
536 vmx_suspend();
537 #endif
538 fpu_save_context(thread);
539 PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
540 return(Shutdown_context(thread, doshutdown, processor));
541 }
542
543
544 /*
545 * This is where registers that are not normally specified by the mach-o
546 * file on an execve would be nullified, perhaps to avoid a covert channel.
547 */
548 kern_return_t
549 machine_thread_state_initialize(
550 thread_t thread)
551 {
552 /*
553 * If there's an fpu save area, free it.
554 * The initialized state will then be lazily faulted-in, if required.
555 * And if we're the target, re-arm the no-fpu trap.
556 */
557 if (thread->machine.ifps) {
558 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
559
560 if (thread == current_thread())
561 clear_fpu();
562 }
563
564 if (thread->machine.ids) {
565 zfree(ids_zone, thread->machine.ids);
566 thread->machine.ids = NULL;
567 }
568
569 return KERN_SUCCESS;
570 }
571
572 uint32_t
573 get_eflags_exportmask(void)
574 {
575 return EFL_USER_SET;
576 }
577
578 /*
579 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
580 * for 32bit tasks only
581 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
582 * for 64bit tasks only
583 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
584 * for 32bit tasks only
585 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
586 * for 64bit tasks only
587 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
588 * for either 32bit or 64bit tasks
589 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
590 * for 32bit tasks only
591 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
592 * for 64bit tasks only
593 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
594 * for either 32bit or 64bit tasks
595 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
596 * for 32bit tasks only
597 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
598 * for 64bit tasks only
599 * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
600 * for either 32bit or 64bit tasks
601 */
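/*
 * Illustrative use of these flavors from user space (a hedged sketch, not
 * part of this file; names follow the Mach thread_get_state() interface):
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(target_thread, x86_THREAD_STATE64,
 *					    (thread_state_t)&ts, &count);
 *	if (kr == KERN_SUCCESS)
 *		... ts.rip (spelled __rip under __DARWIN_UNIX03) is the pc ...
 *
 * target_thread is an assumed thread port for a 64-bit task; a 32-bit task
 * would use x86_THREAD_STATE32 / x86_thread_state32_t instead.
 */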
602
603
604 static void
605 get_exception_state64(thread_t thread, x86_exception_state64_t *es)
606 {
607 x86_saved_state64_t *saved_state;
608
609 saved_state = USER_REGS64(thread);
610
611 es->trapno = saved_state->isf.trapno;
612 es->cpu = saved_state->isf.cpu;
613 es->err = (typeof(es->err))saved_state->isf.err;
614 es->faultvaddr = saved_state->cr2;
615 }
616
617 static void
618 get_exception_state32(thread_t thread, x86_exception_state32_t *es)
619 {
620 x86_saved_state32_t *saved_state;
621
622 saved_state = USER_REGS32(thread);
623
624 es->trapno = saved_state->trapno;
625 es->cpu = saved_state->cpu;
626 es->err = saved_state->err;
627 es->faultvaddr = saved_state->cr2;
628 }
629
630
631 static int
632 set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
633 {
634 x86_saved_state32_t *saved_state;
635
636 pal_register_cache_state(thread, DIRTY);
637
638 saved_state = USER_REGS32(thread);
639
640 /*
641 * Scrub segment selector values:
642 */
643 ts->cs = USER_CS;
644 #ifdef __i386__
645 if (ts->ss == 0) ts->ss = USER_DS;
646 if (ts->ds == 0) ts->ds = USER_DS;
647 if (ts->es == 0) ts->es = USER_DS;
648 #else /* __x86_64__ */
649 /*
650 * On a 64 bit kernel, we always override the data segments,
651 * as the actual selector numbers have changed. This also
652 * means that we don't support setting the data segments
653 * manually any more.
654 */
655 ts->ss = USER_DS;
656 ts->ds = USER_DS;
657 ts->es = USER_DS;
658 #endif
659
660 /* Check segment selectors are safe */
661 if (!valid_user_segment_selectors(ts->cs,
662 ts->ss,
663 ts->ds,
664 ts->es,
665 ts->fs,
666 ts->gs))
667 return(KERN_INVALID_ARGUMENT);
668
669 saved_state->eax = ts->eax;
670 saved_state->ebx = ts->ebx;
671 saved_state->ecx = ts->ecx;
672 saved_state->edx = ts->edx;
673 saved_state->edi = ts->edi;
674 saved_state->esi = ts->esi;
675 saved_state->ebp = ts->ebp;
676 saved_state->uesp = ts->esp;
677 saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
678 saved_state->eip = ts->eip;
679 saved_state->cs = ts->cs;
680 saved_state->ss = ts->ss;
681 saved_state->ds = ts->ds;
682 saved_state->es = ts->es;
683 saved_state->fs = ts->fs;
684 saved_state->gs = ts->gs;
685
686 /*
687 * If the trace trap bit is being set,
688 * ensure that the user returns via iret
689 * - which is signaled thusly:
690 */
691 if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
692 saved_state->cs = SYSENTER_TF_CS;
693
694 return(KERN_SUCCESS);
695 }
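/*
 * The (eflags & ~EFL_USER_CLEAR) | EFL_USER_SET masking in
 * set_thread_state32() above (and the rflags equivalent in
 * set_thread_state64() below) sanitizes user-supplied flags: bits listed in
 * EFL_USER_CLEAR (privileged or undefined flags such as IOPL) are stripped
 * and bits in EFL_USER_SET are forced on, per the definitions in
 * <i386/eflags.h>.
 */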
696
697 static int
698 set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
699 {
700 x86_saved_state64_t *saved_state;
701
702 pal_register_cache_state(thread, DIRTY);
703
704 saved_state = USER_REGS64(thread);
705
706 if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
707 !IS_USERADDR64_CANONICAL(ts->rip))
708 return(KERN_INVALID_ARGUMENT);
709
710 saved_state->r8 = ts->r8;
711 saved_state->r9 = ts->r9;
712 saved_state->r10 = ts->r10;
713 saved_state->r11 = ts->r11;
714 saved_state->r12 = ts->r12;
715 saved_state->r13 = ts->r13;
716 saved_state->r14 = ts->r14;
717 saved_state->r15 = ts->r15;
718 saved_state->rax = ts->rax;
719 saved_state->rbx = ts->rbx;
720 saved_state->rcx = ts->rcx;
721 saved_state->rdx = ts->rdx;
722 saved_state->rdi = ts->rdi;
723 saved_state->rsi = ts->rsi;
724 saved_state->rbp = ts->rbp;
725 saved_state->isf.rsp = ts->rsp;
726 saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
727 saved_state->isf.rip = ts->rip;
728 saved_state->isf.cs = USER64_CS;
729 saved_state->fs = (uint32_t)ts->fs;
730 saved_state->gs = (uint32_t)ts->gs;
731
732 return(KERN_SUCCESS);
733 }
734
735
736
737 static void
738 get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
739 {
740 x86_saved_state32_t *saved_state;
741
742 pal_register_cache_state(thread, VALID);
743
744 saved_state = USER_REGS32(thread);
745
746 ts->eax = saved_state->eax;
747 ts->ebx = saved_state->ebx;
748 ts->ecx = saved_state->ecx;
749 ts->edx = saved_state->edx;
750 ts->edi = saved_state->edi;
751 ts->esi = saved_state->esi;
752 ts->ebp = saved_state->ebp;
753 ts->esp = saved_state->uesp;
754 ts->eflags = saved_state->efl;
755 ts->eip = saved_state->eip;
756 ts->cs = saved_state->cs;
757 ts->ss = saved_state->ss;
758 ts->ds = saved_state->ds;
759 ts->es = saved_state->es;
760 ts->fs = saved_state->fs;
761 ts->gs = saved_state->gs;
762 }
763
764
765 static void
766 get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
767 {
768 x86_saved_state64_t *saved_state;
769
770 pal_register_cache_state(thread, VALID);
771
772 saved_state = USER_REGS64(thread);
773
774 ts->r8 = saved_state->r8;
775 ts->r9 = saved_state->r9;
776 ts->r10 = saved_state->r10;
777 ts->r11 = saved_state->r11;
778 ts->r12 = saved_state->r12;
779 ts->r13 = saved_state->r13;
780 ts->r14 = saved_state->r14;
781 ts->r15 = saved_state->r15;
782 ts->rax = saved_state->rax;
783 ts->rbx = saved_state->rbx;
784 ts->rcx = saved_state->rcx;
785 ts->rdx = saved_state->rdx;
786 ts->rdi = saved_state->rdi;
787 ts->rsi = saved_state->rsi;
788 ts->rbp = saved_state->rbp;
789 ts->rsp = saved_state->isf.rsp;
790 ts->rflags = saved_state->isf.rflags;
791 ts->rip = saved_state->isf.rip;
792 ts->cs = saved_state->isf.cs;
793 ts->fs = saved_state->fs;
794 ts->gs = saved_state->gs;
795 }
796
797
798 /*
799 * act_machine_set_state:
800 *
801 * Set the status of the specified thread.
802 */
803
804 kern_return_t
805 machine_thread_set_state(
806 thread_t thr_act,
807 thread_flavor_t flavor,
808 thread_state_t tstate,
809 mach_msg_type_number_t count)
810 {
811 switch (flavor) {
812 case x86_SAVED_STATE32:
813 {
814 x86_saved_state32_t *state;
815 x86_saved_state32_t *saved_state;
816
817 if (count < x86_SAVED_STATE32_COUNT)
818 return(KERN_INVALID_ARGUMENT);
819
820 if (thread_is_64bit(thr_act))
821 return(KERN_INVALID_ARGUMENT);
822
823 state = (x86_saved_state32_t *) tstate;
824
825 /* Check segment selectors are safe */
826 if (!valid_user_segment_selectors(state->cs,
827 state->ss,
828 state->ds,
829 state->es,
830 state->fs,
831 state->gs))
832 return KERN_INVALID_ARGUMENT;
833
834 pal_register_cache_state(thr_act, DIRTY);
835
836 saved_state = USER_REGS32(thr_act);
837
838 /*
839 * General registers
840 */
841 saved_state->edi = state->edi;
842 saved_state->esi = state->esi;
843 saved_state->ebp = state->ebp;
844 saved_state->uesp = state->uesp;
845 saved_state->ebx = state->ebx;
846 saved_state->edx = state->edx;
847 saved_state->ecx = state->ecx;
848 saved_state->eax = state->eax;
849 saved_state->eip = state->eip;
850
851 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
852
853 /*
854 * If the trace trap bit is being set,
855 * ensure that the user returns via iret
856 * - which is signaled thusly:
857 */
858 if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
859 state->cs = SYSENTER_TF_CS;
860
861 /*
862 * User setting segment registers.
863 * Code and stack selectors have already been
864 * checked. Others will be reset by 'iret'
865 * if they are not valid.
866 */
867 saved_state->cs = state->cs;
868 saved_state->ss = state->ss;
869 saved_state->ds = state->ds;
870 saved_state->es = state->es;
871 saved_state->fs = state->fs;
872 saved_state->gs = state->gs;
873
874 break;
875 }
876
877 case x86_SAVED_STATE64:
878 {
879 x86_saved_state64_t *state;
880 x86_saved_state64_t *saved_state;
881
882 if (count < x86_SAVED_STATE64_COUNT)
883 return(KERN_INVALID_ARGUMENT);
884
885 if (!thread_is_64bit(thr_act))
886 return(KERN_INVALID_ARGUMENT);
887
888 state = (x86_saved_state64_t *) tstate;
889
890 /* Verify that the supplied code segment selector is
891 * valid. In 64-bit mode, the FS and GS segment overrides
892 * use the FS.base and GS.base MSRs to calculate
893 * base addresses, and the trampolines don't directly
894 * restore the segment registers--hence they are no
895 * longer relevant for validation.
896 */
897 if (!valid_user_code_selector(state->isf.cs))
898 return KERN_INVALID_ARGUMENT;
899
900 /* Check pc and stack are canonical addresses */
901 if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
902 !IS_USERADDR64_CANONICAL(state->isf.rip))
903 return KERN_INVALID_ARGUMENT;
904
905 pal_register_cache_state(thr_act, DIRTY);
906
907 saved_state = USER_REGS64(thr_act);
908
909 /*
910 * General registers
911 */
912 saved_state->r8 = state->r8;
913 saved_state->r9 = state->r9;
914 saved_state->r10 = state->r10;
915 saved_state->r11 = state->r11;
916 saved_state->r12 = state->r12;
917 saved_state->r13 = state->r13;
918 saved_state->r14 = state->r14;
919 saved_state->r15 = state->r15;
920 saved_state->rdi = state->rdi;
921 saved_state->rsi = state->rsi;
922 saved_state->rbp = state->rbp;
923 saved_state->rbx = state->rbx;
924 saved_state->rdx = state->rdx;
925 saved_state->rcx = state->rcx;
926 saved_state->rax = state->rax;
927 saved_state->isf.rsp = state->isf.rsp;
928 saved_state->isf.rip = state->isf.rip;
929
930 saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
931
932 /*
933 * User setting segment registers.
934 * Code and stack selectors have already been
935 * checked. Others will be reset by 'sysret'
936 * if they are not valid.
937 */
938 saved_state->isf.cs = state->isf.cs;
939 saved_state->isf.ss = state->isf.ss;
940 saved_state->fs = state->fs;
941 saved_state->gs = state->gs;
942
943 break;
944 }
945
946 case x86_FLOAT_STATE32:
947 {
948 if (count != x86_FLOAT_STATE32_COUNT)
949 return(KERN_INVALID_ARGUMENT);
950
951 if (thread_is_64bit(thr_act))
952 return(KERN_INVALID_ARGUMENT);
953
954 return fpu_set_fxstate(thr_act, tstate, flavor);
955 }
956
957 case x86_FLOAT_STATE64:
958 {
959 if (count != x86_FLOAT_STATE64_COUNT)
960 return(KERN_INVALID_ARGUMENT);
961
962 if ( !thread_is_64bit(thr_act))
963 return(KERN_INVALID_ARGUMENT);
964
965 return fpu_set_fxstate(thr_act, tstate, flavor);
966 }
967
968 case x86_FLOAT_STATE:
969 {
970 x86_float_state_t *state;
971
972 if (count != x86_FLOAT_STATE_COUNT)
973 return(KERN_INVALID_ARGUMENT);
974
975 state = (x86_float_state_t *)tstate;
976 if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
977 thread_is_64bit(thr_act)) {
978 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
979 }
980 if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
981 !thread_is_64bit(thr_act)) {
982 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
983 }
984 return(KERN_INVALID_ARGUMENT);
985 }
986
987 case x86_AVX_STATE32:
988 {
989 if (count != x86_AVX_STATE32_COUNT)
990 return(KERN_INVALID_ARGUMENT);
991
992 if (thread_is_64bit(thr_act))
993 return(KERN_INVALID_ARGUMENT);
994
995 return fpu_set_fxstate(thr_act, tstate, flavor);
996 }
997
998 case x86_AVX_STATE64:
999 {
1000 if (count != x86_AVX_STATE64_COUNT)
1001 return(KERN_INVALID_ARGUMENT);
1002
1003 if (!thread_is_64bit(thr_act))
1004 return(KERN_INVALID_ARGUMENT);
1005
1006 return fpu_set_fxstate(thr_act, tstate, flavor);
1007 }
1008
1009 case x86_THREAD_STATE32:
1010 {
1011 if (count != x86_THREAD_STATE32_COUNT)
1012 return(KERN_INVALID_ARGUMENT);
1013
1014 if (thread_is_64bit(thr_act))
1015 return(KERN_INVALID_ARGUMENT);
1016
1017 return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1018 }
1019
1020 case x86_THREAD_STATE64:
1021 {
1022 if (count != x86_THREAD_STATE64_COUNT)
1023 return(KERN_INVALID_ARGUMENT);
1024
1025 if (!thread_is_64bit(thr_act))
1026 return(KERN_INVALID_ARGUMENT);
1027
1028 return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
1029
1030 }
1031 case x86_THREAD_STATE:
1032 {
1033 x86_thread_state_t *state;
1034
1035 if (count != x86_THREAD_STATE_COUNT)
1036 return(KERN_INVALID_ARGUMENT);
1037
1038 state = (x86_thread_state_t *)tstate;
1039
1040 if (state->tsh.flavor == x86_THREAD_STATE64 &&
1041 state->tsh.count == x86_THREAD_STATE64_COUNT &&
1042 thread_is_64bit(thr_act)) {
1043 return set_thread_state64(thr_act, &state->uts.ts64);
1044 } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
1045 state->tsh.count == x86_THREAD_STATE32_COUNT &&
1046 !thread_is_64bit(thr_act)) {
1047 return set_thread_state32(thr_act, &state->uts.ts32);
1048 } else
1049 return(KERN_INVALID_ARGUMENT);
1050
1051 break;
1052 }
1053 case x86_DEBUG_STATE32:
1054 {
1055 x86_debug_state32_t *state;
1056 kern_return_t ret;
1057
1058 if (thread_is_64bit(thr_act))
1059 return(KERN_INVALID_ARGUMENT);
1060
1061 state = (x86_debug_state32_t *)tstate;
1062
1063 ret = set_debug_state32(thr_act, state);
1064
1065 return ret;
1066 }
1067 case x86_DEBUG_STATE64:
1068 {
1069 x86_debug_state64_t *state;
1070 kern_return_t ret;
1071
1072 if (!thread_is_64bit(thr_act))
1073 return(KERN_INVALID_ARGUMENT);
1074
1075 state = (x86_debug_state64_t *)tstate;
1076
1077 ret = set_debug_state64(thr_act, state);
1078
1079 return ret;
1080 }
1081 case x86_DEBUG_STATE:
1082 {
1083 x86_debug_state_t *state;
1084 kern_return_t ret = KERN_INVALID_ARGUMENT;
1085
1086 if (count != x86_DEBUG_STATE_COUNT)
1087 return (KERN_INVALID_ARGUMENT);
1088
1089 state = (x86_debug_state_t *)tstate;
1090 if (state->dsh.flavor == x86_DEBUG_STATE64 &&
1091 state->dsh.count == x86_DEBUG_STATE64_COUNT &&
1092 thread_is_64bit(thr_act)) {
1093 ret = set_debug_state64(thr_act, &state->uds.ds64);
1094 }
1095 else
1096 if (state->dsh.flavor == x86_DEBUG_STATE32 &&
1097 state->dsh.count == x86_DEBUG_STATE32_COUNT &&
1098 !thread_is_64bit(thr_act)) {
1099 ret = set_debug_state32(thr_act, &state->uds.ds32);
1100 }
1101 return ret;
1102 }
1103 default:
1104 return(KERN_INVALID_ARGUMENT);
1105 }
1106
1107 return(KERN_SUCCESS);
1108 }
1109
1110
1111
1112 /*
1113 * thread_getstatus:
1114 *
1115 * Get the status of the specified thread.
1116 */
1117
1118 kern_return_t
1119 machine_thread_get_state(
1120 thread_t thr_act,
1121 thread_flavor_t flavor,
1122 thread_state_t tstate,
1123 mach_msg_type_number_t *count)
1124 {
1125
1126 switch (flavor) {
1127
1128 case THREAD_STATE_FLAVOR_LIST:
1129 {
1130 if (*count < 3)
1131 return (KERN_INVALID_ARGUMENT);
1132
1133 tstate[0] = i386_THREAD_STATE;
1134 tstate[1] = i386_FLOAT_STATE;
1135 tstate[2] = i386_EXCEPTION_STATE;
1136
1137 *count = 3;
1138 break;
1139 }
1140
1141 case THREAD_STATE_FLAVOR_LIST_NEW:
1142 {
1143 if (*count < 4)
1144 return (KERN_INVALID_ARGUMENT);
1145
1146 tstate[0] = x86_THREAD_STATE;
1147 tstate[1] = x86_FLOAT_STATE;
1148 tstate[2] = x86_EXCEPTION_STATE;
1149 tstate[3] = x86_DEBUG_STATE;
1150
1151 *count = 4;
1152 break;
1153 }
1154
1155 case x86_SAVED_STATE32:
1156 {
1157 x86_saved_state32_t *state;
1158 x86_saved_state32_t *saved_state;
1159
1160 if (*count < x86_SAVED_STATE32_COUNT)
1161 return(KERN_INVALID_ARGUMENT);
1162
1163 if (thread_is_64bit(thr_act))
1164 return(KERN_INVALID_ARGUMENT);
1165
1166 state = (x86_saved_state32_t *) tstate;
1167 saved_state = USER_REGS32(thr_act);
1168
1169 /*
1170 * First, copy everything:
1171 */
1172 *state = *saved_state;
1173 state->ds = saved_state->ds & 0xffff;
1174 state->es = saved_state->es & 0xffff;
1175 state->fs = saved_state->fs & 0xffff;
1176 state->gs = saved_state->gs & 0xffff;
1177
1178 *count = x86_SAVED_STATE32_COUNT;
1179 break;
1180 }
1181
1182 case x86_SAVED_STATE64:
1183 {
1184 x86_saved_state64_t *state;
1185 x86_saved_state64_t *saved_state;
1186
1187 if (*count < x86_SAVED_STATE64_COUNT)
1188 return(KERN_INVALID_ARGUMENT);
1189
1190 if (!thread_is_64bit(thr_act))
1191 return(KERN_INVALID_ARGUMENT);
1192
1193 state = (x86_saved_state64_t *)tstate;
1194 saved_state = USER_REGS64(thr_act);
1195
1196 /*
1197 * First, copy everything:
1198 */
1199 *state = *saved_state;
1200 state->fs = saved_state->fs & 0xffff;
1201 state->gs = saved_state->gs & 0xffff;
1202
1203 *count = x86_SAVED_STATE64_COUNT;
1204 break;
1205 }
1206
1207 case x86_FLOAT_STATE32:
1208 {
1209 if (*count < x86_FLOAT_STATE32_COUNT)
1210 return(KERN_INVALID_ARGUMENT);
1211
1212 if (thread_is_64bit(thr_act))
1213 return(KERN_INVALID_ARGUMENT);
1214
1215 *count = x86_FLOAT_STATE32_COUNT;
1216
1217 return fpu_get_fxstate(thr_act, tstate, flavor);
1218 }
1219
1220 case x86_FLOAT_STATE64:
1221 {
1222 if (*count < x86_FLOAT_STATE64_COUNT)
1223 return(KERN_INVALID_ARGUMENT);
1224
1225 if ( !thread_is_64bit(thr_act))
1226 return(KERN_INVALID_ARGUMENT);
1227
1228 *count = x86_FLOAT_STATE64_COUNT;
1229
1230 return fpu_get_fxstate(thr_act, tstate, flavor);
1231 }
1232
1233 case x86_FLOAT_STATE:
1234 {
1235 x86_float_state_t *state;
1236 kern_return_t kret;
1237
1238 if (*count < x86_FLOAT_STATE_COUNT)
1239 return(KERN_INVALID_ARGUMENT);
1240
1241 state = (x86_float_state_t *)tstate;
1242
1243 /*
1244 * no need to bzero... currently
1245 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
1246 */
1247 if (thread_is_64bit(thr_act)) {
1248 state->fsh.flavor = x86_FLOAT_STATE64;
1249 state->fsh.count = x86_FLOAT_STATE64_COUNT;
1250
1251 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1252 } else {
1253 state->fsh.flavor = x86_FLOAT_STATE32;
1254 state->fsh.count = x86_FLOAT_STATE32_COUNT;
1255
1256 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1257 }
1258 *count = x86_FLOAT_STATE_COUNT;
1259
1260 return(kret);
1261 }
1262
1263 case x86_AVX_STATE32:
1264 {
1265 if (*count != x86_AVX_STATE32_COUNT)
1266 return(KERN_INVALID_ARGUMENT);
1267
1268 if (thread_is_64bit(thr_act))
1269 return(KERN_INVALID_ARGUMENT);
1270
1271 *count = x86_AVX_STATE32_COUNT;
1272
1273 return fpu_get_fxstate(thr_act, tstate, flavor);
1274 }
1275
1276 case x86_AVX_STATE64:
1277 {
1278 if (*count != x86_AVX_STATE64_COUNT)
1279 return(KERN_INVALID_ARGUMENT);
1280
1281 if ( !thread_is_64bit(thr_act))
1282 return(KERN_INVALID_ARGUMENT);
1283
1284 *count = x86_AVX_STATE64_COUNT;
1285
1286 return fpu_get_fxstate(thr_act, tstate, flavor);
1287 }
1288
1289 case x86_THREAD_STATE32:
1290 {
1291 if (*count < x86_THREAD_STATE32_COUNT)
1292 return(KERN_INVALID_ARGUMENT);
1293
1294 if (thread_is_64bit(thr_act))
1295 return(KERN_INVALID_ARGUMENT);
1296
1297 *count = x86_THREAD_STATE32_COUNT;
1298
1299 get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1300 break;
1301 }
1302
1303 case x86_THREAD_STATE64:
1304 {
1305 if (*count < x86_THREAD_STATE64_COUNT)
1306 return(KERN_INVALID_ARGUMENT);
1307
1308 if ( !thread_is_64bit(thr_act))
1309 return(KERN_INVALID_ARGUMENT);
1310
1311 *count = x86_THREAD_STATE64_COUNT;
1312
1313 get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
1314 break;
1315 }
1316
1317 case x86_THREAD_STATE:
1318 {
1319 x86_thread_state_t *state;
1320
1321 if (*count < x86_THREAD_STATE_COUNT)
1322 return(KERN_INVALID_ARGUMENT);
1323
1324 state = (x86_thread_state_t *)tstate;
1325
1326 bzero((char *)state, sizeof(x86_thread_state_t));
1327
1328 if (thread_is_64bit(thr_act)) {
1329 state->tsh.flavor = x86_THREAD_STATE64;
1330 state->tsh.count = x86_THREAD_STATE64_COUNT;
1331
1332 get_thread_state64(thr_act, &state->uts.ts64);
1333 } else {
1334 state->tsh.flavor = x86_THREAD_STATE32;
1335 state->tsh.count = x86_THREAD_STATE32_COUNT;
1336
1337 get_thread_state32(thr_act, &state->uts.ts32);
1338 }
1339 *count = x86_THREAD_STATE_COUNT;
1340
1341 break;
1342 }
1343
1344
1345 case x86_EXCEPTION_STATE32:
1346 {
1347 if (*count < x86_EXCEPTION_STATE32_COUNT)
1348 return(KERN_INVALID_ARGUMENT);
1349
1350 if (thread_is_64bit(thr_act))
1351 return(KERN_INVALID_ARGUMENT);
1352
1353 *count = x86_EXCEPTION_STATE32_COUNT;
1354
1355 get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
1356 /*
1357 * Suppress the cpu number for binary compatibility
1358 * of this deprecated state.
1359 */
1360 ((x86_exception_state32_t *)tstate)->cpu = 0;
1361 break;
1362 }
1363
1364 case x86_EXCEPTION_STATE64:
1365 {
1366 if (*count < x86_EXCEPTION_STATE64_COUNT)
1367 return(KERN_INVALID_ARGUMENT);
1368
1369 if ( !thread_is_64bit(thr_act))
1370 return(KERN_INVALID_ARGUMENT);
1371
1372 *count = x86_EXCEPTION_STATE64_COUNT;
1373
1374 get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
1375 /*
1376 * Suppress the cpu number for binary compatibility
1377 * of this deprecated state.
1378 */
1379 ((x86_exception_state64_t *)tstate)->cpu = 0;
1380 break;
1381 }
1382
1383 case x86_EXCEPTION_STATE:
1384 {
1385 x86_exception_state_t *state;
1386
1387 if (*count < x86_EXCEPTION_STATE_COUNT)
1388 return(KERN_INVALID_ARGUMENT);
1389
1390 state = (x86_exception_state_t *)tstate;
1391
1392 bzero((char *)state, sizeof(x86_exception_state_t));
1393
1394 if (thread_is_64bit(thr_act)) {
1395 state->esh.flavor = x86_EXCEPTION_STATE64;
1396 state->esh.count = x86_EXCEPTION_STATE64_COUNT;
1397
1398 get_exception_state64(thr_act, &state->ues.es64);
1399 } else {
1400 state->esh.flavor = x86_EXCEPTION_STATE32;
1401 state->esh.count = x86_EXCEPTION_STATE32_COUNT;
1402
1403 get_exception_state32(thr_act, &state->ues.es32);
1404 }
1405 *count = x86_EXCEPTION_STATE_COUNT;
1406
1407 break;
1408 }
1409 case x86_DEBUG_STATE32:
1410 {
1411 if (*count < x86_DEBUG_STATE32_COUNT)
1412 return(KERN_INVALID_ARGUMENT);
1413
1414 if (thread_is_64bit(thr_act))
1415 return(KERN_INVALID_ARGUMENT);
1416
1417 get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
1418
1419 *count = x86_DEBUG_STATE32_COUNT;
1420
1421 break;
1422 }
1423 case x86_DEBUG_STATE64:
1424 {
1425 if (*count < x86_DEBUG_STATE64_COUNT)
1426 return(KERN_INVALID_ARGUMENT);
1427
1428 if (!thread_is_64bit(thr_act))
1429 return(KERN_INVALID_ARGUMENT);
1430
1431 get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
1432
1433 *count = x86_DEBUG_STATE64_COUNT;
1434
1435 break;
1436 }
1437 case x86_DEBUG_STATE:
1438 {
1439 x86_debug_state_t *state;
1440
1441 if (*count < x86_DEBUG_STATE_COUNT)
1442 return(KERN_INVALID_ARGUMENT);
1443
1444 state = (x86_debug_state_t *)tstate;
1445
1446 bzero(state, sizeof *state);
1447
1448 if (thread_is_64bit(thr_act)) {
1449 state->dsh.flavor = x86_DEBUG_STATE64;
1450 state->dsh.count = x86_DEBUG_STATE64_COUNT;
1451
1452 get_debug_state64(thr_act, &state->uds.ds64);
1453 } else {
1454 state->dsh.flavor = x86_DEBUG_STATE32;
1455 state->dsh.count = x86_DEBUG_STATE32_COUNT;
1456
1457 get_debug_state32(thr_act, &state->uds.ds32);
1458 }
1459 *count = x86_DEBUG_STATE_COUNT;
1460 break;
1461 }
1462 default:
1463 return(KERN_INVALID_ARGUMENT);
1464 }
1465
1466 return(KERN_SUCCESS);
1467 }
1468
1469 kern_return_t
1470 machine_thread_get_kern_state(
1471 thread_t thread,
1472 thread_flavor_t flavor,
1473 thread_state_t tstate,
1474 mach_msg_type_number_t *count)
1475 {
1476 x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;
1477
1478 /*
1479 * This works only for an interrupted kernel thread
1480 */
1481 if (thread != current_thread() || int_state == NULL)
1482 return KERN_FAILURE;
1483
1484 switch (flavor) {
1485 case x86_THREAD_STATE32: {
1486 x86_thread_state32_t *state;
1487 x86_saved_state32_t *saved_state;
1488
1489 if (!is_saved_state32(int_state) ||
1490 *count < x86_THREAD_STATE32_COUNT)
1491 return (KERN_INVALID_ARGUMENT);
1492
1493 state = (x86_thread_state32_t *) tstate;
1494
1495 saved_state = saved_state32(int_state);
1496 /*
1497 * General registers.
1498 */
1499 state->eax = saved_state->eax;
1500 state->ebx = saved_state->ebx;
1501 state->ecx = saved_state->ecx;
1502 state->edx = saved_state->edx;
1503 state->edi = saved_state->edi;
1504 state->esi = saved_state->esi;
1505 state->ebp = saved_state->ebp;
1506 state->esp = saved_state->uesp;
1507 state->eflags = saved_state->efl;
1508 state->eip = saved_state->eip;
1509 state->cs = saved_state->cs;
1510 state->ss = saved_state->ss;
1511 state->ds = saved_state->ds & 0xffff;
1512 state->es = saved_state->es & 0xffff;
1513 state->fs = saved_state->fs & 0xffff;
1514 state->gs = saved_state->gs & 0xffff;
1515
1516 *count = x86_THREAD_STATE32_COUNT;
1517
1518 return KERN_SUCCESS;
1519 }
1520
1521 case x86_THREAD_STATE64: {
1522 x86_thread_state64_t *state;
1523 x86_saved_state64_t *saved_state;
1524
1525 if (!is_saved_state64(int_state) ||
1526 *count < x86_THREAD_STATE64_COUNT)
1527 return (KERN_INVALID_ARGUMENT);
1528
1529 state = (x86_thread_state64_t *) tstate;
1530
1531 saved_state = saved_state64(int_state);
1532 /*
1533 * General registers.
1534 */
1535 state->rax = saved_state->rax;
1536 state->rbx = saved_state->rbx;
1537 state->rcx = saved_state->rcx;
1538 state->rdx = saved_state->rdx;
1539 state->rdi = saved_state->rdi;
1540 state->rsi = saved_state->rsi;
1541 state->rbp = saved_state->rbp;
1542 state->rsp = saved_state->isf.rsp;
1543 state->r8 = saved_state->r8;
1544 state->r9 = saved_state->r9;
1545 state->r10 = saved_state->r10;
1546 state->r11 = saved_state->r11;
1547 state->r12 = saved_state->r12;
1548 state->r13 = saved_state->r13;
1549 state->r14 = saved_state->r14;
1550 state->r15 = saved_state->r15;
1551
1552 state->rip = saved_state->isf.rip;
1553 state->rflags = saved_state->isf.rflags;
1554 state->cs = saved_state->isf.cs;
1555 state->fs = saved_state->fs & 0xffff;
1556 state->gs = saved_state->gs & 0xffff;
1557 *count = x86_THREAD_STATE64_COUNT;
1558
1559 return KERN_SUCCESS;
1560 }
1561
1562 case x86_THREAD_STATE: {
1563 x86_thread_state_t *state = NULL;
1564
1565 if (*count < x86_THREAD_STATE_COUNT)
1566 return (KERN_INVALID_ARGUMENT);
1567
1568 state = (x86_thread_state_t *) tstate;
1569
1570 if (is_saved_state32(int_state)) {
1571 x86_saved_state32_t *saved_state = saved_state32(int_state);
1572
1573 state->tsh.flavor = x86_THREAD_STATE32;
1574 state->tsh.count = x86_THREAD_STATE32_COUNT;
1575
1576 /*
1577 * General registers.
1578 */
1579 state->uts.ts32.eax = saved_state->eax;
1580 state->uts.ts32.ebx = saved_state->ebx;
1581 state->uts.ts32.ecx = saved_state->ecx;
1582 state->uts.ts32.edx = saved_state->edx;
1583 state->uts.ts32.edi = saved_state->edi;
1584 state->uts.ts32.esi = saved_state->esi;
1585 state->uts.ts32.ebp = saved_state->ebp;
1586 state->uts.ts32.esp = saved_state->uesp;
1587 state->uts.ts32.eflags = saved_state->efl;
1588 state->uts.ts32.eip = saved_state->eip;
1589 state->uts.ts32.cs = saved_state->cs;
1590 state->uts.ts32.ss = saved_state->ss;
1591 state->uts.ts32.ds = saved_state->ds & 0xffff;
1592 state->uts.ts32.es = saved_state->es & 0xffff;
1593 state->uts.ts32.fs = saved_state->fs & 0xffff;
1594 state->uts.ts32.gs = saved_state->gs & 0xffff;
1595 } else if (is_saved_state64(int_state)) {
1596 x86_saved_state64_t *saved_state = saved_state64(int_state);
1597
1598 state->tsh.flavor = x86_THREAD_STATE64;
1599 state->tsh.count = x86_THREAD_STATE64_COUNT;
1600
1601 /*
1602 * General registers.
1603 */
1604 state->uts.ts64.rax = saved_state->rax;
1605 state->uts.ts64.rbx = saved_state->rbx;
1606 state->uts.ts64.rcx = saved_state->rcx;
1607 state->uts.ts64.rdx = saved_state->rdx;
1608 state->uts.ts64.rdi = saved_state->rdi;
1609 state->uts.ts64.rsi = saved_state->rsi;
1610 state->uts.ts64.rbp = saved_state->rbp;
1611 state->uts.ts64.rsp = saved_state->isf.rsp;
1612 state->uts.ts64.r8 = saved_state->r8;
1613 state->uts.ts64.r9 = saved_state->r9;
1614 state->uts.ts64.r10 = saved_state->r10;
1615 state->uts.ts64.r11 = saved_state->r11;
1616 state->uts.ts64.r12 = saved_state->r12;
1617 state->uts.ts64.r13 = saved_state->r13;
1618 state->uts.ts64.r14 = saved_state->r14;
1619 state->uts.ts64.r15 = saved_state->r15;
1620
1621 state->uts.ts64.rip = saved_state->isf.rip;
1622 state->uts.ts64.rflags = saved_state->isf.rflags;
1623 state->uts.ts64.cs = saved_state->isf.cs;
1624 state->uts.ts64.fs = saved_state->fs & 0xffff;
1625 state->uts.ts64.gs = saved_state->gs & 0xffff;
1626 } else {
1627 panic("unknown thread state");
1628 }
1629
1630 *count = x86_THREAD_STATE_COUNT;
1631 return KERN_SUCCESS;
1632 }
1633 }
1634 return KERN_FAILURE;
1635 }
1636
1637
1638 void
1639 machine_thread_switch_addrmode(thread_t thread)
1640 {
1641 /*
1642 * We don't want to be preempted until we're done
1643 * - particularly if we're switching the current thread
1644 */
1645 disable_preemption();
1646
1647 /*
1648 * Reset the state saveareas. As we're resetting, we anticipate no
1649 * memory allocations in this path.
1650 */
1651 machine_thread_create(thread, thread->task);
1652
1653 /* If we're switching ourselves, reset the pcb addresses etc. */
1654 if (thread == current_thread()) {
1655 boolean_t istate = ml_set_interrupts_enabled(FALSE);
1656 #if defined(__i386__)
1657 if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
1658 pmap_load_kernel_cr3();
1659 #endif /* defined(__i386) */
1660 act_machine_switch_pcb(NULL, thread);
1661 ml_set_interrupts_enabled(istate);
1662 }
1663 enable_preemption();
1664 }
1665
1666
1667
1668 /*
1669 * This is used to set the current thr_act/thread
1670 * when starting up a new processor
1671 */
1672 void
1673 machine_set_current_thread(thread_t thread)
1674 {
1675 current_cpu_datap()->cpu_active_thread = thread;
1676 }
1677
1678 /*
1679 * This is called when a task is terminated, and also on exec().
1680 * Clear machine-dependent state that is stored on the task.
1681 */
1682 void
1683 machine_thread_terminate_self(void)
1684 {
1685 task_t self_task = current_task();
1686 if (self_task) {
1687 user_ldt_t user_ldt = self_task->i386_ldt;
1688 if (user_ldt != 0) {
1689 self_task->i386_ldt = 0;
1690 user_ldt_free(user_ldt);
1691 }
1692
1693 if (self_task->task_debug != NULL) {
1694 zfree(ids_zone, self_task->task_debug);
1695 self_task->task_debug = NULL;
1696 }
1697 }
1698 }
1699
1700 /*
1701 * Perform machine-dependent per-thread initializations
1702 */
1703 void
1704 machine_thread_init(void)
1705 {
1706 if (cpu_mode_is64bit()) {
1707 assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
1708 iss_zone = zinit(sizeof(x86_sframe64_t),
1709 thread_max * sizeof(x86_sframe64_t),
1710 THREAD_CHUNK * sizeof(x86_sframe64_t),
1711 "x86_64 saved state");
1712
1713 ids_zone = zinit(sizeof(x86_debug_state64_t),
1714 thread_max * sizeof(x86_debug_state64_t),
1715 THREAD_CHUNK * sizeof(x86_debug_state64_t),
1716 "x86_64 debug state");
1717
1718 } else {
1719 iss_zone = zinit(sizeof(x86_sframe32_t),
1720 thread_max * sizeof(x86_sframe32_t),
1721 THREAD_CHUNK * sizeof(x86_sframe32_t),
1722 "x86 saved state");
1723 ids_zone = zinit(sizeof(x86_debug_state32_t),
1724 thread_max * (sizeof(x86_debug_state32_t)),
1725 THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
1726 "x86 debug state");
1727 }
1728 fpu_module_init();
1729 }
1730
1731
1732 #if defined(__i386__)
1733 /*
1734 * Some routines for debugging activation code
1735 */
1736 static void dump_handlers(thread_t);
1737 void dump_regs(thread_t);
1738 int dump_act(thread_t thr_act);
1739
1740 static void
1741 dump_handlers(thread_t thr_act)
1742 {
1743 ReturnHandler *rhp = thr_act->handlers;
1744 int counter = 0;
1745
1746 printf("\t");
1747 while (rhp) {
1748 if (rhp == &thr_act->special_handler){
1749 if (rhp->next)
1750 printf("[NON-Zero next ptr(%p)]", rhp->next);
1751 printf("special_handler()->");
1752 break;
1753 }
1754 printf("hdlr_%d(%p)->", counter, rhp->handler);
1755 rhp = rhp->next;
1756 if (++counter > 32) {
1757 printf("Aborting: HUGE handler chain\n");
1758 break;
1759 }
1760 }
1761 printf("HLDR_NULL\n");
1762 }
1763
1764 void
1765 dump_regs(thread_t thr_act)
1766 {
1767 if (thread_is_64bit(thr_act)) {
1768 x86_saved_state64_t *ssp;
1769
1770 ssp = USER_REGS64(thr_act);
1771
1772 panic("dump_regs: 64bit tasks not yet supported");
1773
1774 } else {
1775 x86_saved_state32_t *ssp;
1776
1777 ssp = USER_REGS32(thr_act);
1778
1779 /*
1780 * Print out user register state
1781 */
1782 printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
1783 ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
1784
1785 printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
1786 ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
1787
1788 printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
1789 }
1790 }
1791
1792 int
1793 dump_act(thread_t thr_act)
1794 {
1795 if (!thr_act)
1796 return(0);
1797
1798 printf("thread(%p)(%d): task=%p(%d)\n",
1799 thr_act, thr_act->ref_count,
1800 thr_act->task,
1801 thr_act->task ? thr_act->task->ref_count : 0);
1802
1803 printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
1804 thr_act->suspend_count, thr_act->user_stop_count,
1805 thr_act->active, thr_act->ast);
1806 printf("\tpcb=%p\n", &thr_act->machine);
1807
1808 if (thr_act->kernel_stack) {
1809 vm_offset_t stack = thr_act->kernel_stack;
1810
1811 printf("\tk_stk %lx eip %x ebx %x esp %x iss %p\n",
1812 (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
1813 STACK_IKS(stack)->k_esp, thr_act->machine.iss);
1814 }
1815
1816 dump_handlers(thr_act);
1817 dump_regs(thr_act);
1818 return((int)thr_act);
1819 }
1820 #endif
1821
1822 user_addr_t
1823 get_useraddr(void)
1824 {
1825 thread_t thr_act = current_thread();
1826
1827 if (thread_is_64bit(thr_act)) {
1828 x86_saved_state64_t *iss64;
1829
1830 iss64 = USER_REGS64(thr_act);
1831
1832 return(iss64->isf.rip);
1833 } else {
1834 x86_saved_state32_t *iss32;
1835
1836 iss32 = USER_REGS32(thr_act);
1837
1838 return(iss32->eip);
1839 }
1840 }
1841
1842 /*
1843 * detach and return a kernel stack from a thread
1844 */
1845
1846 vm_offset_t
1847 machine_stack_detach(thread_t thread)
1848 {
1849 vm_offset_t stack;
1850
1851 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
1852 (uintptr_t)thread_tid(thread), thread->priority,
1853 thread->sched_pri, 0,
1854 0);
1855
1856 stack = thread->kernel_stack;
1857 thread->kernel_stack = 0;
1858
1859 return (stack);
1860 }
1861
1862 /*
1863 * attach a kernel stack to a thread and initialize it
1864 */
1865
1866 void
1867 machine_stack_attach(
1868 thread_t thread,
1869 vm_offset_t stack)
1870 {
1871 struct x86_kernel_state *statep;
1872
1873 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
1874 (uintptr_t)thread_tid(thread), thread->priority,
1875 thread->sched_pri, 0, 0);
1876
1877 assert(stack);
1878 thread->kernel_stack = stack;
1879
1880 statep = STACK_IKS(stack);
1881 #if defined(__x86_64__)
1882 statep->k_rip = (unsigned long) Thread_continue;
1883 statep->k_rbx = (unsigned long) thread_continue;
1884 statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
1885 #else
1886 statep->k_eip = (unsigned long) Thread_continue;
1887 statep->k_ebx = (unsigned long) thread_continue;
1888 statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
1889 #endif
1890
1891 return;
1892 }
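/*
 * machine_stack_attach() above seeds the saved kernel state so that the
 * first context switch onto this stack resumes in the Thread_continue
 * trampoline, with the generic thread_continue() dispatcher parked in
 * k_rbx/k_ebx and the stack pointer set just below the x86_kernel_state
 * frame; Thread_continue then runs the thread's pending continuation.
 */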
1893
1894 /*
1895 * move a stack from old to new thread
1896 */
1897
1898 void
1899 machine_stack_handoff(thread_t old,
1900 thread_t new)
1901 {
1902 vm_offset_t stack;
1903
1904 assert(new);
1905 assert(old);
1906
1907 #if CONFIG_COUNTERS
1908 machine_pmc_cswitch(old, new);
1909 #endif
1910
1911 stack = old->kernel_stack;
1912 if (stack == old->reserved_stack) {
1913 assert(new->reserved_stack);
1914 old->reserved_stack = new->reserved_stack;
1915 new->reserved_stack = stack;
1916 }
1917 old->kernel_stack = 0;
1918 /*
1919 * A full call to machine_stack_attach() is unnecessary
1920 * because the old stack is already initialized.
1921 */
1922 new->kernel_stack = stack;
1923
1924 fpu_save_context(old);
1925
1926 old->machine.specFlags &= ~OnProc;
1927 new->machine.specFlags |= OnProc;
1928
1929 PMAP_SWITCH_CONTEXT(old, new, cpu_number());
1930 act_machine_switch_pcb(old, new);
1931
1932 machine_set_current_thread(new);
1933
1934 return;
1935 }
1936
1937
1938
1939
1940 struct x86_act_context32 {
1941 x86_saved_state32_t ss;
1942 x86_float_state32_t fs;
1943 x86_debug_state32_t ds;
1944 };
1945
1946 struct x86_act_context64 {
1947 x86_saved_state64_t ss;
1948 x86_float_state64_t fs;
1949 x86_debug_state64_t ds;
1950 };
1951
1952
1953
1954 void *
1955 act_thread_csave(void)
1956 {
1957 kern_return_t kret;
1958 mach_msg_type_number_t val;
1959 thread_t thr_act = current_thread();
1960
1961 if (thread_is_64bit(thr_act)) {
1962 struct x86_act_context64 *ic64;
1963
1964 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
1965
1966 if (ic64 == (struct x86_act_context64 *)NULL)
1967 return((void *)0);
1968
1969 val = x86_SAVED_STATE64_COUNT;
1970 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
1971 (thread_state_t) &ic64->ss, &val);
1972 if (kret != KERN_SUCCESS) {
1973 kfree(ic64, sizeof(struct x86_act_context64));
1974 return((void *)0);
1975 }
1976 val = x86_FLOAT_STATE64_COUNT;
1977 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
1978 (thread_state_t) &ic64->fs, &val);
1979 if (kret != KERN_SUCCESS) {
1980 kfree(ic64, sizeof(struct x86_act_context64));
1981 return((void *)0);
1982 }
1983
1984 val = x86_DEBUG_STATE64_COUNT;
1985 kret = machine_thread_get_state(thr_act,
1986 x86_DEBUG_STATE64,
1987 (thread_state_t)&ic64->ds,
1988 &val);
1989 if (kret != KERN_SUCCESS) {
1990 kfree(ic64, sizeof(struct x86_act_context64));
1991 return((void *)0);
1992 }
1993 return(ic64);
1994
1995 } else {
1996 struct x86_act_context32 *ic32;
1997
1998 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
1999
2000 if (ic32 == (struct x86_act_context32 *)NULL)
2001 return((void *)0);
2002
2003 val = x86_SAVED_STATE32_COUNT;
2004 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
2005 (thread_state_t) &ic32->ss, &val);
2006 if (kret != KERN_SUCCESS) {
2007 kfree(ic32, sizeof(struct x86_act_context32));
2008 return((void *)0);
2009 }
2010 val = x86_FLOAT_STATE32_COUNT;
2011 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
2012 (thread_state_t) &ic32->fs, &val);
2013 if (kret != KERN_SUCCESS) {
2014 kfree(ic32, sizeof(struct x86_act_context32));
2015 return((void *)0);
2016 }
2017
2018 val = x86_DEBUG_STATE32_COUNT;
2019 kret = machine_thread_get_state(thr_act,
2020 x86_DEBUG_STATE32,
2021 (thread_state_t)&ic32->ds,
2022 &val);
2023 if (kret != KERN_SUCCESS) {
2024 kfree(ic32, sizeof(struct x86_act_context32));
2025 return((void *)0);
2026 }
2027 return(ic32);
2028 }
2029 }
2030
2031
2032 void
2033 act_thread_catt(void *ctx)
2034 {
2035 thread_t thr_act = current_thread();
2036 kern_return_t kret;
2037
2038 if (ctx == (void *)NULL)
2039 return;
2040
2041 if (thread_is_64bit(thr_act)) {
2042 struct x86_act_context64 *ic64;
2043
2044 ic64 = (struct x86_act_context64 *)ctx;
2045
2046 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
2047 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
2048 if (kret == KERN_SUCCESS) {
2049 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
2050 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
2051 }
2052 kfree(ic64, sizeof(struct x86_act_context64));
2053 } else {
2054 struct x86_act_context32 *ic32;
2055
2056 ic32 = (struct x86_act_context32 *)ctx;
2057
2058 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
2059 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
2060 if (kret == KERN_SUCCESS) {
2061 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
2062 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
2063 }
2064 kfree(ic32, sizeof(struct x86_act_context32));
2065 }
2066 }
2067
2068
2069 void act_thread_cfree(__unused void *ctx)
2070 {
2071 /* XXX - Unused */
2072 }
2073 void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
2074 void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
2075 thread->machine.arg_store_valid = valid;
2076 }
2077
2078 boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);
2079
2080 boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
2081 return (thread->machine.arg_store_valid);
2082 }
2083
2084 /*
2085 * Duplicate one x86_debug_state32_t to another. "all" parameter
2086 * chooses whether dr4 and dr5 are copied (they are never meant
2087 * to be installed when we do machine_task_set_state() or
2088 * machine_thread_set_state()).
2089 */
2090 void
2091 copy_debug_state32(
2092 x86_debug_state32_t *src,
2093 x86_debug_state32_t *target,
2094 boolean_t all)
2095 {
2096 if (all) {
2097 target->dr4 = src->dr4;
2098 target->dr5 = src->dr5;
2099 }
2100
2101 target->dr0 = src->dr0;
2102 target->dr1 = src->dr1;
2103 target->dr2 = src->dr2;
2104 target->dr3 = src->dr3;
2105 target->dr6 = src->dr6;
2106 target->dr7 = src->dr7;
2107 }
2108
2109 /*
2110 * Duplicate one x86_debug_state64_t to another. "all" parameter
2111 * chooses whether dr4 and dr5 are copied (they are never meant
2112 * to be installed when we do machine_task_set_state() or
2113 * machine_thread_set_state()).
2114 */
2115 void
2116 copy_debug_state64(
2117 x86_debug_state64_t *src,
2118 x86_debug_state64_t *target,
2119 boolean_t all)
2120 {
2121 if (all) {
2122 target->dr4 = src->dr4;
2123 target->dr5 = src->dr5;
2124 }
2125
2126 target->dr0 = src->dr0;
2127 target->dr1 = src->dr1;
2128 target->dr2 = src->dr2;
2129 target->dr3 = src->dr3;
2130 target->dr6 = src->dr6;
2131 target->dr7 = src->dr7;
2132 }