apple/xnu (xnu-2050.22.13): osfmk/i386/pcb.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <mach_rt.h>
58 #include <mach_debug.h>
59 #include <mach_ldebug.h>
60
61 #include <sys/kdebug.h>
62
63 #include <mach/kern_return.h>
64 #include <mach/thread_status.h>
65 #include <mach/vm_param.h>
66
67 #include <kern/counters.h>
68 #include <kern/kalloc.h>
69 #include <kern/mach_param.h>
70 #include <kern/processor.h>
71 #include <kern/cpu_data.h>
72 #include <kern/cpu_number.h>
73 #include <kern/task.h>
74 #include <kern/thread.h>
75 #include <kern/sched_prim.h>
76 #include <kern/misc_protos.h>
77 #include <kern/assert.h>
78 #include <kern/spl.h>
79 #include <kern/machine.h>
80 #include <ipc/ipc_port.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_map.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_protos.h>
85
86 #include <i386/cpu_data.h>
87 #include <i386/cpu_number.h>
88 #include <i386/eflags.h>
89 #include <i386/proc_reg.h>
90 #include <i386/fpu.h>
91 #include <i386/misc_protos.h>
92 #include <i386/mp_desc.h>
93 #include <i386/thread.h>
94 #if defined(__i386__)
95 #include <i386/fpu.h>
96 #endif
97 #include <i386/machine_routines.h>
98 #include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */
99
100 #if CONFIG_COUNTERS
101 #include <pmc/pmc.h>
102 #endif /* CONFIG_COUNTERS */
103
104 /*
105 * Maps state flavor to number of words in the state:
106 */
107 unsigned int _MachineStateCount[] = {
108 /* FLAVOR_LIST */
109 0,
110 x86_THREAD_STATE32_COUNT,
111 x86_FLOAT_STATE32_COUNT,
112 x86_EXCEPTION_STATE32_COUNT,
113 x86_THREAD_STATE64_COUNT,
114 x86_FLOAT_STATE64_COUNT,
115 x86_EXCEPTION_STATE64_COUNT,
116 x86_THREAD_STATE_COUNT,
117 x86_FLOAT_STATE_COUNT,
118 x86_EXCEPTION_STATE_COUNT,
119 0,
120 x86_SAVED_STATE32_COUNT,
121 x86_SAVED_STATE64_COUNT,
122 x86_DEBUG_STATE32_COUNT,
123 x86_DEBUG_STATE64_COUNT,
124 x86_DEBUG_STATE_COUNT
125 };
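/*
 * Editorial sketch, not part of the original source: _MachineStateCount is
 * indexed by thread flavor, so a hypothetical caller could sanity-check an
 * incoming (flavor, count) pair against the table before dispatching
 * (entries of 0 mark flavors with no fixed count).
 */
#if 0	/* illustrative only */
static boolean_t
state_count_matches(unsigned int flavor, mach_msg_type_number_t count)
{
	if (flavor >= sizeof(_MachineStateCount) / sizeof(_MachineStateCount[0]))
		return FALSE;
	return (count == _MachineStateCount[flavor]);
}
#endif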
126
127 zone_t iss_zone; /* zone for saved_state area */
128 zone_t ids_zone; /* zone for debug_state area */
129
130 /* Forward */
131
132 extern void Thread_continue(void);
133 extern void Load_context(
134 thread_t thread);
135
136 static void
137 get_exception_state32(thread_t thread, x86_exception_state32_t *es);
138
139 static void
140 get_exception_state64(thread_t thread, x86_exception_state64_t *es);
141
142 static void
143 get_thread_state32(thread_t thread, x86_thread_state32_t *ts);
144
145 static void
146 get_thread_state64(thread_t thread, x86_thread_state64_t *ts);
147
148 static int
149 set_thread_state32(thread_t thread, x86_thread_state32_t *ts);
150
151 static int
152 set_thread_state64(thread_t thread, x86_thread_state64_t *ts);
153
154 #if CONFIG_COUNTERS
155 static inline void
156 machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);
157
158 static inline void
159 pmc_swi(thread_t /* old */, thread_t /* new */);
160
161 static inline void
162 pmc_swi(thread_t old, thread_t new) {
163 current_cpu_datap()->csw_old_thread = old;
164 current_cpu_datap()->csw_new_thread = new;
165 pal_pmc_swi();
166 }
167
168 static inline void
169 machine_pmc_cswitch(thread_t old, thread_t new) {
170 if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
171 pmc_swi(old, new);
172 }
173 }
174
175 void ml_get_csw_threads(thread_t *old, thread_t *new) {
176 *old = current_cpu_datap()->csw_old_thread;
177 *new = current_cpu_datap()->csw_new_thread;
178 }
179
180 #endif /* CONFIG_COUNTERS */
181
182 /*
183 * Don't let an illegal value for dr7 get set. Specifically,
184 * check for undefined settings. Setting these bit patterns
185  * results in undefined behaviour and can lead to an unexpected
186 * TRCTRAP.
187 */
188 static boolean_t
189 dr7_is_valid(uint32_t *dr7)
190 {
191 int i;
192 uint32_t mask1, mask2;
193
194 /*
195  * If the DE bit is set in CR4, R/W0-3 can be pattern
196  * "10B" to indicate I/O reads and writes
197 */
198 if (!(get_cr4() & CR4_DE))
199 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
200 i++, mask1 <<= 4, mask2 <<= 4)
201 if ((*dr7 & mask1) == mask2)
202 return (FALSE);
203
204 /*
205 * len0-3 pattern "10B" is ok for len on Merom and newer processors
206 * (it signifies an 8-byte wide region). We use the 64bit capability
207 * of the processor in lieu of the more laborious model/family checks
208 * as all 64-bit capable processors so far support this.
209 * Reject an attempt to use this on 64-bit incapable processors.
210 */
211 if (current_cpu_datap()->cpu_is64bit == FALSE)
212 for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
213 i++, mask1 <<= 4, mask2 <<= 4)
214 if ((*dr7 & mask1) == mask2)
215 return (FALSE);
216
217 /*
218 * if we are doing an instruction execution break (indicated
219 * by r/w[x] being "00B"), then the len[x] must also be set
220 * to "00B"
221 */
222 for (i = 0; i < 4; i++)
223 if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
224 ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
225 return (FALSE);
226
227 /*
228 * Intel docs have these bits fixed.
229 */
230 *dr7 |= 0x1 << 10; /* set bit 10 to 1 */
231 *dr7 &= ~(0x1 << 11); /* set bit 11 to 0 */
232 *dr7 &= ~(0x1 << 12); /* set bit 12 to 0 */
233 *dr7 &= ~(0x1 << 14); /* set bit 14 to 0 */
234 *dr7 &= ~(0x1 << 15); /* set bit 15 to 0 */
235
236 /*
237 * We don't allow anything to set the global breakpoints.
238 */
239
240 if (*dr7 & 0x2)
241 return (FALSE);
242
243 if (*dr7 & (0x2<<2))
244 return (FALSE);
245
246 if (*dr7 & (0x2<<4))
247 return (FALSE);
248
249 if (*dr7 & (0x2<<6))
250 return (FALSE);
251
252 return (TRUE);
253 }
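/*
 * Editorial sketch, not part of the original source: a dr7 value that passes
 * the checks above could arm a single local, 4-byte write watchpoint in DR0
 * roughly as follows (bit positions per the Intel SDM; the helper and its
 * name are illustrative, not kernel definitions).
 */
#if 0	/* illustrative only */
static uint32_t
example_dr7_local_write_watchpoint(void)
{
	uint32_t dr7 = 0;

	dr7 |= 0x1 << 0;	/* L0: local enable for DR0 (global G0 stays clear) */
	dr7 |= 0x1 << 16;	/* R/W0 = 01B: break on data writes only */
	dr7 |= 0x3 << 18;	/* LEN0 = 11B: 4-byte wide region */
	dr7 |= 0x1 << 10;	/* bit 10 is architecturally fixed to 1 */
	return dr7;
}
#endif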
254
255 static inline void
256 set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
257 {
258 __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
259 __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
260 __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
261 __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
262 if (cpu_mode_is64bit())
263 cdp->cpu_dr7 = ds->dr7;
264 }
265
266 extern void set_64bit_debug_regs(x86_debug_state64_t *ds);
267
268 static inline void
269 set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
270 {
271 /*
272 * We need to enter 64-bit mode in order to set the full
273 * width of these registers
274 */
275 set_64bit_debug_regs(ds);
276 cdp->cpu_dr7 = ds->dr7;
277 }
278
279 boolean_t
280 debug_state_is_valid32(x86_debug_state32_t *ds)
281 {
282 if (!dr7_is_valid(&ds->dr7))
283 return FALSE;
284
285 #if defined(__i386__)
286 /*
287 * Only allow local breakpoints and make sure they are not
288 * in the trampoline code.
289 */
290 if (ds->dr7 & 0x1)
291 if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
292 return FALSE;
293
294 if (ds->dr7 & (0x1<<2))
295 if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
296 return FALSE;
297
298 if (ds->dr7 & (0x1<<4))
299 if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
300 return FALSE;
301
302 if (ds->dr7 & (0x1<<6))
303 if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
304 return FALSE;
305 #endif
306
307 return TRUE;
308 }
309
310 boolean_t
311 debug_state_is_valid64(x86_debug_state64_t *ds)
312 {
313 if (!dr7_is_valid((uint32_t *)&ds->dr7))
314 return FALSE;
315
316 /*
317 * Don't allow the user to set debug addresses above their max
318 * value
319 */
320 if (ds->dr7 & 0x1)
321 if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
322 return FALSE;
323
324 if (ds->dr7 & (0x1<<2))
325 if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
326 return FALSE;
327
328 if (ds->dr7 & (0x1<<4))
329 if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
330 return FALSE;
331
332 if (ds->dr7 & (0x1<<6))
333 if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
334 return FALSE;
335
336 return TRUE;
337 }
338
339
340 static kern_return_t
341 set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
342 {
343 x86_debug_state32_t *ids;
344 pcb_t pcb;
345
346 pcb = THREAD_TO_PCB(thread);
347 ids = pcb->ids;
348
349 if (debug_state_is_valid32(ds) != TRUE) {
350 return KERN_INVALID_ARGUMENT;
351 }
352
353 if (ids == NULL) {
354 ids = zalloc(ids_zone);
355 bzero(ids, sizeof *ids);
356
357 simple_lock(&pcb->lock);
358 /* make sure it wasn't already alloc()'d elsewhere */
359 if (pcb->ids == NULL) {
360 pcb->ids = ids;
361 simple_unlock(&pcb->lock);
362 } else {
363 simple_unlock(&pcb->lock);
364 zfree(ids_zone, ids);
365 }
366 }
367
368
369 copy_debug_state32(ds, ids, FALSE);
370
371 return (KERN_SUCCESS);
372 }
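/*
 * Editorial note, not part of the original source: the pattern above
 * (allocate outside the PCB lock, publish under the lock, and free the
 * fresh buffer if another thread raced us) avoids holding a simple lock
 * across a potentially blocking zalloc(). The same idea in general form:
 */
#if 0	/* illustrative only; 'owner', 'buf' and 'some_zone' are placeholders */
	new_buf = zalloc(some_zone);		/* may block, so do it unlocked */
	simple_lock(&owner->lock);
	if (owner->buf == NULL) {
		owner->buf = new_buf;		/* we won the race: publish */
		simple_unlock(&owner->lock);
	} else {
		simple_unlock(&owner->lock);
		zfree(some_zone, new_buf);	/* we lost: discard our copy */
	}
#endif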
373
374 static kern_return_t
375 set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
376 {
377 x86_debug_state64_t *ids;
378 pcb_t pcb;
379
380 pcb = THREAD_TO_PCB(thread);
381 ids = pcb->ids;
382
383 if (debug_state_is_valid64(ds) != TRUE) {
384 return KERN_INVALID_ARGUMENT;
385 }
386
387 if (ids == NULL) {
388 ids = zalloc(ids_zone);
389 bzero(ids, sizeof *ids);
390
391 simple_lock(&pcb->lock);
392 /* make sure it wasn't already alloc()'d elsewhere */
393 if (pcb->ids == NULL) {
394 pcb->ids = ids;
395 simple_unlock(&pcb->lock);
396 } else {
397 simple_unlock(&pcb->lock);
398 zfree(ids_zone, ids);
399 }
400 }
401
402 copy_debug_state64(ds, ids, FALSE);
403
404 return (KERN_SUCCESS);
405 }
406
407 static void
408 get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
409 {
410 x86_debug_state32_t *saved_state;
411
412 saved_state = thread->machine.ids;
413
414 if (saved_state) {
415 copy_debug_state32(saved_state, ds, TRUE);
416 } else
417 bzero(ds, sizeof *ds);
418 }
419
420 static void
421 get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
422 {
423 x86_debug_state64_t *saved_state;
424
425 saved_state = (x86_debug_state64_t *)thread->machine.ids;
426
427 if (saved_state) {
428 copy_debug_state64(saved_state, ds, TRUE);
429 } else
430 bzero(ds, sizeof *ds);
431 }
432
433 /*
434 * consider_machine_collect:
435 *
436 * Try to collect machine-dependent pages
437 */
438 void
439 consider_machine_collect(void)
440 {
441 }
442
443 void
444 consider_machine_adjust(void)
445 {
446 }
447
448 /*
449 * Switch to the first thread on a CPU.
450 */
451 void
452 machine_load_context(
453 thread_t new)
454 {
455 #if CONFIG_COUNTERS
456 machine_pmc_cswitch(NULL, new);
457 #endif
458 new->machine.specFlags |= OnProc;
459 act_machine_switch_pcb(NULL, new);
460 Load_context(new);
461 }
462
463 /*
464 * Switch to a new thread.
465  * Save the old thread's kernel state or continuation,
466 * and return it.
467 */
468 thread_t
469 machine_switch_context(
470 thread_t old,
471 thread_continue_t continuation,
472 thread_t new)
473 {
474 #if MACH_RT
475 assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
476 #endif
477 #if CONFIG_COUNTERS
478 machine_pmc_cswitch(old, new);
479 #endif
480 /*
481 * Save FP registers if in use.
482 */
483 fpu_save_context(old);
484
485 old->machine.specFlags &= ~OnProc;
486 new->machine.specFlags |= OnProc;
487
488 /*
489 * Monitor the stack depth and report new max,
490 * not worrying about races.
491 */
492 vm_offset_t depth = current_stack_depth();
493 if (depth > kernel_stack_depth_max) {
494 kernel_stack_depth_max = depth;
495 KERNEL_DEBUG_CONSTANT(
496 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
497 (long) depth, 0, 0, 0, 0);
498 }
499
500 /*
501 * Switch address maps if need be, even if not switching tasks.
502 * (A server activation may be "borrowing" a client map.)
503 */
504 PMAP_SWITCH_CONTEXT(old, new, cpu_number());
505
506 /*
507 * Load the rest of the user state for the new thread
508 */
509 act_machine_switch_pcb(old, new);
510
511 return(Switch_context(old, continuation, new));
512 }
513
514 thread_t
515 machine_processor_shutdown(
516 thread_t thread,
517 void (*doshutdown)(processor_t),
518 processor_t processor)
519 {
520 #if CONFIG_VMX
521 vmx_suspend();
522 #endif
523 fpu_save_context(thread);
524 PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
525 return(Shutdown_context(thread, doshutdown, processor));
526 }
527
528
529 /*
530 * This is where registers that are not normally specified by the mach-o
531 * file on an execve would be nullified, perhaps to avoid a covert channel.
532 */
533 kern_return_t
534 machine_thread_state_initialize(
535 thread_t thread)
536 {
537 /*
538 * If there's an fpu save area, free it.
539 * The initialized state will then be lazily faulted-in, if required.
540  * And if we're the target thread, re-arm the no-fpu trap.
541 */
542 if (thread->machine.ifps) {
543 (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);
544
545 if (thread == current_thread())
546 clear_fpu();
547 }
548
549 if (thread->machine.ids) {
550 zfree(ids_zone, thread->machine.ids);
551 thread->machine.ids = NULL;
552 }
553
554 return KERN_SUCCESS;
555 }
556
557 uint32_t
558 get_eflags_exportmask(void)
559 {
560 return EFL_USER_SET;
561 }
562
563 /*
564 * x86_SAVED_STATE32 - internal save/restore general register state on 32/64 bit processors
565 * for 32bit tasks only
566 * x86_SAVED_STATE64 - internal save/restore general register state on 64 bit processors
567 * for 64bit tasks only
568 * x86_THREAD_STATE32 - external set/get general register state on 32/64 bit processors
569 * for 32bit tasks only
570 * x86_THREAD_STATE64 - external set/get general register state on 64 bit processors
571 * for 64bit tasks only
572 * x86_SAVED_STATE - external set/get general register state on 32/64 bit processors
573 * for either 32bit or 64bit tasks
574 * x86_FLOAT_STATE32 - internal/external save/restore float and xmm state on 32/64 bit processors
575 * for 32bit tasks only
576 * x86_FLOAT_STATE64 - internal/external save/restore float and xmm state on 64 bit processors
577 * for 64bit tasks only
578 * x86_FLOAT_STATE - external save/restore float and xmm state on 32/64 bit processors
579 * for either 32bit or 64bit tasks
580 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
581 * for 32bit tasks only
582 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
583 * for 64bit tasks only
584  * x86_EXCEPTION_STATE - external get exception state on 32/64 bit processors
585 * for either 32bit or 64bit tasks
586 */
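/*
 * Editorial sketch, not part of the original source: from user space the
 * generic flavors above are typically reached through the Mach
 * thread_get_state() call, and the embedded state header reports which
 * width was actually filled in ('thread_port' is a placeholder for a
 * thread's Mach port).
 */
#if 0	/* illustrative only */
	x86_thread_state_t	ts;
	mach_msg_type_number_t	count = x86_THREAD_STATE_COUNT;

	if (thread_get_state(thread_port, x86_THREAD_STATE,
			     (thread_state_t)&ts, &count) == KERN_SUCCESS) {
		if (ts.tsh.flavor == x86_THREAD_STATE64) {
			/* 64-bit task: registers are in ts.uts.ts64 */
		} else {
			/* 32-bit task: registers are in ts.uts.ts32 */
		}
	}
#endif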
587
588
589 static void
590 get_exception_state64(thread_t thread, x86_exception_state64_t *es)
591 {
592 x86_saved_state64_t *saved_state;
593
594 saved_state = USER_REGS64(thread);
595
596 es->trapno = saved_state->isf.trapno;
597 es->cpu = saved_state->isf.cpu;
598 es->err = (typeof(es->err))saved_state->isf.err;
599 es->faultvaddr = saved_state->cr2;
600 }
601
602 static void
603 get_exception_state32(thread_t thread, x86_exception_state32_t *es)
604 {
605 x86_saved_state32_t *saved_state;
606
607 saved_state = USER_REGS32(thread);
608
609 es->trapno = saved_state->trapno;
610 es->cpu = saved_state->cpu;
611 es->err = saved_state->err;
612 es->faultvaddr = saved_state->cr2;
613 }
614
615
616 static int
617 set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
618 {
619 x86_saved_state32_t *saved_state;
620
621 pal_register_cache_state(thread, DIRTY);
622
623 saved_state = USER_REGS32(thread);
624
625 /*
626 * Scrub segment selector values:
627 */
628 ts->cs = USER_CS;
629 #ifdef __i386__
630 if (ts->ss == 0) ts->ss = USER_DS;
631 if (ts->ds == 0) ts->ds = USER_DS;
632 if (ts->es == 0) ts->es = USER_DS;
633 #else /* __x86_64__ */
634 /*
635 * On a 64 bit kernel, we always override the data segments,
636 * as the actual selector numbers have changed. This also
637 * means that we don't support setting the data segments
638 * manually any more.
639 */
640 ts->ss = USER_DS;
641 ts->ds = USER_DS;
642 ts->es = USER_DS;
643 #endif
644
645 /* Check segment selectors are safe */
646 if (!valid_user_segment_selectors(ts->cs,
647 ts->ss,
648 ts->ds,
649 ts->es,
650 ts->fs,
651 ts->gs))
652 return(KERN_INVALID_ARGUMENT);
653
654 saved_state->eax = ts->eax;
655 saved_state->ebx = ts->ebx;
656 saved_state->ecx = ts->ecx;
657 saved_state->edx = ts->edx;
658 saved_state->edi = ts->edi;
659 saved_state->esi = ts->esi;
660 saved_state->ebp = ts->ebp;
661 saved_state->uesp = ts->esp;
662 saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
663 saved_state->eip = ts->eip;
664 saved_state->cs = ts->cs;
665 saved_state->ss = ts->ss;
666 saved_state->ds = ts->ds;
667 saved_state->es = ts->es;
668 saved_state->fs = ts->fs;
669 saved_state->gs = ts->gs;
670
671 /*
672 * If the trace trap bit is being set,
673 * ensure that the user returns via iret
674  * - which is signaled by switching to the SYSENTER_TF_CS selector:
675 */
676 if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
677 saved_state->cs = SYSENTER_TF_CS;
678
679 return(KERN_SUCCESS);
680 }
681
682 static int
683 set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
684 {
685 x86_saved_state64_t *saved_state;
686
687 pal_register_cache_state(thread, DIRTY);
688
689 saved_state = USER_REGS64(thread);
690
691 if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
692 !IS_USERADDR64_CANONICAL(ts->rip))
693 return(KERN_INVALID_ARGUMENT);
694
695 saved_state->r8 = ts->r8;
696 saved_state->r9 = ts->r9;
697 saved_state->r10 = ts->r10;
698 saved_state->r11 = ts->r11;
699 saved_state->r12 = ts->r12;
700 saved_state->r13 = ts->r13;
701 saved_state->r14 = ts->r14;
702 saved_state->r15 = ts->r15;
703 saved_state->rax = ts->rax;
704 saved_state->rbx = ts->rbx;
705 saved_state->rcx = ts->rcx;
706 saved_state->rdx = ts->rdx;
707 saved_state->rdi = ts->rdi;
708 saved_state->rsi = ts->rsi;
709 saved_state->rbp = ts->rbp;
710 saved_state->isf.rsp = ts->rsp;
711 saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
712 saved_state->isf.rip = ts->rip;
713 saved_state->isf.cs = USER64_CS;
714 saved_state->fs = (uint32_t)ts->fs;
715 saved_state->gs = (uint32_t)ts->gs;
716
717 return(KERN_SUCCESS);
718 }
719
720
721
722 static void
723 get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
724 {
725 x86_saved_state32_t *saved_state;
726
727 pal_register_cache_state(thread, VALID);
728
729 saved_state = USER_REGS32(thread);
730
731 ts->eax = saved_state->eax;
732 ts->ebx = saved_state->ebx;
733 ts->ecx = saved_state->ecx;
734 ts->edx = saved_state->edx;
735 ts->edi = saved_state->edi;
736 ts->esi = saved_state->esi;
737 ts->ebp = saved_state->ebp;
738 ts->esp = saved_state->uesp;
739 ts->eflags = saved_state->efl;
740 ts->eip = saved_state->eip;
741 ts->cs = saved_state->cs;
742 ts->ss = saved_state->ss;
743 ts->ds = saved_state->ds;
744 ts->es = saved_state->es;
745 ts->fs = saved_state->fs;
746 ts->gs = saved_state->gs;
747 }
748
749
750 static void
751 get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
752 {
753 x86_saved_state64_t *saved_state;
754
755 pal_register_cache_state(thread, VALID);
756
757 saved_state = USER_REGS64(thread);
758
759 ts->r8 = saved_state->r8;
760 ts->r9 = saved_state->r9;
761 ts->r10 = saved_state->r10;
762 ts->r11 = saved_state->r11;
763 ts->r12 = saved_state->r12;
764 ts->r13 = saved_state->r13;
765 ts->r14 = saved_state->r14;
766 ts->r15 = saved_state->r15;
767 ts->rax = saved_state->rax;
768 ts->rbx = saved_state->rbx;
769 ts->rcx = saved_state->rcx;
770 ts->rdx = saved_state->rdx;
771 ts->rdi = saved_state->rdi;
772 ts->rsi = saved_state->rsi;
773 ts->rbp = saved_state->rbp;
774 ts->rsp = saved_state->isf.rsp;
775 ts->rflags = saved_state->isf.rflags;
776 ts->rip = saved_state->isf.rip;
777 ts->cs = saved_state->isf.cs;
778 ts->fs = saved_state->fs;
779 ts->gs = saved_state->gs;
780 }
781
782
783 /*
784  * machine_thread_set_state:
785 *
786 * Set the status of the specified thread.
787 */
788
789 kern_return_t
790 machine_thread_set_state(
791 thread_t thr_act,
792 thread_flavor_t flavor,
793 thread_state_t tstate,
794 mach_msg_type_number_t count)
795 {
796 switch (flavor) {
797 case x86_SAVED_STATE32:
798 {
799 x86_saved_state32_t *state;
800 x86_saved_state32_t *saved_state;
801
802 if (count < x86_SAVED_STATE32_COUNT)
803 return(KERN_INVALID_ARGUMENT);
804
805 if (thread_is_64bit(thr_act))
806 return(KERN_INVALID_ARGUMENT);
807
808 state = (x86_saved_state32_t *) tstate;
809
810 /* Check segment selectors are safe */
811 if (!valid_user_segment_selectors(state->cs,
812 state->ss,
813 state->ds,
814 state->es,
815 state->fs,
816 state->gs))
817 return KERN_INVALID_ARGUMENT;
818
819 pal_register_cache_state(thr_act, DIRTY);
820
821 saved_state = USER_REGS32(thr_act);
822
823 /*
824 * General registers
825 */
826 saved_state->edi = state->edi;
827 saved_state->esi = state->esi;
828 saved_state->ebp = state->ebp;
829 saved_state->uesp = state->uesp;
830 saved_state->ebx = state->ebx;
831 saved_state->edx = state->edx;
832 saved_state->ecx = state->ecx;
833 saved_state->eax = state->eax;
834 saved_state->eip = state->eip;
835
836 saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;
837
838 /*
839 * If the trace trap bit is being set,
840 * ensure that the user returns via iret
841  * - which is signaled by switching to the SYSENTER_TF_CS selector:
842 */
843 if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
844 state->cs = SYSENTER_TF_CS;
845
846 /*
847 * User setting segment registers.
848 * Code and stack selectors have already been
849 * checked. Others will be reset by 'iret'
850 * if they are not valid.
851 */
852 saved_state->cs = state->cs;
853 saved_state->ss = state->ss;
854 saved_state->ds = state->ds;
855 saved_state->es = state->es;
856 saved_state->fs = state->fs;
857 saved_state->gs = state->gs;
858
859 break;
860 }
861
862 case x86_SAVED_STATE64:
863 {
864 x86_saved_state64_t *state;
865 x86_saved_state64_t *saved_state;
866
867 if (count < x86_SAVED_STATE64_COUNT)
868 return(KERN_INVALID_ARGUMENT);
869
870 if (!thread_is_64bit(thr_act))
871 return(KERN_INVALID_ARGUMENT);
872
873 state = (x86_saved_state64_t *) tstate;
874
875 /* Verify that the supplied code segment selector is
876 * valid. In 64-bit mode, the FS and GS segment overrides
877 * use the FS.base and GS.base MSRs to calculate
878 * base addresses, and the trampolines don't directly
879 * restore the segment registers--hence they are no
880 * longer relevant for validation.
881 */
882 if (!valid_user_code_selector(state->isf.cs))
883 return KERN_INVALID_ARGUMENT;
884
885 /* Check pc and stack are canonical addresses */
886 if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
887 !IS_USERADDR64_CANONICAL(state->isf.rip))
888 return KERN_INVALID_ARGUMENT;
889
890 pal_register_cache_state(thr_act, DIRTY);
891
892 saved_state = USER_REGS64(thr_act);
893
894 /*
895 * General registers
896 */
897 saved_state->r8 = state->r8;
898 saved_state->r9 = state->r9;
899 saved_state->r10 = state->r10;
900 saved_state->r11 = state->r11;
901 saved_state->r12 = state->r12;
902 saved_state->r13 = state->r13;
903 saved_state->r14 = state->r14;
904 saved_state->r15 = state->r15;
905 saved_state->rdi = state->rdi;
906 saved_state->rsi = state->rsi;
907 saved_state->rbp = state->rbp;
908 saved_state->rbx = state->rbx;
909 saved_state->rdx = state->rdx;
910 saved_state->rcx = state->rcx;
911 saved_state->rax = state->rax;
912 saved_state->isf.rsp = state->isf.rsp;
913 saved_state->isf.rip = state->isf.rip;
914
915 saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
916
917 /*
918 * User setting segment registers.
919 * Code and stack selectors have already been
920  * checked. Others will be reset by 'sysret'
921 * if they are not valid.
922 */
923 saved_state->isf.cs = state->isf.cs;
924 saved_state->isf.ss = state->isf.ss;
925 saved_state->fs = state->fs;
926 saved_state->gs = state->gs;
927
928 break;
929 }
930
931 case x86_FLOAT_STATE32:
932 {
933 if (count != x86_FLOAT_STATE32_COUNT)
934 return(KERN_INVALID_ARGUMENT);
935
936 if (thread_is_64bit(thr_act))
937 return(KERN_INVALID_ARGUMENT);
938
939 return fpu_set_fxstate(thr_act, tstate, flavor);
940 }
941
942 case x86_FLOAT_STATE64:
943 {
944 if (count != x86_FLOAT_STATE64_COUNT)
945 return(KERN_INVALID_ARGUMENT);
946
947 if ( !thread_is_64bit(thr_act))
948 return(KERN_INVALID_ARGUMENT);
949
950 return fpu_set_fxstate(thr_act, tstate, flavor);
951 }
952
953 case x86_FLOAT_STATE:
954 {
955 x86_float_state_t *state;
956
957 if (count != x86_FLOAT_STATE_COUNT)
958 return(KERN_INVALID_ARGUMENT);
959
960 state = (x86_float_state_t *)tstate;
961 if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
962 thread_is_64bit(thr_act)) {
963 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
964 }
965 if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
966 !thread_is_64bit(thr_act)) {
967 return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
968 }
969 return(KERN_INVALID_ARGUMENT);
970 }
971
972 case x86_AVX_STATE32:
973 {
974 if (count != x86_AVX_STATE32_COUNT)
975 return(KERN_INVALID_ARGUMENT);
976
977 if (thread_is_64bit(thr_act))
978 return(KERN_INVALID_ARGUMENT);
979
980 return fpu_set_fxstate(thr_act, tstate, flavor);
981 }
982
983 case x86_AVX_STATE64:
984 {
985 if (count != x86_AVX_STATE64_COUNT)
986 return(KERN_INVALID_ARGUMENT);
987
988 if (!thread_is_64bit(thr_act))
989 return(KERN_INVALID_ARGUMENT);
990
991 return fpu_set_fxstate(thr_act, tstate, flavor);
992 }
993
994 case x86_THREAD_STATE32:
995 {
996 if (count != x86_THREAD_STATE32_COUNT)
997 return(KERN_INVALID_ARGUMENT);
998
999 if (thread_is_64bit(thr_act))
1000 return(KERN_INVALID_ARGUMENT);
1001
1002 return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1003 }
1004
1005 case x86_THREAD_STATE64:
1006 {
1007 if (count != x86_THREAD_STATE64_COUNT)
1008 return(KERN_INVALID_ARGUMENT);
1009
1010 if (!thread_is_64bit(thr_act))
1011 return(KERN_INVALID_ARGUMENT);
1012
1013 return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
1014
1015 }
1016 case x86_THREAD_STATE:
1017 {
1018 x86_thread_state_t *state;
1019
1020 if (count != x86_THREAD_STATE_COUNT)
1021 return(KERN_INVALID_ARGUMENT);
1022
1023 state = (x86_thread_state_t *)tstate;
1024
1025 if (state->tsh.flavor == x86_THREAD_STATE64 &&
1026 state->tsh.count == x86_THREAD_STATE64_COUNT &&
1027 thread_is_64bit(thr_act)) {
1028 return set_thread_state64(thr_act, &state->uts.ts64);
1029 } else if (state->tsh.flavor == x86_THREAD_STATE32 &&
1030 state->tsh.count == x86_THREAD_STATE32_COUNT &&
1031 !thread_is_64bit(thr_act)) {
1032 return set_thread_state32(thr_act, &state->uts.ts32);
1033 } else
1034 return(KERN_INVALID_ARGUMENT);
1035
1036 break;
1037 }
1038 case x86_DEBUG_STATE32:
1039 {
1040 x86_debug_state32_t *state;
1041 kern_return_t ret;
1042
1043 if (thread_is_64bit(thr_act))
1044 return(KERN_INVALID_ARGUMENT);
1045
1046 state = (x86_debug_state32_t *)tstate;
1047
1048 ret = set_debug_state32(thr_act, state);
1049
1050 return ret;
1051 }
1052 case x86_DEBUG_STATE64:
1053 {
1054 x86_debug_state64_t *state;
1055 kern_return_t ret;
1056
1057 if (!thread_is_64bit(thr_act))
1058 return(KERN_INVALID_ARGUMENT);
1059
1060 state = (x86_debug_state64_t *)tstate;
1061
1062 ret = set_debug_state64(thr_act, state);
1063
1064 return ret;
1065 }
1066 case x86_DEBUG_STATE:
1067 {
1068 x86_debug_state_t *state;
1069 kern_return_t ret = KERN_INVALID_ARGUMENT;
1070
1071 if (count != x86_DEBUG_STATE_COUNT)
1072 return (KERN_INVALID_ARGUMENT);
1073
1074 state = (x86_debug_state_t *)tstate;
1075 if (state->dsh.flavor == x86_DEBUG_STATE64 &&
1076 state->dsh.count == x86_DEBUG_STATE64_COUNT &&
1077 thread_is_64bit(thr_act)) {
1078 ret = set_debug_state64(thr_act, &state->uds.ds64);
1079 }
1080 else
1081 if (state->dsh.flavor == x86_DEBUG_STATE32 &&
1082 state->dsh.count == x86_DEBUG_STATE32_COUNT &&
1083 !thread_is_64bit(thr_act)) {
1084 ret = set_debug_state32(thr_act, &state->uds.ds32);
1085 }
1086 return ret;
1087 }
1088 default:
1089 return(KERN_INVALID_ARGUMENT);
1090 }
1091
1092 return(KERN_SUCCESS);
1093 }
1094
1095
1096
1097 /*
1098 * thread_getstatus:
1099 *
1100 * Get the status of the specified thread.
1101 */
1102
1103 kern_return_t
1104 machine_thread_get_state(
1105 thread_t thr_act,
1106 thread_flavor_t flavor,
1107 thread_state_t tstate,
1108 mach_msg_type_number_t *count)
1109 {
1110
1111 switch (flavor) {
1112
1113 case THREAD_STATE_FLAVOR_LIST:
1114 {
1115 if (*count < 3)
1116 return (KERN_INVALID_ARGUMENT);
1117
1118 tstate[0] = i386_THREAD_STATE;
1119 tstate[1] = i386_FLOAT_STATE;
1120 tstate[2] = i386_EXCEPTION_STATE;
1121
1122 *count = 3;
1123 break;
1124 }
1125
1126 case THREAD_STATE_FLAVOR_LIST_NEW:
1127 {
1128 if (*count < 4)
1129 return (KERN_INVALID_ARGUMENT);
1130
1131 tstate[0] = x86_THREAD_STATE;
1132 tstate[1] = x86_FLOAT_STATE;
1133 tstate[2] = x86_EXCEPTION_STATE;
1134 tstate[3] = x86_DEBUG_STATE;
1135
1136 *count = 4;
1137 break;
1138 }
1139
1140 case x86_SAVED_STATE32:
1141 {
1142 x86_saved_state32_t *state;
1143 x86_saved_state32_t *saved_state;
1144
1145 if (*count < x86_SAVED_STATE32_COUNT)
1146 return(KERN_INVALID_ARGUMENT);
1147
1148 if (thread_is_64bit(thr_act))
1149 return(KERN_INVALID_ARGUMENT);
1150
1151 state = (x86_saved_state32_t *) tstate;
1152 saved_state = USER_REGS32(thr_act);
1153
1154 /*
1155 * First, copy everything:
1156 */
1157 *state = *saved_state;
1158 state->ds = saved_state->ds & 0xffff;
1159 state->es = saved_state->es & 0xffff;
1160 state->fs = saved_state->fs & 0xffff;
1161 state->gs = saved_state->gs & 0xffff;
1162
1163 *count = x86_SAVED_STATE32_COUNT;
1164 break;
1165 }
1166
1167 case x86_SAVED_STATE64:
1168 {
1169 x86_saved_state64_t *state;
1170 x86_saved_state64_t *saved_state;
1171
1172 if (*count < x86_SAVED_STATE64_COUNT)
1173 return(KERN_INVALID_ARGUMENT);
1174
1175 if (!thread_is_64bit(thr_act))
1176 return(KERN_INVALID_ARGUMENT);
1177
1178 state = (x86_saved_state64_t *)tstate;
1179 saved_state = USER_REGS64(thr_act);
1180
1181 /*
1182 * First, copy everything:
1183 */
1184 *state = *saved_state;
1185 state->fs = saved_state->fs & 0xffff;
1186 state->gs = saved_state->gs & 0xffff;
1187
1188 *count = x86_SAVED_STATE64_COUNT;
1189 break;
1190 }
1191
1192 case x86_FLOAT_STATE32:
1193 {
1194 if (*count < x86_FLOAT_STATE32_COUNT)
1195 return(KERN_INVALID_ARGUMENT);
1196
1197 if (thread_is_64bit(thr_act))
1198 return(KERN_INVALID_ARGUMENT);
1199
1200 *count = x86_FLOAT_STATE32_COUNT;
1201
1202 return fpu_get_fxstate(thr_act, tstate, flavor);
1203 }
1204
1205 case x86_FLOAT_STATE64:
1206 {
1207 if (*count < x86_FLOAT_STATE64_COUNT)
1208 return(KERN_INVALID_ARGUMENT);
1209
1210 if ( !thread_is_64bit(thr_act))
1211 return(KERN_INVALID_ARGUMENT);
1212
1213 *count = x86_FLOAT_STATE64_COUNT;
1214
1215 return fpu_get_fxstate(thr_act, tstate, flavor);
1216 }
1217
1218 case x86_FLOAT_STATE:
1219 {
1220 x86_float_state_t *state;
1221 kern_return_t kret;
1222
1223 if (*count < x86_FLOAT_STATE_COUNT)
1224 return(KERN_INVALID_ARGUMENT);
1225
1226 state = (x86_float_state_t *)tstate;
1227
1228 /*
1229 * no need to bzero... currently
1230 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
1231 */
1232 if (thread_is_64bit(thr_act)) {
1233 state->fsh.flavor = x86_FLOAT_STATE64;
1234 state->fsh.count = x86_FLOAT_STATE64_COUNT;
1235
1236 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
1237 } else {
1238 state->fsh.flavor = x86_FLOAT_STATE32;
1239 state->fsh.count = x86_FLOAT_STATE32_COUNT;
1240
1241 kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
1242 }
1243 *count = x86_FLOAT_STATE_COUNT;
1244
1245 return(kret);
1246 }
1247
1248 case x86_AVX_STATE32:
1249 {
1250 if (*count != x86_AVX_STATE32_COUNT)
1251 return(KERN_INVALID_ARGUMENT);
1252
1253 if (thread_is_64bit(thr_act))
1254 return(KERN_INVALID_ARGUMENT);
1255
1256 *count = x86_AVX_STATE32_COUNT;
1257
1258 return fpu_get_fxstate(thr_act, tstate, flavor);
1259 }
1260
1261 case x86_AVX_STATE64:
1262 {
1263 if (*count != x86_AVX_STATE64_COUNT)
1264 return(KERN_INVALID_ARGUMENT);
1265
1266 if ( !thread_is_64bit(thr_act))
1267 return(KERN_INVALID_ARGUMENT);
1268
1269 *count = x86_AVX_STATE64_COUNT;
1270
1271 return fpu_get_fxstate(thr_act, tstate, flavor);
1272 }
1273
1274 case x86_THREAD_STATE32:
1275 {
1276 if (*count < x86_THREAD_STATE32_COUNT)
1277 return(KERN_INVALID_ARGUMENT);
1278
1279 if (thread_is_64bit(thr_act))
1280 return(KERN_INVALID_ARGUMENT);
1281
1282 *count = x86_THREAD_STATE32_COUNT;
1283
1284 get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
1285 break;
1286 }
1287
1288 case x86_THREAD_STATE64:
1289 {
1290 if (*count < x86_THREAD_STATE64_COUNT)
1291 return(KERN_INVALID_ARGUMENT);
1292
1293 if ( !thread_is_64bit(thr_act))
1294 return(KERN_INVALID_ARGUMENT);
1295
1296 *count = x86_THREAD_STATE64_COUNT;
1297
1298 get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
1299 break;
1300 }
1301
1302 case x86_THREAD_STATE:
1303 {
1304 x86_thread_state_t *state;
1305
1306 if (*count < x86_THREAD_STATE_COUNT)
1307 return(KERN_INVALID_ARGUMENT);
1308
1309 state = (x86_thread_state_t *)tstate;
1310
1311 bzero((char *)state, sizeof(x86_thread_state_t));
1312
1313 if (thread_is_64bit(thr_act)) {
1314 state->tsh.flavor = x86_THREAD_STATE64;
1315 state->tsh.count = x86_THREAD_STATE64_COUNT;
1316
1317 get_thread_state64(thr_act, &state->uts.ts64);
1318 } else {
1319 state->tsh.flavor = x86_THREAD_STATE32;
1320 state->tsh.count = x86_THREAD_STATE32_COUNT;
1321
1322 get_thread_state32(thr_act, &state->uts.ts32);
1323 }
1324 *count = x86_THREAD_STATE_COUNT;
1325
1326 break;
1327 }
1328
1329
1330 case x86_EXCEPTION_STATE32:
1331 {
1332 if (*count < x86_EXCEPTION_STATE32_COUNT)
1333 return(KERN_INVALID_ARGUMENT);
1334
1335 if (thread_is_64bit(thr_act))
1336 return(KERN_INVALID_ARGUMENT);
1337
1338 *count = x86_EXCEPTION_STATE32_COUNT;
1339
1340 get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
1341 /*
1342 * Suppress the cpu number for binary compatibility
1343 * of this deprecated state.
1344 */
1345 ((x86_exception_state32_t *)tstate)->cpu = 0;
1346 break;
1347 }
1348
1349 case x86_EXCEPTION_STATE64:
1350 {
1351 if (*count < x86_EXCEPTION_STATE64_COUNT)
1352 return(KERN_INVALID_ARGUMENT);
1353
1354 if ( !thread_is_64bit(thr_act))
1355 return(KERN_INVALID_ARGUMENT);
1356
1357 *count = x86_EXCEPTION_STATE64_COUNT;
1358
1359 get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
1360 /*
1361 * Suppress the cpu number for binary compatibility
1362 * of this deprecated state.
1363 */
1364 ((x86_exception_state64_t *)tstate)->cpu = 0;
1365 break;
1366 }
1367
1368 case x86_EXCEPTION_STATE:
1369 {
1370 x86_exception_state_t *state;
1371
1372 if (*count < x86_EXCEPTION_STATE_COUNT)
1373 return(KERN_INVALID_ARGUMENT);
1374
1375 state = (x86_exception_state_t *)tstate;
1376
1377 bzero((char *)state, sizeof(x86_exception_state_t));
1378
1379 if (thread_is_64bit(thr_act)) {
1380 state->esh.flavor = x86_EXCEPTION_STATE64;
1381 state->esh.count = x86_EXCEPTION_STATE64_COUNT;
1382
1383 get_exception_state64(thr_act, &state->ues.es64);
1384 } else {
1385 state->esh.flavor = x86_EXCEPTION_STATE32;
1386 state->esh.count = x86_EXCEPTION_STATE32_COUNT;
1387
1388 get_exception_state32(thr_act, &state->ues.es32);
1389 }
1390 *count = x86_EXCEPTION_STATE_COUNT;
1391
1392 break;
1393 }
1394 case x86_DEBUG_STATE32:
1395 {
1396 if (*count < x86_DEBUG_STATE32_COUNT)
1397 return(KERN_INVALID_ARGUMENT);
1398
1399 if (thread_is_64bit(thr_act))
1400 return(KERN_INVALID_ARGUMENT);
1401
1402 get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);
1403
1404 *count = x86_DEBUG_STATE32_COUNT;
1405
1406 break;
1407 }
1408 case x86_DEBUG_STATE64:
1409 {
1410 if (*count < x86_DEBUG_STATE64_COUNT)
1411 return(KERN_INVALID_ARGUMENT);
1412
1413 if (!thread_is_64bit(thr_act))
1414 return(KERN_INVALID_ARGUMENT);
1415
1416 get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);
1417
1418 *count = x86_DEBUG_STATE64_COUNT;
1419
1420 break;
1421 }
1422 case x86_DEBUG_STATE:
1423 {
1424 x86_debug_state_t *state;
1425
1426 if (*count < x86_DEBUG_STATE_COUNT)
1427 return(KERN_INVALID_ARGUMENT);
1428
1429 state = (x86_debug_state_t *)tstate;
1430
1431 bzero(state, sizeof *state);
1432
1433 if (thread_is_64bit(thr_act)) {
1434 state->dsh.flavor = x86_DEBUG_STATE64;
1435 state->dsh.count = x86_DEBUG_STATE64_COUNT;
1436
1437 get_debug_state64(thr_act, &state->uds.ds64);
1438 } else {
1439 state->dsh.flavor = x86_DEBUG_STATE32;
1440 state->dsh.count = x86_DEBUG_STATE32_COUNT;
1441
1442 get_debug_state32(thr_act, &state->uds.ds32);
1443 }
1444 *count = x86_DEBUG_STATE_COUNT;
1445 break;
1446 }
1447 default:
1448 return(KERN_INVALID_ARGUMENT);
1449 }
1450
1451 return(KERN_SUCCESS);
1452 }
1453
1454 kern_return_t
1455 machine_thread_get_kern_state(
1456 thread_t thread,
1457 thread_flavor_t flavor,
1458 thread_state_t tstate,
1459 mach_msg_type_number_t *count)
1460 {
1461 x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state;
1462
1463 /*
1464 * This works only for an interrupted kernel thread
1465 */
1466 if (thread != current_thread() || int_state == NULL)
1467 return KERN_FAILURE;
1468
1469 switch (flavor) {
1470 case x86_THREAD_STATE32: {
1471 x86_thread_state32_t *state;
1472 x86_saved_state32_t *saved_state;
1473
1474 if (!is_saved_state32(int_state) ||
1475 *count < x86_THREAD_STATE32_COUNT)
1476 return (KERN_INVALID_ARGUMENT);
1477
1478 state = (x86_thread_state32_t *) tstate;
1479
1480 saved_state = saved_state32(int_state);
1481 /*
1482 * General registers.
1483 */
1484 state->eax = saved_state->eax;
1485 state->ebx = saved_state->ebx;
1486 state->ecx = saved_state->ecx;
1487 state->edx = saved_state->edx;
1488 state->edi = saved_state->edi;
1489 state->esi = saved_state->esi;
1490 state->ebp = saved_state->ebp;
1491 state->esp = saved_state->uesp;
1492 state->eflags = saved_state->efl;
1493 state->eip = saved_state->eip;
1494 state->cs = saved_state->cs;
1495 state->ss = saved_state->ss;
1496 state->ds = saved_state->ds & 0xffff;
1497 state->es = saved_state->es & 0xffff;
1498 state->fs = saved_state->fs & 0xffff;
1499 state->gs = saved_state->gs & 0xffff;
1500
1501 *count = x86_THREAD_STATE32_COUNT;
1502
1503 return KERN_SUCCESS;
1504 }
1505
1506 case x86_THREAD_STATE64: {
1507 x86_thread_state64_t *state;
1508 x86_saved_state64_t *saved_state;
1509
1510 if (!is_saved_state64(int_state) ||
1511 *count < x86_THREAD_STATE64_COUNT)
1512 return (KERN_INVALID_ARGUMENT);
1513
1514 state = (x86_thread_state64_t *) tstate;
1515
1516 saved_state = saved_state64(int_state);
1517 /*
1518 * General registers.
1519 */
1520 state->rax = saved_state->rax;
1521 state->rbx = saved_state->rbx;
1522 state->rcx = saved_state->rcx;
1523 state->rdx = saved_state->rdx;
1524 state->rdi = saved_state->rdi;
1525 state->rsi = saved_state->rsi;
1526 state->rbp = saved_state->rbp;
1527 state->rsp = saved_state->isf.rsp;
1528 state->r8 = saved_state->r8;
1529 state->r9 = saved_state->r9;
1530 state->r10 = saved_state->r10;
1531 state->r11 = saved_state->r11;
1532 state->r12 = saved_state->r12;
1533 state->r13 = saved_state->r13;
1534 state->r14 = saved_state->r14;
1535 state->r15 = saved_state->r15;
1536
1537 state->rip = saved_state->isf.rip;
1538 state->rflags = saved_state->isf.rflags;
1539 state->cs = saved_state->isf.cs;
1540 state->fs = saved_state->fs & 0xffff;
1541 state->gs = saved_state->gs & 0xffff;
1542 *count = x86_THREAD_STATE64_COUNT;
1543
1544 return KERN_SUCCESS;
1545 }
1546
1547 case x86_THREAD_STATE: {
1548 x86_thread_state_t *state = NULL;
1549
1550 if (*count < x86_THREAD_STATE_COUNT)
1551 return (KERN_INVALID_ARGUMENT);
1552
1553 state = (x86_thread_state_t *) tstate;
1554
1555 if (is_saved_state32(int_state)) {
1556 x86_saved_state32_t *saved_state = saved_state32(int_state);
1557
1558 state->tsh.flavor = x86_THREAD_STATE32;
1559 state->tsh.count = x86_THREAD_STATE32_COUNT;
1560
1561 /*
1562 * General registers.
1563 */
1564 state->uts.ts32.eax = saved_state->eax;
1565 state->uts.ts32.ebx = saved_state->ebx;
1566 state->uts.ts32.ecx = saved_state->ecx;
1567 state->uts.ts32.edx = saved_state->edx;
1568 state->uts.ts32.edi = saved_state->edi;
1569 state->uts.ts32.esi = saved_state->esi;
1570 state->uts.ts32.ebp = saved_state->ebp;
1571 state->uts.ts32.esp = saved_state->uesp;
1572 state->uts.ts32.eflags = saved_state->efl;
1573 state->uts.ts32.eip = saved_state->eip;
1574 state->uts.ts32.cs = saved_state->cs;
1575 state->uts.ts32.ss = saved_state->ss;
1576 state->uts.ts32.ds = saved_state->ds & 0xffff;
1577 state->uts.ts32.es = saved_state->es & 0xffff;
1578 state->uts.ts32.fs = saved_state->fs & 0xffff;
1579 state->uts.ts32.gs = saved_state->gs & 0xffff;
1580 } else if (is_saved_state64(int_state)) {
1581 x86_saved_state64_t *saved_state = saved_state64(int_state);
1582
1583 state->tsh.flavor = x86_THREAD_STATE64;
1584 state->tsh.count = x86_THREAD_STATE64_COUNT;
1585
1586 /*
1587 * General registers.
1588 */
1589 state->uts.ts64.rax = saved_state->rax;
1590 state->uts.ts64.rbx = saved_state->rbx;
1591 state->uts.ts64.rcx = saved_state->rcx;
1592 state->uts.ts64.rdx = saved_state->rdx;
1593 state->uts.ts64.rdi = saved_state->rdi;
1594 state->uts.ts64.rsi = saved_state->rsi;
1595 state->uts.ts64.rbp = saved_state->rbp;
1596 state->uts.ts64.rsp = saved_state->isf.rsp;
1597 state->uts.ts64.r8 = saved_state->r8;
1598 state->uts.ts64.r9 = saved_state->r9;
1599 state->uts.ts64.r10 = saved_state->r10;
1600 state->uts.ts64.r11 = saved_state->r11;
1601 state->uts.ts64.r12 = saved_state->r12;
1602 state->uts.ts64.r13 = saved_state->r13;
1603 state->uts.ts64.r14 = saved_state->r14;
1604 state->uts.ts64.r15 = saved_state->r15;
1605
1606 state->uts.ts64.rip = saved_state->isf.rip;
1607 state->uts.ts64.rflags = saved_state->isf.rflags;
1608 state->uts.ts64.cs = saved_state->isf.cs;
1609 state->uts.ts64.fs = saved_state->fs & 0xffff;
1610 state->uts.ts64.gs = saved_state->gs & 0xffff;
1611 } else {
1612 panic("unknown thread state");
1613 }
1614
1615 *count = x86_THREAD_STATE_COUNT;
1616 return KERN_SUCCESS;
1617 }
1618 }
1619 return KERN_FAILURE;
1620 }
1621
1622
1623 void
1624 machine_thread_switch_addrmode(thread_t thread)
1625 {
1626 /*
1627 * We don't want to be preempted until we're done
1628 * - particularly if we're switching the current thread
1629 */
1630 disable_preemption();
1631
1632 /*
1633 * Reset the state saveareas. As we're resetting, we anticipate no
1634 * memory allocations in this path.
1635 */
1636 machine_thread_create(thread, thread->task);
1637
1638 /* If we're switching ourselves, reset the pcb addresses etc. */
1639 if (thread == current_thread()) {
1640 boolean_t istate = ml_set_interrupts_enabled(FALSE);
1641 #if defined(__i386__)
1642 if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
1643 pmap_load_kernel_cr3();
1644 #endif /* defined(__i386__) */
1645 act_machine_switch_pcb(NULL, thread);
1646 ml_set_interrupts_enabled(istate);
1647 }
1648 enable_preemption();
1649 }
1650
1651
1652
1653 /*
1654 * This is used to set the current thr_act/thread
1655 * when starting up a new processor
1656 */
1657 void
1658 machine_set_current_thread(thread_t thread)
1659 {
1660 current_cpu_datap()->cpu_active_thread = thread;
1661 }
1662
1663
1664 /*
1665 * Perform machine-dependent per-thread initializations
1666 */
1667 void
1668 machine_thread_init(void)
1669 {
1670 if (cpu_mode_is64bit()) {
1671 assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
1672 iss_zone = zinit(sizeof(x86_sframe64_t),
1673 thread_max * sizeof(x86_sframe64_t),
1674 THREAD_CHUNK * sizeof(x86_sframe64_t),
1675 "x86_64 saved state");
1676
1677 ids_zone = zinit(sizeof(x86_debug_state64_t),
1678 thread_max * sizeof(x86_debug_state64_t),
1679 THREAD_CHUNK * sizeof(x86_debug_state64_t),
1680 "x86_64 debug state");
1681
1682 } else {
1683 iss_zone = zinit(sizeof(x86_sframe32_t),
1684 thread_max * sizeof(x86_sframe32_t),
1685 THREAD_CHUNK * sizeof(x86_sframe32_t),
1686 "x86 saved state");
1687 ids_zone = zinit(sizeof(x86_debug_state32_t),
1688 thread_max * (sizeof(x86_debug_state32_t)),
1689 THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
1690 "x86 debug state");
1691 }
1692 fpu_module_init();
1693 }
1694
1695
1696 #if defined(__i386__)
1697 /*
1698 * Some routines for debugging activation code
1699 */
1700 static void dump_handlers(thread_t);
1701 void dump_regs(thread_t);
1702 int dump_act(thread_t thr_act);
1703
1704 static void
1705 dump_handlers(thread_t thr_act)
1706 {
1707 ReturnHandler *rhp = thr_act->handlers;
1708 int counter = 0;
1709
1710 printf("\t");
1711 while (rhp) {
1712 if (rhp == &thr_act->special_handler){
1713 if (rhp->next)
1714 printf("[NON-Zero next ptr(%p)]", rhp->next);
1715 printf("special_handler()->");
1716 break;
1717 }
1718 printf("hdlr_%d(%p)->", counter, rhp->handler);
1719 rhp = rhp->next;
1720 if (++counter > 32) {
1721 printf("Aborting: HUGE handler chain\n");
1722 break;
1723 }
1724 }
1725 printf("HLDR_NULL\n");
1726 }
1727
1728 void
1729 dump_regs(thread_t thr_act)
1730 {
1731 if (thread_is_64bit(thr_act)) {
1732 x86_saved_state64_t *ssp;
1733
1734 ssp = USER_REGS64(thr_act);
1735
1736 panic("dump_regs: 64bit tasks not yet supported");
1737
1738 } else {
1739 x86_saved_state32_t *ssp;
1740
1741 ssp = USER_REGS32(thr_act);
1742
1743 /*
1744 * Print out user register state
1745 */
1746 printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
1747 ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);
1748
1749 printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
1750 ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);
1751
1752 printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
1753 }
1754 }
1755
1756 int
1757 dump_act(thread_t thr_act)
1758 {
1759 if (!thr_act)
1760 return(0);
1761
1762 printf("thread(%p)(%d): task=%p(%d)\n",
1763 thr_act, thr_act->ref_count,
1764 thr_act->task,
1765 thr_act->task ? thr_act->task->ref_count : 0);
1766
1767 printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
1768 thr_act->suspend_count, thr_act->user_stop_count,
1769 thr_act->active, thr_act->ast);
1770 printf("\tpcb=%p\n", &thr_act->machine);
1771
1772 if (thr_act->kernel_stack) {
1773 vm_offset_t stack = thr_act->kernel_stack;
1774
1775 printf("\tk_stk %lx eip %x ebx %x esp %x iss %p\n",
1776 (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
1777 STACK_IKS(stack)->k_esp, thr_act->machine.iss);
1778 }
1779
1780 dump_handlers(thr_act);
1781 dump_regs(thr_act);
1782 return((int)thr_act);
1783 }
1784 #endif
1785
1786 user_addr_t
1787 get_useraddr(void)
1788 {
1789 thread_t thr_act = current_thread();
1790
1791 if (thread_is_64bit(thr_act)) {
1792 x86_saved_state64_t *iss64;
1793
1794 iss64 = USER_REGS64(thr_act);
1795
1796 return(iss64->isf.rip);
1797 } else {
1798 x86_saved_state32_t *iss32;
1799
1800 iss32 = USER_REGS32(thr_act);
1801
1802 return(iss32->eip);
1803 }
1804 }
1805
1806 /*
1807 * detach and return a kernel stack from a thread
1808 */
1809
1810 vm_offset_t
1811 machine_stack_detach(thread_t thread)
1812 {
1813 vm_offset_t stack;
1814
1815 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
1816 (uintptr_t)thread_tid(thread), thread->priority,
1817 thread->sched_pri, 0,
1818 0);
1819
1820 stack = thread->kernel_stack;
1821 thread->kernel_stack = 0;
1822
1823 return (stack);
1824 }
1825
1826 /*
1827 * attach a kernel stack to a thread and initialize it
1828 */
1829
1830 void
1831 machine_stack_attach(
1832 thread_t thread,
1833 vm_offset_t stack)
1834 {
1835 struct x86_kernel_state *statep;
1836
1837 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
1838 (uintptr_t)thread_tid(thread), thread->priority,
1839 thread->sched_pri, 0, 0);
1840
1841 assert(stack);
1842 thread->kernel_stack = stack;
1843
1844 statep = STACK_IKS(stack);
1845 #if defined(__x86_64__)
1846 statep->k_rip = (unsigned long) Thread_continue;
1847 statep->k_rbx = (unsigned long) thread_continue;
1848 statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
1849 #else
1850 statep->k_eip = (unsigned long) Thread_continue;
1851 statep->k_ebx = (unsigned long) thread_continue;
1852 statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
1853 #endif
1854
1855 return;
1856 }
1857
1858 /*
1859 * move a stack from old to new thread
1860 */
1861
1862 void
1863 machine_stack_handoff(thread_t old,
1864 thread_t new)
1865 {
1866 vm_offset_t stack;
1867
1868 assert(new);
1869 assert(old);
1870
1871 #if CONFIG_COUNTERS
1872 machine_pmc_cswitch(old, new);
1873 #endif
1874
1875 stack = old->kernel_stack;
1876 if (stack == old->reserved_stack) {
1877 assert(new->reserved_stack);
1878 old->reserved_stack = new->reserved_stack;
1879 new->reserved_stack = stack;
1880 }
1881 old->kernel_stack = 0;
1882 /*
1883  * A full call to machine_stack_attach() is unnecessary
1884  * because the old stack is already initialized.
1885 */
1886 new->kernel_stack = stack;
1887
1888 fpu_save_context(old);
1889
1890 old->machine.specFlags &= ~OnProc;
1891 new->machine.specFlags |= OnProc;
1892
1893 PMAP_SWITCH_CONTEXT(old, new, cpu_number());
1894 act_machine_switch_pcb(old, new);
1895
1896 machine_set_current_thread(new);
1897
1898 return;
1899 }
1900
1901
1902
1903
1904 struct x86_act_context32 {
1905 x86_saved_state32_t ss;
1906 x86_float_state32_t fs;
1907 x86_debug_state32_t ds;
1908 };
1909
1910 struct x86_act_context64 {
1911 x86_saved_state64_t ss;
1912 x86_float_state64_t fs;
1913 x86_debug_state64_t ds;
1914 };
1915
1916
1917
1918 void *
1919 act_thread_csave(void)
1920 {
1921 kern_return_t kret;
1922 mach_msg_type_number_t val;
1923 thread_t thr_act = current_thread();
1924
1925 if (thread_is_64bit(thr_act)) {
1926 struct x86_act_context64 *ic64;
1927
1928 ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));
1929
1930 if (ic64 == (struct x86_act_context64 *)NULL)
1931 return((void *)0);
1932
1933 val = x86_SAVED_STATE64_COUNT;
1934 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
1935 (thread_state_t) &ic64->ss, &val);
1936 if (kret != KERN_SUCCESS) {
1937 kfree(ic64, sizeof(struct x86_act_context64));
1938 return((void *)0);
1939 }
1940 val = x86_FLOAT_STATE64_COUNT;
1941 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
1942 (thread_state_t) &ic64->fs, &val);
1943 if (kret != KERN_SUCCESS) {
1944 kfree(ic64, sizeof(struct x86_act_context64));
1945 return((void *)0);
1946 }
1947
1948 val = x86_DEBUG_STATE64_COUNT;
1949 kret = machine_thread_get_state(thr_act,
1950 x86_DEBUG_STATE64,
1951 (thread_state_t)&ic64->ds,
1952 &val);
1953 if (kret != KERN_SUCCESS) {
1954 kfree(ic64, sizeof(struct x86_act_context64));
1955 return((void *)0);
1956 }
1957 return(ic64);
1958
1959 } else {
1960 struct x86_act_context32 *ic32;
1961
1962 ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));
1963
1964 if (ic32 == (struct x86_act_context32 *)NULL)
1965 return((void *)0);
1966
1967 val = x86_SAVED_STATE32_COUNT;
1968 kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
1969 (thread_state_t) &ic32->ss, &val);
1970 if (kret != KERN_SUCCESS) {
1971 kfree(ic32, sizeof(struct x86_act_context32));
1972 return((void *)0);
1973 }
1974 val = x86_FLOAT_STATE32_COUNT;
1975 kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
1976 (thread_state_t) &ic32->fs, &val);
1977 if (kret != KERN_SUCCESS) {
1978 kfree(ic32, sizeof(struct x86_act_context32));
1979 return((void *)0);
1980 }
1981
1982 val = x86_DEBUG_STATE32_COUNT;
1983 kret = machine_thread_get_state(thr_act,
1984 x86_DEBUG_STATE32,
1985 (thread_state_t)&ic32->ds,
1986 &val);
1987 if (kret != KERN_SUCCESS) {
1988 kfree(ic32, sizeof(struct x86_act_context32));
1989 return((void *)0);
1990 }
1991 return(ic32);
1992 }
1993 }
1994
1995
1996 void
1997 act_thread_catt(void *ctx)
1998 {
1999 thread_t thr_act = current_thread();
2000 kern_return_t kret;
2001
2002 if (ctx == (void *)NULL)
2003 return;
2004
2005 if (thread_is_64bit(thr_act)) {
2006 struct x86_act_context64 *ic64;
2007
2008 ic64 = (struct x86_act_context64 *)ctx;
2009
2010 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
2011 (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
2012 if (kret == KERN_SUCCESS) {
2013 machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
2014 (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
2015 }
2016 kfree(ic64, sizeof(struct x86_act_context64));
2017 } else {
2018 struct x86_act_context32 *ic32;
2019
2020 ic32 = (struct x86_act_context32 *)ctx;
2021
2022 kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
2023 (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
2024 if (kret == KERN_SUCCESS) {
2025 (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
2026 (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
2027 }
2028 kfree(ic32, sizeof(struct x86_act_context32));
2029 }
2030 }
2031
2032
2033 void act_thread_cfree(__unused void *ctx)
2034 {
2035 /* XXX - Unused */
2036 }
2037 void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
2038 void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
2039 thread->machine.arg_store_valid = valid;
2040 }
2041
2042 boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);
2043
2044 boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
2045 return (thread->machine.arg_store_valid);
2046 }
2047
2048 /*
2049  * Duplicate one x86_debug_state32_t to another. The "all" parameter
2050 * chooses whether dr4 and dr5 are copied (they are never meant
2051 * to be installed when we do machine_task_set_state() or
2052 * machine_thread_set_state()).
2053 */
2054 void
2055 copy_debug_state32(
2056 x86_debug_state32_t *src,
2057 x86_debug_state32_t *target,
2058 boolean_t all)
2059 {
2060 if (all) {
2061 target->dr4 = src->dr4;
2062 target->dr5 = src->dr5;
2063 }
2064
2065 target->dr0 = src->dr0;
2066 target->dr1 = src->dr1;
2067 target->dr2 = src->dr2;
2068 target->dr3 = src->dr3;
2069 target->dr6 = src->dr6;
2070 target->dr7 = src->dr7;
2071 }
2072
2073 /*
2074  * Duplicate one x86_debug_state64_t to another. The "all" parameter
2075 * chooses whether dr4 and dr5 are copied (they are never meant
2076 * to be installed when we do machine_task_set_state() or
2077 * machine_thread_set_state()).
2078 */
2079 void
2080 copy_debug_state64(
2081 x86_debug_state64_t *src,
2082 x86_debug_state64_t *target,
2083 boolean_t all)
2084 {
2085 if (all) {
2086 target->dr4 = src->dr4;
2087 target->dr5 = src->dr5;
2088 }
2089
2090 target->dr0 = src->dr0;
2091 target->dr1 = src->dr1;
2092 target->dr2 = src->dr2;
2093 target->dr3 = src->dr3;
2094 target->dr6 = src->dr6;
2095 target->dr7 = src->dr7;
2096 }
2097
2098 boolean_t is_useraddr64_canonical(uint64_t addr64);
2099
2100 boolean_t
2101 is_useraddr64_canonical(uint64_t addr64)
2102 {
2103 return IS_USERADDR64_CANONICAL(addr64);
2104 }
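/*
 * Editorial note, not part of the original source: "canonical" above refers
 * to the x86-64 requirement that bits 63:47 of a virtual address be a sign
 * extension of bit 47 (assuming 48 implemented virtual-address bits). A
 * minimal sketch of that test, written independently of the kernel's actual
 * IS_USERADDR64_CANONICAL() macro (which effectively also restricts the
 * address to the user range), could look like:
 */
#if 0	/* illustrative only */
static boolean_t
example_is_canonical(uint64_t addr)
{
	int64_t shifted = (int64_t)(addr << 16);	/* move VA bit 47 into bit 63 */

	return ((uint64_t)(shifted >> 16) == addr);	/* sign-extend back and compare */
}
#endif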