/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>
#include <i386/mp_desc.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>

/*
 * Maps state flavor to number of words in the state:
 */
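/* The table appears to be indexed directly by the thread-state flavor value; zero entries mark flavors with no fixed-size state. */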
unsigned int _MachineStateCount[] = {
        /* FLAVOR_LIST */
        0,
        x86_THREAD_STATE32_COUNT,
        x86_FLOAT_STATE32_COUNT,
        x86_EXCEPTION_STATE32_COUNT,
        x86_THREAD_STATE64_COUNT,
        x86_FLOAT_STATE64_COUNT,
        x86_EXCEPTION_STATE64_COUNT,
        x86_THREAD_STATE_COUNT,
        x86_FLOAT_STATE_COUNT,
        x86_EXCEPTION_STATE_COUNT,
        0,
        x86_SAVED_STATE32_COUNT,
        x86_SAVED_STATE64_COUNT,
        x86_DEBUG_STATE32_COUNT,
        x86_DEBUG_STATE64_COUNT,
        x86_DEBUG_STATE_COUNT
};

zone_t          iss_zone32;     /* zone for 32bit saved_state area */
zone_t          iss_zone64;     /* zone for 64bit saved_state area */
zone_t          ids_zone32;     /* zone for 32bit debug_state area */
zone_t          ids_zone64;     /* zone for 64bit debug_state area */


/* Forward */

void            act_machine_throughcall(thread_t thr_act);
user_addr_t     get_useraddr(void);
void            act_machine_return(int);
void            act_machine_sv_free(thread_t, int);

extern thread_t Switch_context(
                        thread_t old,
                        thread_continue_t cont,
                        thread_t new);
extern void Thread_continue(void);
extern void Load_context(
                        thread_t thread);


static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

/*
 * Don't let an illegal value for dr7 get set. Specifically,
 * check for undefined settings. Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
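/*
 * For reference (Intel SDM vol. 3, "Debug Registers"): DR7 bits 0/2/4/6
 * are the local enables L0-L3 and bits 1/3/5/7 the global enables G0-G3;
 * for each breakpoint n, bits 16+4n..17+4n encode R/Wn (the breakpoint
 * condition) and bits 18+4n..19+4n encode LENn (the watch length).
 */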
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
        int i;
        uint32_t mask1, mask2;

        /*
         * If the DE bit is set in CR4, R/W0-3 can be pattern
         * "10B" to indicate I/O reads and writes
         */
        if (!(get_cr4() & CR4_DE))
                for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
                                i++, mask1 <<= 4, mask2 <<= 4)
                        if ((*dr7 & mask1) == mask2)
                                return (FALSE);

        /*
         * len0-3 pattern "10B" is ok for len on 64-bit.
         */
        if (current_cpu_datap()->cpu_is64bit == TRUE)
                for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
                                i++, mask1 <<= 4, mask2 <<= 4)
                        if ((*dr7 & mask1) == mask2)
                                return (FALSE);

        /*
         * if we are doing an instruction execution break (indicated
         * by r/w[x] being "00B"), then the len[x] must also be set
         * to "00B"
         */
        for (i = 0; i < 4; i++)
                if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
                    ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
                        return (FALSE);

        /*
         * Intel docs have these bits fixed.
         */
        *dr7 |= 0x1 << 10;              /* set bit 10 to 1 */
        *dr7 &= ~(0x1 << 11);           /* set bit 11 to 0 */
        *dr7 &= ~(0x1 << 12);           /* set bit 12 to 0 */
        *dr7 &= ~(0x1 << 14);           /* set bit 14 to 0 */
        *dr7 &= ~(0x1 << 15);           /* set bit 15 to 0 */

        /*
         * We don't allow anything to set the global breakpoints.
         */

        if (*dr7 & 0x2)
                return (FALSE);

        if (*dr7 & (0x2<<2))
                return (FALSE);

        if (*dr7 & (0x2<<4))
                return (FALSE);

        if (*dr7 & (0x2<<6))
                return (FALSE);

        return (TRUE);
}

static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
        __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
        __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
        __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
        __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
        if (cpu_mode_is64bit())
                cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
        /*
         * We need to enter 64-bit mode in order to set the full
         * width of these registers
         */
        set_64bit_debug_regs(ds);
        cdp->cpu_dr7 = ds->dr7;
}

static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
        x86_debug_state32_t *ids;
        pcb_t pcb;

        pcb = thread->machine.pcb;
        ids = pcb->ids;

        if (ids == NULL) {
                ids = zalloc(ids_zone32);
                bzero(ids, sizeof *ids);

                simple_lock(&pcb->lock);
                /* make sure it wasn't already alloc()'d elsewhere */
                if (pcb->ids == NULL) {
                        pcb->ids = ids;
                        simple_unlock(&pcb->lock);
                } else {
                        simple_unlock(&pcb->lock);
                        zfree(ids_zone32, ids);
                }
        }

        if (!dr7_is_valid(&ds->dr7))
                goto err;

        /*
         * Only allow local breakpoints and make sure they are not
         * in the trampoline code.
         */

        if (ds->dr7 & 0x1)
                if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
                        goto err;

        if (ds->dr7 & (0x1<<2))
                if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
                        goto err;

        if (ds->dr7 & (0x1<<4))
                if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
                        goto err;

        if (ds->dr7 & (0x1<<6))
                if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
                        goto err;

        ids->dr0 = ds->dr0;
        ids->dr1 = ds->dr1;
        ids->dr2 = ds->dr2;
        ids->dr3 = ds->dr3;
        ids->dr6 = ds->dr6;
        ids->dr7 = ds->dr7;

        return (KERN_SUCCESS);

err:
        return (KERN_INVALID_ARGUMENT);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
        x86_debug_state64_t *ids;
        pcb_t pcb;

        pcb = thread->machine.pcb;
        ids = pcb->ids;

        if (ids == NULL) {
                ids = zalloc(ids_zone64);
                bzero(ids, sizeof *ids);

                simple_lock(&pcb->lock);
                /* make sure it wasn't already alloc()'d elsewhere */
                if (pcb->ids == NULL) {
                        pcb->ids = ids;
                        simple_unlock(&pcb->lock);
                } else {
                        simple_unlock(&pcb->lock);
                        zfree(ids_zone64, ids);
                }
        }

        if (!dr7_is_valid((uint32_t *)&ds->dr7))
                goto err;

        /*
         * Don't allow the user to set debug addresses above their max
         * value
         */
        if (ds->dr7 & 0x1)
                if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
                        goto err;

        if (ds->dr7 & (0x1<<2))
                if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
                        goto err;

        if (ds->dr7 & (0x1<<4))
                if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
                        goto err;

        if (ds->dr7 & (0x1<<6))
                if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
                        goto err;

        ids->dr0 = ds->dr0;
        ids->dr1 = ds->dr1;
        ids->dr2 = ds->dr2;
        ids->dr3 = ds->dr3;
        ids->dr6 = ds->dr6;
        ids->dr7 = ds->dr7;

        return (KERN_SUCCESS);

err:
        return (KERN_INVALID_ARGUMENT);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
        x86_debug_state32_t *saved_state;

        saved_state = thread->machine.pcb->ids;

        if (saved_state) {
                ds->dr0 = saved_state->dr0;
                ds->dr1 = saved_state->dr1;
                ds->dr2 = saved_state->dr2;
                ds->dr3 = saved_state->dr3;
                ds->dr4 = saved_state->dr4;
                ds->dr5 = saved_state->dr5;
                ds->dr6 = saved_state->dr6;
                ds->dr7 = saved_state->dr7;
        } else
                bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
        x86_debug_state64_t *saved_state;

        saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

        if (saved_state) {
                ds->dr0 = saved_state->dr0;
                ds->dr1 = saved_state->dr1;
                ds->dr2 = saved_state->dr2;
                ds->dr3 = saved_state->dr3;
                ds->dr4 = saved_state->dr4;
                ds->dr5 = saved_state->dr5;
                ds->dr6 = saved_state->dr6;
                ds->dr7 = saved_state->dr7;
        } else
                bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 *      Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}


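/*
 * Install the incoming thread's PCB state on this CPU: publish the
 * saved-state pointer in the kernel stack, point the TSS and sysenter
 * stacks at the PCB so trap/syscall entry pushes registers there, load
 * the USER_CTHREAD (and any user LDT) descriptors, and record the user
 * GS base.
 */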
static void
act_machine_switch_pcb( thread_t new )
{
        pcb_t                   pcb = new->machine.pcb;
        struct real_descriptor  *ldtp;
        vm_offset_t             pcb_stack_top;
        vm_offset_t             hi_pcb_stack_top;
        vm_offset_t             hi_iss;
        cpu_data_t              *cdp = current_cpu_datap();

        assert(new->kernel_stack != 0);
        STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

        if (!cpu_mode_is64bit()) {
                x86_saved_state32_tagged_t *hi_iss32;

                /*
                 * Save a pointer to the top of the "kernel" stack -
                 * actually the place in the PCB where a trap into
                 * kernel mode will push the registers.
                 */
                hi_iss = (vm_offset_t)((unsigned long)
                        pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
                        ((unsigned long)pcb->iss & PAGE_MASK));

                cdp->cpu_hi_iss = (void *)hi_iss;

                pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
                pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

                hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
                assert(hi_iss32->tag == x86_SAVED_STATE32);

                hi_pcb_stack_top = (int) (hi_iss32 + 1);

                /*
                 * For fast syscall, top of interrupt stack points to pcb stack
                 */
                *(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

                current_ktss()->esp0 = hi_pcb_stack_top;

        /* XXX: This check is performed against the thread save state flavor rather than the
         * task's 64-bit feature flag because of the thread/task 64-bit state divergence
         * that can arise in task_set_64bit() on x86. When that is addressed, we can
         * revert to checking the task 64 bit feature flag. The assert below is retained
         * for that reason.
         */
        } else if (is_saved_state64(pcb->iss)) {
                x86_saved_state64_tagged_t *iss64;
                vm_offset_t isf;

                assert(is_saved_state64(pcb->iss));

                iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

                /*
                 * Set pointer to PCB's interrupt stack frame in cpu data.
                 * Used by syscall and double-fault trap handlers.
                 */
                isf = (vm_offset_t) &iss64->state.isf;
                cdp->cpu_uber.cu_isf = UBER64(isf);
                pcb_stack_top = (vm_offset_t) (iss64 + 1);
                /* require 16-byte alignment */
                assert((pcb_stack_top & 0xF) == 0);
                /* Interrupt stack is pcb */
                current_ktss64()->rsp0 = UBER64(pcb_stack_top);

                /*
                 * Top of temporary sysenter stack points to pcb stack.
                 * Although this is not normally used by 64-bit users,
                 * it needs to be set in case a sysenter is attempted.
                 */
                *current_sstk64() = UBER64(pcb_stack_top);

                cdp->cpu_task_map = new->map->pmap->pm_kernel_cr3 ?
                        TASK_MAP_64BIT_SHARED : TASK_MAP_64BIT;

                /*
                 * Enable the 64-bit user code segment, USER64_CS.
                 */
                ldt_desc_p(USER64_CS)->access |= ACC_PL_U;

        } else {
                x86_saved_state_compat32_t *iss32compat;
                vm_offset_t isf;

                assert(is_saved_state32(pcb->iss));
                iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

                pcb_stack_top = (int) (iss32compat + 1);
                /* require 16-byte alignment */
                assert((pcb_stack_top & 0xF) == 0);

                /*
                 * Set pointer to PCB's interrupt stack frame in cpu data.
                 * Used by debug trap handler.
                 */
                isf = (vm_offset_t) &iss32compat->isf64;
                cdp->cpu_uber.cu_isf = UBER64(isf);

                /* Top of temporary sysenter stack points to pcb stack */
                *current_sstk64() = UBER64(pcb_stack_top);

                /* Interrupt stack is pcb */
                current_ktss64()->rsp0 = UBER64(pcb_stack_top);

                cdp->cpu_task_map = TASK_MAP_32BIT;

                /*
                 * Disable USER64_CS
                 */
                ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
        }

        /*
         * Set the thread`s cthread (a.k.a. pthread)
         * For 32-bit user this involves setting the USER_CTHREAD
         * descriptor in the LDT to point to the cthread data.
         * This involves copying in the pre-initialized descriptor.
         */
        ldtp = (struct real_descriptor *)current_ldt();
        ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
        if (pcb->uldt_selector != 0)
                ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
        /*
         * For 64-bit, we additionally set the 64-bit User GS base
         * address. On return to 64-bit user, the GS.Base MSR will be written.
         */
        cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

        /*
         * Set the thread`s LDT or LDT entry.
         */
        if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
                /*
                 * Use system LDT.
                 */
                ml_cpu_set_ldt(KERNEL_LDT);
        } else {
                /*
                 * Task has its own LDT.
                 */
                user_ldt_set(new);
        }
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
        thread_t        new)
{
        new->machine.specFlags |= OnProc;
        act_machine_switch_pcb(new);
        Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
        thread_t                old,
        thread_continue_t       continuation,
        thread_t                new)
{
#if MACH_RT
        assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif

        /*
         * Save FP registers if in use.
         */
        fpu_save_context(old);

        old->machine.specFlags &= ~OnProc;
        new->machine.specFlags |= OnProc;

        /*
         * Switch address maps if need be, even if not switching tasks.
         * (A server activation may be "borrowing" a client map.)
         */
        PMAP_SWITCH_CONTEXT(old, new, cpu_number())

        /*
         * Load the rest of the user state for the new thread
         */
        act_machine_switch_pcb(new);
        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
                (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
        return(Switch_context(old, continuation, new));
}

/*
 * act_machine_sv_free
 *      release saveareas associated with an act. if flag is true, release
 *      user level savearea(s) too, else don't
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
        thread_t thread)
{
        /*
         * If there's an fpu save area, free it.
         * The initialized state will then be lazily faulted-in, if required.
         * And if we're the target, re-arm the no-fpu trap.
         */
        if (thread->machine.pcb->ifps) {
                (void) fpu_set_fxstate(thread, NULL);

                if (thread == current_thread())
                        clear_fpu();
        }
        return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
        return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32        - internal save/restore general register state on 32/64 bit processors
 *                            for 32bit tasks only
 * x86_SAVED_STATE64        - internal save/restore general register state on 64 bit processors
 *                            for 64bit tasks only
 * x86_THREAD_STATE32       - external set/get general register state on 32/64 bit processors
 *                            for 32bit tasks only
 * x86_THREAD_STATE64       - external set/get general register state on 64 bit processors
 *                            for 64bit tasks only
 * x86_SAVED_STATE          - external set/get general register state on 32/64 bit processors
 *                            for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32        - internal/external save/restore float and xmm state on 32/64 bit processors
 *                            for 32bit tasks only
 * x86_FLOAT_STATE64        - internal/external save/restore float and xmm state on 64 bit processors
 *                            for 64bit tasks only
 * x86_FLOAT_STATE          - external save/restore float and xmm state on 32/64 bit processors
 *                            for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32    - external get exception state on 32/64 bit processors
 *                            for 32bit tasks only
 * x86_EXCEPTION_STATE64    - external get exception state on 64 bit processors
 *                            for 64bit tasks only
 * x86_EXCEPTION_STATE      - external get exception state on 32/64 bit processors
 *                            for either 32bit or 64bit tasks
 */
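
/*
 * Example use of the universal flavor from a (hypothetical) in-kernel
 * caller: x86_THREAD_STATE works for either task width, and the header
 * reports which variant was filled in:
 *
 *      x86_thread_state_t      ts;
 *      mach_msg_type_number_t  count = x86_THREAD_STATE_COUNT;
 *
 *      if (machine_thread_get_state(thread, x86_THREAD_STATE,
 *                      (thread_state_t)&ts, &count) == KERN_SUCCESS) {
 *              if (ts.tsh.flavor == x86_THREAD_STATE64)
 *                      ... use ts.uts.ts64 ...
 *              else
 *                      ... use ts.uts.ts32 ...
 *      }
 */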


static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
        x86_saved_state64_t *saved_state;

        saved_state = USER_REGS64(thread);

        es->trapno = saved_state->isf.trapno;
        es->err = saved_state->isf.err;
        es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
        x86_saved_state32_t *saved_state;

        saved_state = USER_REGS32(thread);

        es->trapno = saved_state->trapno;
        es->err = saved_state->err;
        es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
        x86_saved_state32_t *saved_state;

        saved_state = USER_REGS32(thread);

        saved_state->eax = ts->eax;
        saved_state->ebx = ts->ebx;
        saved_state->ecx = ts->ecx;
        saved_state->edx = ts->edx;
        saved_state->edi = ts->edi;
        saved_state->esi = ts->esi;
        saved_state->ebp = ts->ebp;
        saved_state->uesp = ts->esp;
        saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
        saved_state->eip = ts->eip;
        saved_state->cs = ts->cs ? ts->cs : USER_CS;
        saved_state->ss = ts->ss ? ts->ss : USER_DS;
        saved_state->ds = ts->ds ? ts->ds : USER_DS;
        saved_state->es = ts->es ? ts->es : USER_DS;
        saved_state->fs = ts->fs;
        saved_state->gs = ts->gs;

        /*
         * If the trace trap bit is being set,
         * ensure that the user returns via iret
         * - which is signaled thusly:
         */
        if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
                saved_state->cs = SYSENTER_TF_CS;

        return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
        x86_saved_state64_t *saved_state;

        saved_state = USER_REGS64(thread);

        if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
            !IS_USERADDR64_CANONICAL(ts->rip))
                return(KERN_INVALID_ARGUMENT);

        saved_state->r8 = ts->r8;
        saved_state->r9 = ts->r9;
        saved_state->r10 = ts->r10;
        saved_state->r11 = ts->r11;
        saved_state->r12 = ts->r12;
        saved_state->r13 = ts->r13;
        saved_state->r14 = ts->r14;
        saved_state->r15 = ts->r15;
        saved_state->rax = ts->rax;
        saved_state->rbx = ts->rbx;
        saved_state->rcx = ts->rcx;
        saved_state->rdx = ts->rdx;
        saved_state->rdi = ts->rdi;
        saved_state->rsi = ts->rsi;
        saved_state->rbp = ts->rbp;
        saved_state->isf.rsp = ts->rsp;
        saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
        saved_state->isf.rip = ts->rip;
        saved_state->isf.cs = USER64_CS;
        saved_state->fs = ts->fs;
        saved_state->gs = ts->gs;

        return(KERN_SUCCESS);
}


static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
        x86_saved_state32_t *saved_state;

        saved_state = USER_REGS32(thread);

        ts->eax = saved_state->eax;
        ts->ebx = saved_state->ebx;
        ts->ecx = saved_state->ecx;
        ts->edx = saved_state->edx;
        ts->edi = saved_state->edi;
        ts->esi = saved_state->esi;
        ts->ebp = saved_state->ebp;
        ts->esp = saved_state->uesp;
        ts->eflags = saved_state->efl;
        ts->eip = saved_state->eip;
        ts->cs = saved_state->cs;
        ts->ss = saved_state->ss;
        ts->ds = saved_state->ds;
        ts->es = saved_state->es;
        ts->fs = saved_state->fs;
        ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
        x86_saved_state64_t *saved_state;

        saved_state = USER_REGS64(thread);

        ts->r8 = saved_state->r8;
        ts->r9 = saved_state->r9;
        ts->r10 = saved_state->r10;
        ts->r11 = saved_state->r11;
        ts->r12 = saved_state->r12;
        ts->r13 = saved_state->r13;
        ts->r14 = saved_state->r14;
        ts->r15 = saved_state->r15;
        ts->rax = saved_state->rax;
        ts->rbx = saved_state->rbx;
        ts->rcx = saved_state->rcx;
        ts->rdx = saved_state->rdx;
        ts->rdi = saved_state->rdi;
        ts->rsi = saved_state->rsi;
        ts->rbp = saved_state->rbp;
        ts->rsp = saved_state->isf.rsp;
        ts->rflags = saved_state->isf.rflags;
        ts->rip = saved_state->isf.rip;
        ts->cs = saved_state->isf.cs;
        ts->fs = saved_state->fs;
        ts->gs = saved_state->gs;
}


/*
 * machine_thread_set_state:
 *
 *      Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
        thread_t thr_act,
        thread_flavor_t flavor,
        thread_state_t tstate,
        mach_msg_type_number_t count)
{
        switch (flavor) {
        case x86_SAVED_STATE32:
        {
                x86_saved_state32_t *state;
                x86_saved_state32_t *saved_state;

                if (count < x86_SAVED_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_saved_state32_t *) tstate;

                /* Check segment selectors are safe */
                if (!valid_user_segment_selectors(state->cs,
                                                  state->ss,
                                                  state->ds,
                                                  state->es,
                                                  state->fs,
                                                  state->gs))
                        return KERN_INVALID_ARGUMENT;

                saved_state = USER_REGS32(thr_act);

                /*
                 * General registers
                 */
                saved_state->edi = state->edi;
                saved_state->esi = state->esi;
                saved_state->ebp = state->ebp;
                saved_state->uesp = state->uesp;
                saved_state->ebx = state->ebx;
                saved_state->edx = state->edx;
                saved_state->ecx = state->ecx;
                saved_state->eax = state->eax;
                saved_state->eip = state->eip;

                saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

                /*
                 * If the trace trap bit is being set,
                 * ensure that the user returns via iret
                 * - which is signaled thusly:
                 */
                if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
                        state->cs = SYSENTER_TF_CS;

                /*
                 * User setting segment registers.
                 * Code and stack selectors have already been
                 * checked. Others will be reset by 'iret'
                 * if they are not valid.
                 */
                saved_state->cs = state->cs;
                saved_state->ss = state->ss;
                saved_state->ds = state->ds;
                saved_state->es = state->es;
                saved_state->fs = state->fs;
                saved_state->gs = state->gs;
                break;
        }

        case x86_SAVED_STATE64:
        {
                x86_saved_state64_t *state;
                x86_saved_state64_t *saved_state;

                if (count < x86_SAVED_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_saved_state64_t *) tstate;

                /* Verify that the supplied code segment selector is
                 * valid. In 64-bit mode, the FS and GS segment overrides
                 * use the FS.base and GS.base MSRs to calculate
                 * base addresses, and the trampolines don't directly
                 * restore the segment registers--hence they are no
                 * longer relevant for validation.
                 */
                if (!valid_user_code_selector(state->isf.cs))
                        return KERN_INVALID_ARGUMENT;

                /* Check pc and stack are canonical addresses */
                if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
                    !IS_USERADDR64_CANONICAL(state->isf.rip))
                        return KERN_INVALID_ARGUMENT;

                saved_state = USER_REGS64(thr_act);

                /*
                 * General registers
                 */
                saved_state->r8 = state->r8;
                saved_state->r9 = state->r9;
                saved_state->r10 = state->r10;
                saved_state->r11 = state->r11;
                saved_state->r12 = state->r12;
                saved_state->r13 = state->r13;
                saved_state->r14 = state->r14;
                saved_state->r15 = state->r15;
                saved_state->rdi = state->rdi;
                saved_state->rsi = state->rsi;
                saved_state->rbp = state->rbp;
                saved_state->rbx = state->rbx;
                saved_state->rdx = state->rdx;
                saved_state->rcx = state->rcx;
                saved_state->rax = state->rax;
                saved_state->isf.rsp = state->isf.rsp;
                saved_state->isf.rip = state->isf.rip;

                saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

                /*
                 * User setting segment registers.
                 * Code and stack selectors have already been
                 * checked. Others will be reset by 'sys'
                 * if they are not valid.
                 */
                saved_state->isf.cs = state->isf.cs;
                saved_state->isf.ss = state->isf.ss;
                saved_state->fs = state->fs;
                saved_state->gs = state->gs;

                break;
        }

        case x86_FLOAT_STATE32:
        {
                if (count != x86_FLOAT_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                return fpu_set_fxstate(thr_act, tstate);
        }

        case x86_FLOAT_STATE64:
        {
                if (count != x86_FLOAT_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if ( !thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                return fpu_set_fxstate(thr_act, tstate);
        }

        case x86_FLOAT_STATE:
        {
                x86_float_state_t *state;

                if (count != x86_FLOAT_STATE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_float_state_t *)tstate;

                if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
                    thread_is_64bit(thr_act)) {
                        return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
                }
                if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
                    !thread_is_64bit(thr_act)) {
                        return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
                }
                return(KERN_INVALID_ARGUMENT);
        }

        case OLD_i386_THREAD_STATE:
        case x86_THREAD_STATE32:
        {
                if (count != x86_THREAD_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
        }

        case x86_THREAD_STATE64:
        {
                if (count != x86_THREAD_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if ( !thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
        }

        case x86_THREAD_STATE:
        {
                x86_thread_state_t *state;

                if (count != x86_THREAD_STATE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_thread_state_t *)tstate;

                if (state->tsh.flavor == x86_THREAD_STATE64 && state->tsh.count == x86_THREAD_STATE64_COUNT &&
                    thread_is_64bit(thr_act)) {
                        return set_thread_state64(thr_act, &state->uts.ts64);
                } else if (state->tsh.flavor == x86_THREAD_STATE32 && state->tsh.count == x86_THREAD_STATE32_COUNT &&
                           !thread_is_64bit(thr_act)) {
                        return set_thread_state32(thr_act, &state->uts.ts32);
                } else
                        return(KERN_INVALID_ARGUMENT);

                break;
        }
        case x86_DEBUG_STATE32:
        {
                x86_debug_state32_t *state;
                kern_return_t ret;

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_debug_state32_t *)tstate;

                ret = set_debug_state32(thr_act, state);

                return ret;
        }
        case x86_DEBUG_STATE64:
        {
                x86_debug_state64_t *state;
                kern_return_t ret;

                if (!thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_debug_state64_t *)tstate;

                ret = set_debug_state64(thr_act, state);

                return ret;
        }
        case x86_DEBUG_STATE:
        {
                x86_debug_state_t *state;
                kern_return_t ret = KERN_INVALID_ARGUMENT;

                if (count != x86_DEBUG_STATE_COUNT)
                        return (KERN_INVALID_ARGUMENT);

                state = (x86_debug_state_t *)tstate;
                if (state->dsh.flavor == x86_DEBUG_STATE64 &&
                    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
                    thread_is_64bit(thr_act)) {
                        ret = set_debug_state64(thr_act, &state->uds.ds64);
                } else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
                           state->dsh.count == x86_DEBUG_STATE32_COUNT &&
                           !thread_is_64bit(thr_act)) {
                        ret = set_debug_state32(thr_act, &state->uds.ds32);
                }
                return ret;
        }
        default:
                return(KERN_INVALID_ARGUMENT);
        }

        return(KERN_SUCCESS);
}


/*
 * machine_thread_get_state:
 *
 *      Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
        thread_t thr_act,
        thread_flavor_t flavor,
        thread_state_t tstate,
        mach_msg_type_number_t *count)
{
        switch (flavor) {

        case THREAD_STATE_FLAVOR_LIST:
        {
                if (*count < 3)
                        return (KERN_INVALID_ARGUMENT);

                tstate[0] = i386_THREAD_STATE;
                tstate[1] = i386_FLOAT_STATE;
                tstate[2] = i386_EXCEPTION_STATE;

                *count = 3;
                break;
        }

        case THREAD_STATE_FLAVOR_LIST_NEW:
        {
                if (*count < 4)
                        return (KERN_INVALID_ARGUMENT);

                tstate[0] = x86_THREAD_STATE;
                tstate[1] = x86_FLOAT_STATE;
                tstate[2] = x86_EXCEPTION_STATE;
                tstate[3] = x86_DEBUG_STATE;

                *count = 4;
                break;
        }

        case x86_SAVED_STATE32:
        {
                x86_saved_state32_t *state;
                x86_saved_state32_t *saved_state;

                if (*count < x86_SAVED_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_saved_state32_t *) tstate;
                saved_state = USER_REGS32(thr_act);

                /*
                 * First, copy everything:
                 */
                *state = *saved_state;
                state->ds = saved_state->ds & 0xffff;
                state->es = saved_state->es & 0xffff;
                state->fs = saved_state->fs & 0xffff;
                state->gs = saved_state->gs & 0xffff;

                *count = x86_SAVED_STATE32_COUNT;
                break;
        }

        case x86_SAVED_STATE64:
        {
                x86_saved_state64_t *state;
                x86_saved_state64_t *saved_state;

                if (*count < x86_SAVED_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_saved_state64_t *)tstate;
                saved_state = USER_REGS64(thr_act);

                /*
                 * First, copy everything:
                 */
                *state = *saved_state;
                state->fs = saved_state->fs & 0xffff;
                state->gs = saved_state->gs & 0xffff;

                *count = x86_SAVED_STATE64_COUNT;
                break;
        }

        case x86_FLOAT_STATE32:
        {
                if (*count < x86_FLOAT_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                *count = x86_FLOAT_STATE32_COUNT;

                return fpu_get_fxstate(thr_act, tstate);
        }

        case x86_FLOAT_STATE64:
        {
                if (*count < x86_FLOAT_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if ( !thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                *count = x86_FLOAT_STATE64_COUNT;

                return fpu_get_fxstate(thr_act, tstate);
        }

        case x86_FLOAT_STATE:
        {
                x86_float_state_t *state;
                kern_return_t kret;

                if (*count < x86_FLOAT_STATE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_float_state_t *)tstate;

                /*
                 * no need to bzero... currently
                 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
                 */
                if (thread_is_64bit(thr_act)) {
                        state->fsh.flavor = x86_FLOAT_STATE64;
                        state->fsh.count = x86_FLOAT_STATE64_COUNT;

                        kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
                } else {
                        state->fsh.flavor = x86_FLOAT_STATE32;
                        state->fsh.count = x86_FLOAT_STATE32_COUNT;

                        kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
                }
                *count = x86_FLOAT_STATE_COUNT;

                return(kret);
        }

        case OLD_i386_THREAD_STATE:
        case x86_THREAD_STATE32:
        {
                if (*count < x86_THREAD_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                *count = x86_THREAD_STATE32_COUNT;

                get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
                break;
        }

        case x86_THREAD_STATE64:
        {
                if (*count < x86_THREAD_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if ( !thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                *count = x86_THREAD_STATE64_COUNT;

                get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
                break;
        }

        case x86_THREAD_STATE:
        {
                x86_thread_state_t *state;

                if (*count < x86_THREAD_STATE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_thread_state_t *)tstate;

                bzero((char *)state, sizeof(x86_thread_state_t));

                if (thread_is_64bit(thr_act)) {
                        state->tsh.flavor = x86_THREAD_STATE64;
                        state->tsh.count = x86_THREAD_STATE64_COUNT;

                        get_thread_state64(thr_act, &state->uts.ts64);
                } else {
                        state->tsh.flavor = x86_THREAD_STATE32;
                        state->tsh.count = x86_THREAD_STATE32_COUNT;

                        get_thread_state32(thr_act, &state->uts.ts32);
                }
                *count = x86_THREAD_STATE_COUNT;

                break;
        }

        case x86_EXCEPTION_STATE32:
        {
                if (*count < x86_EXCEPTION_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                *count = x86_EXCEPTION_STATE32_COUNT;

                get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
                break;
        }

        case x86_EXCEPTION_STATE64:
        {
                if (*count < x86_EXCEPTION_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if ( !thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                *count = x86_EXCEPTION_STATE64_COUNT;

                get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
                break;
        }

        case x86_EXCEPTION_STATE:
        {
                x86_exception_state_t *state;

                if (*count < x86_EXCEPTION_STATE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_exception_state_t *)tstate;

                bzero((char *)state, sizeof(x86_exception_state_t));

                if (thread_is_64bit(thr_act)) {
                        state->esh.flavor = x86_EXCEPTION_STATE64;
                        state->esh.count = x86_EXCEPTION_STATE64_COUNT;

                        get_exception_state64(thr_act, &state->ues.es64);
                } else {
                        state->esh.flavor = x86_EXCEPTION_STATE32;
                        state->esh.count = x86_EXCEPTION_STATE32_COUNT;

                        get_exception_state32(thr_act, &state->ues.es32);
                }
                *count = x86_EXCEPTION_STATE_COUNT;

                break;
        }
        case x86_DEBUG_STATE32:
        {
                if (*count < x86_DEBUG_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

                *count = x86_DEBUG_STATE32_COUNT;

                break;
        }
        case x86_DEBUG_STATE64:
        {
                if (*count < x86_DEBUG_STATE64_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                if (!thread_is_64bit(thr_act))
                        return(KERN_INVALID_ARGUMENT);

                get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

                *count = x86_DEBUG_STATE64_COUNT;

                break;
        }
        case x86_DEBUG_STATE:
        {
                x86_debug_state_t *state;

                if (*count < x86_DEBUG_STATE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_debug_state_t *)tstate;

                bzero(state, sizeof *state);

                if (thread_is_64bit(thr_act)) {
                        state->dsh.flavor = x86_DEBUG_STATE64;
                        state->dsh.count = x86_DEBUG_STATE64_COUNT;

                        get_debug_state64(thr_act, &state->uds.ds64);
                } else {
                        state->dsh.flavor = x86_DEBUG_STATE32;
                        state->dsh.count = x86_DEBUG_STATE32_COUNT;

                        get_debug_state32(thr_act, &state->uds.ds32);
                }
                *count = x86_DEBUG_STATE_COUNT;
                break;
        }
        default:
                return(KERN_INVALID_ARGUMENT);
        }

        return(KERN_SUCCESS);
}

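/*
 * machine_thread_get_kern_state:
 *
 *      Return the register state of an interrupted kernel thread.
 *      Only the 32-bit flavors are handled here, since the kernel
 *      itself executes 32-bit at this point (the saved interrupt
 *      state is asserted below to be a 32-bit frame).
 */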
kern_return_t
machine_thread_get_kern_state(
        thread_t thread,
        thread_flavor_t flavor,
        thread_state_t tstate,
        mach_msg_type_number_t *count)
{

        /*
         * This works only for an interrupted kernel thread
         */
        if (thread != current_thread() || current_cpu_datap()->cpu_int_state == NULL)
                return KERN_FAILURE;

        switch(flavor) {

        case x86_THREAD_STATE32:
        {
                x86_thread_state32_t *state;
                x86_saved_state32_t *saved_state;

                if (*count < x86_THREAD_STATE32_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                state = (x86_thread_state32_t *)tstate;

                assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
                saved_state = saved_state32(current_cpu_datap()->cpu_int_state);
                /*
                 * General registers.
                 */
                state->eax = saved_state->eax;
                state->ebx = saved_state->ebx;
                state->ecx = saved_state->ecx;
                state->edx = saved_state->edx;
                state->edi = saved_state->edi;
                state->esi = saved_state->esi;
                state->ebp = saved_state->ebp;
                state->esp = saved_state->uesp;
                state->eflags = saved_state->efl;
                state->eip = saved_state->eip;
                state->cs = saved_state->cs;
                state->ss = saved_state->ss;
                state->ds = saved_state->ds & 0xffff;
                state->es = saved_state->es & 0xffff;
                state->fs = saved_state->fs & 0xffff;
                state->gs = saved_state->gs & 0xffff;

                *count = x86_THREAD_STATE32_COUNT;

                return KERN_SUCCESS;
        }
        break; // for completeness

        case x86_THREAD_STATE:
        {
                // wrap a 32 bit thread state into a 32/64bit clean thread state
                x86_thread_state_t *state;
                x86_saved_state32_t *saved_state;

                if (*count < x86_THREAD_STATE_COUNT)
                        return (KERN_INVALID_ARGUMENT);

                state = (x86_thread_state_t *)tstate;
                assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
                saved_state = saved_state32(current_cpu_datap()->cpu_int_state);

                state->tsh.flavor = x86_THREAD_STATE32;
                state->tsh.count = x86_THREAD_STATE32_COUNT;

                /*
                 * General registers.
                 */

                state->uts.ts32.eax = saved_state->eax;
                state->uts.ts32.ebx = saved_state->ebx;
                state->uts.ts32.ecx = saved_state->ecx;
                state->uts.ts32.edx = saved_state->edx;
                state->uts.ts32.edi = saved_state->edi;
                state->uts.ts32.esi = saved_state->esi;
                state->uts.ts32.ebp = saved_state->ebp;
                state->uts.ts32.esp = saved_state->uesp;
                state->uts.ts32.eflags = saved_state->efl;
                state->uts.ts32.eip = saved_state->eip;
                state->uts.ts32.cs = saved_state->cs;
                state->uts.ts32.ss = saved_state->ss;
                state->uts.ts32.ds = saved_state->ds & 0xffff;
                state->uts.ts32.es = saved_state->es & 0xffff;
                state->uts.ts32.fs = saved_state->fs & 0xffff;
                state->uts.ts32.gs = saved_state->gs & 0xffff;

                *count = x86_THREAD_STATE_COUNT;
                return KERN_SUCCESS;
        }
        break;
        }
        return KERN_FAILURE;
}


/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
        thread_t                thread,
        task_t                  task)
{
        pcb_t                   pcb = &thread->machine.xxx_pcb;
        struct real_descriptor  *ldtp;
        pmap_paddr_t            paddr;
        x86_saved_state_t       *iss;

        inval_copy_windows(thread);

        thread->machine.physwindow_pte = 0;
        thread->machine.physwindow_busy = 0;

        if (task_has_64BitAddr(task)) {
                x86_sframe64_t *sf64;

                sf64 = (x86_sframe64_t *)zalloc(iss_zone64);

                if (sf64 == NULL)
                        panic("iss_zone64");
                pcb->sf = (void *)sf64;

                bzero((char *)sf64, sizeof(x86_sframe64_t));

                iss = (x86_saved_state_t *) &sf64->ssf;
                iss->flavor = x86_SAVED_STATE64;
                /*
                 * Guarantee that the bootstrapped thread will be in user
                 * mode.
                 */
                iss->ss_64.isf.rflags = EFL_USER_SET;
                iss->ss_64.isf.cs = USER64_CS;
                iss->ss_64.isf.ss = USER_DS;
                iss->ss_64.fs = USER_DS;
                iss->ss_64.gs = USER_DS;
        } else {
                if (cpu_mode_is64bit()) {
                        x86_sframe_compat32_t *sfc32;

                        sfc32 = (x86_sframe_compat32_t *)zalloc(iss_zone32);
                        if (sfc32 == NULL)
                                panic("iss_zone32");
                        pcb->sf = (void *)sfc32;

                        bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

                        iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
                        iss->flavor = x86_SAVED_STATE32;
#if DEBUG
                        {
                                x86_saved_state_compat32_t *xssc;

                                xssc = (x86_saved_state_compat32_t *) iss;
                                xssc->pad_for_16byte_alignment[0] = 0x64326432;
                                xssc->pad_for_16byte_alignment[1] = 0x64326432;
                        }
#endif
                } else {
                        x86_sframe32_t *sf32;

                        sf32 = (x86_sframe32_t *)zalloc(iss_zone32);

                        if (sf32 == NULL)
                                panic("iss_zone32");
                        pcb->sf = (void *)sf32;

                        bzero((char *)sf32, sizeof(x86_sframe32_t));

                        iss = (x86_saved_state_t *) &sf32->ssf;
                        iss->flavor = x86_SAVED_STATE32;
                }
                /*
                 * Guarantee that the bootstrapped thread will be in user
                 * mode.
                 */
                iss->ss_32.cs = USER_CS;
                iss->ss_32.ss = USER_DS;
                iss->ss_32.ds = USER_DS;
                iss->ss_32.es = USER_DS;
                iss->ss_32.fs = USER_DS;
                iss->ss_32.gs = USER_DS;
                iss->ss_32.efl = EFL_USER_SET;
        }
        pcb->iss = iss;

        thread->machine.pcb = pcb;
        simple_lock_init(&pcb->lock, 0);

        ldtp = (struct real_descriptor *)pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
        pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
        pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
        pcb->uldt_selector = 0;

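        /*
         * Stash PTEs for the (up to two) physical pages backing the
         * saved-state area, so that act_machine_switch_pcb() can alias
         * them into the per-cpu high window (HIGH_CPU_ISS0/1) for
         * legacy-mode trap entry.
         */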
        pcb->iss_pte0 = (uint64_t)pte_kernel_rw(kvtophys((vm_offset_t)pcb->iss));

        if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)(pcb->iss) + PAGE_SIZE))))
                pcb->iss_pte1 = INTEL_PTE_INVALID;
        else
                pcb->iss_pte1 = (uint64_t)pte_kernel_rw(paddr);

        return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread
 */
void
machine_thread_destroy(
        thread_t                thread)
{
        register pcb_t  pcb = thread->machine.pcb;

        assert(pcb);

        if (pcb->ifps != 0)
                fpu_free(pcb->ifps);
        if (pcb->sf != 0) {
                if (thread_is_64bit(thread))
                        zfree(iss_zone64, pcb->sf);
                else
                        zfree(iss_zone32, pcb->sf);
                pcb->sf = 0;
        }
        if (pcb->ids) {
                if (thread_is_64bit(thread))
                        zfree(ids_zone64, pcb->ids);
                else
                        zfree(ids_zone32, pcb->ids);
        }
        thread->machine.pcb = (pcb_t)0;
}

void
machine_thread_switch_addrmode(thread_t thread, int oldmode_is64bit)
{
        register pcb_t  pcb = thread->machine.pcb;

        assert(pcb);

        if (pcb->sf != 0) {
                if (oldmode_is64bit)
                        zfree(iss_zone64, pcb->sf);
                else
                        zfree(iss_zone32, pcb->sf);
        }
        machine_thread_create(thread, thread->task);

        /* If we're switching ourselves, reset the pcb addresses etc. */
        if (thread == current_thread())
                act_machine_switch_pcb(thread);
}


/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread( thread_t thread )
{
        current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * This is called when a task is terminated.
 */
void
machine_thread_terminate_self(void)
{
        task_t self_task = current_task();
        if (self_task) {
                user_ldt_t user_ldt = self_task->i386_ldt;
                if (user_ldt != 0) {
                        self_task->i386_ldt = 0;
                        user_ldt_free(user_ldt);
                }
        }
}

void
act_machine_return(int code)
{
        /*
         * This code is called with nothing locked.
         * It also returns with nothing locked, if it returns.
         *
         * This routine terminates the current thread activation.
         * If this is the only activation associated with its
         * thread shuttle, then the entire thread (shuttle plus
         * activation) is terminated.
         */
        assert( code == KERN_TERMINATED );

        thread_terminate_self();

        /*NOTREACHED*/

        panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
        if (cpu_mode_is64bit()) {
                iss_zone64 = zinit(sizeof(x86_sframe64_t),
                                THREAD_MAX * sizeof(x86_sframe64_t),
                                THREAD_CHUNK * sizeof(x86_sframe64_t),
                                "x86_64 saved state");

                assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
                iss_zone32 = zinit(sizeof(x86_sframe_compat32_t),
                                THREAD_MAX * sizeof(x86_sframe_compat32_t),
                                THREAD_CHUNK * sizeof(x86_sframe_compat32_t),
                                "x86_32 saved state");

                ids_zone32 = zinit(sizeof(x86_debug_state32_t),
                                THREAD_MAX * (sizeof(x86_debug_state32_t)),
                                THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
                                "x86_32 debug state");
                ids_zone64 = zinit(sizeof(x86_debug_state64_t),
                                THREAD_MAX * sizeof(x86_debug_state64_t),
                                THREAD_CHUNK * sizeof(x86_debug_state64_t),
                                "x86_64 debug state");

        } else {
                iss_zone32 = zinit(sizeof(x86_sframe32_t),
                                THREAD_MAX * sizeof(x86_sframe32_t),
                                THREAD_CHUNK * sizeof(x86_sframe32_t),
                                "x86 saved state");
                ids_zone32 = zinit(sizeof(x86_debug_state32_t),
                                THREAD_MAX * (sizeof(x86_debug_state32_t)),
                                THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
                                "x86 debug state");
        }
        fpu_module_init();
        iopb_init();
}

/*
 * Some routines for debugging activation code
 */
static void     dump_handlers(thread_t);
void            dump_regs(thread_t);
int             dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
        ReturnHandler *rhp = thr_act->handlers;
        int counter = 0;

        printf("\t");
        while (rhp) {
                if (rhp == &thr_act->special_handler) {
                        if (rhp->next)
                                printf("[NON-Zero next ptr(%x)]", rhp->next);
                        printf("special_handler()->");
                        break;
                }
                printf("hdlr_%d(%x)->", counter, rhp->handler);
                rhp = rhp->next;
                if (++counter > 32) {
                        printf("Aborting: HUGE handler chain\n");
                        break;
                }
        }
        printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
        if (thr_act->machine.pcb == NULL)
                return;

        if (thread_is_64bit(thr_act)) {
                x86_saved_state64_t *ssp;

                ssp = USER_REGS64(thr_act);

                panic("dump_regs: 64bit tasks not yet supported");

        } else {
                x86_saved_state32_t *ssp;

                ssp = USER_REGS32(thr_act);

                /*
                 * Print out user register state
                 */
                printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
                    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

                printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
                    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

                printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
        }
}

int
dump_act(thread_t thr_act)
{
        if (!thr_act)
                return(0);

        printf("thread(0x%x)(%d): task=%x(%d)\n",
            thr_act, thr_act->ref_count,
            thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

        printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
            thr_act->suspend_count, thr_act->user_stop_count,
            thr_act->active, thr_act->ast);
        printf("\tpcb=%x\n", thr_act->machine.pcb);

        if (thr_act->kernel_stack) {
                vm_offset_t stack = thr_act->kernel_stack;

                printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
                    stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
                    STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
        }

        dump_handlers(thr_act);
        dump_regs(thr_act);
        return((int)thr_act);
}

user_addr_t
get_useraddr(void)
{
        thread_t thr_act = current_thread();

        if (thr_act->machine.pcb == NULL)
                return (0);

        if (thread_is_64bit(thr_act)) {
                x86_saved_state64_t *iss64;

                iss64 = USER_REGS64(thr_act);

                return(iss64->isf.rip);
        } else {
                x86_saved_state32_t *iss32;

                iss32 = USER_REGS32(thr_act);

                return(iss32->eip);
        }
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
        vm_offset_t stack;

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                     thread, thread->priority,
                     thread->sched_pri, 0,
                     0);

        stack = thread->kernel_stack;
        thread->kernel_stack = 0;

        return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
        thread_t                thread,
        vm_offset_t             stack)
{
        struct x86_kernel_state32 *statep;

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                     thread, thread->priority,
                     thread->sched_pri, 0, 0);

        assert(stack);
        thread->kernel_stack = stack;

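        /*
         * Plant the resume context: the first dispatch onto this stack
         * enters Thread_continue (k_eip), which picks up the generic
         * thread_continue routine planted in k_ebx.
         */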
        statep = STACK_IKS(stack);
        statep->k_eip = (unsigned long) Thread_continue;
        statep->k_ebx = (unsigned long) thread_continue;
        statep->k_esp = (unsigned long) STACK_IEL(stack);

        return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
                      thread_t new)
{
        vm_offset_t stack;

        assert(new);
        assert(old);

        stack = old->kernel_stack;
        if (stack == old->reserved_stack) {
                assert(new->reserved_stack);
                old->reserved_stack = new->reserved_stack;
                new->reserved_stack = stack;
        }
        old->kernel_stack = 0;
        /*
         * A full call to machine_stack_attach() is unnecessary
         * because the old stack is already initialized.
         */
        new->kernel_stack = stack;

        fpu_save_context(old);

        old->machine.specFlags &= ~OnProc;
        new->machine.specFlags |= OnProc;

        PMAP_SWITCH_CONTEXT(old, new, cpu_number());
        act_machine_switch_pcb(new);

        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
                old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

        machine_set_current_thread(new);

        return;
}


struct x86_act_context32 {
        x86_saved_state32_t ss;
        x86_float_state32_t fs;
        x86_debug_state32_t ds;
};

struct x86_act_context64 {
        x86_saved_state64_t ss;
        x86_float_state64_t fs;
        x86_debug_state64_t ds;
};


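/*
 * act_thread_csave() snapshots the current thread's user register,
 * float and debug state into a kalloc'd context; act_thread_catt()
 * pushes a saved context back into the thread and frees it; and
 * act_thread_cfree() is the hook for discarding an unused context
 * (currently a no-op here).
 */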
void *
act_thread_csave(void)
{
        kern_return_t kret;
        mach_msg_type_number_t val;
        thread_t thr_act = current_thread();

        if (thread_is_64bit(thr_act)) {
                struct x86_act_context64 *ic64;

                ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

                if (ic64 == (struct x86_act_context64 *)NULL)
                        return((void *)0);

                val = x86_SAVED_STATE64_COUNT;
                kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
                                (thread_state_t) &ic64->ss, &val);
                if (kret != KERN_SUCCESS) {
                        kfree(ic64, sizeof(struct x86_act_context64));
                        return((void *)0);
                }
                val = x86_FLOAT_STATE64_COUNT;
                kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
                                (thread_state_t) &ic64->fs, &val);

                if (kret != KERN_SUCCESS) {
                        kfree(ic64, sizeof(struct x86_act_context64));
                        return((void *)0);
                }

                val = x86_DEBUG_STATE64_COUNT;
                kret = machine_thread_get_state(thr_act,
                                x86_DEBUG_STATE64,
                                (thread_state_t)&ic64->ds,
                                &val);
                if (kret != KERN_SUCCESS) {
                        kfree(ic64, sizeof(struct x86_act_context64));
                        return((void *)0);
                }
                return(ic64);

        } else {
                struct x86_act_context32 *ic32;

                ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

                if (ic32 == (struct x86_act_context32 *)NULL)
                        return((void *)0);

                val = x86_SAVED_STATE32_COUNT;
                kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
                                (thread_state_t) &ic32->ss, &val);
                if (kret != KERN_SUCCESS) {
                        kfree(ic32, sizeof(struct x86_act_context32));
                        return((void *)0);
                }
                val = x86_FLOAT_STATE32_COUNT;
                kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
                                (thread_state_t) &ic32->fs, &val);
                if (kret != KERN_SUCCESS) {
                        kfree(ic32, sizeof(struct x86_act_context32));
                        return((void *)0);
                }

                val = x86_DEBUG_STATE32_COUNT;
                kret = machine_thread_get_state(thr_act,
                                x86_DEBUG_STATE32,
                                (thread_state_t)&ic32->ds,
                                &val);
                if (kret != KERN_SUCCESS) {
                        kfree(ic32, sizeof(struct x86_act_context32));
                        return((void *)0);
                }
                return(ic32);
        }
}


void
act_thread_catt(void *ctx)
{
        thread_t thr_act = current_thread();
        kern_return_t kret;

        if (ctx == (void *)NULL)
                return;

        if (thread_is_64bit(thr_act)) {
                struct x86_act_context64 *ic64;

                ic64 = (struct x86_act_context64 *)ctx;

                kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
                                (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
                if (kret == KERN_SUCCESS) {
                        machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
                                        (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
                }
                kfree(ic64, sizeof(struct x86_act_context64));
        } else {
                struct x86_act_context32 *ic32;

                ic32 = (struct x86_act_context32 *)ctx;

                kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
                                (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
                if (kret == KERN_SUCCESS) {
                        kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
                                        (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
                        if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
                                machine_thread_set_state(thr_act,
                                                x86_DEBUG_STATE32,
                                                (thread_state_t)&ic32->ds,
                                                x86_DEBUG_STATE32_COUNT);
                }
                kfree(ic32, sizeof(struct x86_act_context32));
        }
}


void act_thread_cfree(__unused void *ctx)
{
        /* XXX - Unused */
}