/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>
#include <i386/mp_desc.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
    /* FLAVOR_LIST */
    0,
    x86_THREAD_STATE32_COUNT,
    x86_FLOAT_STATE32_COUNT,
    x86_EXCEPTION_STATE32_COUNT,
    x86_THREAD_STATE64_COUNT,
    x86_FLOAT_STATE64_COUNT,
    x86_EXCEPTION_STATE64_COUNT,
    x86_THREAD_STATE_COUNT,
    x86_FLOAT_STATE_COUNT,
    x86_EXCEPTION_STATE_COUNT,
    0,
    x86_SAVED_STATE32_COUNT,
    x86_SAVED_STATE64_COUNT,
    x86_DEBUG_STATE32_COUNT,
    x86_DEBUG_STATE64_COUNT,
    x86_DEBUG_STATE_COUNT
};
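
/*
 * Editor's note (sketch, not from the original source): the thread-state
 * flavor constants from <mach/i386/thread_status.h> index this table
 * directly, e.g.
 *
 *	unsigned int words = _MachineStateCount[x86_THREAD_STATE32];
 *	assert(words == x86_THREAD_STATE32_COUNT);
 *
 * so a flavor's entry is the mach_msg_type_number_t count a caller
 * passes for that flavor's state buffer.
 */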

zone_t iss_zone32;	/* zone for 32bit saved_state area */
zone_t iss_zone64;	/* zone for 64bit saved_state area */
zone_t ids_zone32;	/* zone for 32bit debug_state area */
zone_t ids_zone64;	/* zone for 64bit debug_state area */


/* Forward */

void act_machine_throughcall(thread_t thr_act);
user_addr_t get_useraddr(void);
void act_machine_return(int);
void act_machine_sv_free(thread_t, int);

extern thread_t Switch_context(
    thread_t		old,
    thread_continue_t	cont,
    thread_t		new);
extern void Thread_continue(void);
extern void Load_context(
    thread_t		thread);


static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
    int i;
    uint32_t mask1, mask2;

    /*
     * If the DE bit is set in CR4, R/W0-3 can be pattern
     * "10B" to indicate i/o reads and writes.
     */
    if (!(get_cr4() & CR4_DE))
	for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		i++, mask1 <<= 4, mask2 <<= 4)
	    if ((*dr7 & mask1) == mask2)
		return (FALSE);

    /*
     * len0-3 pattern "10B" is ok for len on 64-bit.
     */
    if (current_cpu_datap()->cpu_is64bit == TRUE)
	for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
		i++, mask1 <<= 4, mask2 <<= 4)
	    if ((*dr7 & mask1) == mask2)
		return (FALSE);

    /*
     * if we are doing an instruction execution break (indicated
     * by r/w[x] being "00B"), then the len[x] must also be set
     * to "00B"
     */
    for (i = 0; i < 4; i++)
	if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
		((((*dr7 >> (18 + i*4))) & 0x3) != 0))
	    return (FALSE);

    /*
     * Intel docs have these bits fixed.
     */
    *dr7 |= 0x1 << 10;		/* set bit 10 to 1 */
    *dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
    *dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
    *dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
    *dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

    /*
     * We don't allow anything to set the global breakpoints.
     */

    if (*dr7 & 0x2)
	return (FALSE);

    if (*dr7 & (0x2<<2))
	return (FALSE);

    if (*dr7 & (0x2<<4))
	return (FALSE);

    if (*dr7 & (0x2<<6))
	return (FALSE);

    return (TRUE);
}
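
/*
 * Editor's sketch (assumed helper macros, not part of this file): the
 * DR7 fields tested above, per the Intel SDM debug-register layout.
 *
 *	#define DR7_L(dr7, n)	(((dr7) >> ((n) * 2)) & 0x1)      // local enable, slot n
 *	#define DR7_G(dr7, n)	(((dr7) >> ((n) * 2 + 1)) & 0x1)  // global enable, slot n
 *	#define DR7_RW(dr7, n)	(((dr7) >> (16 + (n) * 4)) & 0x3) // 00=execute 01=write 10=i/o 11=r/w
 *	#define DR7_LEN(dr7, n)	(((dr7) >> (18 + (n) * 4)) & 0x3) // 00=1 01=2 10=8 11=4 bytes
 *
 * dr7_is_valid() rejects any set G bit, rejects the "i/o" R/W encoding
 * unless CR4.DE is set, and rejects a nonzero LEN paired with an
 * execute (R/W == 00B) breakpoint.
 */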

static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
    __asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
    __asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
    __asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
    __asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
    if (cpu_mode_is64bit())
	cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
    /*
     * We need to enter 64-bit mode in order to set the full
     * width of these registers.
     */
    set_64bit_debug_regs(ds);
    cdp->cpu_dr7 = ds->dr7;
}

static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *ids;
    pcb_t pcb;

    pcb = thread->machine.pcb;
    ids = pcb->ids;

    if (ids == NULL) {
	ids = zalloc(ids_zone32);
	bzero(ids, sizeof *ids);

	simple_lock(&pcb->lock);
	/* make sure it wasn't already alloc()'d elsewhere */
	if (pcb->ids == NULL) {
	    pcb->ids = ids;
	    simple_unlock(&pcb->lock);
	} else {
	    simple_unlock(&pcb->lock);
	    zfree(ids_zone32, ids);
	}
    }

    if (!dr7_is_valid(&ds->dr7))
	goto err;

    /*
     * Only allow local breakpoints and make sure they are not
     * in the trampoline code.
     */

    if (ds->dr7 & 0x1)
	if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
	    goto err;

    if (ds->dr7 & (0x1<<2))
	if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
	    goto err;

    if (ds->dr7 & (0x1<<4))
	if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
	    goto err;

    if (ds->dr7 & (0x1<<6))
	if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
	    goto err;

    ids->dr0 = ds->dr0;
    ids->dr1 = ds->dr1;
    ids->dr2 = ds->dr2;
    ids->dr3 = ds->dr3;
    ids->dr6 = ds->dr6;
    ids->dr7 = ds->dr7;

    return (KERN_SUCCESS);

err:
    return (KERN_INVALID_ARGUMENT);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *ids;
    pcb_t pcb;

    pcb = thread->machine.pcb;
    ids = pcb->ids;

    if (ids == NULL) {
	ids = zalloc(ids_zone64);
	bzero(ids, sizeof *ids);

	simple_lock(&pcb->lock);
	/* make sure it wasn't already alloc()'d elsewhere */
	if (pcb->ids == NULL) {
	    pcb->ids = ids;
	    simple_unlock(&pcb->lock);
	} else {
	    simple_unlock(&pcb->lock);
	    zfree(ids_zone64, ids);
	}
    }

    if (!dr7_is_valid((uint32_t *)&ds->dr7))
	goto err;

    /*
     * Don't allow the user to set debug addresses above their max
     * value.
     */
    if (ds->dr7 & 0x1)
	if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
	    goto err;

    if (ds->dr7 & (0x1<<2))
	if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
	    goto err;

    if (ds->dr7 & (0x1<<4))
	if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
	    goto err;

    if (ds->dr7 & (0x1<<6))
	if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
	    goto err;

    ids->dr0 = ds->dr0;
    ids->dr1 = ds->dr1;
    ids->dr2 = ds->dr2;
    ids->dr3 = ds->dr3;
    ids->dr6 = ds->dr6;
    ids->dr7 = ds->dr7;

    return (KERN_SUCCESS);

err:
    return (KERN_INVALID_ARGUMENT);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
    x86_debug_state32_t *saved_state;

    saved_state = thread->machine.pcb->ids;

    if (saved_state) {
	ds->dr0 = saved_state->dr0;
	ds->dr1 = saved_state->dr1;
	ds->dr2 = saved_state->dr2;
	ds->dr3 = saved_state->dr3;
	ds->dr4 = saved_state->dr4;
	ds->dr5 = saved_state->dr5;
	ds->dr6 = saved_state->dr6;
	ds->dr7 = saved_state->dr7;
    } else
	bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
    x86_debug_state64_t *saved_state;

    saved_state = (x86_debug_state64_t *)thread->machine.pcb->ids;

    if (saved_state) {
	ds->dr0 = saved_state->dr0;
	ds->dr1 = saved_state->dr1;
	ds->dr2 = saved_state->dr2;
	ds->dr3 = saved_state->dr3;
	ds->dr4 = saved_state->dr4;
	ds->dr5 = saved_state->dr5;
	ds->dr6 = saved_state->dr6;
	ds->dr7 = saved_state->dr7;
    } else
	bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages.
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}


static void
act_machine_switch_pcb( thread_t new )
{
    pcb_t			pcb = new->machine.pcb;
    struct real_descriptor	*ldtp;
    vm_offset_t			pcb_stack_top;
    vm_offset_t			hi_pcb_stack_top;
    vm_offset_t			hi_iss;
    cpu_data_t			*cdp = current_cpu_datap();

    assert(new->kernel_stack != 0);
    STACK_IEL(new->kernel_stack)->saved_state = pcb->iss;

    if (!cpu_mode_is64bit()) {
	x86_saved_state32_tagged_t	*hi_iss32;

	/*
	 * Save a pointer to the top of the "kernel" stack -
	 * actually the place in the PCB where a trap into
	 * kernel mode will push the registers.
	 */
	hi_iss = (vm_offset_t)((unsigned long)
	    pmap_cpu_high_map_vaddr(cpu_number(), HIGH_CPU_ISS0) |
	    ((unsigned long)pcb->iss & PAGE_MASK));

	cdp->cpu_hi_iss = (void *)hi_iss;

	pmap_high_map(pcb->iss_pte0, HIGH_CPU_ISS0);
	pmap_high_map(pcb->iss_pte1, HIGH_CPU_ISS1);

	hi_iss32 = (x86_saved_state32_tagged_t *) hi_iss;
	assert(hi_iss32->tag == x86_SAVED_STATE32);

	hi_pcb_stack_top = (int) (hi_iss32 + 1);

	/*
	 * For fast syscall, top of interrupt stack points to pcb stack.
	 */
	*(vm_offset_t *) current_sstk() = hi_pcb_stack_top;

	current_ktss()->esp0 = hi_pcb_stack_top;
/* XXX: This check is performed against the thread save state flavor rather than the
 * task's 64-bit feature flag because of the thread/task 64-bit state divergence
 * that can arise in task_set_64bit() on x86. When that is addressed, we can
 * revert to checking the task 64 bit feature flag. The assert below is retained
 * for that reason.
 */
    } else if (is_saved_state64(pcb->iss)) {
	x86_saved_state64_tagged_t	*iss64;
	vm_offset_t			isf;

	assert(is_saved_state64(pcb->iss));

	iss64 = (x86_saved_state64_tagged_t *) pcb->iss;

	/*
	 * Set pointer to PCB's interrupt stack frame in cpu data.
	 * Used by syscall and double-fault trap handlers.
	 */
	isf = (vm_offset_t) &iss64->state.isf;
	cdp->cpu_uber.cu_isf = UBER64(isf);
	pcb_stack_top = (vm_offset_t) (iss64 + 1);
	/* require 16-byte alignment */
	assert((pcb_stack_top & 0xF) == 0);
	/* Interrupt stack is pcb */
	current_ktss64()->rsp0 = UBER64(pcb_stack_top);

	/*
	 * Top of temporary sysenter stack points to pcb stack.
	 * Although this is not normally used by 64-bit users,
	 * it needs to be set in case a sysenter is attempted.
	 */
	*current_sstk64() = UBER64(pcb_stack_top);

	cdp->cpu_task_map = new->map->pmap->pm_kernel_cr3 ?
	    TASK_MAP_64BIT_SHARED : TASK_MAP_64BIT;

	/*
	 * Enable the 64-bit user code segment, USER64_CS.
	 */
	ldt_desc_p(USER64_CS)->access |= ACC_PL_U;

    } else {
	x86_saved_state_compat32_t	*iss32compat;
	vm_offset_t			isf;

	assert(is_saved_state32(pcb->iss));
	iss32compat = (x86_saved_state_compat32_t *) pcb->iss;

	pcb_stack_top = (int) (iss32compat + 1);
	/* require 16-byte alignment */
	assert((pcb_stack_top & 0xF) == 0);

	/*
	 * Set pointer to PCB's interrupt stack frame in cpu data.
	 * Used by debug trap handler.
	 */
	isf = (vm_offset_t) &iss32compat->isf64;
	cdp->cpu_uber.cu_isf = UBER64(isf);

	/* Top of temporary sysenter stack points to pcb stack */
	*current_sstk64() = UBER64(pcb_stack_top);

	/* Interrupt stack is pcb */
	current_ktss64()->rsp0 = UBER64(pcb_stack_top);

	cdp->cpu_task_map = TASK_MAP_32BIT;

	/*
	 * Disable USER64_CS.
	 */
	ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
    }

    /*
     * Set the thread`s cthread (a.k.a pthread).
     * For 32-bit user this involves setting the USER_CTHREAD
     * descriptor in the LDT to point to the cthread data.
     * This involves copying in the pre-initialized descriptor.
     */
    ldtp = (struct real_descriptor *)current_ldt();
    ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc;
    if (pcb->uldt_selector != 0)
	ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc;
    /*
     * For 64-bit, we additionally set the 64-bit User GS base
     * address. On return to 64-bit user, the GS.Base MSR will be written.
     */
    cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;

    /*
     * Set the thread`s LDT or LDT entry.
     */
    if (new->task == TASK_NULL || new->task->i386_ldt == 0) {
	/*
	 * Use system LDT.
	 */
	ml_cpu_set_ldt(KERNEL_LDT);
    } else {
	/*
	 * Task has its own LDT.
	 */
	user_ldt_set(new);
    }
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
    thread_t		new)
{
    new->machine.specFlags |= OnProc;
    act_machine_switch_pcb(new);
    Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
    thread_t		old,
    thread_continue_t	continuation,
    thread_t		new)
{
#if MACH_RT
    assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif

    /*
     * Save FP registers if in use.
     */
    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    /*
     * Switch address maps if need be, even if not switching tasks.
     * (A server activation may be "borrowing" a client map.)
     */
    PMAP_SWITCH_CONTEXT(old, new, cpu_number())

    /*
     * Load the rest of the user state for the new thread.
     */
    act_machine_switch_pcb(new);

    KERNEL_DEBUG_CONSTANT(
	MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
	(int)old, (int)new, old->sched_pri, new->sched_pri, 0);

    return(Switch_context(old, continuation, new));
}

/*
 * act_machine_sv_free
 * release saveareas associated with an act.  if flag is true, release
 * user level savearea(s) too, else don't
 */
void
act_machine_sv_free(__unused thread_t act, __unused int flag)
{
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
    thread_t thread)
{
    /*
     * If there's an fpu save area, free it.
     * The initialized state will then be lazily faulted-in, if required.
     * And if we're target, re-arm the no-fpu trap.
     */
    if (thread->machine.pcb->ifps) {
	(void) fpu_set_fxstate(thread, NULL);

	if (thread == current_thread())
	    clear_fpu();
    }
    return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
    return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */

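/*
 * Example (editor's sketch of a user-space caller, not kernel code):
 * the width-independent x86_THREAD_STATE flavor reports which leaf
 * flavor it filled in, so one call handles 32- and 64-bit threads.
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	void show_pc(thread_act_t thread)
 *	{
 *		x86_thread_state_t state;
 *		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *
 *		if (thread_get_state(thread, x86_THREAD_STATE,
 *		    (thread_state_t)&state, &count) != KERN_SUCCESS)
 *			return;
 *		if (state.tsh.flavor == x86_THREAD_STATE64)
 *			printf("rip=%llx\n", state.uts.ts64.rip);
 *		else
 *			printf("eip=%x\n", state.uts.ts32.eip);
 *	}
 */
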
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
    x86_saved_state64_t *saved_state;

    saved_state = USER_REGS64(thread);

    es->trapno = saved_state->isf.trapno;
    es->err = saved_state->isf.err;
    es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
    x86_saved_state32_t *saved_state;

    saved_state = USER_REGS32(thread);

    es->trapno = saved_state->trapno;
    es->err = saved_state->err;
    es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t	*saved_state;

    saved_state = USER_REGS32(thread);

    saved_state->eax = ts->eax;
    saved_state->ebx = ts->ebx;
    saved_state->ecx = ts->ecx;
    saved_state->edx = ts->edx;
    saved_state->edi = ts->edi;
    saved_state->esi = ts->esi;
    saved_state->ebp = ts->ebp;
    saved_state->uesp = ts->esp;
    saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->eip = ts->eip;
    saved_state->cs = ts->cs ? ts->cs : USER_CS;
    saved_state->ss = ts->ss ? ts->ss : USER_DS;
    saved_state->ds = ts->ds ? ts->ds : USER_DS;
    saved_state->es = ts->es ? ts->es : USER_DS;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    /*
     * If the trace trap bit is being set,
     * ensure that the user returns via iret
     * - which is signaled thusly:
     */
    if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
	saved_state->cs = SYSENTER_TF_CS;

    return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t	*saved_state;

    saved_state = USER_REGS64(thread);

    if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	!IS_USERADDR64_CANONICAL(ts->rip))
	return(KERN_INVALID_ARGUMENT);

    saved_state->r8 = ts->r8;
    saved_state->r9 = ts->r9;
    saved_state->r10 = ts->r10;
    saved_state->r11 = ts->r11;
    saved_state->r12 = ts->r12;
    saved_state->r13 = ts->r13;
    saved_state->r14 = ts->r14;
    saved_state->r15 = ts->r15;
    saved_state->rax = ts->rax;
    saved_state->rbx = ts->rbx;
    saved_state->rcx = ts->rcx;
    saved_state->rdx = ts->rdx;
    saved_state->rdi = ts->rdi;
    saved_state->rsi = ts->rsi;
    saved_state->rbp = ts->rbp;
    saved_state->isf.rsp = ts->rsp;
    saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
    saved_state->isf.rip = ts->rip;
    saved_state->isf.cs = USER64_CS;
    saved_state->fs = ts->fs;
    saved_state->gs = ts->gs;

    return(KERN_SUCCESS);
}


static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
    x86_saved_state32_t	*saved_state;

    saved_state = USER_REGS32(thread);

    ts->eax = saved_state->eax;
    ts->ebx = saved_state->ebx;
    ts->ecx = saved_state->ecx;
    ts->edx = saved_state->edx;
    ts->edi = saved_state->edi;
    ts->esi = saved_state->esi;
    ts->ebp = saved_state->ebp;
    ts->esp = saved_state->uesp;
    ts->eflags = saved_state->efl;
    ts->eip = saved_state->eip;
    ts->cs = saved_state->cs;
    ts->ss = saved_state->ss;
    ts->ds = saved_state->ds;
    ts->es = saved_state->es;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
    x86_saved_state64_t	*saved_state;

    saved_state = USER_REGS64(thread);

    ts->r8 = saved_state->r8;
    ts->r9 = saved_state->r9;
    ts->r10 = saved_state->r10;
    ts->r11 = saved_state->r11;
    ts->r12 = saved_state->r12;
    ts->r13 = saved_state->r13;
    ts->r14 = saved_state->r14;
    ts->r15 = saved_state->r15;
    ts->rax = saved_state->rax;
    ts->rbx = saved_state->rbx;
    ts->rcx = saved_state->rcx;
    ts->rdx = saved_state->rdx;
    ts->rdi = saved_state->rdi;
    ts->rsi = saved_state->rsi;
    ts->rbp = saved_state->rbp;
    ts->rsp = saved_state->isf.rsp;
    ts->rflags = saved_state->isf.rflags;
    ts->rip = saved_state->isf.rip;
    ts->cs = saved_state->isf.cs;
    ts->fs = saved_state->fs;
    ts->gs = saved_state->gs;
}


/*
 * act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    switch (flavor)
    {
	case x86_SAVED_STATE32:
	{
	    x86_saved_state32_t	*state;
	    x86_saved_state32_t	*saved_state;

	    if (count < x86_SAVED_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_saved_state32_t *) tstate;

	    /* Check segment selectors are safe */
	    if (!valid_user_segment_selectors(state->cs,
					      state->ss,
					      state->ds,
					      state->es,
					      state->fs,
					      state->gs))
		return KERN_INVALID_ARGUMENT;

	    saved_state = USER_REGS32(thr_act);

	    /*
	     * General registers
	     */
	    saved_state->edi = state->edi;
	    saved_state->esi = state->esi;
	    saved_state->ebp = state->ebp;
	    saved_state->uesp = state->uesp;
	    saved_state->ebx = state->ebx;
	    saved_state->edx = state->edx;
	    saved_state->ecx = state->ecx;
	    saved_state->eax = state->eax;
	    saved_state->eip = state->eip;

	    saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

	    /*
	     * If the trace trap bit is being set,
	     * ensure that the user returns via iret
	     * - which is signaled thusly:
	     */
	    if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
		state->cs = SYSENTER_TF_CS;

	    /*
	     * User setting segment registers.
	     * Code and stack selectors have already been
	     * checked.  Others will be reset by 'iret'
	     * if they are not valid.
	     */
	    saved_state->cs = state->cs;
	    saved_state->ss = state->ss;
	    saved_state->ds = state->ds;
	    saved_state->es = state->es;
	    saved_state->fs = state->fs;
	    saved_state->gs = state->gs;
	    break;
	}

	case x86_SAVED_STATE64:
	{
	    x86_saved_state64_t	*state;
	    x86_saved_state64_t	*saved_state;

	    if (count < x86_SAVED_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_saved_state64_t *) tstate;

	    /* Check segment selectors are safe XXX gs/fs? */
	    if (!valid_user_code_selector(state->isf.cs) ||
		!valid_user_data_selector(state->fs) ||
		!valid_user_data_selector(state->gs))
		return KERN_INVALID_ARGUMENT;

	    /* Check pc and stack are canonical addresses */
	    if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		!IS_USERADDR64_CANONICAL(state->isf.rip))
		return KERN_INVALID_ARGUMENT;

	    saved_state = USER_REGS64(thr_act);

	    /*
	     * General registers
	     */
	    saved_state->r8 = state->r8;
	    saved_state->r9 = state->r9;
	    saved_state->r10 = state->r10;
	    saved_state->r11 = state->r11;
	    saved_state->r12 = state->r12;
	    saved_state->r13 = state->r13;
	    saved_state->r14 = state->r14;
	    saved_state->r15 = state->r15;
	    saved_state->rdi = state->rdi;
	    saved_state->rsi = state->rsi;
	    saved_state->rbp = state->rbp;
	    saved_state->rbx = state->rbx;
	    saved_state->rdx = state->rdx;
	    saved_state->rcx = state->rcx;
	    saved_state->rax = state->rax;
	    saved_state->isf.rsp = state->isf.rsp;
	    saved_state->isf.rip = state->isf.rip;

	    saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

	    /*
	     * User setting segment registers.
	     * Code and stack selectors have already been
	     * checked.  Others will be reset by 'sys'
	     * if they are not valid.
	     */
	    saved_state->isf.cs = state->isf.cs;
	    saved_state->isf.ss = state->isf.ss;
	    saved_state->fs = state->fs;
	    saved_state->gs = state->gs;

	    break;
	}

	case x86_FLOAT_STATE32:
	{
	    if (count != x86_FLOAT_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
	    if (count != x86_FLOAT_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if ( !thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    return fpu_set_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
	    x86_float_state_t	*state;

	    if (count != x86_FLOAT_STATE_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_float_state_t *)tstate;

	    if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		thread_is_64bit(thr_act)) {
		return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
	    }
	    if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		!thread_is_64bit(thr_act)) {
		return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
	    }
	    return(KERN_INVALID_ARGUMENT);
	}

	case OLD_i386_THREAD_STATE:
	case x86_THREAD_STATE32:
	{
	    if (count != x86_THREAD_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
	    if (count != x86_THREAD_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if ( !thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
	    x86_thread_state_t	*state;

	    if (count != x86_THREAD_STATE_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_thread_state_t *)tstate;

	    if (state->tsh.flavor == x86_THREAD_STATE64 && state->tsh.count == x86_THREAD_STATE64_COUNT &&
		thread_is_64bit(thr_act)) {
		return set_thread_state64(thr_act, &state->uts.ts64);
	    } else if (state->tsh.flavor == x86_THREAD_STATE32 && state->tsh.count == x86_THREAD_STATE32_COUNT &&
		       !thread_is_64bit(thr_act)) {
		return set_thread_state32(thr_act, &state->uts.ts32);
	    } else
		return(KERN_INVALID_ARGUMENT);

	    break;
	}
	case x86_DEBUG_STATE32:
	{
	    x86_debug_state32_t *state;
	    kern_return_t ret;

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_debug_state32_t *)tstate;

	    ret = set_debug_state32(thr_act, state);

	    return ret;
	}
	case x86_DEBUG_STATE64:
	{
	    x86_debug_state64_t *state;
	    kern_return_t ret;

	    if (!thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_debug_state64_t *)tstate;

	    ret = set_debug_state64(thr_act, state);

	    return ret;
	}
	case x86_DEBUG_STATE:
	{
	    x86_debug_state_t *state;
	    kern_return_t ret = KERN_INVALID_ARGUMENT;

	    if (count != x86_DEBUG_STATE_COUNT)
		return (KERN_INVALID_ARGUMENT);

	    state = (x86_debug_state_t *)tstate;
	    if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		thread_is_64bit(thr_act)) {
		ret = set_debug_state64(thr_act, &state->uds.ds64);
	    }
	    else
	    if (state->dsh.flavor == x86_DEBUG_STATE32 &&
		state->dsh.count == x86_DEBUG_STATE32_COUNT &&
		!thread_is_64bit(thr_act)) {
		ret = set_debug_state32(thr_act, &state->uds.ds32);
	    }
	    return ret;
	}
	default:
	    return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

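/*
 * Example (editor's sketch of a user-space debugger, not kernel code):
 * arming a local execute breakpoint in slot 0 via x86_DEBUG_STATE32.
 * Per dr7_is_valid() and set_debug_state32() above, only local-enable
 * bits may be set, R/W and LEN must be "00B" for an execute break, and
 * the address must lie below HIGH_MEM_BASE.
 *
 *	x86_debug_state32_t ds;
 *	bzero(&ds, sizeof(ds));
 *	ds.dr0 = breakpoint_addr;	// hypothetical target address
 *	ds.dr7 = 0x1;			// L0: local enable, slot 0
 *	kern_return_t kr = thread_set_state(thread, x86_DEBUG_STATE32,
 *	    (thread_state_t)&ds, x86_DEBUG_STATE32_COUNT);
 */
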

/*
 * thread_getstatus:
 *
 *	Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
    thread_t thr_act,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
	    if (*count < 3)
		return (KERN_INVALID_ARGUMENT);

	    tstate[0] = i386_THREAD_STATE;
	    tstate[1] = i386_FLOAT_STATE;
	    tstate[2] = i386_EXCEPTION_STATE;

	    *count = 3;
	    break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
	    if (*count < 4)
		return (KERN_INVALID_ARGUMENT);

	    tstate[0] = x86_THREAD_STATE;
	    tstate[1] = x86_FLOAT_STATE;
	    tstate[2] = x86_EXCEPTION_STATE;
	    tstate[3] = x86_DEBUG_STATE;

	    *count = 4;
	    break;
	}

	case x86_SAVED_STATE32:
	{
	    x86_saved_state32_t	*state;
	    x86_saved_state32_t	*saved_state;

	    if (*count < x86_SAVED_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_saved_state32_t *) tstate;
	    saved_state = USER_REGS32(thr_act);

	    /*
	     * First, copy everything:
	     */
	    *state = *saved_state;
	    state->ds = saved_state->ds & 0xffff;
	    state->es = saved_state->es & 0xffff;
	    state->fs = saved_state->fs & 0xffff;
	    state->gs = saved_state->gs & 0xffff;

	    *count = x86_SAVED_STATE32_COUNT;
	    break;
	}

	case x86_SAVED_STATE64:
	{
	    x86_saved_state64_t	*state;
	    x86_saved_state64_t	*saved_state;

	    if (*count < x86_SAVED_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_saved_state64_t *)tstate;
	    saved_state = USER_REGS64(thr_act);

	    /*
	     * First, copy everything:
	     */
	    *state = *saved_state;
	    state->fs = saved_state->fs & 0xffff;
	    state->gs = saved_state->gs & 0xffff;

	    *count = x86_SAVED_STATE64_COUNT;
	    break;
	}

	case x86_FLOAT_STATE32:
	{
	    if (*count < x86_FLOAT_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    *count = x86_FLOAT_STATE32_COUNT;

	    return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE64:
	{
	    if (*count < x86_FLOAT_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if ( !thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    *count = x86_FLOAT_STATE64_COUNT;

	    return fpu_get_fxstate(thr_act, tstate);
	}

	case x86_FLOAT_STATE:
	{
	    x86_float_state_t	*state;
	    kern_return_t	kret;

	    if (*count < x86_FLOAT_STATE_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_float_state_t *)tstate;

	    /*
	     * no need to bzero... currently
	     * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
	     */
	    if (thread_is_64bit(thr_act)) {
		state->fsh.flavor = x86_FLOAT_STATE64;
		state->fsh.count  = x86_FLOAT_STATE64_COUNT;

		kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64);
	    } else {
		state->fsh.flavor = x86_FLOAT_STATE32;
		state->fsh.count  = x86_FLOAT_STATE32_COUNT;

		kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32);
	    }
	    *count = x86_FLOAT_STATE_COUNT;

	    return(kret);
	}

	case OLD_i386_THREAD_STATE:
	case x86_THREAD_STATE32:
	{
	    if (*count < x86_THREAD_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    *count = x86_THREAD_STATE32_COUNT;

	    get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	    break;
	}

	case x86_THREAD_STATE64:
	{
	    if (*count < x86_THREAD_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if ( !thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    *count = x86_THREAD_STATE64_COUNT;

	    get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	    break;
	}

	case x86_THREAD_STATE:
	{
	    x86_thread_state_t	*state;

	    if (*count < x86_THREAD_STATE_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_thread_state_t *)tstate;

	    bzero((char *)state, sizeof(x86_thread_state_t));

	    if (thread_is_64bit(thr_act)) {
		state->tsh.flavor = x86_THREAD_STATE64;
		state->tsh.count  = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, &state->uts.ts64);
	    } else {
		state->tsh.flavor = x86_THREAD_STATE32;
		state->tsh.count  = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, &state->uts.ts32);
	    }
	    *count = x86_THREAD_STATE_COUNT;

	    break;
	}

	case x86_EXCEPTION_STATE32:
	{
	    if (*count < x86_EXCEPTION_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    *count = x86_EXCEPTION_STATE32_COUNT;

	    get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
	    break;
	}

	case x86_EXCEPTION_STATE64:
	{
	    if (*count < x86_EXCEPTION_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if ( !thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    *count = x86_EXCEPTION_STATE64_COUNT;

	    get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
	    break;
	}

	case x86_EXCEPTION_STATE:
	{
	    x86_exception_state_t	*state;

	    if (*count < x86_EXCEPTION_STATE_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_exception_state_t *)tstate;

	    bzero((char *)state, sizeof(x86_exception_state_t));

	    if (thread_is_64bit(thr_act)) {
		state->esh.flavor = x86_EXCEPTION_STATE64;
		state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, &state->ues.es64);
	    } else {
		state->esh.flavor = x86_EXCEPTION_STATE32;
		state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, &state->ues.es32);
	    }
	    *count = x86_EXCEPTION_STATE_COUNT;

	    break;
	}
	case x86_DEBUG_STATE32:
	{
	    if (*count < x86_DEBUG_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

	    *count = x86_DEBUG_STATE32_COUNT;

	    break;
	}
	case x86_DEBUG_STATE64:
	{
	    if (*count < x86_DEBUG_STATE64_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    if (!thread_is_64bit(thr_act))
		return(KERN_INVALID_ARGUMENT);

	    get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

	    *count = x86_DEBUG_STATE64_COUNT;

	    break;
	}
	case x86_DEBUG_STATE:
	{
	    x86_debug_state_t	*state;

	    if (*count < x86_DEBUG_STATE_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_debug_state_t *)tstate;

	    bzero(state, sizeof *state);

	    if (thread_is_64bit(thr_act)) {
		state->dsh.flavor = x86_DEBUG_STATE64;
		state->dsh.count  = x86_DEBUG_STATE64_COUNT;

		get_debug_state64(thr_act, &state->uds.ds64);
	    } else {
		state->dsh.flavor = x86_DEBUG_STATE32;
		state->dsh.count  = x86_DEBUG_STATE32_COUNT;

		get_debug_state32(thr_act, &state->uds.ds32);
	    }
	    *count = x86_DEBUG_STATE_COUNT;
	    break;
	}
	default:
	    return(KERN_INVALID_ARGUMENT);
    }

    return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{

    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || current_cpu_datap()->cpu_int_state == NULL)
	return KERN_FAILURE;

    switch (flavor) {

	case x86_THREAD_STATE32:
	{
	    x86_thread_state32_t	*state;
	    x86_saved_state32_t		*saved_state;

	    if (*count < x86_THREAD_STATE32_COUNT)
		return(KERN_INVALID_ARGUMENT);

	    state = (x86_thread_state32_t *)tstate;

	    assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
	    saved_state = saved_state32(current_cpu_datap()->cpu_int_state);
	    /*
	     * General registers.
	     */
	    state->eax = saved_state->eax;
	    state->ebx = saved_state->ebx;
	    state->ecx = saved_state->ecx;
	    state->edx = saved_state->edx;
	    state->edi = saved_state->edi;
	    state->esi = saved_state->esi;
	    state->ebp = saved_state->ebp;
	    state->esp = saved_state->uesp;
	    state->eflags = saved_state->efl;
	    state->eip = saved_state->eip;
	    state->cs = saved_state->cs;
	    state->ss = saved_state->ss;
	    state->ds = saved_state->ds & 0xffff;
	    state->es = saved_state->es & 0xffff;
	    state->fs = saved_state->fs & 0xffff;
	    state->gs = saved_state->gs & 0xffff;

	    *count = x86_THREAD_STATE32_COUNT;

	    return KERN_SUCCESS;
	}
	break; // for completeness

	case x86_THREAD_STATE:
	{
	    // wrap a 32 bit thread state into a 32/64bit clean thread state
	    x86_thread_state_t		*state;
	    x86_saved_state32_t		*saved_state;

	    if (*count < x86_THREAD_STATE_COUNT)
		return (KERN_INVALID_ARGUMENT);

	    state = (x86_thread_state_t *)tstate;
	    assert(is_saved_state32(current_cpu_datap()->cpu_int_state));
	    saved_state = saved_state32(current_cpu_datap()->cpu_int_state);

	    state->tsh.flavor = x86_THREAD_STATE32;
	    state->tsh.count = x86_THREAD_STATE32_COUNT;

	    /*
	     * General registers.
	     */

	    state->uts.ts32.eax = saved_state->eax;
	    state->uts.ts32.ebx = saved_state->ebx;
	    state->uts.ts32.ecx = saved_state->ecx;
	    state->uts.ts32.edx = saved_state->edx;
	    state->uts.ts32.edi = saved_state->edi;
	    state->uts.ts32.esi = saved_state->esi;
	    state->uts.ts32.ebp = saved_state->ebp;
	    state->uts.ts32.esp = saved_state->uesp;
	    state->uts.ts32.eflags = saved_state->efl;
	    state->uts.ts32.eip = saved_state->eip;
	    state->uts.ts32.cs = saved_state->cs;
	    state->uts.ts32.ss = saved_state->ss;
	    state->uts.ts32.ds = saved_state->ds & 0xffff;
	    state->uts.ts32.es = saved_state->es & 0xffff;
	    state->uts.ts32.fs = saved_state->fs & 0xffff;
	    state->uts.ts32.gs = saved_state->gs & 0xffff;

	    *count = x86_THREAD_STATE_COUNT;
	    return KERN_SUCCESS;
	}
	break;
    }
    return KERN_FAILURE;
}


/*
 * Initialize the machine-dependent state for a new thread.
 */
kern_return_t
machine_thread_create(
    thread_t		thread,
    task_t		task)
{
    pcb_t			pcb = &thread->machine.xxx_pcb;
    struct real_descriptor	*ldtp;
    pmap_paddr_t		paddr;
    x86_saved_state_t		*iss;

    inval_copy_windows(thread);

    thread->machine.physwindow_pte = 0;
    thread->machine.physwindow_busy = 0;

    if (task_has_64BitAddr(task)) {
	x86_sframe64_t	*sf64;

	sf64 = (x86_sframe64_t *)zalloc(iss_zone64);

	if (sf64 == NULL)
	    panic("iss_zone64");
	pcb->sf = (void *)sf64;

	bzero((char *)sf64, sizeof(x86_sframe64_t));

	iss = (x86_saved_state_t *) &sf64->ssf;
	iss->flavor = x86_SAVED_STATE64;
	/*
	 * Guarantee that the bootstrapped thread will be in user
	 * mode.
	 */
	iss->ss_64.isf.rflags = EFL_USER_SET;
	iss->ss_64.isf.cs = USER64_CS;
	iss->ss_64.isf.ss = USER_DS;
	iss->ss_64.fs = USER_DS;
	iss->ss_64.gs = USER_DS;
    } else {
	if (cpu_mode_is64bit()) {
	    x86_sframe_compat32_t	*sfc32;

	    sfc32 = (x86_sframe_compat32_t *)zalloc(iss_zone32);
	    if (sfc32 == NULL)
		panic("iss_zone32");
	    pcb->sf = (void *)sfc32;

	    bzero((char *)sfc32, sizeof(x86_sframe_compat32_t));

	    iss = (x86_saved_state_t *) &sfc32->ssf.iss32;
	    iss->flavor = x86_SAVED_STATE32;
#if DEBUG
	    {
		x86_saved_state_compat32_t *xssc;

		xssc = (x86_saved_state_compat32_t *) iss;
		xssc->pad_for_16byte_alignment[0] = 0x64326432;
		xssc->pad_for_16byte_alignment[1] = 0x64326432;
	    }
#endif
	} else {
	    x86_sframe32_t	*sf32;

	    sf32 = (x86_sframe32_t *)zalloc(iss_zone32);

	    if (sf32 == NULL)
		panic("iss_zone32");
	    pcb->sf = (void *)sf32;

	    bzero((char *)sf32, sizeof(x86_sframe32_t));

	    iss = (x86_saved_state_t *) &sf32->ssf;
	    iss->flavor = x86_SAVED_STATE32;
	}
	/*
	 * Guarantee that the bootstrapped thread will be in user
	 * mode.
	 */
	iss->ss_32.cs = USER_CS;
	iss->ss_32.ss = USER_DS;
	iss->ss_32.ds = USER_DS;
	iss->ss_32.es = USER_DS;
	iss->ss_32.fs = USER_DS;
	iss->ss_32.gs = USER_DS;
	iss->ss_32.efl = EFL_USER_SET;
    }
    pcb->iss = iss;

    thread->machine.pcb = pcb;
    simple_lock_init(&pcb->lock, 0);

    ldtp = (struct real_descriptor *)pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
    pcb->cthread_desc = ldtp[sel_idx(USER_DS)];
    pcb->uldt_desc = ldtp[sel_idx(USER_DS)];
    pcb->uldt_selector = 0;

    pcb->iss_pte0 = (uint64_t)pte_kernel_rw(kvtophys((vm_offset_t)pcb->iss));

    if (0 == (paddr = pa_to_pte(kvtophys((vm_offset_t)(pcb->iss) + PAGE_SIZE))))
	pcb->iss_pte1 = INTEL_PTE_INVALID;
    else
	pcb->iss_pte1 = (uint64_t)pte_kernel_rw(paddr);

    return(KERN_SUCCESS);
}

/*
 * Machine-dependent cleanup prior to destroying a thread.
 */
void
machine_thread_destroy(
    thread_t		thread)
{
    register pcb_t	pcb = thread->machine.pcb;

    assert(pcb);

    if (pcb->ifps != 0)
	fpu_free(pcb->ifps);
    if (pcb->sf != 0) {
	if (thread_is_64bit(thread))
	    zfree(iss_zone64, pcb->sf);
	else
	    zfree(iss_zone32, pcb->sf);
	pcb->sf = 0;
    }
    if (pcb->ids) {
	if (thread_is_64bit(thread))
	    zfree(ids_zone64, pcb->ids);
	else
	    zfree(ids_zone32, pcb->ids);
    }
    thread->machine.pcb = (pcb_t)0;
}

void
machine_thread_switch_addrmode(thread_t thread, int oldmode_is64bit)
{
    register pcb_t	pcb = thread->machine.pcb;

    assert(pcb);

    if (pcb->sf != 0) {
	if (oldmode_is64bit)
	    zfree(iss_zone64, pcb->sf);
	else
	    zfree(iss_zone32, pcb->sf);
    }
    machine_thread_create(thread, thread->task);

    /* If we're switching ourselves, reset the pcb addresses etc. */
    if (thread == current_thread())
	act_machine_switch_pcb(thread);
}


/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor.
 */
void
machine_set_current_thread( thread_t thread )
{
    current_cpu_datap()->cpu_active_thread = thread;
}

/*
 * This is called when a task is terminated.
 */
void
machine_thread_terminate_self(void)
{
    task_t self_task = current_task();
    if (self_task) {
	user_ldt_t user_ldt = self_task->i386_ldt;
	if (user_ldt != 0) {
	    self_task->i386_ldt = 0;
	    user_ldt_free(user_ldt);
	}
    }
}

void
act_machine_return(int code)
{
    /*
     * This code is called with nothing locked.
     * It also returns with nothing locked, if it returns.
     *
     * This routine terminates the current thread activation.
     * If this is the only activation associated with its
     * thread shuttle, then the entire thread (shuttle plus
     * activation) is terminated.
     */
    assert( code == KERN_TERMINATED );

    thread_terminate_self();

    /*NOTREACHED*/

    panic("act_machine_return(%d): TALKING ZOMBIE! (1)", code);
}


/*
 * Perform machine-dependent per-thread initializations.
 */
void
machine_thread_init(void)
{
    if (cpu_mode_is64bit()) {
	iss_zone64 = zinit(sizeof(x86_sframe64_t),
			THREAD_MAX * sizeof(x86_sframe64_t),
			THREAD_CHUNK * sizeof(x86_sframe64_t),
			"x86_64 saved state");

	assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
	iss_zone32 = zinit(sizeof(x86_sframe_compat32_t),
			THREAD_MAX * sizeof(x86_sframe_compat32_t),
			THREAD_CHUNK * sizeof(x86_sframe_compat32_t),
			"x86_32 saved state");

	ids_zone32 = zinit(sizeof(x86_debug_state32_t),
			THREAD_MAX * (sizeof(x86_debug_state32_t)),
			THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
			"x86_32 debug state");
	ids_zone64 = zinit(sizeof(x86_debug_state64_t),
			THREAD_MAX * sizeof(x86_debug_state64_t),
			THREAD_CHUNK * sizeof(x86_debug_state64_t),
			"x86_64 debug state");

    } else {
	iss_zone32 = zinit(sizeof(x86_sframe32_t),
			THREAD_MAX * sizeof(x86_sframe32_t),
			THREAD_CHUNK * sizeof(x86_sframe32_t),
			"x86 saved state");
	ids_zone32 = zinit(sizeof(x86_debug_state32_t),
			THREAD_MAX * (sizeof(x86_debug_state32_t)),
			THREAD_CHUNK * (sizeof(x86_debug_state32_t)),
			"x86 debug state");
    }
    fpu_module_init();
    iopb_init();
}

/*
 * Some routines for debugging activation code.
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
    ReturnHandler *rhp = thr_act->handlers;
    int counter = 0;

    printf("\t");
    while (rhp) {
	if (rhp == &thr_act->special_handler){
	    if (rhp->next)
		printf("[NON-Zero next ptr(%x)]", rhp->next);
	    printf("special_handler()->");
	    break;
	}
	printf("hdlr_%d(%x)->", counter, rhp->handler);
	rhp = rhp->next;
	if (++counter > 32) {
	    printf("Aborting: HUGE handler chain\n");
	    break;
	}
    }
    printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
    if (thr_act->machine.pcb == NULL)
	return;

    if (thread_is_64bit(thr_act)) {
	x86_saved_state64_t	*ssp;

	ssp = USER_REGS64(thr_act);

	panic("dump_regs: 64bit tasks not yet supported");

    } else {
	x86_saved_state32_t	*ssp;

	ssp = USER_REGS32(thr_act);

	/*
	 * Print out user register state.
	 */
	printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
	    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

	printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
	    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

	printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
    }
}

int
dump_act(thread_t thr_act)
{
    if (!thr_act)
	return(0);

    printf("thread(0x%x)(%d): task=%x(%d)\n",
	   thr_act, thr_act->ref_count,
	   thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	   thr_act->suspend_count, thr_act->user_stop_count,
	   thr_act->active, thr_act->ast);
    printf("\tpcb=%x\n", thr_act->machine.pcb);

    if (thr_act->kernel_stack) {
	vm_offset_t stack = thr_act->kernel_stack;

	printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
	       stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
	       STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
    }

    dump_handlers(thr_act);
    dump_regs(thr_act);
    return((int)thr_act);
}

user_addr_t
get_useraddr(void)
{
    thread_t thr_act = current_thread();

    if (thr_act->machine.pcb == NULL)
	return (0);

    if (thread_is_64bit(thr_act)) {
	x86_saved_state64_t	*iss64;

	iss64 = USER_REGS64(thr_act);

	return(iss64->isf.rip);
    } else {
	x86_saved_state32_t	*iss32;

	iss32 = USER_REGS32(thr_act);

	return(iss32->eip);
    }
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t	stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		 thread, thread->priority,
		 thread->sched_pri, 0,
		 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;

    return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
    thread_t		thread,
    vm_offset_t		stack)
{
    struct x86_kernel_state32 *statep;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		 thread, thread->priority,
		 thread->sched_pri, 0, 0);

    assert(stack);
    thread->kernel_stack = stack;

    statep = STACK_IKS(stack);
    statep->k_eip = (unsigned long) Thread_continue;
    statep->k_ebx = (unsigned long) thread_continue;
    statep->k_esp = (unsigned long) STACK_IEL(stack);

    return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
    vm_offset_t	stack;

    assert(new);
    assert(old);

    stack = old->kernel_stack;
    if (stack == old->reserved_stack) {
	assert(new->reserved_stack);
	old->reserved_stack = new->reserved_stack;
	new->reserved_stack = stack;
    }
    old->kernel_stack = 0;
    /*
     * A full call to machine_stack_attach() is unnecessary
     * because old stack is already initialized.
     */
    new->kernel_stack = stack;

    fpu_save_context(old);

    old->machine.specFlags &= ~OnProc;
    new->machine.specFlags |= OnProc;

    PMAP_SWITCH_CONTEXT(old, new, cpu_number());
    act_machine_switch_pcb(new);

    KERNEL_DEBUG_CONSTANT(
	MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
	old->reason, (int)new, old->sched_pri, new->sched_pri, 0);

    machine_set_current_thread(new);

    return;
}


struct x86_act_context32 {
    x86_saved_state32_t ss;
    x86_float_state32_t fs;
    x86_debug_state32_t ds;
};

struct x86_act_context64 {
    x86_saved_state64_t ss;
    x86_float_state64_t fs;
    x86_debug_state64_t ds;
};


void *
act_thread_csave(void)
{
    kern_return_t kret;
    mach_msg_type_number_t val;
    thread_t thr_act = current_thread();

    if (thread_is_64bit(thr_act)) {
	struct x86_act_context64 *ic64;

	ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

	if (ic64 == (struct x86_act_context64 *)NULL)
	    return((void *)0);

	val = x86_SAVED_STATE64_COUNT;
	kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
		(thread_state_t) &ic64->ss, &val);
	if (kret != KERN_SUCCESS) {
	    kfree(ic64, sizeof(struct x86_act_context64));
	    return((void *)0);
	}
	val = x86_FLOAT_STATE64_COUNT;
	kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
		(thread_state_t) &ic64->fs, &val);

	if (kret != KERN_SUCCESS) {
	    kfree(ic64, sizeof(struct x86_act_context64));
	    return((void *)0);
	}

	val = x86_DEBUG_STATE64_COUNT;
	kret = machine_thread_get_state(thr_act,
					x86_DEBUG_STATE64,
					(thread_state_t)&ic64->ds,
					&val);
	if (kret != KERN_SUCCESS) {
	    kfree(ic64, sizeof(struct x86_act_context64));
	    return((void *)0);
	}
	return(ic64);

    } else {
	struct x86_act_context32 *ic32;

	ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

	if (ic32 == (struct x86_act_context32 *)NULL)
	    return((void *)0);

	val = x86_SAVED_STATE32_COUNT;
	kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
		(thread_state_t) &ic32->ss, &val);
	if (kret != KERN_SUCCESS) {
	    kfree(ic32, sizeof(struct x86_act_context32));
	    return((void *)0);
	}
	val = x86_FLOAT_STATE32_COUNT;
	kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
		(thread_state_t) &ic32->fs, &val);
	if (kret != KERN_SUCCESS) {
	    kfree(ic32, sizeof(struct x86_act_context32));
	    return((void *)0);
	}

	val = x86_DEBUG_STATE32_COUNT;
	kret = machine_thread_get_state(thr_act,
					x86_DEBUG_STATE32,
					(thread_state_t)&ic32->ds,
					&val);
	if (kret != KERN_SUCCESS) {
	    kfree(ic32, sizeof(struct x86_act_context32));
	    return((void *)0);
	}
	return(ic32);
    }
}


void
act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL)
	return;

    if (thread_is_64bit(thr_act)) {
	struct x86_act_context64 *ic64;

	ic64 = (struct x86_act_context64 *)ctx;

	kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
		(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
	if (kret == KERN_SUCCESS) {
	    machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
		    (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
	}
	kfree(ic64, sizeof(struct x86_act_context64));
    } else {
	struct x86_act_context32 *ic32;

	ic32 = (struct x86_act_context32 *)ctx;

	kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
		(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
	if (kret == KERN_SUCCESS) {
	    kret = machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
		    (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
	    if (kret == KERN_SUCCESS && thr_act->machine.pcb->ids)
		machine_thread_set_state(thr_act,
			x86_DEBUG_STATE32,
			(thread_state_t)&ic32->ds,
			x86_DEBUG_STATE32_COUNT);
	}
	kfree(ic32, sizeof(struct x86_act_context32));
    }
}


void act_thread_cfree(__unused void *ctx)
{
    /* XXX - Unused */
}