/* osfmk/i386/bsd_i386.c — source recovered from a git-blame view of xnu-2422.115.4 */
1c79356b 1/*
6d2010ae 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28#ifdef MACH_BSD
1c79356b
A
29#include <mach_rt.h>
30#include <mach_debug.h>
31#include <mach_ldebug.h>
32
33#include <mach/kern_return.h>
91447636 34#include <mach/mach_traps.h>
1c79356b
A
35#include <mach/thread_status.h>
36#include <mach/vm_param.h>
1c79356b
A
37
38#include <kern/counters.h>
39#include <kern/cpu_data.h>
40#include <kern/mach_param.h>
41#include <kern/task.h>
42#include <kern/thread.h>
1c79356b
A
43#include <kern/sched_prim.h>
44#include <kern/misc_protos.h>
45#include <kern/assert.h>
b0d623f7 46#include <kern/debug.h>
1c79356b 47#include <kern/spl.h>
55e303ae 48#include <kern/syscall_sw.h>
1c79356b
A
49#include <ipc/ipc_port.h>
50#include <vm/vm_kern.h>
51#include <vm/pmap.h>
52
91447636 53#include <i386/cpu_number.h>
1c79356b
A
54#include <i386/eflags.h>
55#include <i386/proc_reg.h>
1c79356b
A
56#include <i386/tss.h>
57#include <i386/user_ldt.h>
58#include <i386/fpu.h>
1c79356b 59#include <i386/machdep_call.h>
91447636 60#include <i386/vmparam.h>
b0d623f7
A
61#include <i386/mp_desc.h>
62#include <i386/misc_protos.h>
63#include <i386/thread.h>
0c530ab8 64#include <i386/trap.h>
b0d623f7 65#include <i386/seg.h>
0c530ab8 66#include <mach/i386/syscall_sw.h>
9bccf70c 67#include <sys/syscall.h>
91447636 68#include <sys/kdebug.h>
0c530ab8 69#include <sys/errno.h>
91447636
A
70#include <../bsd/sys/sysent.h>
71
b0d623f7
A
72#ifdef MACH_BSD
73extern void mach_kauth_cred_uthread_update(void);
39236c6e 74extern void throttle_lowpri_io(int);
b0d623f7
A
75#endif
76
0c530ab8
A
77void * find_user_regs(thread_t);
78
1c79356b
A
79unsigned int get_msr_exportmask(void);
80
81unsigned int get_msr_nbits(void);
82
83unsigned int get_msr_rbits(void);
84
85/*
86 * thread_userstack:
87 *
88 * Return the user stack pointer from the machine
89 * dependent thread state info.
90 */
91kern_return_t
92thread_userstack(
2d21ac55
A
93 __unused thread_t thread,
94 int flavor,
95 thread_state_t tstate,
96 __unused unsigned int count,
6d2010ae 97 mach_vm_offset_t *user_stack,
2d21ac55
A
98 int *customstack
99)
1c79356b 100{
2d21ac55
A
101 if (customstack)
102 *customstack = 0;
4452a7af 103
2d21ac55
A
104 switch (flavor) {
105 case x86_THREAD_STATE32:
106 {
107 x86_thread_state32_t *state25;
108
109 state25 = (x86_thread_state32_t *) tstate;
110
6d2010ae 111 if (state25->esp) {
2d21ac55 112 *user_stack = state25->esp;
6d2010ae
A
113 if (customstack)
114 *customstack = 1;
115 } else {
2d21ac55 116 *user_stack = VM_USRSTACK32;
6d2010ae
A
117 if (customstack)
118 *customstack = 0;
119 }
2d21ac55
A
120 break;
121 }
0c530ab8 122
2d21ac55
A
123 case x86_THREAD_STATE64:
124 {
125 x86_thread_state64_t *state25;
126
127 state25 = (x86_thread_state64_t *) tstate;
128
6d2010ae 129 if (state25->rsp) {
2d21ac55 130 *user_stack = state25->rsp;
6d2010ae
A
131 if (customstack)
132 *customstack = 1;
133 } else {
2d21ac55 134 *user_stack = VM_USRSTACK64;
6d2010ae
A
135 if (customstack)
136 *customstack = 0;
137 }
2d21ac55
A
138 break;
139 }
1c79356b 140
2d21ac55
A
141 default:
142 return (KERN_INVALID_ARGUMENT);
143 }
0c530ab8 144
2d21ac55
A
145 return (KERN_SUCCESS);
146}
1c79356b 147
316670eb
A
148/*
149 * thread_userstackdefault:
150 *
151 * Return the default stack location for the
152 * thread, if otherwise unknown.
153 */
154kern_return_t
155thread_userstackdefault(
156 thread_t thread,
157 mach_vm_offset_t *default_user_stack)
158{
159 if (thread_is_64bit(thread)) {
160 *default_user_stack = VM_USRSTACK64;
161 } else {
162 *default_user_stack = VM_USRSTACK32;
163 }
164 return (KERN_SUCCESS);
165}
0c530ab8 166
1c79356b
A
167kern_return_t
168thread_entrypoint(
2d21ac55
A
169 __unused thread_t thread,
170 int flavor,
171 thread_state_t tstate,
172 __unused unsigned int count,
173 mach_vm_offset_t *entry_point
174)
1c79356b 175{
2d21ac55 176 /*
0c530ab8
A
177 * Set a default.
178 */
2d21ac55
A
179 if (*entry_point == 0)
180 *entry_point = VM_MIN_ADDRESS;
181
0c530ab8 182 switch (flavor) {
2d21ac55
A
183 case x86_THREAD_STATE32:
184 {
185 x86_thread_state32_t *state25;
0c530ab8 186
2d21ac55
A
187 state25 = (i386_thread_state_t *) tstate;
188 *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
189 break;
190 }
6601e61a 191
2d21ac55
A
192 case x86_THREAD_STATE64:
193 {
194 x86_thread_state64_t *state25;
0c530ab8 195
2d21ac55
A
196 state25 = (x86_thread_state64_t *) tstate;
197 *entry_point = state25->rip ? state25->rip: VM_MIN_ADDRESS64;
198 break;
199 }
200 }
201 return (KERN_SUCCESS);
202}
1c79356b 203
1c79356b
A
204/*
205 * FIXME - thread_set_child
206 */
207
91447636 208void thread_set_child(thread_t child, int pid);
1c79356b 209void
91447636 210thread_set_child(thread_t child, int pid)
1c79356b 211{
6d2010ae 212 pal_register_cache_state(child, DIRTY);
b0d623f7 213
2d21ac55
A
214 if (thread_is_64bit(child)) {
215 x86_saved_state64_t *iss64;
0c530ab8 216
0c530ab8
A
217 iss64 = USER_REGS64(child);
218
219 iss64->rax = pid;
220 iss64->rdx = 1;
221 iss64->isf.rflags &= ~EFL_CF;
222 } else {
2d21ac55
A
223 x86_saved_state32_t *iss32;
224
0c530ab8
A
225 iss32 = USER_REGS32(child);
226
227 iss32->eax = pid;
228 iss32->edx = 1;
229 iss32->efl &= ~EFL_CF;
230 }
1c79356b 231}
0c530ab8
A
232
233
1c79356b 234
1c79356b
A
235/*
236 * System Call handling code
237 */
238
91447636
A
239extern long fuword(vm_offset_t);
240
1c79356b 241
1c79356b
A
242
243void
0c530ab8 244machdep_syscall(x86_saved_state_t *state)
8f6c56a5 245{
0c530ab8 246 int args[machdep_call_count];
2d21ac55 247 int trapno;
0c530ab8 248 int nargs;
316670eb 249 const machdep_call_t *entry;
0c530ab8
A
250 x86_saved_state32_t *regs;
251
252 assert(is_saved_state32(state));
253 regs = saved_state32(state);
1c79356b 254
0c530ab8
A
255 trapno = regs->eax;
256#if DEBUG_TRACE
257 kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
258#endif
1c79356b 259
b0d623f7
A
260 DEBUG_KPRINT_SYSCALL_MDEP(
261 "machdep_syscall: trapno=%d\n", trapno);
262
0c530ab8 263 if (trapno < 0 || trapno >= machdep_call_count) {
2d21ac55 264 regs->eax = (unsigned int)kern_invalid(NULL);
1c79356b 265
91447636
A
266 thread_exception_return();
267 /* NOTREACHED */
268 }
0c530ab8
A
269 entry = &machdep_call_table[trapno];
270 nargs = entry->nargs;
91447636 271
0c530ab8 272 if (nargs != 0) {
2d21ac55
A
273 if (copyin((user_addr_t) regs->uesp + sizeof (int),
274 (char *) args, (nargs * sizeof (int)))) {
275 regs->eax = KERN_INVALID_ADDRESS;
6601e61a 276
0c530ab8
A
277 thread_exception_return();
278 /* NOTREACHED */
279 }
1c79356b 280 }
0c530ab8 281 switch (nargs) {
2d21ac55
A
282 case 0:
283 regs->eax = (*entry->routine.args_0)();
0c530ab8 284 break;
2d21ac55 285 case 1:
0c530ab8
A
286 regs->eax = (*entry->routine.args_1)(args[0]);
287 break;
2d21ac55
A
288 case 2:
289 regs->eax = (*entry->routine.args_2)(args[0],args[1]);
0c530ab8 290 break;
2d21ac55
A
291 case 3:
292 if (!entry->bsd_style)
293 regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
0c530ab8 294 else {
2d21ac55
A
295 int error;
296 uint32_t rval;
0c530ab8 297
2d21ac55 298 error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
0c530ab8 299 if (error) {
2d21ac55 300 regs->eax = error;
0c530ab8
A
301 regs->efl |= EFL_CF; /* carry bit */
302 } else {
2d21ac55
A
303 regs->eax = rval;
304 regs->efl &= ~EFL_CF;
0c530ab8
A
305 }
306 }
307 break;
2d21ac55 308 case 4:
0c530ab8
A
309 regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
310 break;
1c79356b 311
2d21ac55
A
312 default:
313 panic("machdep_syscall: too many args");
6601e61a 314 }
0c530ab8 315 if (current_thread()->funnel_lock)
2d21ac55 316 (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
6601e61a 317
b0d623f7
A
318 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);
319
39236c6e 320 throttle_lowpri_io(1);
593a1d5f 321
0c530ab8
A
322 thread_exception_return();
323 /* NOTREACHED */
1c79356b
A
324}
325
326
327void
0c530ab8 328machdep_syscall64(x86_saved_state_t *state)
1c79356b 329{
2d21ac55 330 int trapno;
316670eb 331 const machdep_call_t *entry;
0c530ab8 332 x86_saved_state64_t *regs;
1c79356b 333
0c530ab8
A
334 assert(is_saved_state64(state));
335 regs = saved_state64(state);
1c79356b 336
b0d623f7
A
337 trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);
338
339 DEBUG_KPRINT_SYSCALL_MDEP(
340 "machdep_syscall64: trapno=%d\n", trapno);
1c79356b 341
0c530ab8 342 if (trapno < 0 || trapno >= machdep_call_count) {
2d21ac55 343 regs->rax = (unsigned int)kern_invalid(NULL);
1c79356b 344
0c530ab8
A
345 thread_exception_return();
346 /* NOTREACHED */
1c79356b 347 }
0c530ab8 348 entry = &machdep_call_table64[trapno];
1c79356b 349
0c530ab8 350 switch (entry->nargs) {
2d21ac55
A
351 case 0:
352 regs->rax = (*entry->routine.args_0)();
6601e61a 353 break;
2d21ac55 354 case 1:
0c530ab8 355 regs->rax = (*entry->routine.args64_1)(regs->rdi);
55e303ae 356 break;
2d21ac55
A
357 default:
358 panic("machdep_syscall64: too many args");
55e303ae 359 }
0c530ab8 360 if (current_thread()->funnel_lock)
2d21ac55 361 (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
6601e61a 362
b0d623f7
A
363 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);
364
39236c6e 365 throttle_lowpri_io(1);
593a1d5f 366
0c530ab8
A
367 thread_exception_return();
368 /* NOTREACHED */
1c79356b
A
369}
370
0c530ab8 371#endif /* MACH_BSD */
4452a7af 372
21362eb3 373
0c530ab8 374typedef kern_return_t (*mach_call_t)(void *);
4452a7af 375
0c530ab8
A
376struct mach_call_args {
377 syscall_arg_t arg1;
378 syscall_arg_t arg2;
379 syscall_arg_t arg3;
380 syscall_arg_t arg4;
381 syscall_arg_t arg5;
382 syscall_arg_t arg6;
383 syscall_arg_t arg7;
384 syscall_arg_t arg8;
385 syscall_arg_t arg9;
386};
4452a7af 387
0c530ab8 388static kern_return_t
39236c6e 389mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
4452a7af 390
6601e61a 391
0c530ab8 392static kern_return_t
39236c6e 393mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
0c530ab8 394{
39236c6e 395 if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
2d21ac55 396 return KERN_INVALID_ARGUMENT;
39236c6e 397 trapp->mach_trap_arg_munge32(NULL, args);
0c530ab8 398 return KERN_SUCCESS;
6601e61a 399}
4452a7af
A
400
401
2d21ac55 402__private_extern__ void mach_call_munger(x86_saved_state_t *state);
0c530ab8 403
b0d623f7
A
404extern const char *mach_syscall_name_table[];
405
0c530ab8
A
406void
407mach_call_munger(x86_saved_state_t *state)
4452a7af 408{
4452a7af 409 int argc;
0c530ab8 410 int call_number;
4452a7af 411 mach_call_t mach_call;
6601e61a
A
412 kern_return_t retval;
413 struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
0c530ab8
A
414 x86_saved_state32_t *regs;
415
416 assert(is_saved_state32(state));
417 regs = saved_state32(state);
418
419 call_number = -(regs->eax);
b0d623f7
A
420
421 DEBUG_KPRINT_SYSCALL_MACH(
422 "mach_call_munger: code=%d(%s)\n",
423 call_number, mach_syscall_name_table[call_number]);
0c530ab8
A
424#if DEBUG_TRACE
425 kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
426#endif
427
428 if (call_number < 0 || call_number >= mach_trap_count) {
2d21ac55 429 i386_exception(EXC_SYSCALL, call_number, 1);
0c530ab8
A
430 /* NOTREACHED */
431 }
432 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
2d21ac55 433
0c530ab8 434 if (mach_call == (mach_call_t)kern_invalid) {
b0d623f7
A
435 DEBUG_KPRINT_SYSCALL_MACH(
436 "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
2d21ac55 437 i386_exception(EXC_SYSCALL, call_number, 1);
0c530ab8
A
438 /* NOTREACHED */
439 }
0c530ab8 440
2d21ac55 441 argc = mach_trap_table[call_number].mach_trap_arg_count;
0c530ab8 442 if (argc) {
39236c6e 443 retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
0c530ab8 444 if (retval != KERN_SUCCESS) {
2d21ac55
A
445 regs->eax = retval;
446
b0d623f7
A
447 DEBUG_KPRINT_SYSCALL_MACH(
448 "mach_call_munger: retval=0x%x\n", retval);
449
0c530ab8
A
450 thread_exception_return();
451 /* NOTREACHED */
452 }
4452a7af 453 }
b0d623f7
A
454
455#ifdef MACH_BSD
456 mach_kauth_cred_uthread_update();
457#endif
316670eb
A
458
459 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
460 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
461 args.arg1, args.arg2, args.arg3, args.arg4, 0);
2d21ac55 462
0c530ab8
A
463 retval = mach_call(&args);
464
b0d623f7
A
465 DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
466
316670eb
A
467 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
468 MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
469 retval, 0, 0, 0, 0);
470
0c530ab8 471 regs->eax = retval;
2d21ac55 472
39236c6e 473 throttle_lowpri_io(1);
593a1d5f 474
0c530ab8
A
475 thread_exception_return();
476 /* NOTREACHED */
477}
478
479
2d21ac55 480__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);
0c530ab8 481
0c530ab8
A
482void
483mach_call_munger64(x86_saved_state_t *state)
484{
485 int call_number;
486 int argc;
487 mach_call_t mach_call;
488 x86_saved_state64_t *regs;
489
490 assert(is_saved_state64(state));
491 regs = saved_state64(state);
492
b0d623f7
A
493 call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);
494
495 DEBUG_KPRINT_SYSCALL_MACH(
496 "mach_call_munger64: code=%d(%s)\n",
497 call_number, mach_syscall_name_table[call_number]);
0c530ab8 498
316670eb
A
499 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
500 MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_START,
501 regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);
6601e61a 502
0c530ab8
A
503 if (call_number < 0 || call_number >= mach_trap_count) {
504 i386_exception(EXC_SYSCALL, regs->rax, 1);
505 /* NOTREACHED */
506 }
6601e61a 507 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
6601e61a 508
0c530ab8
A
509 if (mach_call == (mach_call_t)kern_invalid) {
510 i386_exception(EXC_SYSCALL, regs->rax, 1);
511 /* NOTREACHED */
512 }
513 argc = mach_trap_table[call_number].mach_trap_arg_count;
514
515 if (argc > 6) {
516 int copyin_count;
517
b0d623f7 518 copyin_count = (argc - 6) * (int)sizeof(uint64_t);
0c530ab8
A
519
520 if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
521 regs->rax = KERN_INVALID_ARGUMENT;
522
523 thread_exception_return();
524 /* NOTREACHED */
525 }
526 }
b0d623f7
A
527
528#ifdef MACH_BSD
529 mach_kauth_cred_uthread_update();
530#endif
531
0c530ab8
A
532 regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));
533
b0d623f7
A
534 DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);
535
316670eb
A
536 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
537 MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
538 regs->rax, 0, 0, 0, 0);
91447636 539
39236c6e 540 throttle_lowpri_io(1);
593a1d5f 541
0c530ab8
A
542 thread_exception_return();
543 /* NOTREACHED */
91447636
A
544}
545
0c530ab8 546
91447636
A
547/*
548 * thread_setuserstack:
549 *
550 * Sets the user stack pointer into the machine
551 * dependent thread state info.
552 */
553void
554thread_setuserstack(
555 thread_t thread,
556 mach_vm_address_t user_stack)
557{
6d2010ae 558 pal_register_cache_state(thread, DIRTY);
2d21ac55
A
559 if (thread_is_64bit(thread)) {
560 x86_saved_state64_t *iss64;
0c530ab8
A
561
562 iss64 = USER_REGS64(thread);
5d5c5d0d 563
0c530ab8
A
564 iss64->isf.rsp = (uint64_t)user_stack;
565 } else {
2d21ac55
A
566 x86_saved_state32_t *iss32;
567
0c530ab8
A
568 iss32 = USER_REGS32(thread);
569
b0d623f7 570 iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
0c530ab8 571 }
91447636
A
572}
573
574/*
575 * thread_adjuserstack:
576 *
577 * Returns the adjusted user stack pointer from the machine
578 * dependent thread state info. Used for small (<2G) deltas.
579 */
580uint64_t
581thread_adjuserstack(
582 thread_t thread,
583 int adjust)
584{
6d2010ae 585 pal_register_cache_state(thread, DIRTY);
2d21ac55
A
586 if (thread_is_64bit(thread)) {
587 x86_saved_state64_t *iss64;
5d5c5d0d 588
0c530ab8
A
589 iss64 = USER_REGS64(thread);
590
591 iss64->isf.rsp += adjust;
592
593 return iss64->isf.rsp;
594 } else {
2d21ac55
A
595 x86_saved_state32_t *iss32;
596
0c530ab8
A
597 iss32 = USER_REGS32(thread);
598
599 iss32->uesp += adjust;
600
601 return CAST_USER_ADDR_T(iss32->uesp);
602 }
91447636
A
603}
604
605/*
606 * thread_setentrypoint:
607 *
608 * Sets the user PC into the machine
609 * dependent thread state info.
610 */
611void
0c530ab8 612thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
4452a7af 613{
6d2010ae 614 pal_register_cache_state(thread, DIRTY);
2d21ac55
A
615 if (thread_is_64bit(thread)) {
616 x86_saved_state64_t *iss64;
4452a7af 617
0c530ab8
A
618 iss64 = USER_REGS64(thread);
619
620 iss64->isf.rip = (uint64_t)entry;
621 } else {
2d21ac55
A
622 x86_saved_state32_t *iss32;
623
0c530ab8
A
624 iss32 = USER_REGS32(thread);
625
b0d623f7 626 iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
0c530ab8
A
627 }
628}
629
630
2d21ac55 631kern_return_t
0c530ab8
A
632thread_setsinglestep(thread_t thread, int on)
633{
6d2010ae 634 pal_register_cache_state(thread, DIRTY);
2d21ac55
A
635 if (thread_is_64bit(thread)) {
636 x86_saved_state64_t *iss64;
0c530ab8
A
637
638 iss64 = USER_REGS64(thread);
639
640 if (on)
2d21ac55 641 iss64->isf.rflags |= EFL_TF;
0c530ab8 642 else
2d21ac55 643 iss64->isf.rflags &= ~EFL_TF;
0c530ab8 644 } else {
2d21ac55
A
645 x86_saved_state32_t *iss32;
646
0c530ab8
A
647 iss32 = USER_REGS32(thread);
648
b0d623f7 649 if (on) {
2d21ac55 650 iss32->efl |= EFL_TF;
b0d623f7
A
651 /* Ensure IRET */
652 if (iss32->cs == SYSENTER_CS)
653 iss32->cs = SYSENTER_TF_CS;
654 }
0c530ab8 655 else
2d21ac55 656 iss32->efl &= ~EFL_TF;
0c530ab8 657 }
2d21ac55
A
658
659 return (KERN_SUCCESS);
0c530ab8
A
660}
661
662
663
664/* XXX this should be a struct savearea so that CHUD will work better on x86 */
665void *
2d21ac55 666find_user_regs(thread_t thread)
0c530ab8 667{
6d2010ae 668 pal_register_cache_state(thread, DIRTY);
0c530ab8
A
669 return USER_STATE(thread);
670}
91447636 671
2d21ac55
A
672void *
673get_user_regs(thread_t th)
674{
6d2010ae
A
675 pal_register_cache_state(th, DIRTY);
676 return(USER_STATE(th));
2d21ac55
A
677}
678
679#if CONFIG_DTRACE
680/*
681 * DTrace would like to have a peek at the kernel interrupt state, if available.
682 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
683 */
b0d623f7 684x86_saved_state_t *find_kern_regs(thread_t);
2d21ac55 685
b0d623f7 686x86_saved_state_t *
2d21ac55
A
687find_kern_regs(thread_t thread)
688{
689 if (thread == current_thread() &&
690 NULL != current_cpu_datap()->cpu_int_state &&
691 !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
692 current_cpu_datap()->cpu_interrupt_level == 1)) {
693
b0d623f7 694 return current_cpu_datap()->cpu_int_state;
2d21ac55
A
695 } else {
696 return NULL;
697 }
698}
699
700vm_offset_t dtrace_get_cpu_int_stack_top(void);
701
702vm_offset_t
703dtrace_get_cpu_int_stack_top(void)
704{
705 return current_cpu_datap()->cpu_int_stack_top;
706}
707#endif