/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);
#endif

void *find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

extern void throttle_lowpri_io(int);
/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t   thread,
    int                 flavor,
    thread_state_t      tstate,
    __unused unsigned int count,
    mach_vm_offset_t    *user_stack,
    int                 *customstack
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;

        if (state25->esp) {
            *user_stack = state25->esp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK32;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;

        if (state25->rsp) {
            *user_stack = state25->rsp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK64;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}

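/*
 * thread_entrypoint:
 *
 * Return the user instruction pointer from the machine
 * dependent thread state info, falling back to VM_MIN_ADDRESS
 * (or VM_MIN_ADDRESS64) when no entry point is supplied.
 */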
kern_return_t
thread_entrypoint(
    __unused thread_t   thread,
    int                 flavor,
    thread_state_t      tstate,
    __unused unsigned int count,
    mach_vm_offset_t    *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;
        *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
        break;
    }
    }
    return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

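/*
 * Set up the syscall return state in the child thread after a fork:
 * eax/rax carries the pid, edx/rdx is set to 1 (which user space uses
 * to identify the child side of the fork), and the carry flag is
 * cleared to indicate success.
 */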
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    pal_register_cache_state(child, DIRTY);

    if (thread_is_64bit(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);


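/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call from a 32-bit user
 * thread. The call number arrives in eax; up to four int-sized
 * arguments are copied in from the user stack, skipping the
 * return address. Exits via thread_exception_return() and so
 * never returns to its caller.
 */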
void
machdep_syscall(x86_saved_state_t *state)
{
    int                 args[machdep_call_count];
    int                 trapno;
    int                 nargs;
    machdep_call_t      *entry;
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs != 0) {
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
                (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0], args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
        else {
            int         error;
            uint32_t    rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;    /* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}


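/*
 * machdep_syscall64:
 *
 * 64-bit variant of machdep_syscall. The call number arrives in
 * rax (masked by SYSCALL_NUMBER_MASK) and at most one argument is
 * passed, in rdi. Exits via thread_exception_return().
 */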
void
machdep_syscall64(x86_saved_state_t *state)
{
    int                 trapno;
    machdep_call_t      *entry;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall64: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}

#endif /* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

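/*
 * Canonical in-kernel argument block for a Mach trap: up to nine
 * arguments, each widened to a 64-bit slot. 32-bit user arguments
 * are packed into this layout by mach_call_arg_munger32() below.
 */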
struct mach_call_args {
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);


static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
    unsigned int args32[9];

    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
        return KERN_INVALID_ARGUMENT;

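    /*
     * The cases below intentionally fall through, so that exactly
     * nargs arguments end up widened into the 64-bit argument slots.
     */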
    switch (nargs) {
    case 9: args->arg9 = args32[8];
    case 8: args->arg8 = args32[7];
    case 7: args->arg7 = args32[6];
    case 6: args->arg6 = args32[5];
    case 5: args->arg5 = args32[4];
    case 4: args->arg4 = args32[3];
    case 3: args->arg3 = args32[2];
    case 2: args->arg2 = args32[1];
    case 1: args->arg1 = args32[0];
    }
    if (call_number == 90) {
        /* munge_l for mach_wait_until_trap() */
        args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1])) << 32)));
    }
    if (call_number == 93) {
        /* munge_wl for mk_timer_arm_trap() */
        args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2])) << 32)));
    }

    return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

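/*
 * mach_call_munger:
 *
 * Dispatch a Mach trap from a 32-bit user thread. Mach trap
 * numbers are negative in eax, so the value is negated to index
 * mach_trap_table; stack-based arguments are widened by
 * mach_call_arg_munger32() before the trap function is invoked.
 * Exits via thread_exception_return().
 */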
void
mach_call_munger(x86_saved_state_t *state)
{
    int argc;
    int call_number;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    call_number = -(regs->eax);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        DEBUG_KPRINT_SYSCALL_MACH(
            "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            DEBUG_KPRINT_SYSCALL_MACH(
                "mach_call_munger: retval=0x%x\n", retval);

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        args.arg1, args.arg2, args.arg3, args.arg4, 0);

    retval = mach_call(&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
    regs->eax = retval;

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *state);

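/*
 * mach_call_munger64:
 *
 * Dispatch a Mach trap from a 64-bit user thread. The first six
 * arguments arrive in rdi, rsi, rdx, r10, r8 and r9, which the
 * saved-state layout keeps contiguous ahead of v_arg6..v_arg8, so
 * &regs->rdi can be handed to the trap function as its argument
 * block; arguments past six are copied in from the user stack into
 * v_arg6 onward. Exits via thread_exception_return().
 */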
void
mach_call_munger64(x86_saved_state_t *state)
{
    int call_number;
    int argc;
    mach_call_t mach_call;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger64: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
        (call_number)) | DBG_FUNC_START,
        regs->rdi, regs->rsi,
        regs->rdx, regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 6) {
        int copyin_count;

        copyin_count = (argc - 6) * (int)sizeof(uint64_t);

        if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
            regs->rax = KERN_INVALID_ARGUMENT;

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
        (call_number)) | DBG_FUNC_END,
        regs->rax, 0, 0, 0, 0);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t            thread,
    mach_vm_address_t   user_stack)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
    }
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info. Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t    thread,
    int         adjust)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
    }
}

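/*
 * thread_setsinglestep:
 *
 * Enable or disable single-stepping by setting or clearing the
 * trap flag (EFL_TF) in the thread's saved user flags. For 32-bit
 * threads that entered via sysenter, the code selector is switched
 * to SYSENTER_TF_CS so the thread returns to user space via IRET,
 * presumably because the sysexit path would not restore TF.
 */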
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on) {
            iss32->efl |= EFL_TF;
            /* Ensure IRET */
            if (iss32->cs == SYSENTER_CS)
                iss32->cs = SYSENTER_TF_CS;
        } else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}


/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
    pal_register_cache_state(thread, DIRTY);
    return USER_STATE(thread);
}

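/*
 * Identical to find_user_regs(): mark the live register cache
 * dirty and return the saved user state.
 */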
void *
get_user_regs(thread_t th)
{
    pal_register_cache_state(th, DIRTY);
    return USER_STATE(th);
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state_t *find_kern_regs(thread_t);

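/*
 * Return the current CPU's interrupt-time saved state, or NULL if
 * the thread is not the current thread, no interrupt state exists,
 * or the only interrupt state is the user-mode state taken at
 * interrupt level 1 (i.e. there is no nested kernel context).
 */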
x86_saved_state_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
          current_cpu_datap()->cpu_interrupt_level == 1)) {

        return current_cpu_datap()->cpu_int_state;
    } else {
        return NULL;
    }
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif /* CONFIG_DTRACE */