/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);
extern void throttle_lowpri_io(int);
#endif

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);
/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    __unused unsigned int   count,
    mach_vm_offset_t        *user_stack,
    int                     *customstack,
    __unused boolean_t      is64bit
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;

        if (state25->esp) {
            *user_stack = state25->esp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK32;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;

        if (state25->rsp) {
            *user_stack = state25->rsp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK64;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
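
/*
 * Usage sketch (hypothetical caller, not part of this file): a thread
 * setup path holding a flavored state can ask whether the state supplies
 * an explicit stack, falling back to the platform default otherwise:
 *
 *    mach_vm_offset_t sp = 0;
 *    int custom = 0;
 *
 *    if (thread_userstack(thread, x86_THREAD_STATE64, tstate, count,
 *                         &sp, &custom, TRUE) == KERN_SUCCESS) {
 *        // custom == 1: tstate carried an explicit stack pointer;
 *        // custom == 0: sp is VM_USRSTACK64 and the caller must
 *        //              map a stack at the default location
 *    }
 */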

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
    mach_vm_offset_t *default_user_stack,
    boolean_t is64bit)
{
    if (is64bit) {
        *default_user_stack = VM_USRSTACK64;
    } else {
        *default_user_stack = VM_USRSTACK32;
    }
    return (KERN_SUCCESS);
}

kern_return_t
thread_entrypoint(
    __unused thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    __unused unsigned int   count,
    mach_vm_offset_t        *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;
        *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
        break;
    }
    }
    return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    pal_register_cache_state(child, DIRTY);

    if (thread_is_64bit(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}
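
/*
 * Editorial note: this implements the classic fork-return convention.
 * The child resumes with eax/rax = pid and edx/rdx = 1, while the parent
 * path leaves edx/rdx = 0; the user-mode fork() stub tests that flag and
 * returns 0 in the child. Clearing EFL_CF marks the trap as successful
 * under the carry-flag error convention used on the BSD syscall path.
 */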


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);

__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
    int args[machdep_call_count];
    int trapno;
    int nargs;
    const machdep_call_t *entry;
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs != 0) {
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
                   (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0], args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
        else {
            int error;
            uint32_t rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;    /* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

    throttle_lowpri_io(1);

    thread_exception_return();
    /* NOTREACHED */
}
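
/*
 * Argument pickup (editorial note): 32-bit machdep calls receive their
 * arguments on the user stack, laid out as for an ordinary cdecl call,
 * so the copyin above starts at uesp + sizeof(int) to skip the return
 * address slot:
 *
 *    user stack at trap:  [ return addr ][ arg0 ][ arg1 ] ...
 *
 * For entries flagged bsd_style, errors are reported errno-style: the
 * error code is placed in eax and EFL_CF is set so the user-level stub
 * can branch on the carry flag; success clears CF and returns rval.
 */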

__attribute__((noreturn))
void
machdep_syscall64(x86_saved_state_t *state)
{
    int trapno;
    const machdep_call_t *entry;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall64: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    case 2:
        regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

    throttle_lowpri_io(1);

    thread_exception_return();
    /* NOTREACHED */
}

#endif /* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};
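
/*
 * Editorial note: nine slots is the fixed upper bound on Mach trap
 * arguments here; mach_call_munger64() below asserts argc <= 9 and
 * spills anything past the six register arguments onto the user stack.
 */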

static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);


static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
{
    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
        return KERN_INVALID_ARGUMENT;
#if CONFIG_REQUIRES_U32_MUNGING
    trapp->mach_trap_arg_munge32(args);
#else
#error U32 mach traps on x86_64 kernel requires munging
#endif
    return KERN_SUCCESS;
}
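
/*
 * What the munger does (editorial sketch): the copyin above packs the
 * caller's 32-bit argument words into the front of the 64-bit args
 * structure; the per-trap munger then widens them in place into
 * syscall_arg_t slots, back to front so no word is overwritten before it
 * has been read. A hypothetical two-word munger, for illustration only:
 *
 *    void munge_two_words(struct mach_call_args *args)
 *    {
 *        uint32_t *in = (uint32_t *)args;
 *        args->arg2 = in[1];    // widen the last argument first
 *        args->arg1 = in[0];
 *    }
 */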


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
    int argc;
    int call_number;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t *regs;

    struct uthread *ut = get_bsdthread_info(current_thread());
    uthread_reset_proc_refcount(ut);

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    call_number = -(regs->eax);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        DEBUG_KPRINT_SYSCALL_MACH(
            "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            DEBUG_KPRINT_SYSCALL_MACH(
                "mach_call_munger: retval=0x%x\n", retval);

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        args.arg1, args.arg2, args.arg3, args.arg4, 0);

    retval = mach_call(&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);

    regs->eax = retval;

    throttle_lowpri_io(1);

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}
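
/*
 * Trap-number convention (editorial note): Mach traps enter with a
 * negative number in eax, while BSD syscalls use positive numbers; that
 * is why call_number is recovered as -(regs->eax) above. A user-space
 * stub for Mach trap N therefore loads -N into eax before trapping, and
 * an out-of-range or invalid number raises EXC_SYSCALL rather than
 * returning an error code.
 */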


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
    int call_number;
    int argc;
    mach_call_t mach_call;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state64_t *regs;

    struct uthread *ut = get_bsdthread_info(current_thread());
    uthread_reset_proc_refcount(ut);

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger64: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        int args_in_regs = MIN(6, argc);

        memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

        if (argc > 6) {
            int copyin_count;

            assert(argc <= 9);
            copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

            if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
                regs->rax = KERN_INVALID_ARGUMENT;

                thread_exception_return();
                /* NOTREACHED */
            }
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    regs->rax = (uint64_t)mach_call((void *)&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        regs->rax, 0, 0, 0, 0);

    throttle_lowpri_io(1);

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}
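
/*
 * 64-bit argument pickup (editorial note): the first six trap arguments
 * arrive in rdi, rsi, rdx, r10, r8, r9 (r10 stands in for rcx, which the
 * syscall instruction clobbers). The single memcpy from &regs->rdi works
 * because x86_saved_state64_t lays those registers out contiguously in
 * exactly that order. Arguments seven through nine, when present, are
 * read from the user stack just above the return address, hence the
 * copyin from rsp + sizeof(user_addr_t).
 */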


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t thread,
    mach_vm_address_t user_stack)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
    }
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info. Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t thread,
    int adjust)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}
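
/*
 * Usage sketch (hypothetical caller): a path that needs to push data onto
 * a thread's user stack can reserve space with a negative adjustment and
 * then copy out into the hole, e.g.
 *
 *    // reserve len bytes below the current user SP, then fill them
 *    user_addr_t new_sp = (user_addr_t)thread_adjuserstack(thread, -(int)len);
 *    (void)copyout(buf, new_sp, len);
 *
 * The "small (<2G) deltas" caveat exists because the adjustment is a
 * signed int applied to a 64-bit stack pointer.
 */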

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
    }
}


kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on) {
            iss32->efl |= EFL_TF;
            /* Ensure IRET */
            if (iss32->cs == SYSENTER_CS)
                iss32->cs = SYSENTER_TF_CS;
        }
        else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}
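
/*
 * Editorial note: single-stepping works by setting the trap flag (EFL_TF)
 * in the saved user flags so the CPU raises a debug exception after each
 * user instruction. The SYSENTER_CS -> SYSENTER_TF_CS swap forces the
 * 32-bit return path to use IRET rather than the fast SYSEXIT path,
 * since the fast path would not reliably deliver the single-step trap on
 * the first user instruction after the return.
 */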

void *
get_user_regs(thread_t th)
{
    pal_register_cache_state(th, DIRTY);
    return (USER_STATE(th));
}

void *
find_user_regs(thread_t thread)
{
    return get_user_regs(thread);
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state_t *find_kern_regs(thread_t);

x86_saved_state_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
          current_cpu_datap()->cpu_interrupt_level == 1)) {

        return current_cpu_datap()->cpu_int_state;
    } else {
        return NULL;
    }
}
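
/*
 * Editorial note on the condition above: cpu_int_state is returned only
 * when it represents a genuine kernel interrupt frame. The excluded
 * case, where cpu_int_state equals the thread's USER_STATE at interrupt
 * level 1, means the CPU was interrupted directly out of user mode, so
 * there is no kernel register state worth reporting and NULL is
 * returned instead.
 */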

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif