/*
 * Source: apple/xnu — bsd/dev/arm/systemcalls.c
 * (blob 5ac5fcde257e2be9e106106cbb99116c0ec775a3, via git.saurik.com mirror)
 */
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 */
4
5 #include <kern/task.h>
6 #include <kern/thread.h>
7 #include <kern/assert.h>
8 #include <kern/clock.h>
9 #include <kern/locks.h>
10 #include <kern/sched_prim.h>
11 #include <mach/machine/thread_status.h>
12 #include <mach/thread_act.h>
13 #include <machine/machine_routines.h>
14 #include <arm/thread.h>
15 #include <arm/proc_reg.h>
16 #include <pexpert/pexpert.h>
17
18 #include <sys/kernel.h>
19 #include <sys/vm.h>
20 #include <sys/proc_internal.h>
21 #include <sys/syscall.h>
22 #include <sys/systm.h>
23 #include <sys/user.h>
24 #include <sys/errno.h>
25 #include <sys/kdebug.h>
26 #include <sys/sysent.h>
27 #include <sys/sysproto.h>
28 #include <sys/kauth.h>
29
30 #include <security/audit/audit.h>
31
32 #if CONFIG_DTRACE
33 extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
34 extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
35 #endif /* CONFIG_DTRACE */
36
37 extern void
38 unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
39 struct uthread * uthread, struct proc * proc);
40
41 static int arm_get_syscall_args(uthread_t, struct arm_saved_state *, struct sysent *);
42 static int arm_get_u32_syscall_args(uthread_t, arm_saved_state32_t *, struct sysent *);
43 static void arm_prepare_u32_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int);
44 static void arm_prepare_syscall_return(struct sysent *, struct arm_saved_state *, uthread_t, int);
45 static int arm_get_syscall_number(struct arm_saved_state *);
46 static void arm_trace_unix_syscall(int, struct arm_saved_state *);
47 static void arm_clear_syscall_error(struct arm_saved_state *);
48 #define save_r0 r[0]
49 #define save_r1 r[1]
50 #define save_r2 r[2]
51 #define save_r3 r[3]
52 #define save_r4 r[4]
53 #define save_r5 r[5]
54 #define save_r6 r[6]
55 #define save_r7 r[7]
56 #define save_r8 r[8]
57 #define save_r9 r[9]
58 #define save_r10 r[10]
59 #define save_r11 r[11]
60 #define save_r12 r[12]
61 #define save_r13 r[13]
62
63 #if COUNT_SYSCALLS
64 __XNU_PRIVATE_EXTERN int do_count_syscalls = 1;
65 __XNU_PRIVATE_EXTERN int syscalls_log[SYS_MAXSYSCALL];
66 #endif
67
68 #define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || \
69 ((code) == SYS_kdebug_trace64) || \
70 ((code) == SYS_kdebug_trace_string))
71
/*
 * Function: unix_syscall
 *
 * BSD system call entry point for ARM. Decodes the syscall number from
 * the saved register state, marshals arguments into the uthread, invokes
 * the sysent handler, and writes results/errors back into the trap frame.
 *
 * Inputs:	state - pointer to saved register state (Process Control Block)
 *		thread_act - calling thread (unused here; current_thread() is used)
 *		uthread - per-thread BSD state for the caller
 *		proc - BSD process of the caller
 *
 * Outputs:	none; on __arm__ this routine exits via
 *		thread_exception_return() and does not return.
 */
#ifdef __arm__
__attribute__((noreturn))
#endif
void
unix_syscall(
	struct arm_saved_state * state,
	__unused thread_t thread_act,
	struct uthread * uthread,
	struct proc * proc)
{
	struct sysent *callp;
	int error;
	unsigned short code;
	pid_t pid;

#if defined(__arm__)
	/* An AArch32 kernel only ever sees 32-bit saved state. */
	assert(is_saved_state32(state));
#endif

	uthread_reset_proc_refcount(uthread);

	code = arm_get_syscall_number(state);

#define unix_syscall_kprintf(x...)	/* kprintf("unix_syscall: " x) */

#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
	/* Emit the DBG_FUNC_START event, but never for kdebug_trace itself
	 * (that would recurse into the trace stream). */
	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		arm_trace_unix_syscall(code, state);
	}
#endif

	/* In a vfork child, act on behalf of the current (parent) proc. */
	if ((uthread->uu_flag & UT_VFORK))
		proc = current_proc();

	/* Out-of-range codes are routed to the SYS_invalid (enosys) entry. */
	callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

	/*
	 * sy_narg is inaccurate on ARM if a 64 bit parameter is specified. Since user_addr_t
	 * is currently a 32 bit type, this is really a long word count. See rdar://problem/6104668.
	 */
	if (callp->sy_narg != 0) {
		if (arm_get_syscall_args(uthread, state, callp) != 0) {
			/* Too many arguments, or something failed */
			unix_syscall_kprintf("arm_get_syscall_args failed.\n");
			callp = &sysent[SYS_invalid];
		}
	}

	/* Block thread cancellation while inside the syscall handler. */
	uthread->uu_flag |= UT_NOTCANCELPT;
	uthread->syscall_code = code;

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile, if we set it to regs->save_r4 here the child
	 * will have parents r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * ARM runtime will call cerror if the carry bit is set after a
	 * system call, so clear it here for the common case of success.
	 */
	arm_clear_syscall_error(state);

#if COUNT_SYSCALLS
	if (do_count_syscalls > 0) {
		syscalls_log[code]++;
	}
#endif
	/* Cache the pid now; the handler (e.g. exit) may tear down proc state. */
	pid = proc_pid(proc);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	unix_syscall_kprintf("code %d (pid %d - %s, tid %lld)\n", code,
		pid, proc->p_comm, thread_tid(current_thread()));

	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	/* Dispatch to the handler; results land in uu_rval, errno in error. */
	error = (*(callp->sy_call)) (proc, &uthread->uu_arg[0], &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

	unix_syscall_kprintf("code %d, error %d, results %x, %x (pid %d - %s, tid %lld)\n", code, error,
		uthread->uu_rval[0], uthread->uu_rval[1],
		pid, get_bsdtask_info(current_task()) ? proc->p_comm : "unknown" , thread_tid(current_thread()));

#ifdef JOE_DEBUG
	if (uthread->uu_iocount) {
		printf("system call returned with uu_iocount != 0");
	}
#endif
#if CONFIG_DTRACE
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
	/* The handler must not leak a thread allocation name. */
	kern_allocation_name_t
	prior __assert_only = thread_set_allocation_name(NULL);
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	/* Copy uu_rval / error into the trap frame per the return-type ABI. */
	arm_prepare_syscall_return(callp, state, uthread, error);

	uthread->uu_flag &= ~UT_NOTCANCELPT;
	uthread->syscall_code = 0;

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0);
	}
#endif

#if PROC_REF_DEBUG
	/* All proc refs taken during the syscall must have been dropped. */
	if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

#ifdef __arm__
	thread_exception_return();
#endif
}
215
/*
 * unix_syscall_return: completion path for system calls that park and
 * resume later (e.g. via continuations). Re-derives the thread, proc,
 * uthread and saved register state, then performs the same return-side
 * bookkeeping as unix_syscall() before exiting to user space.
 *
 * error: errno-style result to deliver (0, ERESTART, EJUSTRETURN, ...).
 * Does not return (thread_exception_return()).
 */
void
unix_syscall_return(int error)
{
	thread_t thread_act;
	struct uthread *uthread;
	struct proc *proc;
	struct arm_saved_state *regs;
	unsigned short code;
	struct sysent *callp;

#define unix_syscall_return_kprintf(x...)	/* kprintf("unix_syscall_return: " x) */

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);
	/* The original syscall number was stashed by unix_syscall(). */
	code = uthread->syscall_code;
	callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

#if CONFIG_DTRACE
	if (callp->sy_call == dtrace_systrace_syscall)
		dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
	/* The handler must not leak a thread allocation name. */
	kern_allocation_name_t
	prior __assert_only = thread_set_allocation_name(NULL);
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

	/*
	 * Get index into sysent table
	 */
	arm_prepare_syscall_return(callp, regs, uthread, error);

	uthread->uu_flag &= ~UT_NOTCANCELPT;
	uthread->syscall_code = 0;

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST)
	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], proc->p_pid, 0);
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
278
/*
 * Write a syscall result into a 32-bit trap frame.
 *
 * ERESTART:	back pc up over the (4-byte) trap instruction so the
 *		syscall is re-issued on return to user space.
 * EJUSTRETURN:	leave the registers exactly as the handler set them.
 * other error:	error code in r0, carry flag set so the user-space stub
 *		branches to cerror.
 * success:	uu_rval[0]/uu_rval[1] in r0/r1 (64-bit results use both).
 */
static void
arm_prepare_u32_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
{
	assert(is_saved_state32(regs));

	arm_saved_state32_t *ss32 = saved_state32(regs);

	if (error == ERESTART) {
		ss32->pc -= 4;
	} else if (error != EJUSTRETURN) {
		if (error) {
			ss32->save_r0 = error;
			ss32->save_r1 = 0;
			/* set the carry bit to execute cerror routine */
			ss32->cpsr |= PSR_CF;
			unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
			case _SYSCALL_RET_UINT_T:
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
				/* All return types fit in the r0/r1 pair on ILP32. */
				ss32->save_r0 = uthread->uu_rval[0];
				ss32->save_r1 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_NONE:
				ss32->save_r0 = 0;
				ss32->save_r1 = 0;
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

}
320
321 static void
322 arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs)
323 {
324 boolean_t indirect = (regs->save_r12 == 0);
325 if (indirect)
326 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
327 BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
328 regs->save_r1, regs->save_r2, regs->save_r3, regs->save_r4, 0);
329 else
330 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
331 BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
332 regs->save_r0, regs->save_r1, regs->save_r2, regs->save_r3, 0);
333 }
334
/*
 * Clear the carry flag in a 32-bit trap frame; the user-space syscall
 * stub treats carry-set as "call cerror", so this marks success.
 */
static void
arm_clear_u32_syscall_error(arm_saved_state32_t *regs)
{
	regs->cpsr &= ~PSR_CF;
}
340
341 #if defined(__arm__)
342
/*
 * Marshal syscall arguments into uthread->uu_arg. On an AArch32 kernel
 * only 32-bit saved state exists, so this is a thin forwarder.
 * Returns 0 on success, -1 on failure.
 */
static int
arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp)
{
	assert(is_saved_state32(state));
	return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
}
349
350 #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
351 /*
352 * For armv7k, the alignment constraints of the ABI mean we don't know how the userspace
353 * arguments are arranged without knowing the the prototype of the syscall. So we use mungers
354 * to marshal the userspace data into the uu_arg. This also means we need the same convention
355 * as mach syscalls. That means we use r8 to pass arguments in the BSD case as well.
356 */
/*
 * armv7k variant: marshal arguments via the syscall's munger, which
 * understands the ABI's alignment of 64-bit parameters.
 * Returns 0 on success, -1 if the declared argument bytes exceed uu_arg.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
	sy_munge_t *munger;

	/* This check is probably not very useful since these both come from build-time */
	if (callp->sy_arg_bytes > sizeof(uthread->uu_arg))
		return -1;

	/* get the munger and use it to marshal in the data from userspace */
	munger = callp->sy_arg_munge32;
	/* NOTE(review): a NULL munger with sy_arg_bytes > 0 returns success
	 * without copying anything — presumably every such syscall has a
	 * munger generated at build time; verify against makesyscalls. */
	if (munger == NULL || (callp->sy_arg_bytes == 0))
		return 0;

	return munger(regs, uthread->uu_arg);
}
373 #else
374 /*
375 * For an AArch32 kernel, where we know that we have only AArch32 userland,
376 * we do not do any munging (which is a little confusing, as it is a contrast
377 * to the i386 kernel, where, like the x86_64 kernel, we always munge
378 * arguments from a 32-bit userland out to 64-bit.
379 */
/*
 * Plain AArch32 variant: copy arguments directly with no munging.
 * flavor == 1 marks an indirect syscall (r12 == 0), whose first register
 * (r0) holds the syscall number rather than an argument.
 * Returns 0 on success, -1 on overflow or copyin failure.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
	int regparams;
	int flavor = (regs->save_r12 == 0 ? 1 : 0);

	regparams = (7 - flavor); /* Indirect value consumes a register */

	assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg));

	if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
		/*
		 * Seven arguments or less are passed in registers.
		 */
		memcpy(&uthread->uu_arg[0], &regs->r[flavor], callp->sy_arg_bytes);
	} else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
		/*
		 * In this case, we composite - take the first args from registers,
		 * the remainder from the stack (offset by the 7 regs therein).
		 */
		unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
		memcpy(&uthread->uu_arg[0] , &regs->r[flavor], regparams * sizeof(int));
		/* Stack spill area always starts 7 words up, regardless of flavor. */
		if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
				   (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
			return -1;
		}
	} else {
		return -1;
	}

	return 0;
}
412 #endif
413
414 static int
415 arm_get_syscall_number(struct arm_saved_state *regs)
416 {
417 if (regs->save_r12 != 0) {
418 return regs->save_r12;
419 } else {
420 return regs->save_r0;
421 }
422 }
423
/*
 * Write a syscall result back into the trap frame; only 32-bit state
 * exists on an AArch32 kernel, so forward unconditionally.
 */
static void
arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
{
	assert(is_saved_state32(state));
	arm_prepare_u32_syscall_return(callp, state, uthread, error);
}
430
/* Emit the kdebug syscall-entry event; AArch32 kernel, 32-bit state only. */
static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
	assert(is_saved_state32(state));
	arm_trace_u32_unix_syscall(code, saved_state32(state));
}
437
/* Clear the cerror carry flag; AArch32 kernel, 32-bit state only. */
static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
	assert(is_saved_state32(state));
	arm_clear_u32_syscall_error(saved_state32(state));
}
444
445 #elif defined(__arm64__)
446 static void arm_prepare_u64_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int);
447 static int arm_get_u64_syscall_args(uthread_t, arm_saved_state64_t *, struct sysent *);
448
449 static int
450 arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp)
451 {
452 if (is_saved_state32(state)) {
453 return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
454 } else {
455 return arm_get_u64_syscall_args(uthread, saved_state64(state), callp);
456 }
457 }
458
459 /*
460 * 64-bit: all arguments in registers. We're willing to use x9, a temporary
461 * register per the ABI, to pass an argument to the kernel for one case,
462 * an indirect syscall with 8 arguments. No munging required, as all arguments
463 * are in 64-bit wide registers already.
464 */
/*
 * 64-bit: all arguments in registers. We're willing to use x9, a temporary
 * register per the ABI, to pass an argument to the kernel for one case,
 * an indirect syscall with 8 arguments. No munging required, as all arguments
 * are in 64-bit wide registers already.
 *
 * Returns 0 on success, -1 if the syscall declares more arguments than
 * uu_arg can hold.
 */
static int
arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, struct sysent *callp)
{
	int indirect_offset;

#if CONFIG_REQUIRES_U32_MUNGING
	sy_munge_t *mungerp;
#endif

	/* Indirect syscall: the code register is 0 and x0 holds the number,
	 * so arguments start at x1 instead of x0. */
	indirect_offset = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0) ? 1 : 0;

	/*
	 * Everything should fit in registers for now.
	 */
	if (callp->sy_narg > (int)(sizeof(uthread->uu_arg) / sizeof(uthread->uu_arg[0]))) {
		return -1;
	}

	memcpy(&uthread->uu_arg[0], &regs->x[indirect_offset], callp->sy_narg * sizeof(uint64_t));

#if CONFIG_REQUIRES_U32_MUNGING
	/*
	 * The indirect system call interface is vararg based. For armv7k, arm64_32,
	 * and arm64, this means we simply lay the values down on the stack, padded to
	 * a width multiple (4 bytes for armv7k and arm64_32, 8 bytes for arm64).
	 * The arm64(_32) stub for syscall will load this data into the registers and
	 * then trap. This gives us register state that corresponds to what we would
	 * expect from a armv7 task, so in this particular case we need to munge the
	 * arguments.
	 *
	 * TODO: Is there a cleaner way to do this check? What we're actually
	 * interested in is whether the task is arm64_32. We don't appear to guarantee
	 * that uu_proc is populated here, which is why this currently uses the
	 * thread_t.
	 */
	mungerp = callp->sy_arg_munge32;
	assert(uthread->uu_thread);

	/* NOTE(review): mungerp is dereferenced without a NULL check on the
	 * indirect 32-bit path — presumably every reachable syscall has a
	 * build-time munger; verify against makesyscalls output. */
	if (indirect_offset && !ml_thread_is64bit(uthread->uu_thread)) {
		(*mungerp)(&uthread->uu_arg[0]);
	}
#endif

	return 0;
}
510 /*
511 * When the kernel is running AArch64, munge arguments from 32-bit
512 * userland out to 64-bit.
513 *
514 * flavor == 1 indicates an indirect syscall.
515 */
/*
 * When the kernel is running AArch64, munge arguments from 32-bit
 * userland out to 64-bit.
 *
 * flavor == 1 indicates an indirect syscall (r12 == 0), whose first
 * register (r0) holds the syscall number rather than an argument.
 * Returns 0 on success, -1 on overflow or copyin failure.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
	int regparams;
#if CONFIG_REQUIRES_U32_MUNGING
	sy_munge_t *mungerp;
#else
#error U32 syscalls on ARM64 kernel requires munging
#endif
	int flavor = (regs->save_r12 == 0 ? 1 : 0);

	regparams = (7 - flavor); /* Indirect value consumes a register */

	assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg));

	if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
		/*
		 * Seven arguments or less are passed in registers.
		 */
		memcpy(&uthread->uu_arg[0], &regs->r[flavor], callp->sy_arg_bytes);
	} else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
		/*
		 * In this case, we composite - take the first args from registers,
		 * the remainder from the stack (offset by the 7 regs therein).
		 */
		unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
		memcpy(&uthread->uu_arg[0] , &regs->r[flavor], regparams * sizeof(int));
		/* Stack spill area always starts 7 words up, regardless of flavor. */
		if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
				   (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
			return -1;
		}
	} else {
		return -1;
	}

#if CONFIG_REQUIRES_U32_MUNGING
	/* Munge here: widen the packed 32-bit values into 64-bit uu_arg slots. */
	mungerp = callp->sy_arg_munge32;
	if (mungerp != NULL) {
		(*mungerp)(&uthread->uu_arg[0]);
	}
#endif

	return 0;

}
562
563 static int
564 arm_get_syscall_number(struct arm_saved_state *state)
565 {
566 if (is_saved_state32(state)) {
567 if (saved_state32(state)->save_r12 != 0) {
568 return saved_state32(state)->save_r12;
569 } else {
570 return saved_state32(state)->save_r0;
571 }
572 } else {
573 if (saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0) {
574 return saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM];
575 } else {
576 return saved_state64(state)->x[0];
577 }
578 }
579
580 }
581
582 static void
583 arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
584 {
585 if (is_saved_state32(state)) {
586 arm_prepare_u32_syscall_return(callp, state, uthread, error);
587 } else {
588 arm_prepare_u64_syscall_return(callp, state, uthread, error);
589 }
590 }
591
/*
 * Write a syscall result into a 64-bit trap frame.
 *
 * ERESTART:	back pc up over the (4-byte) trap instruction so the
 *		syscall is re-issued on return to user space.
 * EJUSTRETURN:	leave the registers exactly as the handler set them.
 * other error:	error code in x0, carry flag set so the user-space stub
 *		branches to cerror.
 * success:	uu_rval delivered per the syscall's declared return type.
 */
static void
arm_prepare_u64_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
{
	assert(is_saved_state64(regs));

	arm_saved_state64_t *ss64 = saved_state64(regs);

	if (error == ERESTART) {
		ss64->pc -= 4;
	} else if (error != EJUSTRETURN) {
		if (error) {
			ss64->x[0] = error;
			ss64->x[1] = 0;
			/*
			 * Set the carry bit to execute cerror routine.
			 * ARM64_TODO: should we have a separate definition?
			 * The bits are the same.
			 */
			ss64->cpsr |= PSR_CF;
			unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				/* Sign-extends through the int -> uint64_t store. */
				ss64->x[0] = uthread->uu_rval[0];
				ss64->x[1] = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				/* Zero-extend: cast to u_int before widening. */
				ss64->x[0] = (u_int)uthread->uu_rval[0];
				ss64->x[1] = (u_int)uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
				/* uu_rval[0]/uu_rval[1] together hold one 64-bit value. */
				ss64->x[0] = *((uint64_t *)(&uthread->uu_rval[0]));
				ss64->x[1] = 0;
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */


}
642 static void
643 arm_trace_u64_unix_syscall(int code, arm_saved_state64_t *regs)
644 {
645 boolean_t indirect = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0);
646 if (indirect)
647 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
648 BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
649 regs->x[1], regs->x[2], regs->x[3], regs->x[4], 0);
650 else
651 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
652 BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
653 regs->x[0], regs->x[1], regs->x[2], regs->x[3], 0);
654 }
655
/* Emit the kdebug syscall-entry event for 32- or 64-bit saved state. */
static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
	if (is_saved_state32(state)) {
		arm_trace_u32_unix_syscall(code, saved_state32(state));
	} else {
		arm_trace_u64_unix_syscall(code, saved_state64(state));
	}
}
665
/*
 * Clear the carry flag in a 64-bit trap frame; the user-space syscall
 * stub treats carry-set as "call cerror", so this marks success.
 */
static void
arm_clear_u64_syscall_error(arm_saved_state64_t *regs)
{
	/*
	 * ARM64_TODO: should we have a separate definition?
	 * The bits are the same.
	 */
	regs->cpsr &= ~PSR_CF;
}
675
/* Clear the cerror carry flag for 32- or 64-bit saved state. */
static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
	if (is_saved_state32(state)) {
		arm_clear_u32_syscall_error(saved_state32(state));
	} else {
		arm_clear_u64_syscall_error(saved_state64(state));
	}
}
685
686 #else
687 #error Unknown architecture.
688 #endif