/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 */

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <mach/machine/thread_status.h>
#include <mach/thread_act.h>
#include <machine/machine_routines.h>
#include <arm/thread.h>
#include <arm/proc_reg.h>
#include <pexpert/pexpert.h>

#include <sys/kernel.h>
#include <sys/vm.h>
#include <sys/proc_internal.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/bitstring.h>

#include <security/audit/audit.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#if CONFIG_DTRACE
extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
#endif /* CONFIG_DTRACE */

extern void
unix_syscall(struct arm_saved_state * regs, thread_t thread_act,
	struct uthread * uthread, struct proc * proc);

static int arm_get_syscall_args(uthread_t, struct arm_saved_state *, struct sysent *);
static int arm_get_u32_syscall_args(uthread_t, arm_saved_state32_t *, struct sysent *);
static void arm_prepare_u32_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int);
static void arm_prepare_syscall_return(struct sysent *, struct arm_saved_state *, uthread_t, int);
static int arm_get_syscall_number(struct arm_saved_state *);
static void arm_trace_unix_syscall(int, struct arm_saved_state *);
static void arm_clear_syscall_error(struct arm_saved_state *);
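
/*
 * Shorthand aliases for the general-purpose register array in the
 * 32-bit saved thread state.
 */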
#define save_r0 r[0]
#define save_r1 r[1]
#define save_r2 r[2]
#define save_r3 r[3]
#define save_r4 r[4]
#define save_r5 r[5]
#define save_r6 r[6]
#define save_r7 r[7]
#define save_r8 r[8]
#define save_r9 r[9]
#define save_r10 r[10]
#define save_r11 r[11]
#define save_r12 r[12]
#define save_r13 r[13]

#if COUNT_SYSCALLS
__XNU_PRIVATE_EXTERN int do_count_syscalls = 1;
__XNU_PRIVATE_EXTERN int syscalls_log[SYS_MAXSYSCALL];
#endif

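/*
 * The kdebug_trace* syscalls emit trace records themselves; the generic BSD
 * syscall entry/exit kdebug events are suppressed for them so the trace
 * stream is not cluttered with redundant records.
 */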
#define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || \
	((code) == SYS_kdebug_trace64) || \
	((code) == SYS_kdebug_trace_string))

/*
 * Function: unix_syscall
 *
 * Inputs: regs - pointer to Process Control Block
 *
 * Outputs: none
 */
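/*
 * Called from the machine-dependent trap handler once a user thread takes
 * the BSD system call trap.  On __arm__ this routine does not return to its
 * caller; it exits to user space through thread_exception_return().
 */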
#ifdef __arm__
__attribute__((noreturn))
#endif
void
unix_syscall(
	struct arm_saved_state * state,
	__unused thread_t thread_act,
	struct uthread * uthread,
	struct proc * proc)
{
	struct sysent *callp;
	int error;
	unsigned short code, syscode;
	pid_t pid;

#if defined(__arm__)
	assert(is_saved_state32(state));
#endif

	uthread_reset_proc_refcount(uthread);

	code = arm_get_syscall_number(state);

#define unix_syscall_kprintf(x...) /* kprintf("unix_syscall: " x) */

	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		arm_trace_unix_syscall(code, state);
	}

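	/*
	 * A vfork child runs on its parent's thread; current_proc() honors
	 * UT_VFORK and returns the child's proc rather than the one passed in.
	 */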
	if ((uthread->uu_flag & UT_VFORK))
		proc = current_proc();

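	/* Out-of-range syscall numbers are redirected to the SYS_invalid entry. */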
	syscode = (code < nsysent) ? code : SYS_invalid;
	callp = &sysent[syscode];

	/*
	 * sy_narg is inaccurate on ARM if a 64 bit parameter is specified. Since user_addr_t
	 * is currently a 32 bit type, this is really a long word count. See rdar://problem/6104668.
	 */
	if (callp->sy_narg != 0) {
		if (arm_get_syscall_args(uthread, state, callp) != 0) {
			/* Too many arguments, or something failed */
			unix_syscall_kprintf("arm_get_syscall_args failed.\n");
			callp = &sysent[SYS_invalid];
		}
	}

	uthread->uu_flag |= UT_NOTCANCELPT;
	uthread->syscall_code = code;

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile; if we set it to regs->save_r4 here the child
	 * will have the parent's r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * ARM runtime will call cerror if the carry bit is set after a
	 * system call, so clear it here for the common case of success.
	 */
	arm_clear_syscall_error(state);

#if COUNT_SYSCALLS
	if (do_count_syscalls > 0) {
		syscalls_log[code]++;
	}
#endif
	pid = proc_pid(proc);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	unix_syscall_kprintf("code %d (pid %d - %s, tid %lld)\n", code,
	    pid, proc->p_comm, thread_tid(current_thread()));

#if CONFIG_MACF
	if (__improbable(proc->syscall_filter_mask != NULL && !bitstr_test(proc->syscall_filter_mask, syscode))) {
		error = mac_proc_check_syscall_unix(proc, syscode);
		if (error)
			goto skip_syscall;
	}
#endif /* CONFIG_MACF */

	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	error = (*(callp->sy_call)) (proc, &uthread->uu_arg[0], &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

#if CONFIG_MACF
skip_syscall:
#endif /* CONFIG_MACF */

	unix_syscall_kprintf("code %d, error %d, results %x, %x (pid %d - %s, tid %lld)\n", code, error,
	    uthread->uu_rval[0], uthread->uu_rval[1],
	    pid, get_bsdtask_info(current_task()) ? proc->p_comm : "unknown", thread_tid(current_thread()));

#ifdef JOE_DEBUG
	if (uthread->uu_iocount) {
		printf("system call returned with uu_iocount != 0");
	}
#endif
#if CONFIG_DTRACE
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_set_allocation_name(NULL);
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

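	/* Copy the result (or error) back into the saved user register state. */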
	arm_prepare_syscall_return(callp, state, uthread, error);

	uthread->uu_flag &= ~UT_NOTCANCELPT;
	uthread->syscall_code = 0;

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		    error, uthread->uu_rval[0], uthread->uu_rval[1], pid);
	}

#if PROC_REF_DEBUG
	if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

#ifdef __arm__
	thread_exception_return();
#endif
}

void
unix_syscall_return(int error)
{
	thread_t thread_act;
	struct uthread *uthread;
	struct proc *proc;
	struct arm_saved_state *regs;
	unsigned short code;
	struct sysent *callp;

#define unix_syscall_return_kprintf(x...) /* kprintf("unix_syscall_return: " x) */

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);
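	/* The syscall number was stashed in uthread->syscall_code by unix_syscall(). */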
	code = uthread->syscall_code;
	callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

#if CONFIG_DTRACE
	if (callp->sy_call == dtrace_systrace_syscall)
		dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_set_allocation_name(NULL);
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	AUDIT_SYSCALL_EXIT(code, proc, uthread, error);

	/*
	 * Write the result (or error) back into the saved user register state.
	 */
	arm_prepare_syscall_return(callp, regs, uthread, error);

	uthread->uu_flag &= ~UT_NOTCANCELPT;
	uthread->syscall_code = 0;

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (kdebug_enable && !code_is_kdebug_trace(code)) {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		    error, uthread->uu_rval[0], uthread->uu_rval[1], proc->p_pid);
	}

	thread_exception_return();
	/* NOTREACHED */
}

static void
arm_prepare_u32_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
{
	assert(is_saved_state32(regs));

	arm_saved_state32_t *ss32 = saved_state32(regs);

	if (error == ERESTART) {
		ss32->pc -= 4;
	} else if (error != EJUSTRETURN) {
		if (error) {
			ss32->save_r0 = error;
			ss32->save_r1 = 0;
			/* set the carry bit to execute cerror routine */
			ss32->cpsr |= PSR_CF;
			unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
			case _SYSCALL_RET_UINT_T:
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
				ss32->save_r0 = uthread->uu_rval[0];
				ss32->save_r1 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_NONE:
				ss32->save_r0 = 0;
				ss32->save_r1 = 0;
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */
}

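/*
 * For an indirect syscall (r12 == 0) the syscall number is in r0, so the
 * traced arguments start at r1.
 */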
static void
arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs)
{
	bool indirect = (regs->save_r12 == 0);
	if (indirect) {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
		    regs->save_r1, regs->save_r2, regs->save_r3, regs->save_r4);
	} else {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
		    regs->save_r0, regs->save_r1, regs->save_r2, regs->save_r3);
	}
}

static void
arm_clear_u32_syscall_error(arm_saved_state32_t *regs)
{
	regs->cpsr &= ~PSR_CF;
}

#if defined(__arm__)

static int
arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp)
{
	assert(is_saved_state32(state));
	return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
}

#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
/*
 * For armv7k, the alignment constraints of the ABI mean we don't know how the
 * userspace arguments are arranged without knowing the prototype of the syscall,
 * so we use mungers to marshal the userspace data into uu_arg. This also means we
 * need the same convention as Mach syscalls; we use r8 to pass arguments in the
 * BSD case as well.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
	sy_munge_t *munger;

	/* This check is probably not very useful since these both come from build-time */
	if (callp->sy_arg_bytes > sizeof(uthread->uu_arg))
		return -1;

	/* get the munger and use it to marshal in the data from userspace */
	munger = callp->sy_arg_munge32;
	if (munger == NULL || (callp->sy_arg_bytes == 0))
		return 0;

	return munger(regs, uthread->uu_arg);
}
#else
/*
 * For an AArch32 kernel, where we know that we have only AArch32 userland,
 * we do not do any munging (which is a little confusing, as it is a contrast
 * to the i386 kernel, where, like the x86_64 kernel, we always munge
 * arguments from a 32-bit userland out to 64-bit).
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
	int regparams;
	int flavor = (regs->save_r12 == 0 ? 1 : 0);

	regparams = (7 - flavor); /* Indirect value consumes a register */

	assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg));

	if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
		/*
		 * Seven arguments or less are passed in registers.
		 */
		memcpy(&uthread->uu_arg[0], &regs->r[flavor], callp->sy_arg_bytes);
	} else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
		/*
		 * In this case, we composite - take the first args from registers,
		 * the remainder from the stack (offset by the 7 regs therein).
		 */
		unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
		memcpy(&uthread->uu_arg[0], &regs->r[flavor], regparams * sizeof(int));
		if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
		    (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
			return -1;
		}
	} else {
		return -1;
	}

	return 0;
}
#endif

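/*
 * A non-zero r12 carries the syscall number directly; r12 == 0 indicates an
 * indirect syscall, whose number is passed in r0.
 */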
static int
arm_get_syscall_number(struct arm_saved_state *regs)
{
	if (regs->save_r12 != 0) {
		return regs->save_r12;
	} else {
		return regs->save_r0;
	}
}

static void
arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
{
	assert(is_saved_state32(state));
	arm_prepare_u32_syscall_return(callp, state, uthread, error);
}

static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
	assert(is_saved_state32(state));
	arm_trace_u32_unix_syscall(code, saved_state32(state));
}

static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
	assert(is_saved_state32(state));
	arm_clear_u32_syscall_error(saved_state32(state));
}

#elif defined(__arm64__)
static void arm_prepare_u64_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int);
static int arm_get_u64_syscall_args(uthread_t, arm_saved_state64_t *, struct sysent *);

static int
arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp)
{
	if (is_saved_state32(state)) {
		return arm_get_u32_syscall_args(uthread, saved_state32(state), callp);
	} else {
		return arm_get_u64_syscall_args(uthread, saved_state64(state), callp);
	}
}

/*
 * 64-bit: all arguments in registers. We're willing to use x9, a temporary
 * register per the ABI, to pass an argument to the kernel for one case,
 * an indirect syscall with 8 arguments. No munging required, as all arguments
 * are in 64-bit wide registers already.
 */
static int
arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, struct sysent *callp)
{
	int indirect_offset;

#if CONFIG_REQUIRES_U32_MUNGING
	sy_munge_t *mungerp;
#endif

	indirect_offset = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0) ? 1 : 0;

	/*
	 * Everything should fit in registers for now.
	 */
	if (callp->sy_narg > (int)(sizeof(uthread->uu_arg) / sizeof(uthread->uu_arg[0]))) {
		return -1;
	}

	memcpy(&uthread->uu_arg[0], &regs->x[indirect_offset], callp->sy_narg * sizeof(uint64_t));

#if CONFIG_REQUIRES_U32_MUNGING
	/*
	 * The indirect system call interface is vararg based. For armv7k, arm64_32,
	 * and arm64, this means we simply lay the values down on the stack, padded to
	 * a width multiple (4 bytes for armv7k and arm64_32, 8 bytes for arm64).
	 * The arm64(_32) stub for syscall will load this data into the registers and
	 * then trap. This gives us register state that corresponds to what we would
	 * expect from an armv7 task, so in this particular case we need to munge the
	 * arguments.
	 *
	 * TODO: Is there a cleaner way to do this check? What we're actually
	 * interested in is whether the task is arm64_32. We don't appear to guarantee
	 * that uu_proc is populated here, which is why this currently uses the
	 * thread_t.
	 */
	mungerp = callp->sy_arg_munge32;
	assert(uthread->uu_thread);

	if (indirect_offset && !ml_thread_is64bit(uthread->uu_thread)) {
		(*mungerp)(&uthread->uu_arg[0]);
	}
#endif

	return 0;
}

/*
 * When the kernel is running AArch64, munge arguments from 32-bit
 * userland out to 64-bit.
 *
 * flavor == 1 indicates an indirect syscall.
 */
static int
arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp)
{
	int regparams;
#if CONFIG_REQUIRES_U32_MUNGING
	sy_munge_t *mungerp;
#else
#error U32 syscalls on ARM64 kernel requires munging
#endif
	int flavor = (regs->save_r12 == 0 ? 1 : 0);

	regparams = (7 - flavor); /* Indirect value consumes a register */

	assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg));

	if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) {
		/*
		 * Seven arguments or less are passed in registers.
		 */
		memcpy(&uthread->uu_arg[0], &regs->r[flavor], callp->sy_arg_bytes);
	} else if (callp->sy_arg_bytes <= sizeof(uthread->uu_arg)) {
		/*
		 * In this case, we composite - take the first args from registers,
		 * the remainder from the stack (offset by the 7 regs therein).
		 */
		unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__);
		memcpy(&uthread->uu_arg[0], &regs->r[flavor], regparams * sizeof(int));
		if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams,
		    (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) {
			return -1;
		}
	} else {
		return -1;
	}

#if CONFIG_REQUIRES_U32_MUNGING
	/* Munge here */
	mungerp = callp->sy_arg_munge32;
	if (mungerp != NULL) {
		(*mungerp)(&uthread->uu_arg[0]);
	}
#endif

	return 0;
}

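/*
 * On arm64 the syscall number is passed in x[ARM64_SYSCALL_CODE_REG_NUM]
 * (x16) for 64-bit tasks and in r12 for 32-bit tasks; zero in that register
 * denotes an indirect syscall whose real number is in x0/r0.  For example, a
 * user-space syscall(SYS_getpid) traps with zero in the code register and
 * SYS_getpid in x0/r0.
 */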
static int
arm_get_syscall_number(struct arm_saved_state *state)
{
	if (is_saved_state32(state)) {
		if (saved_state32(state)->save_r12 != 0) {
			return saved_state32(state)->save_r12;
		} else {
			return saved_state32(state)->save_r0;
		}
	} else {
		if (saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0) {
			return saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM];
		} else {
			return saved_state64(state)->x[0];
		}
	}
}

static void
arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error)
{
	if (is_saved_state32(state)) {
		arm_prepare_u32_syscall_return(callp, state, uthread, error);
	} else {
		arm_prepare_u64_syscall_return(callp, state, uthread, error);
	}
}

static void
arm_prepare_u64_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error)
{
	assert(is_saved_state64(regs));

	arm_saved_state64_t *ss64 = saved_state64(regs);

	if (error == ERESTART) {
		add_saved_state_pc(regs, -4);
	} else if (error != EJUSTRETURN) {
		if (error) {
			ss64->x[0] = error;
			ss64->x[1] = 0;
			/*
			 * Set the carry bit to execute cerror routine.
			 * ARM64_TODO: should we have a separate definition?
			 * The bits are the same.
			 */
			ss64->cpsr |= PSR_CF;
			unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n");
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				ss64->x[0] = uthread->uu_rval[0];
				ss64->x[1] = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				ss64->x[0] = (u_int)uthread->uu_rval[0];
				ss64->x[1] = (u_int)uthread->uu_rval[1];
				break;
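			/*
			 * 64-bit results are spread across both 32-bit uu_rval
			 * words; reassemble them into x0.
			 */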
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
				ss64->x[0] = *((uint64_t *)(&uthread->uu_rval[0]));
				ss64->x[1] = 0;
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */
}
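
/*
 * For an indirect syscall the number is in x0, so the traced arguments
 * begin at x1.
 */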
static void
arm_trace_u64_unix_syscall(int code, arm_saved_state64_t *regs)
{
	bool indirect = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0);
	if (indirect) {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
		    regs->x[1], regs->x[2], regs->x[3], regs->x[4]);
	} else {
		KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
		    regs->x[0], regs->x[1], regs->x[2], regs->x[3]);
	}
}

static void
arm_trace_unix_syscall(int code, struct arm_saved_state *state)
{
	if (is_saved_state32(state)) {
		arm_trace_u32_unix_syscall(code, saved_state32(state));
	} else {
		arm_trace_u64_unix_syscall(code, saved_state64(state));
	}
}

static void
arm_clear_u64_syscall_error(arm_saved_state64_t *regs)
{
	/*
	 * ARM64_TODO: should we have a separate definition?
	 * The bits are the same.
	 */
	regs->cpsr &= ~PSR_CF;
}

static void
arm_clear_syscall_error(struct arm_saved_state * state)
{
	if (is_saved_state32(state)) {
		arm_clear_u32_syscall_error(saved_state32(state));
	} else {
		arm_clear_u64_syscall_error(saved_state64(state));
	}
}

#else
#error Unknown architecture.
#endif