/*
- * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <mach/thread_status.h>
#include <mach/vm_param.h>
-#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
extern void throttle_lowpri_io(int);
#endif
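+/* mac_mach_internal.h provides the MACF hook (mac_task_mach_trap_evaluate) used by the per-task mach trap filter checks below. */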
+#if CONFIG_MACF
+#include <security/mac_mach_internal.h>
+#endif
+
void * find_user_regs(thread_t);
unsigned int get_msr_exportmask(void);
__unused thread_t thread,
int flavor,
thread_state_t tstate,
- __unused unsigned int count,
+ unsigned int count,
mach_vm_offset_t *user_stack,
int *customstack,
__unused boolean_t is64bit
{
x86_thread_state32_t *state25;
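+ /* Validate the caller-supplied count before trusting the state buffer. */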
+ if (__improbable(count != x86_THREAD_STATE32_COUNT)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
state25 = (x86_thread_state32_t *) tstate;
if (state25->esp) {
}
case x86_THREAD_FULL_STATE64:
- /* FALL THROUGH */
+ {
+ x86_thread_full_state64_t *state25;
+
+ if (__improbable(count != x86_THREAD_FULL_STATE64_COUNT)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ state25 = (x86_thread_full_state64_t *) tstate;
+
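+ /* Mirrors the x86_THREAD_STATE64 case below: a nonzero rsp selects the caller's custom stack; otherwise the default 64-bit user stack base is reported. */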
+ if (state25->ss64.rsp) {
+ *user_stack = state25->ss64.rsp;
+ if (customstack) {
+ *customstack = 1;
+ }
+ } else {
+ *user_stack = VM_USRSTACK64;
+ if (customstack) {
+ *customstack = 0;
+ }
+ }
+ break;
+ }
+
case x86_THREAD_STATE64:
{
x86_thread_state64_t *state25;
+ if (__improbable(count != x86_THREAD_STATE64_COUNT)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
state25 = (x86_thread_state64_t *) tstate;
if (state25->rsp) {
__unused thread_t thread,
int flavor,
thread_state_t tstate,
- __unused unsigned int count,
+ unsigned int count,
mach_vm_offset_t *entry_point
)
{
{
x86_thread_state32_t *state25;
+ if (__improbable(count != x86_THREAD_STATE32_COUNT)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
state25 = (x86_thread_state32_t *) tstate;
*entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
break;
{
x86_thread_state64_t *state25;
+ if (__improbable(count != x86_THREAD_STATE64_COUNT)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
state25 = (x86_thread_state64_t *) tstate;
*entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
break;
__private_extern__ void mach_call_munger(x86_saved_state_t *state);
-extern const char *mach_syscall_name_table[];
+extern const char *const mach_syscall_name_table[];
__attribute__((noreturn))
void
int call_number;
mach_call_t mach_call;
kern_return_t retval;
- struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ struct mach_call_args args = {
+ .arg1 = 0,
+ .arg2 = 0,
+ .arg3 = 0,
+ .arg4 = 0,
+ .arg5 = 0,
+ .arg6 = 0,
+ .arg7 = 0,
+ .arg8 = 0,
+ .arg9 = 0
+ };
x86_saved_state32_t *regs;
struct uthread *ut = get_bsdthread_info(current_thread());
MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
args.arg1, args.arg2, args.arg3, args.arg4, 0);
+#if CONFIG_MACF
+ /* Check the mach trap filter mask, if one exists. */
+ task_t task = current_task();
+ uint8_t *filter_mask = task->mach_trap_filter_mask;
+
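+ /* bitstr_test() is nonzero when the trap's bit is set in the mask; traps absent from the mask are referred to the MAC policy for evaluation. */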
+ if (__improbable(filter_mask != NULL &&
+ !bitstr_test(filter_mask, call_number))) {
+ /* Not in the filter mask; evaluate the policy. */
+ if (mac_task_mach_trap_evaluate != NULL) {
+ retval = mac_task_mach_trap_evaluate(get_bsdtask_info(task),
+ call_number);
+ if (retval) {
+ goto skip_machcall;
+ }
+ }
+ }
+#endif /* CONFIG_MACF */
+
retval = mach_call(&args);
+#if CONFIG_MACF
+skip_machcall:
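+ /* Jumped to when the MAC policy denies the trap; retval holds the policy's error. */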
+#endif
+
DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
int call_number;
int argc;
mach_call_t mach_call;
- struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ struct mach_call_args args = {
+ .arg1 = 0,
+ .arg2 = 0,
+ .arg3 = 0,
+ .arg4 = 0,
+ .arg5 = 0,
+ .arg6 = 0,
+ .arg7 = 0,
+ .arg8 = 0,
+ .arg9 = 0
+ };
x86_saved_state64_t *regs;
struct uthread *ut = get_bsdthread_info(current_thread());
argc = mach_trap_table[call_number].mach_trap_arg_count;
if (argc) {
int args_in_regs = MIN(6, argc);
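+ /* The copy below spans from arg1 across the following struct members, which a bounds-checked memcpy would reject, hence __nochk_memcpy. */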
-
- memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));
+ __nochk_memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));
if (argc > 6) {
int copyin_count;
mach_kauth_cred_uthread_update();
#endif
+#if CONFIG_MACF
+ /* Check the mach trap filter mask, if one exists. */
+ task_t task = current_task();
+ uint8_t *filter_mask = task->mach_trap_filter_mask;
+
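+ /* Same per-task trap filter as mach_call_munger() above; a denial is surfaced to user space in rax. */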
+ if (__improbable(filter_mask != NULL &&
+ !bitstr_test(filter_mask, call_number))) {
+ /* Not in the filter mask; evaluate the policy. */
+ if (mac_task_mach_trap_evaluate != NULL) {
+ regs->rax = mac_task_mach_trap_evaluate(get_bsdtask_info(task),
+ call_number);
+ if (regs->rax) {
+ goto skip_machcall;
+ }
+ }
+ }
+#endif /* CONFIG_MACF */
+
regs->rax = (uint64_t)mach_call((void *)&args);
+#if CONFIG_MACF
+skip_machcall:
+#endif
+
DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
* Returns the adjusted user stack pointer from the machine-dependent
* thread state info. Used for small (<2G) deltas.
*/
-uint64_t
+user_addr_t
thread_adjuserstack(
thread_t thread,
int adjust)