X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/e8c3f78193f1895ea514044358b93b1add9322f3..2a1bd2d3eef5c7a7bb14f4bb9fdbca9a96ee4752:/osfmk/kern/syscall_subr.c

diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c
index 507d5ec35..4d65fe2ae 100644
--- a/osfmk/kern/syscall_subr.c
+++ b/osfmk/kern/syscall_subr.c
@@ -28,28 +28,28 @@
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
  */
@@ -84,11 +84,11 @@ static void thread_depress_ms(mach_msg_timeout_t interval);
  */
 kern_return_t
 pfz_exit(
-__unused struct pfz_exit_args *args)
+    __unused struct pfz_exit_args *args)
 {
     /* For now, nothing special to do.  We'll pick up the ASTs on kernel exit. */
 
-    return (KERN_SUCCESS);
+    return KERN_SUCCESS;
 }
 
@@ -102,8 +102,8 @@ __unused struct pfz_exit_args *args)
 static void
 swtch_continue(void)
 {
-    processor_t        myprocessor;
-    boolean_t          result;
+    processor_t     myprocessor;
+    boolean_t       result;
 
     disable_preemption();
     myprocessor = current_processor();
@@ -120,14 +120,14 @@ boolean_t
 swtch(
     __unused struct swtch_args *args)
 {
-    processor_t        myprocessor;
+    processor_t     myprocessor;
 
     disable_preemption();
     myprocessor = current_processor();
     if (!SCHED(thread_should_yield)(myprocessor, current_thread())) {
         mp_enable_preemption();
 
-        return (FALSE);
+        return FALSE;
     }
     enable_preemption();
 
@@ -139,8 +139,8 @@ swtch(
 static void
 swtch_pri_continue(void)
 {
-    processor_t        myprocessor;
-    boolean_t          result;
+    processor_t     myprocessor;
+    boolean_t       result;
 
     thread_depress_abort(current_thread());
 
@@ -157,16 +157,16 @@ swtch_pri_continue(void)
 
 boolean_t
 swtch_pri(
-__unused struct swtch_pri_args *args)
+    __unused struct swtch_pri_args *args)
 {
-    processor_t        myprocessor;
+    processor_t     myprocessor;
 
     disable_preemption();
     myprocessor = current_processor();
     if (!SCHED(thread_should_yield)(myprocessor, current_thread())) {
         mp_enable_preemption();
 
-        return (FALSE);
+        return FALSE;
     }
     enable_preemption();
 
@@ -183,8 +183,9 @@ thread_switch_continue(void *parameter, __unused int ret)
     thread_t self = current_thread();
     int option = (int)(intptr_t)parameter;
 
-    if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS)
+    if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS) {
         thread_depress_abort(self);
+    }
 
     ml_delay_on_yield();
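For context: swtch_pri() above is the kernel half of a user-visible Mach trap. A minimal user-space sketch of a polite busy-wait built on it (not part of this diff; it assumes the trap declaration in <mach/mach_traps.h> as shipped on macOS, and wait_for_flag() is an illustrative helper):

#include <stdbool.h>
#include <mach/mach_traps.h>

/* Spin on a flag, but depress our priority and yield the CPU between
 * probes so a fixed-priority poller cannot starve other threads. */
static bool
wait_for_flag(volatile int *flag, int max_probes)
{
    for (int i = 0; i < max_probes; i++) {
        if (*flag) {
            return true;
        }
        swtch_pri(0);   /* depress priority and give up the CPU */
    }
    return false;
}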
@@ -201,18 +202,23 @@ kern_return_t
 thread_switch(
     struct thread_switch_args *args)
 {
-    thread_t             thread = THREAD_NULL;
-    thread_t             self = current_thread();
-    mach_port_name_t     thread_name = args->thread_name;
-    int                  option = args->option;
-    mach_msg_timeout_t   option_time = args->option_time;
-    uint32_t             scale_factor = NSEC_PER_MSEC;
-    boolean_t            depress_option = FALSE;
-    boolean_t            wait_option = FALSE;
-    wait_interrupt_t     interruptible = THREAD_ABORTSAFE;
-
-    /* 
+    thread_t thread = THREAD_NULL;
+    thread_t self = current_thread();
+    mach_port_name_t thread_name = args->thread_name;
+    int option = args->option;
+    mach_msg_timeout_t option_time = args->option_time;
+    uint32_t scale_factor = NSEC_PER_MSEC;
+    boolean_t depress_option = FALSE;
+    boolean_t wait_option = FALSE;
+    wait_interrupt_t interruptible = THREAD_ABORTSAFE;
+    port_to_thread_options_t ptt_options = PORT_TO_THREAD_NOT_CURRENT_THREAD;
+
+    /*
      * Validate and process option.
+     *
+     * OSLock boosting only applies to other threads
+     * in your same task (even if you have a port for
+     * a thread in another task)
      */
     switch (option) {
     case SWITCH_OPTION_NONE:
@@ -231,60 +237,36 @@ thread_switch(
     case SWITCH_OPTION_OSLOCK_DEPRESS:
         depress_option = TRUE;
         interruptible |= THREAD_WAIT_NOREPORT;
+        ptt_options |= PORT_TO_THREAD_IN_CURRENT_TASK;
         break;
     case SWITCH_OPTION_OSLOCK_WAIT:
         wait_option = TRUE;
         interruptible |= THREAD_WAIT_NOREPORT;
+        ptt_options |= PORT_TO_THREAD_IN_CURRENT_TASK;
         break;
     default:
-        return (KERN_INVALID_ARGUMENT);
-    }
+        return KERN_INVALID_ARGUMENT;
+    }
 
     /*
      * Translate the port name if supplied.
      */
     if (thread_name != MACH_PORT_NULL) {
-        ipc_port_t port;
-
-        if (ipc_port_translate_send(self->task->itk_space,
-            thread_name, &port) == KERN_SUCCESS) {
-            ip_reference(port);
-            ip_unlock(port);
-
-            thread = convert_port_to_thread(port);
-            ip_release(port);
-
-            if (thread == self) {
-                thread_deallocate(thread);
-                thread = THREAD_NULL;
-            }
-        }
+        thread = port_name_to_thread(thread_name, ptt_options);
     }
 
     if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) {
         if (thread != THREAD_NULL) {
-
-            if (thread->task != self->task) {
-                /*
-                 * OSLock boosting only applies to other threads
-                 * in your same task (even if you have a port for
-                 * a thread in another task)
-                 */
-
-                thread_deallocate(thread);
-                thread = THREAD_NULL;
-            } else {
-                /*
-                 * Attempt to kick the lock owner up to our same IO throttling tier.
-                 * If the thread is currently blocked in throttle_lowpri_io(),
-                 * it will immediately break out.
-                 *
-                 * TODO: SFI break out?
-                 */
-                int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);
-
-                set_thread_iotier_override(thread, new_policy);
-            }
+            /*
+             * Attempt to kick the lock owner up to our same IO throttling tier.
+             * If the thread is currently blocked in throttle_lowpri_io(),
+             * it will immediately break out.
+             *
+             * TODO: SFI break out?
+             */
+            int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);
+
+            set_thread_iotier_override(thread, new_policy);
         }
     }
 
@@ -297,19 +279,20 @@ thread_switch(
         /* This may return a different thread if the target is pushing on something */
         thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
 
-        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
-                              thread_tid(thread), thread->state,
-                              pulled_thread ? TRUE : FALSE, 0, 0);
+        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE,
+            thread_tid(thread), thread->state,
+            pulled_thread ? TRUE : FALSE, 0, 0);
 
         if (pulled_thread != THREAD_NULL) {
             /* We can't be dropping the last ref here */
             thread_deallocate_safe(thread);
 
-            if (wait_option)
+            if (wait_option) {
                 assert_wait_timeout((event_t)assert_wait_timeout, interruptible,
-                    option_time, scale_factor);
-            else if (depress_option)
+                    option_time, scale_factor);
+            } else if (depress_option) {
                 thread_depress_ms(option_time);
+            }
 
             thread_run(self, thread_switch_continue, (void *)(intptr_t)option, pulled_thread);
             __builtin_unreachable();
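The trap implemented above is reachable from user space as thread_switch(thread_name, option, option_time). A caller-side sketch of a directed handoff to a known lock owner (not from the diff; yield_to_owner() and `owner` are illustrative, and the prototypes are assumed to come from <mach/mach_traps.h> and <mach/thread_switch.h>):

#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <pthread.h>

/* Hand the rest of our quantum to `owner`, staying priority-depressed
 * for up to 2 ms unless the depression is aborted sooner. */
static void
yield_to_owner(pthread_t owner)
{
    mach_port_name_t name = pthread_mach_thread_np(owner);

    (void)thread_switch(name, SWITCH_OPTION_DEPRESS, 2 /* ms */);
}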
@@ -343,64 +326,33 @@ thread_switch(
 
 void
 thread_yield_with_continuation(
-    thread_continue_t    continuation,
-    void                 *parameter)
+    thread_continue_t continuation,
+    void *parameter)
 {
     assert(continuation);
     thread_block_reason(continuation, parameter, AST_YIELD);
     __builtin_unreachable();
 }
 
-
-/* Returns a +1 thread reference */
-thread_t
-port_name_to_thread_for_ulock(mach_port_name_t thread_name)
-{
-    thread_t thread = THREAD_NULL;
-    thread_t self = current_thread();
-
-    /*
-     * Translate the port name if supplied.
-     */
-    if (thread_name != MACH_PORT_NULL) {
-        ipc_port_t port;
-
-        if (ipc_port_translate_send(self->task->itk_space,
-            thread_name, &port) == KERN_SUCCESS) {
-            ip_reference(port);
-            ip_unlock(port);
-
-            thread = convert_port_to_thread(port);
-            ip_release(port);
-
-            if (thread == THREAD_NULL) {
-                return thread;
-            }
-
-            if ((thread == self) || (thread->task != self->task)) {
-                thread_deallocate(thread);
-                thread = THREAD_NULL;
-            }
-        }
-    }
-
-    return thread;
-}
-
 /* This function is called after an assert_wait(), therefore it must not
  * cause another wait until after the thread_run() or thread_block()
  *
+ * The following are the calling conventions for thread ref deallocation:
+ *
+ * 1) If no continuation is provided, then the thread ref is consumed
+ *    (thread_handoff_deallocate convention).
  *
- * When called with a NULL continuation, the thread ref is consumed
- * (thread_handoff_deallocate calling convention) else it is up to the
- * continuation to do the cleanup (thread_handoff_parameter calling convention)
- * and it instead doesn't return.
+ * 2) If a continuation is provided with option THREAD_HANDOFF_SETRUN_NEEDED,
+ *    then the thread ref is always consumed.
+ *
+ * 3) If a continuation is provided with option THREAD_HANDOFF_NONE, then the
+ *    thread ref is not consumed and it is up to the continuation to
+ *    deallocate the thread reference.
  */
 static wait_result_t
 thread_handoff_internal(thread_t thread, thread_continue_t continuation,
-    void *parameter)
+    void *parameter, thread_handoff_option_t option)
 {
-    thread_t deallocate_thread = THREAD_NULL;
     thread_t self = current_thread();
 
     /*
@@ -409,18 +361,19 @@ thread_handoff_internal(thread_t thread, thread_continue_t continuation,
     if (thread != THREAD_NULL) {
         spl_t s = splsched();
 
-        thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
+        thread_t pulled_thread = thread_prepare_for_handoff(thread, option);
 
-        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
-                              thread_tid(thread), thread->state,
-                              pulled_thread ? TRUE : FALSE, 0, 0);
+        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE,
+            thread_tid(thread), thread->state,
+            pulled_thread ? TRUE : FALSE, 0, 0);
 
-        if (pulled_thread != THREAD_NULL) {
-            if (continuation == NULL) {
-                /* We can't be dropping the last ref here */
-                thread_deallocate_safe(thread);
-            }
+        /* Deallocate thread ref if needed */
+        if (continuation == NULL || (option & THREAD_HANDOFF_SETRUN_NEEDED)) {
+            /* Use the safe version of thread deallocate */
+            thread_deallocate_safe(thread);
+        }
 
+        if (pulled_thread != THREAD_NULL) {
             int result = thread_run(self, continuation, parameter, pulled_thread);
 
             splx(s);
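The three conventions above pair with callers as follows. These are alternative, hypothetical kernel-internal call shapes (not code from the diff); `target` holds a +1 thread reference, and my_continuation and arg are assumed to exist:

/* 1) No continuation: the +1 ref on `target` is consumed either way. */
wait_result_t wr = thread_handoff_deallocate(target, THREAD_HANDOFF_NONE);

/* 2) Continuation with THREAD_HANDOFF_SETRUN_NEEDED: the ref is consumed
 *    before the switch, so the continuation must not touch `target`. */
thread_handoff_parameter(target, my_continuation, arg,
    THREAD_HANDOFF_SETRUN_NEEDED);              /* does not return */

/* 3) Continuation with THREAD_HANDOFF_NONE: the ref survives, and
 *    my_continuation must call thread_deallocate(target) itself. */
thread_handoff_parameter(target, my_continuation, arg,
    THREAD_HANDOFF_NONE);                       /* does not return */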
@@ -428,32 +381,25 @@ thread_handoff_internal(thread_t thread, thread_continue_t continuation,
         }
 
         splx(s);
-
-        deallocate_thread = thread;
-        thread = THREAD_NULL;
     }
 
     int result = thread_block_parameter(continuation, parameter);
 
-    if (deallocate_thread != THREAD_NULL) {
-        thread_deallocate(deallocate_thread);
-    }
-
     return result;
 }
 
 void
 thread_handoff_parameter(thread_t thread, thread_continue_t continuation,
-    void *parameter)
+    void *parameter, thread_handoff_option_t option)
 {
-    thread_handoff_internal(thread, continuation, parameter);
+    thread_handoff_internal(thread, continuation, parameter, option);
     panic("NULL continuation passed to %s", __func__);
     __builtin_unreachable();
 }
 
 wait_result_t
-thread_handoff_deallocate(thread_t thread)
+thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option)
 {
-    return thread_handoff_internal(thread, NULL, NULL);
+    return thread_handoff_internal(thread, NULL, NULL, option);
 }
 
 /*
@@ -495,8 +441,9 @@ thread_depress_abstime(uint64_t interval)
         uint64_t deadline;
 
         clock_absolutetime_interval_to_deadline(interval, &deadline);
-        if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
+        if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL)) {
             self->depress_timer_active++;
+        }
     }
 }
 
@@ -518,7 +465,7 @@ thread_depress_ms(mach_msg_timeout_t interval)
  */
 void
 thread_depress_expire(void *p0,
-                      __unused void *p1)
+    __unused void *p1)
 {
     thread_t thread = (thread_t)p0;
 
@@ -529,6 +476,9 @@ thread_depress_expire(void *p0,
 
     if (--thread->depress_timer_active == 0) {
         thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
+        if ((thread->state & TH_RUN) == TH_RUN) {
+            thread->last_basepri_change_time = mach_absolute_time();
+        }
         thread_recompute_sched_pri(thread, SETPRI_DEFAULT);
     }
 
@@ -570,17 +520,22 @@ thread_depress_abort(thread_t thread)
 kern_return_t
 thread_depress_abort_locked(thread_t thread)
 {
-    if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0)
+    if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0) {
         return KERN_NOT_DEPRESSED;
+    }
 
     assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != TH_SFLAG_DEPRESSED_MASK);
 
     thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
+    if ((thread->state & TH_RUN) == TH_RUN) {
+        thread->last_basepri_change_time = mach_absolute_time();
+    }
 
     thread_recompute_sched_pri(thread, SETPRI_LAZY);
 
-    if (timer_call_cancel(&thread->depress_timer))
+    if (timer_call_cancel(&thread->depress_timer)) {
         thread->depress_timer_active--;
+    }
 
     return KERN_SUCCESS;
 }
 
@@ -596,14 +551,15 @@ thread_poll_yield(thread_t self)
     assert(self == current_thread());
     assert((self->sched_flags & TH_SFLAG_DEPRESS) == 0);
 
-    if (self->sched_mode != TH_MODE_FIXED)
+    if (self->sched_mode != TH_MODE_FIXED) {
         return;
+    }
 
     spl_t s = splsched();
 
     uint64_t abstime = mach_absolute_time();
     uint64_t total_computation = abstime -
-        self->computation_epoch + self->computation_metered;
+        self->computation_epoch + self->computation_metered;
 
     if (total_computation >= max_poll_computation) {
         thread_lock(self);
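The depress path above converts a relative interval into an absolute deadline before arming depress_timer. The same arithmetic can be sketched with the user-space clock API, with mach_timebase_info() standing in for the kernel's conversion routine (deadline_after_ms() is an illustrative helper, not part of the diff):

#include <stdint.h>
#include <mach/mach_time.h>

/* Convert a millisecond interval to a mach_absolute_time() deadline,
 * mirroring the thread_depress_ms() -> thread_depress_abstime() path. */
static uint64_t
deadline_after_ms(uint32_t interval_ms)
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);

    /* ms -> ns -> abstime units (abstime = ns * denom / numer) */
    uint64_t ns = (uint64_t)interval_ms * 1000000ull;
    return mach_absolute_time() + (ns * tb.denom / tb.numer);
}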
@@ -612,11 +568,12 @@ thread_poll_yield(thread_t self)
         self->computation_metered = 0;
 
         uint64_t yield_expiration = abstime +
-            (total_computation >> sched_poll_yield_shift);
+            (total_computation >> sched_poll_yield_shift);
 
         if (!timer_call_enter(&self->depress_timer, yield_expiration,
-                              TIMER_CALL_USER_CRITICAL))
+            TIMER_CALL_USER_CRITICAL)) {
             self->depress_timer_active++;
+        }
 
         self->sched_flags |= TH_SFLAG_POLLDEPRESS;
         thread_recompute_sched_pri(self, SETPRI_DEFAULT);
@@ -638,7 +595,7 @@ thread_yield_internal(mach_msg_timeout_t ms)
 
     assert((self->sched_flags & TH_SFLAG_DEPRESSED_MASK) != TH_SFLAG_DEPRESSED_MASK);
 
-    processor_t        myprocessor;
+    processor_t     myprocessor;
 
     disable_preemption();
     myprocessor = current_processor();
@@ -666,8 +623,8 @@ thread_yield_internal(mach_msg_timeout_t ms)
 void
 thread_yield_to_preemption()
 {
-    /* 
-     * ast_pending() should ideally be called with interrupts disabled, but 
+    /*
+     * ast_pending() should ideally be called with interrupts disabled, but
      * the check here is fine because csw_check() will do the right thing.
      */
     ast_t *pending_ast = ast_pending();
@@ -681,15 +638,14 @@ thread_yield_to_preemption()
         p = current_processor();
 
         thread_lock(self);
-        ast = csw_check(p, AST_YIELD);
+        ast = csw_check(self, p, AST_YIELD);
         ast_on(ast);
         thread_unlock(self);
 
         if (ast != AST_NONE) {
-            (void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast); 
+            (void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
         }
 
         splx(s);
     }
 }
-
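To make the poll-yield window concrete: thread_poll_yield() above depresses a fixed-priority thread for total_computation >> sched_poll_yield_shift, i.e. 1/2^shift of the CPU time it just consumed. A standalone sketch of that arithmetic, using an illustrative shift value (the real value is a kernel tunable not shown in this diff):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t total_computation = 32000000;  /* ~32 ms of CPU, in abstime units */
    uint32_t sched_poll_yield_shift = 4;    /* illustrative, not the kernel default */

    /* with shift 4, a 32 ms burn earns a 2 ms depressed window */
    uint64_t window = total_computation >> sched_poll_yield_shift;
    printf("poll-depress window: %llu abstime units\n",
        (unsigned long long)window);
    return 0;
}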