X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4bd07ac2140668789aa3ee8ec4dde4a3e0a3bba5..HEAD:/osfmk/kern/sync_sema.c diff --git a/osfmk/kern/sync_sema.c b/osfmk/kern/sync_sema.c index 4304559e6..dfa8d1153 100644 --- a/osfmk/kern/sync_sema.c +++ b/osfmk/kern/sync_sema.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2009 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* * File: kern/sync_sema.c @@ -63,89 +63,74 @@ static unsigned int semaphore_event; #define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event) -zone_t semaphore_zone; -unsigned int semaphore_max; +ZONE_DECLARE(semaphore_zone, "semaphores", sizeof(struct semaphore), ZC_NONE); + +os_refgrp_decl(static, sema_refgrp, "semaphore", NULL); /* Forward declarations */ -kern_return_t +kern_return_t semaphore_wait_trap_internal( - mach_port_name_t name, - void (*caller_cont)(kern_return_t)); + mach_port_name_t name, + void (*caller_cont)(kern_return_t)); -kern_return_t +kern_return_t semaphore_wait_signal_trap_internal( - mach_port_name_t wait_name, - mach_port_name_t signal_name, - void (*caller_cont)(kern_return_t)); + mach_port_name_t wait_name, + mach_port_name_t signal_name, + void (*caller_cont)(kern_return_t)); -kern_return_t +kern_return_t semaphore_timedwait_trap_internal( - mach_port_name_t name, - unsigned int sec, - clock_res_t nsec, - void (*caller_cont)(kern_return_t)); + mach_port_name_t name, + unsigned int sec, + clock_res_t nsec, + void (*caller_cont)(kern_return_t)); -kern_return_t +kern_return_t semaphore_timedwait_signal_trap_internal( - mach_port_name_t wait_name, - mach_port_name_t signal_name, - unsigned int sec, - clock_res_t nsec, - void (*caller_cont)(kern_return_t)); + mach_port_name_t wait_name, + mach_port_name_t signal_name, + unsigned int sec, + clock_res_t nsec, + void (*caller_cont)(kern_return_t)); kern_return_t semaphore_signal_internal_trap(mach_port_name_t sema_name); kern_return_t semaphore_signal_internal( - semaphore_t semaphore, - thread_t thread, - int options); + semaphore_t semaphore, + thread_t thread, + int options); kern_return_t semaphore_convert_wait_result( - int wait_result); + int wait_result); void -semaphore_wait_continue(void); +semaphore_wait_continue(void *arg __unused, wait_result_t wr); static kern_return_t semaphore_wait_internal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - uint64_t deadline, - int option, - 
void (*caller_cont)(kern_return_t)); + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + uint64_t deadline, + int option, + void (*caller_cont)(kern_return_t)); static __inline__ uint64_t semaphore_deadline( - unsigned int sec, - clock_res_t nsec) + unsigned int sec, + clock_res_t nsec) { - uint64_t abstime; + uint64_t abstime; - nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime); + nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime); clock_absolutetime_interval_to_deadline(abstime, &abstime); - return (abstime); -} - -/* - * ROUTINE: semaphore_init [private] - * - * Initialize the semaphore mechanisms. - * Right now, we only need to initialize the semaphore zone. - */ -void -semaphore_init(void) -{ - semaphore_zone = zinit(sizeof(struct semaphore), - semaphore_max * sizeof(struct semaphore), - sizeof(struct semaphore), - "semaphores"); - zone_change(semaphore_zone, Z_NOENCRYPT, TRUE); + return abstime; } /* @@ -156,23 +141,24 @@ semaphore_init(void) */ kern_return_t semaphore_create( - task_t task, - semaphore_t *new_semaphore, - int policy, - int value) + task_t task, + semaphore_t *new_semaphore, + int policy, + int value) { - semaphore_t s = SEMAPHORE_NULL; - kern_return_t kret; - + semaphore_t s = SEMAPHORE_NULL; + kern_return_t kret; *new_semaphore = SEMAPHORE_NULL; - if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) + if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX || policy < 0) { return KERN_INVALID_ARGUMENT; + } - s = (semaphore_t) zalloc (semaphore_zone); + s = (semaphore_t) zalloc(semaphore_zone); - if (s == SEMAPHORE_NULL) - return KERN_RESOURCE_SHORTAGE; + if (s == SEMAPHORE_NULL) { + return KERN_RESOURCE_SHORTAGE; + } kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */ if (kret != KERN_SUCCESS) { @@ -183,8 +169,8 @@ semaphore_create( /* * Initialize the semaphore values. */ - s->port = IP_NULL; - s->ref_count = 1; + s->port = IP_NULL; + os_ref_init(&s->ref_count, &sema_refgrp); s->count = value; s->active = TRUE; s->owner = task; @@ -194,6 +180,12 @@ semaphore_create( * the new semaphore to the task's semaphore list. 
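
For orientation, here is a minimal user-space sketch of the interface this file backs, using the public Mach wrappers (semaphore_create(), semaphore_signal(), semaphore_wait(), semaphore_destroy()). It illustrates the calling convention only and is not part of the diff; error handling is reduced to assertions.

	#include <mach/mach.h>
	#include <mach/semaphore.h>
	#include <mach/task.h>
	#include <assert.h>

	int
	main(void)
	{
		semaphore_t sem = MACH_PORT_NULL;
		kern_return_t kr;

		/* Backed by semaphore_create() above: FIFO policy, initial count 0. */
		kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
		assert(kr == KERN_SUCCESS);

		/* No waiter yet, so this pre-posts: the count goes to 1. */
		kr = semaphore_signal(sem);
		assert(kr == KERN_SUCCESS);

		/* Consumes the pre-posted count and returns without blocking. */
		kr = semaphore_wait(sem);
		assert(kr == KERN_SUCCESS);

		/* Tears the semaphore down and unlinks it from the owning task. */
		kr = semaphore_destroy(mach_task_self(), sem);
		assert(kr == KERN_SUCCESS);
		return 0;
	}
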
*/ task_lock(task); + /* Check for race with task_terminate */ + if (!task->active) { + task_unlock(task); + zfree(semaphore_zone, s); + return KERN_INVALID_TASK; + } enqueue_head(&task->semaphore_list, (queue_entry_t) s); task->semaphores_owned++; task_unlock(task); @@ -201,7 +193,7 @@ semaphore_create( *new_semaphore = s; return KERN_SUCCESS; -} +} /* * Routine: semaphore_destroy_internal @@ -218,10 +210,10 @@ semaphore_create( */ static void semaphore_destroy_internal( - task_t task, - semaphore_t semaphore) + task_t task, + semaphore_t semaphore) { - int old_count; + int old_count; /* unlink semaphore from owning task */ assert(semaphore->owner == task); @@ -236,17 +228,17 @@ semaphore_destroy_internal( semaphore->active = FALSE; /* - * Wakeup blocked threads + * Wakeup blocked threads */ old_count = semaphore->count; semaphore->count = 0; if (old_count < 0) { waitq_wakeup64_all_locked(&semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_RESTART, NULL, - WAITQ_ALL_PRIORITIES, - WAITQ_UNLOCK); + SEMAPHORE_EVENT, + THREAD_RESTART, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_UNLOCK); /* waitq/semaphore is unlocked */ } else { semaphore_unlock(semaphore); @@ -261,13 +253,14 @@ semaphore_destroy_internal( */ kern_return_t semaphore_destroy( - task_t task, - semaphore_t semaphore) + task_t task, + semaphore_t semaphore) { spl_t spl_level; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } if (task == TASK_NULL) { semaphore_dereference(semaphore); @@ -280,11 +273,12 @@ semaphore_destroy( if (semaphore->owner != task) { semaphore_unlock(semaphore); + semaphore_dereference(semaphore); splx(spl_level); task_unlock(task); return KERN_INVALID_ARGUMENT; } - + semaphore_destroy_internal(task, semaphore); /* semaphore unlocked */ @@ -304,7 +298,7 @@ semaphore_destroy( void semaphore_destroy_all( - task_t task) + task_t task) { uint32_t count; spl_t spl_level; @@ -316,8 +310,9 @@ semaphore_destroy_all( semaphore = (semaphore_t) queue_first(&task->semaphore_list); - if (count == 0) + if (count == 0) { spl_level = splsched(); + } semaphore_lock(semaphore); semaphore_destroy_internal(task, semaphore); @@ -329,8 +324,9 @@ semaphore_destroy_all( splx(spl_level); } } - if (count != 0) + if (count != 0) { splx(spl_level); + } task_unlock(task); } @@ -338,15 +334,15 @@ semaphore_destroy_all( /* * Routine: semaphore_signal_internal * - * Signals the semaphore as direct. + * Signals the semaphore as direct. * Assumptions: * Semaphore is locked. 
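
The THREAD_RESTART wakeup in semaphore_destroy_internal() is what a blocked caller observes when its semaphore is torn down underneath it. A small user-space sketch of that situation follows; the exact value seen by the waiter (KERN_TERMINATED here) is an assumption based on the restart wakeup above, and the helper names are illustrative.

	#include <mach/mach.h>
	#include <mach/semaphore.h>
	#include <mach/task.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static semaphore_t g_sem;

	/* Blocks in semaphore_wait() until the semaphore is signalled or destroyed. */
	static void *
	waiter(void *arg)
	{
		(void)arg;
		kern_return_t kr = semaphore_wait(g_sem);
		/* Expected to be KERN_TERMINATED once the owner destroys the semaphore. */
		printf("waiter woke with kr=%d\n", kr);
		return NULL;
	}

	int
	main(void)
	{
		pthread_t t;

		semaphore_create(mach_task_self(), &g_sem, SYNC_POLICY_FIFO, 0);
		pthread_create(&t, NULL, waiter, NULL);
		sleep(1);                              /* crude: let the waiter block */

		/* Wakes every waiter via the THREAD_RESTART path shown above. */
		semaphore_destroy(mach_task_self(), g_sem);
		pthread_join(t, NULL);
		return 0;
	}
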
*/ kern_return_t semaphore_signal_internal( - semaphore_t semaphore, - thread_t thread, - int options) + semaphore_t semaphore, + thread_t thread, + int options) { kern_return_t kr; spl_t spl_level; @@ -363,11 +359,11 @@ semaphore_signal_internal( if (thread != THREAD_NULL) { if (semaphore->count < 0) { kr = waitq_wakeup64_thread_locked( - &semaphore->waitq, - SEMAPHORE_EVENT, - thread, - THREAD_AWAKENED, - WAITQ_UNLOCK); + &semaphore->waitq, + SEMAPHORE_EVENT, + thread, + THREAD_AWAKENED, + WAITQ_UNLOCK); /* waitq/semaphore is unlocked */ } else { kr = KERN_NOT_WAITING; @@ -375,7 +371,7 @@ semaphore_signal_internal( } splx(spl_level); return kr; - } + } if (options & SEMAPHORE_SIGNAL_ALL) { int old_count = semaphore->count; @@ -384,29 +380,33 @@ semaphore_signal_internal( if (old_count < 0) { semaphore->count = 0; /* always reset */ kr = waitq_wakeup64_all_locked( - &semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_AWAKENED, NULL, - WAITQ_ALL_PRIORITIES, - WAITQ_UNLOCK); + &semaphore->waitq, + SEMAPHORE_EVENT, + THREAD_AWAKENED, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_UNLOCK); /* waitq / semaphore is unlocked */ } else { - if (options & SEMAPHORE_SIGNAL_PREPOST) + if (options & SEMAPHORE_SIGNAL_PREPOST) { semaphore->count++; + } kr = KERN_SUCCESS; semaphore_unlock(semaphore); } splx(spl_level); return kr; } - + if (semaphore->count < 0) { + waitq_options_t wq_option = (options & SEMAPHORE_THREAD_HANDOFF) ? + WQ_OPTION_HANDOFF : WQ_OPTION_NONE; kr = waitq_wakeup64_one_locked( - &semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_AWAKENED, NULL, - WAITQ_ALL_PRIORITIES, - WAITQ_KEEP_LOCKED); + &semaphore->waitq, + SEMAPHORE_EVENT, + THREAD_AWAKENED, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_KEEP_LOCKED, + wq_option); if (kr == KERN_SUCCESS) { semaphore_unlock(semaphore); splx(spl_level); @@ -435,19 +435,20 @@ semaphore_signal_internal( */ kern_return_t semaphore_signal_thread( - semaphore_t semaphore, - thread_t thread) + semaphore_t semaphore, + thread_t thread) { - kern_return_t ret; + kern_return_t ret; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } ret = semaphore_signal_internal(semaphore, - thread, - SEMAPHORE_OPTION_NONE); + thread, + SEMAPHORE_OPTION_NONE); return ret; -} +} /* * Routine: semaphore_signal_thread_trap @@ -460,27 +461,29 @@ semaphore_signal_thread_trap( { mach_port_name_t sema_name = args->signal_name; mach_port_name_t thread_name = args->thread_name; - semaphore_t semaphore; - thread_t thread; - kern_return_t kr; + semaphore_t semaphore; + thread_t thread; + kern_return_t kr; - /* + /* * MACH_PORT_NULL is not an error. It means that we want to * select any one thread that is already waiting, but not to * pre-post the semaphore. 
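
One detail worth calling out in semaphore_signal_internal(): with no waiters present, SEMAPHORE_SIGNAL_PREPOST bumps the count, but the SEMAPHORE_SIGNAL_ALL path deliberately does not. From user space that difference is visible as in the sketch below; the zero timespec probe relies on the "{0,0} is non-blocking" convention documented further down in this file.

	#include <mach/mach.h>
	#include <mach/semaphore.h>
	#include <mach/task.h>
	#include <stdio.h>

	int
	main(void)
	{
		semaphore_t sem;
		mach_timespec_t poll = { .tv_sec = 0, .tv_nsec = 0 };  /* non-blocking probe */

		semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);

		/* semaphore_signal() with no waiter pre-posts: the next wait succeeds. */
		semaphore_signal(sem);
		printf("after signal:     %d\n", semaphore_timedwait(sem, poll));
		/* expected KERN_SUCCESS: the pre-posted count was consumed */

		/* semaphore_signal_all() with no waiter does not pre-post. */
		semaphore_signal_all(sem);
		printf("after signal_all: %d\n", semaphore_timedwait(sem, poll));
		/* expected KERN_OPERATION_TIMED_OUT: nothing was left behind to take */

		semaphore_destroy(mach_task_self(), sem);
		return 0;
	}
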
*/ if (thread_name != MACH_PORT_NULL) { - thread = port_name_to_thread(thread_name); - if (thread == THREAD_NULL) + thread = port_name_to_thread(thread_name, PORT_TO_THREAD_NONE); + if (thread == THREAD_NULL) { return KERN_INVALID_ARGUMENT; - } else + } + } else { thread = THREAD_NULL; + } kr = port_name_to_semaphore(sema_name, &semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_signal_internal(semaphore, - thread, - SEMAPHORE_OPTION_NONE); + thread, + SEMAPHORE_OPTION_NONE); semaphore_dereference(semaphore); } if (thread != THREAD_NULL) { @@ -504,18 +507,20 @@ semaphore_signal_thread_trap( */ kern_return_t semaphore_signal( - semaphore_t semaphore) + semaphore_t semaphore) { - kern_return_t kr; + kern_return_t kr; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); - if (kr == KERN_NOT_WAITING) + THREAD_NULL, + SEMAPHORE_SIGNAL_PREPOST); + if (kr == KERN_NOT_WAITING) { return KERN_SUCCESS; + } return kr; } @@ -530,23 +535,24 @@ semaphore_signal_trap( { mach_port_name_t sema_name = args->signal_name; - return (semaphore_signal_internal_trap(sema_name)); + return semaphore_signal_internal_trap(sema_name); } kern_return_t semaphore_signal_internal_trap(mach_port_name_t sema_name) { - semaphore_t semaphore; + semaphore_t semaphore; kern_return_t kr; kr = port_name_to_semaphore(sema_name, &semaphore); if (kr == KERN_SUCCESS) { - kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); + kr = semaphore_signal_internal(semaphore, + THREAD_NULL, + SEMAPHORE_SIGNAL_PREPOST); semaphore_dereference(semaphore); - if (kr == KERN_NOT_WAITING) + if (kr == KERN_NOT_WAITING) { kr = KERN_SUCCESS; + } } return kr; } @@ -559,18 +565,20 @@ semaphore_signal_internal_trap(mach_port_name_t sema_name) */ kern_return_t semaphore_signal_all( - semaphore_t semaphore) + semaphore_t semaphore) { kern_return_t kr; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_ALL); - if (kr == KERN_NOT_WAITING) + THREAD_NULL, + SEMAPHORE_SIGNAL_ALL); + if (kr == KERN_NOT_WAITING) { return KERN_SUCCESS; + } return kr; } @@ -584,17 +592,18 @@ semaphore_signal_all_trap( struct semaphore_signal_all_trap_args *args) { mach_port_name_t sema_name = args->signal_name; - semaphore_t semaphore; + semaphore_t semaphore; kern_return_t kr; kr = port_name_to_semaphore(sema_name, &semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_ALL); + THREAD_NULL, + SEMAPHORE_SIGNAL_ALL); semaphore_dereference(semaphore); - if (kr == KERN_NOT_WAITING) + if (kr == KERN_NOT_WAITING) { kr = KERN_SUCCESS; + } } return kr; } @@ -615,7 +624,7 @@ semaphore_convert_wait_result(int wait_result) case THREAD_TIMED_OUT: return KERN_OPERATION_TIMED_OUT; - + case THREAD_INTERRUPTED: return KERN_ABORTED; @@ -635,19 +644,20 @@ semaphore_convert_wait_result(int wait_result) * It returns directly to user space. 
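
semaphore_convert_wait_result() is where scheduler wait results become the KERN_* codes callers actually see: THREAD_TIMED_OUT becomes KERN_OPERATION_TIMED_OUT and THREAD_INTERRUPTED becomes KERN_ABORTED, while a signal that found nobody waiting is folded into KERN_SUCCESS by semaphore_signal() and semaphore_signal_all(). A user-space sketch of the two common outcomes (return values noted in comments are the expected ones, not checked here):

	#include <mach/mach.h>
	#include <mach/semaphore.h>
	#include <mach/task.h>
	#include <stdio.h>

	int
	main(void)
	{
		semaphore_t sem;
		kern_return_t kr;
		mach_timespec_t timeout = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

		semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);

		/* No waiter: the KERN_NOT_WAITING case is collapsed to success. */
		kr = semaphore_signal(sem);
		printf("signal with no waiters: %d\n", kr);    /* KERN_SUCCESS */

		/* That signal pre-posted the count; consume it so the semaphore is empty. */
		(void)semaphore_wait(sem);

		/* Nothing left to take, so this blocks for ~100ms and then returns
		 * the converted THREAD_TIMED_OUT result. */
		kr = semaphore_timedwait(sem, timeout);
		printf("timedwait after ~100ms: %d\n", kr);    /* KERN_OPERATION_TIMED_OUT */

		semaphore_destroy(mach_task_self(), sem);
		return 0;
	}
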
*/ void -semaphore_wait_continue(void) +semaphore_wait_continue(void *arg __unused, wait_result_t wr) { thread_t self = current_thread(); - int wait_result = self->wait_result; void (*caller_cont)(kern_return_t) = self->sth_continuation; assert(self->sth_waitsemaphore != SEMAPHORE_NULL); semaphore_dereference(self->sth_waitsemaphore); - if (self->sth_signalsemaphore != SEMAPHORE_NULL) + if (self->sth_signalsemaphore != SEMAPHORE_NULL) { semaphore_dereference(self->sth_signalsemaphore); + } + assert(self->handoff_thread == THREAD_NULL); assert(caller_cont != (void (*)(kern_return_t))0); - (*caller_cont)(semaphore_convert_wait_result(wait_result)); + (*caller_cont)(semaphore_convert_wait_result(wr)); } /* @@ -663,18 +673,22 @@ semaphore_wait_continue(void) */ static kern_return_t semaphore_wait_internal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - uint64_t deadline, - int option, - void (*caller_cont)(kern_return_t)) + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + uint64_t deadline, + int option, + void (*caller_cont)(kern_return_t)) { - int wait_result; - spl_t spl_level; - kern_return_t kr = KERN_ALREADY_WAITING; + int wait_result; + spl_t spl_level; + kern_return_t kr = KERN_ALREADY_WAITING; spl_level = splsched(); semaphore_lock(wait_semaphore); + thread_t self = current_thread(); + thread_t handoff_thread = THREAD_NULL; + thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE; + int semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST; if (!wait_semaphore->active) { kr = KERN_TERMINATED; @@ -684,18 +698,18 @@ semaphore_wait_internal( } else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) { kr = KERN_OPERATION_TIMED_OUT; } else { - thread_t self = current_thread(); - wait_semaphore->count = -1; /* we don't keep an actual count */ - thread_lock(self); + + thread_set_pending_block_hint(self, kThreadWaitSemaphore); (void)waitq_assert_wait64_locked( - &wait_semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_ABORTSAFE, - TIMEOUT_URGENCY_USER_NORMAL, - deadline, TIMEOUT_NO_LEEWAY, - self); - thread_unlock(self); + &wait_semaphore->waitq, + SEMAPHORE_EVENT, + THREAD_ABORTSAFE, + TIMEOUT_URGENCY_USER_NORMAL, + deadline, TIMEOUT_NO_LEEWAY, + self); + + semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF; } semaphore_unlock(wait_semaphore); splx(spl_level); @@ -713,13 +727,13 @@ semaphore_wait_internal( * our intention to wait above). */ signal_kr = semaphore_signal_internal(signal_semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); + THREAD_NULL, semaphore_signal_options); - if (signal_kr == KERN_NOT_WAITING) + if (signal_kr == KERN_NOT_WAITING) { + assert(self->handoff_thread == THREAD_NULL); signal_kr = KERN_SUCCESS; - else if (signal_kr == KERN_TERMINATED) { - /* + } else if (signal_kr == KERN_TERMINATED) { + /* * Uh!Oh! The semaphore we were to signal died. * We have to get ourselves out of the wait in * case we get stuck here forever (it is assumed @@ -732,42 +746,49 @@ semaphore_wait_internal( * (most important) result. Otherwise, * return the KERN_TERMINATED status. */ - thread_t self = current_thread(); - + assert(self->handoff_thread == THREAD_NULL); clear_wait(self, THREAD_INTERRUPTED); kr = semaphore_convert_wait_result(self->wait_result); - if (kr == KERN_ABORTED) + if (kr == KERN_ABORTED) { kr = KERN_TERMINATED; + } } } - + /* * If we had an error, or we didn't really need to wait we can * return now that we have signalled the signal semaphore. 
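
The wait-before-signal ordering above (assert the wait on one semaphore, then signal the other) is what makes the combined operation usable as an atomic handoff between two threads, and it is the path the new SEMAPHORE_THREAD_HANDOFF option optimizes. Below is a user-space sketch of that pattern; it assumes the semaphore_wait_signal() wrapper declared in <mach/semaphore.h>, and the iteration count is arbitrary.

	#include <mach/mach.h>
	#include <mach/semaphore.h>
	#include <mach/task.h>
	#include <pthread.h>
	#include <stdio.h>

	static semaphore_t ping, pong;

	/* Waits on `pong` and signals `ping` in a single trap, so there is no window
	 * in which this thread has signalled but not yet registered its own wait. */
	static void *
	partner(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 5; i++) {
			semaphore_wait_signal(pong, ping);
			printf("partner: turn %d\n", i);
		}
		return NULL;
	}

	int
	main(void)
	{
		pthread_t t;

		semaphore_create(mach_task_self(), &ping, SYNC_POLICY_FIFO, 0);
		semaphore_create(mach_task_self(), &pong, SYNC_POLICY_FIFO, 0);
		pthread_create(&t, NULL, partner, NULL);

		for (int i = 0; i < 5; i++) {
			/* Hand the turn to the partner, then wait for it to hand it back. */
			semaphore_wait_signal(ping, pong);
			printf("main: turn %d\n", i);
		}
		pthread_join(t, NULL);
		semaphore_destroy(mach_task_self(), ping);
		semaphore_destroy(mach_task_self(), pong);
		return 0;
	}
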
*/ - if (kr != KERN_ALREADY_WAITING) + if (kr != KERN_ALREADY_WAITING) { + assert(self->handoff_thread == THREAD_NULL); return kr; + } + if (self->handoff_thread) { + handoff_thread = self->handoff_thread; + self->handoff_thread = THREAD_NULL; + handoff_option = THREAD_HANDOFF_SETRUN_NEEDED; + } /* * Now, we can block. If the caller supplied a continuation * pointer of his own for after the block, block with the - * appropriate semaphore continuation. Thiswill gather the + * appropriate semaphore continuation. This will gather the * semaphore results, release references on the semaphore(s), * and then call the caller's continuation. */ if (caller_cont) { - thread_t self = current_thread(); - self->sth_continuation = caller_cont; self->sth_waitsemaphore = wait_semaphore; self->sth_signalsemaphore = signal_semaphore; - wait_result = thread_block((thread_continue_t)semaphore_wait_continue); - } - else { - wait_result = thread_block(THREAD_CONTINUE_NULL); + + thread_handoff_parameter(handoff_thread, semaphore_wait_continue, + NULL, handoff_option); + } else { + wait_result = thread_handoff_deallocate(handoff_thread, handoff_option); } - return (semaphore_convert_wait_result(wait_result)); + assert(self->handoff_thread == THREAD_NULL); + return semaphore_convert_wait_result(wait_result); } @@ -775,49 +796,49 @@ semaphore_wait_internal( * Routine: semaphore_wait * * Traditional (non-continuation) interface presented to - * in-kernel clients to wait on a semaphore. + * in-kernel clients to wait on a semaphore. */ kern_return_t semaphore_wait( - semaphore_t semaphore) -{ - - if (semaphore == SEMAPHORE_NULL) + semaphore_t semaphore) +{ + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } - return(semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - 0ULL, SEMAPHORE_OPTION_NONE, - (void (*)(kern_return_t))0)); + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + 0ULL, SEMAPHORE_OPTION_NONE, + (void (*)(kern_return_t))0); } kern_return_t semaphore_wait_noblock( - semaphore_t semaphore) -{ - - if (semaphore == SEMAPHORE_NULL) + semaphore_t semaphore) +{ + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } - return(semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - 0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, - (void (*)(kern_return_t))0)); + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + 0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, + (void (*)(kern_return_t))0); } kern_return_t semaphore_wait_deadline( - semaphore_t semaphore, - uint64_t deadline) -{ - - if (semaphore == SEMAPHORE_NULL) + semaphore_t semaphore, + uint64_t deadline) +{ + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } - return(semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - deadline, SEMAPHORE_OPTION_NONE, - (void (*)(kern_return_t))0)); + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + deadline, SEMAPHORE_OPTION_NONE, + (void (*)(kern_return_t))0); } /* @@ -831,25 +852,25 @@ kern_return_t semaphore_wait_trap( struct semaphore_wait_trap_args *args) { - return(semaphore_wait_trap_internal(args->wait_name, thread_syscall_return)); + return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return); } kern_return_t semaphore_wait_trap_internal( - mach_port_name_t name, + mach_port_name_t name, void (*caller_cont)(kern_return_t)) -{ - semaphore_t semaphore; +{ + semaphore_t semaphore; kern_return_t kr; kr = port_name_to_semaphore(name, &semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - 0ULL, 
SEMAPHORE_OPTION_NONE, - caller_cont); + SEMAPHORE_NULL, + 0ULL, SEMAPHORE_OPTION_NONE, + caller_cont); semaphore_dereference(semaphore); } return kr; @@ -859,34 +880,36 @@ semaphore_wait_trap_internal( * Routine: semaphore_timedwait * * Traditional (non-continuation) interface presented to - * in-kernel clients to wait on a semaphore with a timeout. + * in-kernel clients to wait on a semaphore with a timeout. * * A timeout of {0,0} is considered non-blocking. */ kern_return_t semaphore_timedwait( - semaphore_t semaphore, - mach_timespec_t wait_time) + semaphore_t semaphore, + mach_timespec_t wait_time) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; - - if(BAD_MACH_TIMESPEC(&wait_time)) + } + + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; + } - if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) + if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec); - - return (semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - deadline, option, - (void(*)(kern_return_t))0)); - + } + + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + deadline, option, + (void (*)(kern_return_t))0); } /* @@ -903,9 +926,8 @@ semaphore_timedwait( kern_return_t semaphore_timedwait_trap( struct semaphore_timedwait_trap_args *args) -{ - - return(semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return)); +{ + return semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return); } @@ -922,23 +944,25 @@ semaphore_timedwait_trap_internal( wait_time.tv_sec = sec; wait_time.tv_nsec = nsec; - if(BAD_MACH_TIMESPEC(&wait_time)) + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; - + } + kr = port_name_to_semaphore(name, &semaphore); if (kr == KERN_SUCCESS) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (sec == 0 && nsec == 0) + if (sec == 0 && nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(sec, nsec); + } kr = semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - deadline, option, - caller_cont); + SEMAPHORE_NULL, + deadline, option, + caller_cont); semaphore_dereference(semaphore); } return kr; @@ -954,29 +978,30 @@ semaphore_timedwait_trap_internal( */ kern_return_t semaphore_wait_signal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore) + semaphore_t wait_semaphore, + semaphore_t signal_semaphore) { - if (wait_semaphore == SEMAPHORE_NULL) + if (wait_semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; - - return(semaphore_wait_internal(wait_semaphore, - signal_semaphore, - 0ULL, SEMAPHORE_OPTION_NONE, - (void(*)(kern_return_t))0)); + } + + return semaphore_wait_internal(wait_semaphore, + signal_semaphore, + 0ULL, SEMAPHORE_OPTION_NONE, + (void (*)(kern_return_t))0); } /* * Trap: semaphore_wait_signal_trap * * Atomically register a wait on a semaphore and THEN signal - * another. This is the trap version from user space. + * another. This is the trap version from user space. 
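
semaphore_timedwait() above treats a {0,0} timespec as non-blocking and otherwise folds the relative {sec, nsec} pair into an absolute deadline via semaphore_deadline() (nanoseconds_to_absolutetime() plus clock_absolutetime_interval_to_deadline()). For readers who want to reason about the deadline a given timespec produces, here is a rough user-space analogue of that conversion; mach_timebase_info() and mach_absolute_time() stand in for the kernel-only clock routines.

	#include <mach/mach_time.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Rough user-space analogue of semaphore_deadline(): convert a relative
	 * (sec, nsec) interval to an absolute mach_absolute_time() deadline. */
	static uint64_t
	deadline_from_interval(unsigned int sec, int nsec)
	{
		mach_timebase_info_data_t tb;
		uint64_t ns, ticks;

		mach_timebase_info(&tb);
		ns = (uint64_t)sec * 1000000000ULL + (uint64_t)nsec;
		ticks = ns * tb.denom / tb.numer;    /* nanoseconds -> abstime units */
		return mach_absolute_time() + ticks; /* interval -> absolute deadline */
	}

	int
	main(void)
	{
		printf("deadline 1.5s from now: %llu\n",
		    (unsigned long long)deadline_from_interval(1, 500000000));
		return 0;
	}
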
*/ kern_return_t semaphore_wait_signal_trap( struct semaphore_wait_signal_trap_args *args) { - return(semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return)); + return semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return); } kern_return_t @@ -994,9 +1019,9 @@ semaphore_wait_signal_trap_internal( kr = port_name_to_semaphore(wait_name, &wait_semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_wait_internal(wait_semaphore, - signal_semaphore, - 0ULL, SEMAPHORE_OPTION_NONE, - caller_cont); + signal_semaphore, + 0ULL, SEMAPHORE_OPTION_NONE, + caller_cont); semaphore_dereference(wait_semaphore); } semaphore_dereference(signal_semaphore); @@ -1016,41 +1041,44 @@ semaphore_wait_signal_trap_internal( */ kern_return_t semaphore_timedwait_signal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - mach_timespec_t wait_time) + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + mach_timespec_t wait_time) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (wait_semaphore == SEMAPHORE_NULL) + if (wait_semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; - - if(BAD_MACH_TIMESPEC(&wait_time)) + } + + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; + } - if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) + if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec); - - return(semaphore_wait_internal(wait_semaphore, - signal_semaphore, - deadline, option, - (void(*)(kern_return_t))0)); + } + + return semaphore_wait_internal(wait_semaphore, + signal_semaphore, + deadline, option, + (void (*)(kern_return_t))0); } /* * Trap: semaphore_timedwait_signal_trap * * Atomically register a timed wait on a semaphore and THEN signal - * another. This is the trap version from user space. + * another. This is the trap version from user space. 
*/ kern_return_t semaphore_timedwait_signal_trap( struct semaphore_timedwait_signal_trap_args *args) { - return(semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return)); + return semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return); } kern_return_t @@ -1068,25 +1096,27 @@ semaphore_timedwait_signal_trap_internal( wait_time.tv_sec = sec; wait_time.tv_nsec = nsec; - if(BAD_MACH_TIMESPEC(&wait_time)) + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; - + } + kr = port_name_to_semaphore(signal_name, &signal_semaphore); if (kr == KERN_SUCCESS) { kr = port_name_to_semaphore(wait_name, &wait_semaphore); if (kr == KERN_SUCCESS) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (sec == 0 && nsec == 0) + if (sec == 0 && nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(sec, nsec); + } kr = semaphore_wait_internal(wait_semaphore, - signal_semaphore, - deadline, option, - caller_cont); + signal_semaphore, + deadline, option, + caller_cont); semaphore_dereference(wait_semaphore); } semaphore_dereference(signal_semaphore); @@ -1103,9 +1133,9 @@ semaphore_timedwait_signal_trap_internal( */ void semaphore_reference( - semaphore_t semaphore) + semaphore_t semaphore) { - (void)hw_atomic_add(&semaphore->ref_count, 1); + os_ref_retain(&semaphore->ref_count); } /* @@ -1116,16 +1146,18 @@ semaphore_reference( */ void semaphore_dereference( - semaphore_t semaphore) + semaphore_t semaphore) { uint32_t collisions; spl_t spl_level; - if (semaphore == NULL) + if (semaphore == NULL) { return; + } - if (hw_atomic_sub(&semaphore->ref_count, 1) != 0) + if (os_ref_release(&semaphore->ref_count) > 0) { return; + } /* * Last ref, clean up the port [if any] @@ -1150,7 +1182,7 @@ semaphore_dereference( task_t task = semaphore->owner; assert(task != TASK_NULL); - + if (task_lock_try(task)) { semaphore_destroy_internal(task, semaphore); /* semaphore unlocked */ @@ -1158,7 +1190,7 @@ semaphore_dereference( task_unlock(task); goto out; } - + /* failed to get out-of-order locks */ semaphore_unlock(semaphore); splx(spl_level); @@ -1169,8 +1201,21 @@ semaphore_dereference( semaphore_unlock(semaphore); splx(spl_level); - out: +out: zfree(semaphore_zone, semaphore); } +#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq))) +void +kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo) +{ + semaphore_t sem = WAITQ_TO_SEMA(waitq); + assert(event == SEMAPHORE_EVENT); + + zone_require(semaphore_zone, sem); + waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port); + if (sem->owner) { + waitinfo->owner = pid_from_task(sem->owner); + } +}
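
The WAITQ_TO_SEMA() macro added for kdp_sema_find_owner() is the usual offsetof-based "container of" recovery: given a pointer to the embedded waitq, step back to the enclosing struct semaphore. Below is a standalone illustration of the same pattern with a hypothetical structure; the names are illustrative, not from XNU.

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical stand-ins for struct waitq / struct semaphore. */
	struct fake_waitq { int state; };

	struct fake_semaphore {
		int               count;
		struct fake_waitq waitq;   /* embedded, like waitq in struct semaphore */
		int               active;
	};

	/* Same shape as WAITQ_TO_SEMA(): subtract the member offset from the
	 * member's address to recover the enclosing structure. */
	#define WAITQ_TO_FAKE_SEMA(wq) \
		((struct fake_semaphore *)((uintptr_t)(wq) - offsetof(struct fake_semaphore, waitq)))

	int
	main(void)
	{
		struct fake_semaphore s = { .count = 3, .active = 1 };
		struct fake_waitq *wq = &s.waitq;

		assert(WAITQ_TO_FAKE_SEMA(wq) == &s);  /* recovers the container */
		return 0;
	}
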