X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/378393581903b274cb7a4d18e0d978071a6b592d..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/bsd/kern/kern_synch.c?ds=sidebyside

diff --git a/bsd/kern/kern_synch.c b/bsd/kern/kern_synch.c
index 9f33c4ce1..019952e73 100644
--- a/bsd/kern/kern_synch.c
+++ b/bsd/kern/kern_synch.c
@@ -1,25 +1,31 @@
 /*
- * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1987 Carnegie-Mellon University
  * All rights reserved.  The CMU software License Agreement specifies
@@ -34,8 +40,6 @@
 #include 
 #include 
-#include 
-
 #include 
 #include 
 #include 

@@ -47,75 +51,82 @@
 #include 
 #include 

-#include 
+#include 
+#include 
+#include     /* for unix_syscall_return() */
+#include 

-#if KTRACE
-#include 
-#include 
-#endif
+extern void compute_averunnable(void *); /* XXX */

+__attribute__((noreturn))
 static void
-_sleep_continue(
-    void *parameter,
-    wait_result_t wresult)
+_sleep_continue( __unused void *parameter, wait_result_t wresult)
 {
-    register struct proc *p = current_proc();
-    register thread_t self = current_thread();
+    struct proc *p = current_proc();
+    thread_t self = current_thread();
     struct uthread * ut;
     int sig, catch;
     int error = 0;
-    int dropmutex;
+    int dropmutex, spinmutex;

     ut = get_bsdthread_info(self);
     catch = ut->uu_pri & PCATCH;
     dropmutex = ut->uu_pri & PDROP;
+    spinmutex = ut->uu_pri & PSPIN;

     switch (wresult) {
-        case THREAD_TIMED_OUT:
-            error = EWOULDBLOCK;
+    case THREAD_TIMED_OUT:
+        error = EWOULDBLOCK;
+        break;
+    case THREAD_AWAKENED:
+        /*
+         * Posix implies any signal should be delivered
+         * first, regardless of whether awakened due
+         * to receiving event.
+         */
+        if (!catch) {
             break;
-        case THREAD_AWAKENED:
-            /*
-             * Posix implies any signal should be delivered
-             * first, regardless of whether awakened due
-             * to receiving event.
-             */
-            if (!catch)
-                break;
-            /* else fall through */
-        case THREAD_INTERRUPTED:
-            if (catch) {
-                if (thread_should_abort(self)) {
-                    error = EINTR;
-                } else if (SHOULDissignal(p,ut)) {
-                    if (sig = CURSIG(p)) {
-                        if (p->p_sigacts->ps_sigintr & sigmask(sig))
-                            error = EINTR;
-                        else
-                            error = ERESTART;
-                    }
-                    if (thread_should_abort(self)) {
+        }
+    /* else fall through */
+    case THREAD_INTERRUPTED:
+        if (catch) {
+            if (thread_should_abort(self)) {
+                error = EINTR;
+            } else if (SHOULDissignal(p, ut)) {
+                if ((sig = CURSIG(p)) != 0) {
+                    if (p->p_sigacts->ps_sigintr & sigmask(sig)) {
                         error = EINTR;
+                    } else {
+                        error = ERESTART;
                     }
-            } else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
-                /* due to thread cancel */
-                error = EINTR;
-            }
-        } else
+                }
+                if (thread_should_abort(self)) {
+                    error = EINTR;
+                }
+            } else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
+                /* due to thread cancel */
                 error = EINTR;
-        break;
+            }
+        } else {
+            error = EINTR;
+        }
+        break;
     }

-    if (error == EINTR || error == ERESTART)
+    if (error == EINTR || error == ERESTART) {
         act_set_astbsd(self);
+    }

-#if KTRACE
-    if (KTRPOINT(p, KTR_CSW))
-        ktrcsw(p->p_tracep, 0, 0);
-#endif
-    if (ut->uu_mtx && !dropmutex)
-        lck_mtx_lock(ut->uu_mtx);
+    if (ut->uu_mtx && !dropmutex) {
+        if (spinmutex) {
+            lck_mtx_lock_spin(ut->uu_mtx);
+        } else {
+            lck_mtx_lock(ut->uu_mtx);
+        }
+    }
+    ut->uu_wchan = NULL;
+    ut->uu_wmesg = NULL;

     unix_syscall_return((*ut->uu_continuation)(error));
 }
@@ -143,185 +154,213 @@ _sleep_continue(

 static int
 _sleep(
-    caddr_t chan,
-    int pri,
-    const char *wmsg,
-    u_int64_t abstime,
-    int (*continuation)(int),
-    lck_mtx_t *mtx)
+    caddr_t chan,
+    int pri,
+    const char *wmsg,
+    u_int64_t abstime,
+    int (*continuation)(int),
+    lck_mtx_t *mtx)
 {
-    register struct proc *p;
-    register thread_t self = current_thread();
+    struct proc *p;
+    thread_t self = current_thread();
     struct uthread * ut;
-    int sig, catch = pri & PCATCH;
+    int sig, catch;
     int dropmutex = pri & PDROP;
+    int spinmutex = pri & PSPIN;
     int wait_result;
     int error = 0;

     ut = get_bsdthread_info(self);

     p = current_proc();
-#if KTRACE
-    if (KTRPOINT(p, KTR_CSW))
-        ktrcsw(p->p_tracep, 1, 0);
-#endif
     p->p_priority = pri & PRIMASK;
-    p->p_stats->p_ru.ru_nvcsw++;
+    /* It can still block in proc_exit() after the teardown. */
+    if (p->p_stats != NULL) {
+        OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw);
+    }
+
+    if (pri & PCATCH) {
+        catch = THREAD_ABORTSAFE;
+    } else {
+        catch = THREAD_UNINT;
+    }
+
+    /* set wait message & channel */
+    ut->uu_wchan = chan;
+    ut->uu_wmesg = wmsg ? wmsg : "unknown";

     if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {
+        int flags;

-        if (abstime)
-            wait_result = lck_mtx_sleep_deadline(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
-                chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
-        else
-            wait_result = lck_mtx_sleep(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
-                chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT);
-    }
-    else {
-        if (chan != NULL)
-            assert_wait_deadline(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
-        if (mtx)
+        if (dropmutex) {
+            flags = LCK_SLEEP_UNLOCK;
+        } else {
+            flags = LCK_SLEEP_DEFAULT;
+        }
+
+        if (spinmutex) {
+            flags |= LCK_SLEEP_SPIN;
+        }
+
+        if (abstime) {
+            wait_result = lck_mtx_sleep_deadline(mtx, flags, chan, catch, abstime);
+        } else {
+            wait_result = lck_mtx_sleep(mtx, flags, chan, catch);
+        }
+    } else {
+        if (chan != NULL) {
+            assert_wait_deadline(chan, catch, abstime);
+        }
+        if (mtx) {
             lck_mtx_unlock(mtx);
-        if (catch) {
-            if (SHOULDissignal(p,ut)) {
-                if (sig = CURSIG(p)) {
-                    if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
-                        goto block;
-                    /* if SIGTTOU or SIGTTIN then block till SIGCONT */
-                    if ((pri & PTTYBLOCK) && ((sig == SIGTTOU) || (sig == SIGTTIN))) {
-                        p->p_flag |= P_TTYSLEEP;
-                        /* reset signal bits */
-                        clear_procsiglist(p, sig);
-                        assert_wait(&p->p_siglist, THREAD_ABORTSAFE);
-                        /* assert wait can block and SIGCONT should be checked */
-                        if (p->p_flag & P_TTYSLEEP) {
-                            thread_block(THREAD_CONTINUE_NULL);
-
-                            if (mtx && !dropmutex)
-                                lck_mtx_lock(mtx);
-                        }
+        }

-                        /* return with success */
-                        error = 0;
-                        goto out;
+        if (catch == THREAD_ABORTSAFE) {
+            if (SHOULDissignal(p, ut)) {
+                if ((sig = CURSIG(p)) != 0) {
+                    if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) {
+                        goto block;
                     }
-                    if (p->p_sigacts->ps_sigintr & sigmask(sig))
+                    if (p->p_sigacts->ps_sigintr & sigmask(sig)) {
                         error = EINTR;
-                    else
+                    } else {
                         error = ERESTART;
-                    if (mtx && !dropmutex)
-                        lck_mtx_lock(mtx);
+                    }
+                    if (mtx && !dropmutex) {
+                        if (spinmutex) {
+                            lck_mtx_lock_spin(mtx);
+                        } else {
+                            lck_mtx_lock(mtx);
+                        }
+                    }
                     goto out;
                 }
             }
             if (thread_should_abort(self)) {
-                if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
+                if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) {
                     goto block;
+                }
                 error = EINTR;

-                if (mtx && !dropmutex)
-                    lck_mtx_lock(mtx);
+                if (mtx && !dropmutex) {
+                    if (spinmutex) {
+                        lck_mtx_lock_spin(mtx);
+                    } else {
+                        lck_mtx_lock(mtx);
+                    }
+                }
                 goto out;
             }
-        }
+        }


 block:
         if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
-            ut->uu_continuation = continuation;
+            ut->uu_continuation = continuation;
             ut->uu_pri = pri;
-            ut->uu_timo = abstime? 1: 0;
             ut->uu_mtx = mtx;
             (void) thread_block(_sleep_continue);
             /* NOTREACHED */
         }
-
+
         wait_result = thread_block(THREAD_CONTINUE_NULL);

-        if (mtx && !dropmutex)
-            lck_mtx_lock(mtx);
+        if (mtx && !dropmutex) {
+            if (spinmutex) {
+                lck_mtx_lock_spin(mtx);
+            } else {
+                lck_mtx_lock(mtx);
+            }
+        }
     }

     switch (wait_result) {
-        case THREAD_TIMED_OUT:
-            error = EWOULDBLOCK;
+    case THREAD_TIMED_OUT:
+        error = EWOULDBLOCK;
+        break;
+    case THREAD_AWAKENED:
+    case THREAD_RESTART:
+        /*
+         * Posix implies any signal should be delivered
+         * first, regardless of whether awakened due
+         * to receiving event.
+         */
+        if (catch != THREAD_ABORTSAFE) {
             break;
-        case THREAD_AWAKENED:
-            /*
-             * Posix implies any signal should be delivered
-             * first, regardless of whether awakened due
-             * to receiving event.
-             */
-            if (!catch)
-                break;
-            /* else fall through */
-        case THREAD_INTERRUPTED:
-            if (catch) {
-                if (thread_should_abort(self)) {
-                    error = EINTR;
-                } else if (SHOULDissignal(p, ut)) {
-                    if (sig = CURSIG(p)) {
-                        if (p->p_sigacts->ps_sigintr & sigmask(sig))
-                            error = EINTR;
-                        else
-                            error = ERESTART;
-                    }
-                    if (thread_should_abort(self)) {
+        }
+    /* else fall through */
+    case THREAD_INTERRUPTED:
+        if (catch == THREAD_ABORTSAFE) {
+            if (thread_should_abort(self)) {
+                error = EINTR;
+            } else if (SHOULDissignal(p, ut)) {
+                if ((sig = CURSIG(p)) != 0) {
+                    if (p->p_sigacts->ps_sigintr & sigmask(sig)) {
                         error = EINTR;
+                    } else {
+                        error = ERESTART;
                     }
                 }
-            } else
+                if (thread_should_abort(self)) {
+                    error = EINTR;
+                }
+            } else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
+                /* due to thread cancel */
                 error = EINTR;
-            break;
+            }
+        } else {
+            error = EINTR;
+        }
+        break;
     }
 out:
-    if (error == EINTR || error == ERESTART)
+    if (error == EINTR || error == ERESTART) {
         act_set_astbsd(self);
+    }
+    ut->uu_wchan = NULL;
+    ut->uu_wmesg = NULL;

-#if KTRACE
-    if (KTRPOINT(p, KTR_CSW))
-        ktrcsw(p->p_tracep, 0, 0);
-#endif
-    return (error);
+    return error;
 }

 int
 sleep(
-    void *chan,
-    int pri)
+    void *chan,
+    int pri)
 {
     return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
 }

 int
 msleep0(
-    void *chan,
-    lck_mtx_t *mtx,
-    int pri,
-    const char *wmsg,
-    int timo,
-    int (*continuation)(int))
+    void *chan,
+    lck_mtx_t *mtx,
+    int pri,
+    const char *wmsg,
+    int timo,
+    int (*continuation)(int))
 {
-    u_int64_t abstime = 0;
+    u_int64_t abstime = 0;

-    if (timo)
+    if (timo) {
         clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
+    }

     return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
 }

 int
 msleep(
-    void *chan,
-    lck_mtx_t *mtx,
-    int pri,
-    const char *wmsg,
-    struct timespec *ts)
+    void *chan,
+    lck_mtx_t *mtx,
+    int pri,
+    const char *wmsg,
+    struct timespec *ts)
 {
-    u_int64_t abstime = 0;
+    u_int64_t abstime = 0;

     if (ts && (ts->tv_sec || ts->tv_nsec)) {
-        nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
+        nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
         clock_absolutetime_interval_to_deadline( abstime, &abstime );
     }
@@ -330,52 +369,54 @@ msleep(

 int
 msleep1(
-    void *chan,
-    lck_mtx_t *mtx,
-    int pri,
-    const char *wmsg,
-    u_int64_t abstime)
+    void *chan,
+    lck_mtx_t *mtx,
+    int pri,
+    const char *wmsg,
+    u_int64_t abstime)
 {
     return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
 }

 int
 tsleep(
-    void *chan,
-    int pri,
-    const char *wmsg,
-    int timo)
+    void *chan,
+    int pri,
+    const char *wmsg,
+    int timo)
 {
-    u_int64_t abstime = 0;
+    u_int64_t abstime = 0;

-    if (timo)
+    if (timo) {
         clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
+    }

     return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
 }

 int
 tsleep0(
-    void *chan,
-    int pri,
-    const char *wmsg,
-    int timo,
-    int (*continuation)(int))
-{
-    u_int64_t abstime = 0;
-
-    if (timo)
+    void *chan,
+    int pri,
+    const char *wmsg,
+    int timo,
+    int (*continuation)(int))
+{
+    u_int64_t abstime = 0;
+
+    if (timo) {
         clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
+    }

     return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
 }

 int
 tsleep1(
-    void *chan,
-    int pri,
-    const char *wmsg,
-    u_int64_t abstime,
-    int (*continuation)(int))
-{
+    void *chan,
+    int pri,
+    const char *wmsg,
+    u_int64_t abstime,
+    int (*continuation)(int))
+{
     return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
 }
@@ -383,10 +424,9 @@ tsleep1(
  * Wake up all processes sleeping on chan.
  */
 void
-wakeup(chan)
-    register void *chan;
+wakeup(void *chan)
 {
-    thread_wakeup_prim((caddr_t)chan, FALSE, THREAD_AWAKENED);
+    thread_wakeup((caddr_t)chan);
 }

 /*
@@ -396,10 +436,9 @@ wakeup(chan)
  * the right one to wakeup.
  */
 void
-wakeup_one(chan)
-    register caddr_t chan;
+wakeup_one(caddr_t chan)
 {
-    thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED);
+    thread_wakeup_one((caddr_t)chan);
 }

 /*
@@ -408,33 +447,32 @@
  * than that of the current process.
  */
 void
-resetpriority(p)
-    register struct proc *p;
+resetpriority(struct proc *p)
 {
     (void)task_importance(p->task, -p->p_nice);
 }

 struct loadavg averunnable =
-    { {0, 0, 0}, FSCALE };      /* load average, of runnable procs */
+{ {0, 0, 0}, FSCALE };          /* load average, of runnable procs */

 /*
  * Constants for averages over 1, 5, and 15 minutes
  * when sampling at 5 second intervals.
  */
 static fixpt_t cexp[3] = {
-    (fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */
-    (fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */
-    (fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */
+    (fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */
+    (fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */
+    (fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */
 };

 void
-compute_averunnable(
-    void *arg)
+compute_averunnable(void *arg)
 {
-    unsigned int nrun = *(unsigned int *)arg;
-    struct loadavg *avg = &averunnable;
-    register int i;
+    unsigned int nrun = *(unsigned int *)arg;
+    struct loadavg *avg = &averunnable;
+    int i;

-    for (i = 0; i < 3; i++)
-        avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
-            nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+    for (i = 0; i < 3; i++) {
+        avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
+            nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+    }
 }
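
Usage sketch (illustrative only, not part of the diff above): the routines touched here implement the BSD msleep()/tsleep()/wakeup() layer on top of Mach wait primitives. The following minimal example shows the usual consumer/producer pattern against the msleep() and wakeup() signatures that appear in this change; the lock, predicate, and wait-message names (my_queue_lock, my_queue_ready, "myqwait") are hypothetical and exist only for the example.

/*
 * Hypothetical example, not taken from kern_synch.c: a waiter sleeps on a
 * channel while msleep() atomically drops and re-takes its mutex; a
 * producer sets the predicate and wakes the channel.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <kern/locks.h>

static lck_mtx_t *my_queue_lock;        /* assumed initialized elsewhere */
static int my_queue_ready;              /* predicate guarded by my_queue_lock */

/* Consumer: block until work is posted, allowing signals to interrupt. */
static int
my_wait_for_work(void)
{
    int error = 0;

    lck_mtx_lock(my_queue_lock);
    while (!my_queue_ready && error == 0) {
        /*
         * PCATCH: a signal ends the wait with EINTR/ERESTART.
         * Without PDROP, msleep() re-locks my_queue_lock before returning.
         * NULL timespec means no timeout.
         */
        error = msleep(&my_queue_ready, my_queue_lock,
            PSOCK | PCATCH, "myqwait", NULL);
    }
    if (error == 0) {
        my_queue_ready = 0;             /* consume the event */
    }
    lck_mtx_unlock(my_queue_lock);
    return error;
}

/* Producer: publish work, then wake every thread sleeping on the channel. */
static void
my_post_work(void)
{
    lck_mtx_lock(my_queue_lock);
    my_queue_ready = 1;
    lck_mtx_unlock(my_queue_lock);
    wakeup(&my_queue_ready);
}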