/* bsd/kern/kern_synch.c */
/*
 * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/file_internal.h>
#include <sys/vnode.h>
#include <sys/kernel.h>

#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/locks.h>
#include <kern/policy_internal.h>

#include <vm/vm_kern.h>
#include <mach/time_value.h>

#include <sys/systm.h>			/* for unix_syscall_return() */
#include <libkern/OSAtomic.h>

extern void compute_averunnable(void *);	/* XXX */
62 __attribute__((noreturn
))
64 _sleep_continue( __unused
void *parameter
, wait_result_t wresult
)
66 struct proc
*p
= current_proc();
67 thread_t self
= current_thread();
71 int dropmutex
, spinmutex
;
73 ut
= get_bsdthread_info(self
);
74 catch = ut
->uu_pri
& PCATCH
;
75 dropmutex
= ut
->uu_pri
& PDROP
;
76 spinmutex
= ut
->uu_pri
& PSPIN
;
79 case THREAD_TIMED_OUT
:
84 * Posix implies any signal should be delivered
85 * first, regardless of whether awakened due
90 /* else fall through */
91 case THREAD_INTERRUPTED
:
93 if (thread_should_abort(self
)) {
95 } else if (SHOULDissignal(p
,ut
)) {
96 if ((sig
= CURSIG(p
)) != 0) {
97 if (p
->p_sigacts
->ps_sigintr
& sigmask(sig
))
102 if (thread_should_abort(self
)) {
105 } else if( (ut
->uu_flag
& ( UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
) {
106 /* due to thread cancel */
114 if (error
== EINTR
|| error
== ERESTART
)
115 act_set_astbsd(self
);
117 if (ut
->uu_mtx
&& !dropmutex
) {
119 lck_mtx_lock_spin(ut
->uu_mtx
);
121 lck_mtx_lock(ut
->uu_mtx
);
126 unix_syscall_return((*ut
->uu_continuation
)(error
));
130 * Give up the processor till a wakeup occurs
131 * on chan, at which time the process
132 * enters the scheduling queue at priority pri.
133 * The most important effect of pri is that when
134 * pri<=PZERO a signal cannot disturb the sleep;
135 * if pri>PZERO signals will be processed.
136 * If pri&PCATCH is set, signals will cause sleep
137 * to return 1, rather than longjmp.
138 * Callers of this routine must be prepared for
139 * premature return, and check that the reason for
140 * sleeping has gone away.
142 * if msleep was the entry point, than we have a mutex to deal with
144 * The mutex is unlocked before the caller is blocked, and
145 * relocked before msleep returns unless the priority includes the PDROP
146 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
147 * regardless of whether it actually blocked or not.
156 int (*continuation
)(int),
160 thread_t self
= current_thread();
163 int dropmutex
= pri
& PDROP
;
164 int spinmutex
= pri
& PSPIN
;
168 ut
= get_bsdthread_info(self
);
171 p
->p_priority
= pri
& PRIMASK
;
172 /* It can still block in proc_exit() after the teardown. */
173 if (p
->p_stats
!= NULL
)
174 OSIncrementAtomicLong(&p
->p_stats
->p_ru
.ru_nvcsw
);
177 catch = THREAD_ABORTSAFE
;
179 catch = THREAD_UNINT
;
181 /* set wait message & channel */
183 ut
->uu_wmesg
= wmsg
? wmsg
: "unknown";
185 if (mtx
!= NULL
&& chan
!= NULL
&& (thread_continue_t
)continuation
== THREAD_CONTINUE_NULL
) {
189 flags
= LCK_SLEEP_UNLOCK
;
191 flags
= LCK_SLEEP_DEFAULT
;
194 flags
|= LCK_SLEEP_SPIN
;
197 wait_result
= lck_mtx_sleep_deadline(mtx
, flags
, chan
, catch, abstime
);
199 wait_result
= lck_mtx_sleep(mtx
, flags
, chan
, catch);
203 assert_wait_deadline(chan
, catch, abstime
);
207 if (catch == THREAD_ABORTSAFE
) {
208 if (SHOULDissignal(p
,ut
)) {
209 if ((sig
= CURSIG(p
)) != 0) {
210 if (clear_wait(self
, THREAD_INTERRUPTED
) == KERN_FAILURE
)
212 if (p
->p_sigacts
->ps_sigintr
& sigmask(sig
))
216 if (mtx
&& !dropmutex
) {
218 lck_mtx_lock_spin(mtx
);
225 if (thread_should_abort(self
)) {
226 if (clear_wait(self
, THREAD_INTERRUPTED
) == KERN_FAILURE
)
230 if (mtx
&& !dropmutex
) {
232 lck_mtx_lock_spin(mtx
);
242 if ((thread_continue_t
)continuation
!= THREAD_CONTINUE_NULL
) {
243 ut
->uu_continuation
= continuation
;
245 ut
->uu_timo
= abstime
? 1: 0;
247 (void) thread_block(_sleep_continue
);
251 wait_result
= thread_block(THREAD_CONTINUE_NULL
);
253 if (mtx
&& !dropmutex
) {
255 lck_mtx_lock_spin(mtx
);
261 switch (wait_result
) {
262 case THREAD_TIMED_OUT
:
265 case THREAD_AWAKENED
:
268 * Posix implies any signal should be delivered
269 * first, regardless of whether awakened due
270 * to receiving event.
272 if (catch != THREAD_ABORTSAFE
)
274 /* else fall through */
275 case THREAD_INTERRUPTED
:
276 if (catch == THREAD_ABORTSAFE
) {
277 if (thread_should_abort(self
)) {
279 } else if (SHOULDissignal(p
, ut
)) {
280 if ((sig
= CURSIG(p
)) != 0) {
281 if (p
->p_sigacts
->ps_sigintr
& sigmask(sig
))
286 if (thread_should_abort(self
)) {
289 } else if( (ut
->uu_flag
& ( UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
) {
290 /* due to thread cancel */
298 if (error
== EINTR
|| error
== ERESTART
)
299 act_set_astbsd(self
);
311 return _sleep((caddr_t
)chan
, pri
, (char *)NULL
, 0, (int (*)(int))0, (lck_mtx_t
*)0);
321 int (*continuation
)(int))
323 u_int64_t abstime
= 0;
326 clock_interval_to_deadline(timo
, NSEC_PER_SEC
/ hz
, &abstime
);
328 return _sleep((caddr_t
)chan
, pri
, wmsg
, abstime
, continuation
, mtx
);
339 u_int64_t abstime
= 0;
341 if (ts
&& (ts
->tv_sec
|| ts
->tv_nsec
)) {
342 nanoseconds_to_absolutetime((uint64_t)ts
->tv_sec
* NSEC_PER_SEC
+ ts
->tv_nsec
, &abstime
);
343 clock_absolutetime_interval_to_deadline( abstime
, &abstime
);
346 return _sleep((caddr_t
)chan
, pri
, wmsg
, abstime
, (int (*)(int))0, mtx
);
357 return _sleep((caddr_t
)chan
, pri
, wmsg
, abstime
, (int (*)(int))0, mtx
);
367 u_int64_t abstime
= 0;
370 clock_interval_to_deadline(timo
, NSEC_PER_SEC
/ hz
, &abstime
);
371 return _sleep((caddr_t
)chan
, pri
, wmsg
, abstime
, (int (*)(int))0, (lck_mtx_t
*)0);
380 int (*continuation
)(int))
382 u_int64_t abstime
= 0;
385 clock_interval_to_deadline(timo
, NSEC_PER_SEC
/ hz
, &abstime
);
386 return _sleep((caddr_t
)chan
, pri
, wmsg
, abstime
, continuation
, (lck_mtx_t
*)0);
395 int (*continuation
)(int))
397 return _sleep((caddr_t
)chan
, pri
, wmsg
, abstime
, continuation
, (lck_mtx_t
*)0);
401 * Wake up all processes sleeping on chan.
406 thread_wakeup((caddr_t
)chan
);
410 * Wake up the first process sleeping on chan.
412 * Be very sure that the first process is really
413 * the right one to wakeup.
416 wakeup_one(caddr_t chan
)
418 thread_wakeup_one((caddr_t
)chan
);
422 * Compute the priority of a process when running in user mode.
423 * Arrange to reschedule if the resulting priority is better
424 * than that of the current process.
427 resetpriority(struct proc
*p
)
429 (void)task_importance(p
->task
, -p
->p_nice
);
432 struct loadavg averunnable
=
433 { {0, 0, 0}, FSCALE
}; /* load average, of runnable procs */
435 * Constants for averages over 1, 5, and 15 minutes
436 * when sampling at 5 second intervals.
438 static fixpt_t cexp
[3] = {
439 (fixpt_t
)(0.9200444146293232 * FSCALE
), /* exp(-1/12) */
440 (fixpt_t
)(0.9834714538216174 * FSCALE
), /* exp(-1/60) */
441 (fixpt_t
)(0.9944598480048967 * FSCALE
), /* exp(-1/180) */
445 compute_averunnable(void *arg
)
447 unsigned int nrun
= *(unsigned int *)arg
;
448 struct loadavg
*avg
= &averunnable
;
451 for (i
= 0; i
< 3; i
++)
452 avg
->ldavg
[i
] = (cexp
[i
] * avg
->ldavg
[i
] +
453 nrun
* FSCALE
* (FSCALE
- cexp
[i
])) >> FSHIFT
;