1 /*
2 * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1987 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc_internal.h>
38 #include <sys/user.h>
39 #include <sys/file_internal.h>
40 #include <sys/vnode.h>
41 #include <sys/kernel.h>
42
43 #include <kern/queue.h>
44 #include <sys/lock.h>
45 #include <kern/thread.h>
46 #include <kern/sched_prim.h>
47 #include <kern/ast.h>
48
49 #include <kern/cpu_number.h>
50 #include <vm/vm_kern.h>
51
52 #include <kern/task.h>
53 #include <mach/time_value.h>
54 #include <kern/locks.h>
55 #include <kern/policy_internal.h>
56
57 #include <sys/systm.h> /* for unix_syscall_return() */
58 #include <libkern/OSAtomic.h>
59
60 extern void compute_averunnable(void *); /* XXX */
61
/*
 * Continuation invoked after a thread that slept via _sleep() with a
 * continuation is unblocked.  The original kernel stack is gone, so the
 * sleep disposition (uu_pri flags, mutex, continuation pointer) was parked
 * in the uthread by _sleep() before blocking.  Recomputes the error code
 * from the wait result, reacquires the caller's mutex unless PDROP was
 * set, and exits to user space through the saved continuation — never
 * returns to its caller.
 */
__attribute__((noreturn))
static void
_sleep_continue( __unused void *parameter, wait_result_t wresult)
{
	struct proc *p = current_proc();
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int error = 0;
	int dropmutex, spinmutex;

	ut = get_bsdthread_info(self);
	/* recover the sleep flags _sleep() stashed before blocking */
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;
	spinmutex = ut->uu_pri & PSPIN;

	switch (wresult) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (!catch) {
			break;
		}
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					/* ps_sigintr selects signals that interrupt
					 * (EINTR) rather than restart (ERESTART) */
					if (p->p_sigacts->ps_sigintr & sigmask(sig)) {
						error = EINTR;
					} else {
						error = ERESTART;
					}
				}
				/* an abort may have arrived while examining signals */
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			} else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				/* due to thread cancel */
				error = EINTR;
			}
		} else {
			error = EINTR;
		}
		break;
	}

	/* post a BSD AST so the signal/restart is processed on the way out */
	if (error == EINTR || error == ERESTART) {
		act_set_astbsd(self);
	}

	/* reacquire the caller's mutex unless PDROP was requested */
	if (ut->uu_mtx && !dropmutex) {
		if (spinmutex) {
			lck_mtx_lock_spin(ut->uu_mtx);
		} else {
			lck_mtx_lock(ut->uu_mtx);
		}
	}
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	/* does not return: re-enters user mode via the saved continuation */
	unix_syscall_return((*ut->uu_continuation)(error));
}
133
134 /*
135 * Give up the processor till a wakeup occurs
136 * on chan, at which time the process
137 * enters the scheduling queue at priority pri.
138 * The most important effect of pri is that when
139 * pri<=PZERO a signal cannot disturb the sleep;
140 * if pri>PZERO signals will be processed.
141 * If pri&PCATCH is set, signals will cause sleep
142 * to return 1, rather than longjmp.
143 * Callers of this routine must be prepared for
144 * premature return, and check that the reason for
145 * sleeping has gone away.
146 *
147 * if msleep was the entry point, than we have a mutex to deal with
148 *
149 * The mutex is unlocked before the caller is blocked, and
150 * relocked before msleep returns unless the priority includes the PDROP
151 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
152 * regardless of whether it actually blocked or not.
153 */
154
static int
_sleep(
	caddr_t chan,           /* wait channel (event address); may be NULL */
	int pri,                /* priority | PCATCH/PDROP/PSPIN flags */
	const char *wmsg,       /* wait message for debugging tools */
	u_int64_t abstime,      /* absolute deadline; 0 == no timeout */
	int (*continuation)(int),       /* optional continuation (stackless sleep) */
	lck_mtx_t *mtx)         /* optional mutex to drop while asleep */
{
	struct proc *p;
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int dropmutex = pri & PDROP;
	int spinmutex = pri & PSPIN;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
	p->p_priority = pri & PRIMASK;
	/* It can still block in proc_exit() after the teardown. */
	if (p->p_stats != NULL) {
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw);
	}

	/* PCATCH => interruptible (abort-safe) wait; otherwise uninterruptible */
	if (pri & PCATCH) {
		catch = THREAD_ABORTSAFE;
	} else {
		catch = THREAD_UNINT;
	}

	/* set wait message & channel */
	ut->uu_wchan = chan;
	ut->uu_wmesg = wmsg ? wmsg : "unknown";

	/*
	 * Fast path: with a mutex, a channel, and no continuation, let the
	 * lock layer do the unlock/sleep/relock sequence itself.
	 */
	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {
		int flags;

		if (dropmutex) {
			flags = LCK_SLEEP_UNLOCK;
		} else {
			flags = LCK_SLEEP_DEFAULT;
		}

		if (spinmutex) {
			flags |= LCK_SLEEP_SPIN;
		}

		if (abstime) {
			wait_result = lck_mtx_sleep_deadline(mtx, flags, chan, catch, abstime);
		} else {
			wait_result = lck_mtx_sleep(mtx, flags, chan, catch);
		}
	} else {
		/* Manual path: assert the wait, then drop the mutex ourselves. */
		if (chan != NULL) {
			assert_wait_deadline(chan, catch, abstime);
		}
		if (mtx) {
			lck_mtx_unlock(mtx);
		}

		/*
		 * For interruptible sleeps, check for already-pending signals or
		 * an abort before committing to the block; if the wait can still
		 * be cleared, bail out early with EINTR/ERESTART.
		 */
		if (catch == THREAD_ABORTSAFE) {
			if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					/* KERN_FAILURE: someone already woke us — must block */
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) {
						goto block;
					}
					if (p->p_sigacts->ps_sigintr & sigmask(sig)) {
						error = EINTR;
					} else {
						error = ERESTART;
					}
					if (mtx && !dropmutex) {
						if (spinmutex) {
							lck_mtx_lock_spin(mtx);
						} else {
							lck_mtx_lock(mtx);
						}
					}
					goto out;
				}
			}
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) {
					goto block;
				}
				error = EINTR;

				if (mtx && !dropmutex) {
					if (spinmutex) {
						lck_mtx_lock_spin(mtx);
					} else {
						lck_mtx_lock(mtx);
					}
				}
				goto out;
			}
		}


block:
		/*
		 * With a continuation, park the sleep state in the uthread and
		 * block through _sleep_continue; control never comes back here.
		 */
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime? 1: 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (mtx && !dropmutex) {
			if (spinmutex) {
				lck_mtx_lock_spin(mtx);
			} else {
				lck_mtx_lock(mtx);
			}
		}
	}

	/* translate the Mach wait result into a BSD errno */
	switch (wait_result) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
	case THREAD_RESTART:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (catch != THREAD_ABORTSAFE) {
			break;
		}
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch == THREAD_ABORTSAFE) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					/* ps_sigintr: interrupt vs. restart the syscall */
					if (p->p_sigacts->ps_sigintr & sigmask(sig)) {
						error = EINTR;
					} else {
						error = ERESTART;
					}
				}
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			} else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				/* due to thread cancel */
				error = EINTR;
			}
		} else {
			error = EINTR;
		}
		break;
	}
out:
	/* post an AST so signal delivery / syscall restart happens on exit */
	if (error == EINTR || error == ERESTART) {
		act_set_astbsd(self);
	}
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	return error;
}
326
327 int
328 sleep(
329 void *chan,
330 int pri)
331 {
332 return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
333 }
334
335 int
336 msleep0(
337 void *chan,
338 lck_mtx_t *mtx,
339 int pri,
340 const char *wmsg,
341 int timo,
342 int (*continuation)(int))
343 {
344 u_int64_t abstime = 0;
345
346 if (timo) {
347 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
348 }
349
350 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
351 }
352
353 int
354 msleep(
355 void *chan,
356 lck_mtx_t *mtx,
357 int pri,
358 const char *wmsg,
359 struct timespec *ts)
360 {
361 u_int64_t abstime = 0;
362
363 if (ts && (ts->tv_sec || ts->tv_nsec)) {
364 nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
365 clock_absolutetime_interval_to_deadline( abstime, &abstime );
366 }
367
368 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
369 }
370
371 int
372 msleep1(
373 void *chan,
374 lck_mtx_t *mtx,
375 int pri,
376 const char *wmsg,
377 u_int64_t abstime)
378 {
379 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
380 }
381
382 int
383 tsleep(
384 void *chan,
385 int pri,
386 const char *wmsg,
387 int timo)
388 {
389 u_int64_t abstime = 0;
390
391 if (timo) {
392 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
393 }
394 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
395 }
396
397 int
398 tsleep0(
399 void *chan,
400 int pri,
401 const char *wmsg,
402 int timo,
403 int (*continuation)(int))
404 {
405 u_int64_t abstime = 0;
406
407 if (timo) {
408 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
409 }
410 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
411 }
412
413 int
414 tsleep1(
415 void *chan,
416 int pri,
417 const char *wmsg,
418 u_int64_t abstime,
419 int (*continuation)(int))
420 {
421 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
422 }
423
424 /*
425 * Wake up all processes sleeping on chan.
426 */
427 void
428 wakeup(void *chan)
429 {
430 thread_wakeup((caddr_t)chan);
431 }
432
433 /*
434 * Wake up the first process sleeping on chan.
435 *
436 * Be very sure that the first process is really
437 * the right one to wakeup.
438 */
439 void
440 wakeup_one(caddr_t chan)
441 {
442 thread_wakeup_one((caddr_t)chan);
443 }
444
445 /*
446 * Compute the priority of a process when running in user mode.
447 * Arrange to reschedule if the resulting priority is better
448 * than that of the current process.
449 */
450 void
451 resetpriority(struct proc *p)
452 {
453 (void)task_importance(p->task, -p->p_nice);
454 }
455
/* System-wide load average (fixed point, scale factor FSCALE),
 * updated periodically by compute_averunnable(). */
struct loadavg averunnable =
{ {0, 0, 0}, FSCALE };          /* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */
	(fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */
	(fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */
};
467
468 void
469 compute_averunnable(void *arg)
470 {
471 unsigned int nrun = *(unsigned int *)arg;
472 struct loadavg *avg = &averunnable;
473 int i;
474
475 for (i = 0; i < 3; i++) {
476 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
477 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
478 }
479 }