xnu-1504.15.3 [apple/xnu.git] / bsd/kern/kern_synch.c
/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved. The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/file_internal.h>
#include <sys/vnode.h>
#include <sys/kernel.h>

#include <machine/spl.h>

#include <kern/queue.h>
#include <sys/lock.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/ast.h>

#include <kern/cpu_number.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <mach/time_value.h>
#include <kern/lock.h>

#include <sys/systm.h>          /* for unix_syscall_return() */
#include <libkern/OSAtomic.h>

extern boolean_t thread_should_abort(thread_t);  /* XXX */
extern void compute_averunnable(void *);         /* XXX */

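/*
 * Continuation routine used by _sleep() when the caller supplies a
 * continuation: it runs after the blocked thread resumes, recovers the
 * sleep parameters stashed in the uthread (the uu_pri flags and uu_mtx),
 * translates the wait result into a BSD errno, re-takes the mutex unless
 * PDROP was given, clears the wait channel and message, and returns to
 * user space by passing the errno to the caller's continuation.
 */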
static void
_sleep_continue( __unused void *parameter, wait_result_t wresult)
{
    struct proc *p = current_proc();
    thread_t self = current_thread();
    struct uthread *ut;
    int sig, catch;
    int error = 0;
    int dropmutex, spinmutex;

    ut = get_bsdthread_info(self);
    catch = ut->uu_pri & PCATCH;
    dropmutex = ut->uu_pri & PDROP;
    spinmutex = ut->uu_pri & PSPIN;

    switch (wresult) {
    case THREAD_TIMED_OUT:
        error = EWOULDBLOCK;
        break;
    case THREAD_AWAKENED:
        /*
         * Posix implies any signal should be delivered
         * first, regardless of whether awakened due
         * to receiving event.
         */
        if (!catch)
            break;
        /* else fall through */
    case THREAD_INTERRUPTED:
        if (catch) {
            if (thread_should_abort(self)) {
                error = EINTR;
            } else if (SHOULDissignal(p, ut)) {
                if ((sig = CURSIG(p)) != 0) {
                    if (p->p_sigacts->ps_sigintr & sigmask(sig))
                        error = EINTR;
                    else
                        error = ERESTART;
                }
                if (thread_should_abort(self)) {
                    error = EINTR;
                }
            } else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
                /* due to thread cancel */
                error = EINTR;
            }
        } else
            error = EINTR;
        break;
    }

    if (error == EINTR || error == ERESTART)
        act_set_astbsd(self);

    if (ut->uu_mtx && !dropmutex) {
        if (spinmutex)
            lck_mtx_lock_spin(ut->uu_mtx);
        else
            lck_mtx_lock(ut->uu_mtx);
    }
    ut->uu_wchan = NULL;
    ut->uu_wmesg = NULL;

    unix_syscall_return((*ut->uu_continuation)(error));
}

/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * If pri&PCATCH is set, signals will cause sleep
 * to return 1, rather than longjmp.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 *
 * If msleep was the entry point, then we have a mutex to deal with.
 *
 * The mutex is unlocked before the caller is blocked, and
 * relocked before msleep returns unless the priority includes the PDROP
 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
 * regardless of whether it actually blocked or not.
 *
 * (An illustrative usage sketch follows msleep() below.)
 */

static int
_sleep(
    caddr_t chan,
    int pri,
    const char *wmsg,
    u_int64_t abstime,
    int (*continuation)(int),
    lck_mtx_t *mtx)
{
    struct proc *p;
    thread_t self = current_thread();
    struct uthread *ut;
    int sig, catch = pri & PCATCH;
    int dropmutex = pri & PDROP;
    int spinmutex = pri & PSPIN;
    int wait_result;
    int error = 0;

    ut = get_bsdthread_info(self);

    p = current_proc();
    p->p_priority = pri & PRIMASK;
    /* It can still block in proc_exit() after the teardown. */
    if (p->p_stats != NULL)
        OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw);

    /* set wait message & channel */
    ut->uu_wchan = chan;
    ut->uu_wmesg = wmsg ? wmsg : "unknown";

    if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {

        if (abstime)
            wait_result = lck_mtx_sleep_deadline(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
                chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
        else
            wait_result = lck_mtx_sleep(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
                chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT);
    }
    else {
        if (chan != NULL)
            assert_wait_deadline(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
        if (mtx)
            lck_mtx_unlock(mtx);
        if (catch) {
            if (SHOULDissignal(p, ut)) {
                if ((sig = CURSIG(p)) != 0) {
                    if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
                        goto block;
                    if (p->p_sigacts->ps_sigintr & sigmask(sig))
                        error = EINTR;
                    else
                        error = ERESTART;
                    if (mtx && !dropmutex) {
                        if (spinmutex)
                            lck_mtx_lock_spin(mtx);
                        else
                            lck_mtx_lock(mtx);
                    }
                    goto out;
                }
            }
            if (thread_should_abort(self)) {
                if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
                    goto block;
                error = EINTR;

                if (mtx && !dropmutex) {
                    if (spinmutex)
                        lck_mtx_lock_spin(mtx);
                    else
                        lck_mtx_lock(mtx);
                }
                goto out;
            }
        }

block:
        if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
            ut->uu_continuation = continuation;
            ut->uu_pri = pri;
            ut->uu_timo = abstime ? 1 : 0;
            ut->uu_mtx = mtx;
            (void) thread_block(_sleep_continue);
            /* NOTREACHED */
        }

        wait_result = thread_block(THREAD_CONTINUE_NULL);

        if (mtx && !dropmutex) {
            if (spinmutex)
                lck_mtx_lock_spin(mtx);
            else
                lck_mtx_lock(mtx);
        }
    }

    switch (wait_result) {
    case THREAD_TIMED_OUT:
        error = EWOULDBLOCK;
        break;
    case THREAD_AWAKENED:
        /*
         * Posix implies any signal should be delivered
         * first, regardless of whether awakened due
         * to receiving event.
         */
        if (!catch)
            break;
        /* else fall through */
    case THREAD_INTERRUPTED:
        if (catch) {
            if (thread_should_abort(self)) {
                error = EINTR;
            } else if (SHOULDissignal(p, ut)) {
                if ((sig = CURSIG(p)) != 0) {
                    if (p->p_sigacts->ps_sigintr & sigmask(sig))
                        error = EINTR;
                    else
                        error = ERESTART;
                }
                if (thread_should_abort(self)) {
                    error = EINTR;
                }
            } else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
                /* due to thread cancel */
                error = EINTR;
            }
        } else
            error = EINTR;
        break;
    }
out:
    if (error == EINTR || error == ERESTART)
        act_set_astbsd(self);
    ut->uu_wchan = NULL;
    ut->uu_wmesg = NULL;

    return (error);
}

int
sleep(
    void *chan,
    int pri)
{
    return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
}

int
msleep0(
    void *chan,
    lck_mtx_t *mtx,
    int pri,
    const char *wmsg,
    int timo,
    int (*continuation)(int))
{
    u_int64_t abstime = 0;

    if (timo)
        clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);

    return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
}

int
msleep(
    void *chan,
    lck_mtx_t *mtx,
    int pri,
    const char *wmsg,
    struct timespec *ts)
{
    u_int64_t abstime = 0;

    if (ts && (ts->tv_sec || ts->tv_nsec)) {
        nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime);
        clock_absolutetime_interval_to_deadline(abstime, &abstime);
    }

    return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
}
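
/*
 * Illustrative usage sketch, not part of the original file: a waiter that
 * blocks on a condition with msleep().  The names example_lck, example_ready
 * and example_wait are hypothetical; only msleep()/wakeup() and the PZERO,
 * PCATCH, PDROP priority flags come from this file and <sys/param.h>.
 */
#if 0   /* illustrative sketch only; not part of the build */
static lck_mtx_t *example_lck;      /* assumed allocated and initialized elsewhere */
static int example_ready;

static int
example_wait(void)
{
    struct timespec ts = { 5, 0 };  /* bound the wait to roughly 5 seconds */
    int error = 0;

    lck_mtx_lock(example_lck);
    while (!example_ready && error == 0) {
        /*
         * PCATCH lets signals interrupt the sleep; msleep() drops the
         * mutex while blocked and re-takes it before returning, since
         * PDROP is not passed here.
         */
        error = msleep(&example_ready, example_lck,
            PZERO | PCATCH, "example_wait", &ts);
    }
    lck_mtx_unlock(example_lck);

    /* EWOULDBLOCK: timed out; EINTR/ERESTART: signal or thread abort */
    return (error);
}
#endif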

int
msleep1(
    void *chan,
    lck_mtx_t *mtx,
    int pri,
    const char *wmsg,
    u_int64_t abstime)
{
    return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
}

int
tsleep(
    void *chan,
    int pri,
    const char *wmsg,
    int timo)
{
    u_int64_t abstime = 0;

    if (timo)
        clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
    return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
}

int
tsleep0(
    void *chan,
    int pri,
    const char *wmsg,
    int timo,
    int (*continuation)(int))
{
    u_int64_t abstime = 0;

    if (timo)
        clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
    return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
}

int
tsleep1(
    void *chan,
    int pri,
    const char *wmsg,
    u_int64_t abstime,
    int (*continuation)(int))
{
    return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
}

/*
 * Wake up all processes sleeping on chan.
 */
void
wakeup(void *chan)
{
    thread_wakeup_prim((caddr_t)chan, FALSE, THREAD_AWAKENED);
}

/*
 * Wake up the first process sleeping on chan.
 *
 * Be very sure that the first process is really
 * the right one to wake up.
 */
void
wakeup_one(caddr_t chan)
{
    thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED);
}
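
/*
 * Illustrative counterpart to the waiter sketch above (same hypothetical
 * example_lck / example_ready names): the producer makes the condition true
 * under the lock, then calls wakeup() with the same channel address the
 * waiter passed to msleep().
 */
#if 0   /* illustrative sketch only; not part of the build */
static void
example_signal(void)
{
    lck_mtx_lock(example_lck);
    example_ready = 1;
    lck_mtx_unlock(example_lck);

    /* wake every thread sleeping on &example_ready; wakeup_one() would wake just one */
    wakeup(&example_ready);
}
#endif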

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
    (void)task_importance(p->task, -p->p_nice);
}

struct loadavg averunnable =
    { {0, 0, 0}, FSCALE };      /* load average, of runnable procs */

/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
    (fixpt_t)(0.9200444146293232 * FSCALE),  /* exp(-1/12) */
    (fixpt_t)(0.9834714538216174 * FSCALE),  /* exp(-1/60) */
    (fixpt_t)(0.9944598480048967 * FSCALE),  /* exp(-1/180) */
};

void
compute_averunnable(void *arg)
{
    unsigned int nrun = *(unsigned int *)arg;
    struct loadavg *avg = &averunnable;
    int i;

    for (i = 0; i < 3; i++)
        avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
            nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}
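
/*
 * The loop above is the usual exponentially weighted moving average in
 * fixed point: for each averaging window T (60, 300 and 900 seconds)
 * sampled every 5 seconds,
 *
 *      avg = avg * exp(-5/T) + nrun * (1 - exp(-5/T))
 *
 * e.g. cexp[0] = exp(-5/60) = exp(-1/12) ~= 0.92004, stored scaled by
 * FSCALE; nrun is scaled by FSCALE before the multiply, and the final
 * >> FSHIFT brings the product back to a single FSCALE scaling.
 */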