]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_synch.c
c6b4888c35692b0fd16ae0cb6ebde657ea0ce1bc
[apple/xnu.git] / bsd / kern / kern_synch.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1987 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc_internal.h>
38 #include <sys/user.h>
39 #include <sys/file_internal.h>
40 #include <sys/vnode.h>
41 #include <sys/kernel.h>
42
43 #include <machine/spl.h>
44
45 #include <kern/queue.h>
46 #include <sys/lock.h>
47 #include <kern/thread.h>
48 #include <kern/sched_prim.h>
49 #include <kern/ast.h>
50
51 #include <kern/cpu_number.h>
52 #include <vm/vm_kern.h>
53
54 #include <kern/task.h>
55 #include <mach/time_value.h>
56 #include <kern/lock.h>
57
58 #include <sys/systm.h> /* for unix_syscall_return() */
59 #include <libkern/OSAtomic.h>
60
61 extern boolean_t thread_should_abort(thread_t); /* XXX */
62 extern void compute_averunnable(void *); /* XXX */
63
64
65
/*
 * Continuation routine resumed after a thread blocked in _sleep() with a
 * continuation.  Reconstructs the sleep parameters from the uthread (stashed
 * there by _sleep() before thread_block()), translates the Mach wait_result_t
 * into a BSD errno, retakes the caller's mutex when required, and returns to
 * user space via the saved BSD continuation.  Never returns to its caller.
 */
static void
_sleep_continue( __unused void *parameter, wait_result_t wresult)
{
	struct proc *p = current_proc();
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int error = 0;
	int dropmutex, spinmutex;

	/* recover the PCATCH/PDROP/PSPIN flags _sleep() saved in uu_pri */
	ut = get_bsdthread_info(self);
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;
	spinmutex = ut->uu_pri & PSPIN;

	switch (wresult) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (!catch)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			/* aborted thread wins over a pending signal */
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p,ut)) {
				if ((sig = CURSIG(p)) != 0) {
					/* ps_sigintr: caller asked this signal to interrupt, not restart */
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				/* re-check abort: it may have arrived while examining signals */
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				/* due to thread cancel */
				error = EINTR;
			}
		} else
			error = EINTR;
		break;
	}

	/* arrange for signal delivery / syscall restart on the way out */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

	/* retake the msleep mutex unless the caller passed PDROP */
	if (ut->uu_mtx && !dropmutex) {
		if (spinmutex)
			lck_mtx_lock_spin(ut->uu_mtx);
		else
			lck_mtx_lock(ut->uu_mtx);
	}
	/* clear the wait-channel bookkeeping set up by _sleep() */
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	/* resume the interrupted syscall via the saved BSD continuation */
	unix_syscall_return((*ut->uu_continuation)(error));
}
131
132 /*
133 * Give up the processor till a wakeup occurs
134 * on chan, at which time the process
135 * enters the scheduling queue at priority pri.
136 * The most important effect of pri is that when
137 * pri<=PZERO a signal cannot disturb the sleep;
138 * if pri>PZERO signals will be processed.
139 * If pri&PCATCH is set, signals will cause sleep
140 * to return 1, rather than longjmp.
141 * Callers of this routine must be prepared for
142 * premature return, and check that the reason for
143 * sleeping has gone away.
144 *
 * if msleep was the entry point, then we have a mutex to deal with
146 *
147 * The mutex is unlocked before the caller is blocked, and
148 * relocked before msleep returns unless the priority includes the PDROP
149 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
150 * regardless of whether it actually blocked or not.
151 */
152
static int
_sleep(
	caddr_t chan,			/* wait channel, may be NULL */
	int pri,			/* priority, OR'd with PCATCH/PDROP/PSPIN */
	const char *wmsg,		/* wait message for debugging tools */
	u_int64_t abstime,		/* absolute deadline; 0 means no timeout */
	int (*continuation)(int),	/* resume point, or NULL to block inline */
	lck_mtx_t *mtx)			/* mutex to release while asleep, or NULL */
{
	struct proc *p;
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int dropmutex = pri & PDROP;
	int spinmutex = pri & PSPIN;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
	p->p_priority = pri & PRIMASK;
	/* It can still block in proc_exit() after the teardown. */
	if (p->p_stats != NULL)
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw);

	/* PCATCH means signals/aborts may interrupt the wait */
	if (pri & PCATCH)
		catch = THREAD_ABORTSAFE;
	else
		catch = THREAD_UNINT;

	/* set wait message & channel */
	ut->uu_wchan = chan;
	ut->uu_wmesg = wmsg ? wmsg : "unknown";

	/*
	 * Fast path: with a mutex, a channel, and no continuation, let the
	 * lock package handle the unlock/wait/relock atomically.
	 */
	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {
		int flags;

		if (dropmutex)
			flags = LCK_SLEEP_UNLOCK;
		else
			flags = LCK_SLEEP_DEFAULT;

		if (spinmutex)
			flags |= LCK_SLEEP_SPIN;

		if (abstime)
			wait_result = lck_mtx_sleep_deadline(mtx, flags, chan, catch, abstime);
		else
			wait_result = lck_mtx_sleep(mtx, flags, chan, catch);
	}
	else {
		/*
		 * Slow path: assert the wait first, then drop the mutex by
		 * hand (safe: the thread is already on the wait queue).
		 */
		if (chan != NULL)
			assert_wait_deadline(chan, catch, abstime);
		if (mtx)
			lck_mtx_unlock(mtx);

		/*
		 * For interruptible sleeps, deliver any already-pending
		 * signal or abort before blocking; clear_wait() failure
		 * means a wakeup beat us, so just block and let the
		 * post-block switch below sort out the result.
		 */
		if (catch == THREAD_ABORTSAFE) {
			if (SHOULDissignal(p,ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
						goto block;
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
					if (mtx && !dropmutex) {
						if (spinmutex)
							lck_mtx_lock_spin(mtx);
						else
							lck_mtx_lock(mtx);
					}
					goto out;
				}
			}
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
					goto block;
				error = EINTR;

				if (mtx && !dropmutex) {
					if (spinmutex)
						lck_mtx_lock_spin(mtx);
					else
						lck_mtx_lock(mtx);
				}
				goto out;
			}
		}


block:
		/*
		 * Continuation-style sleep: stash the parameters in the
		 * uthread for _sleep_continue() and give up this stack.
		 */
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime? 1: 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		/* retake the mutex unless the caller passed PDROP */
		if (mtx && !dropmutex) {
			if (spinmutex)
				lck_mtx_lock_spin(mtx);
			else
				lck_mtx_lock(mtx);
		}
	}

	/* translate the Mach wakeup reason into a BSD errno */
	switch (wait_result) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (catch != THREAD_ABORTSAFE)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch == THREAD_ABORTSAFE) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				/* due to thread cancel */
				error = EINTR;
			}
		} else
			error = EINTR;
		break;
	}
out:
	/* arrange for signal delivery / syscall restart on return to user mode */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	return (error);
}
307
308 int
309 sleep(
310 void *chan,
311 int pri)
312 {
313 return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
314 }
315
316 int
317 msleep0(
318 void *chan,
319 lck_mtx_t *mtx,
320 int pri,
321 const char *wmsg,
322 int timo,
323 int (*continuation)(int))
324 {
325 u_int64_t abstime = 0;
326
327 if (timo)
328 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
329
330 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
331 }
332
333 int
334 msleep(
335 void *chan,
336 lck_mtx_t *mtx,
337 int pri,
338 const char *wmsg,
339 struct timespec *ts)
340 {
341 u_int64_t abstime = 0;
342
343 if (ts && (ts->tv_sec || ts->tv_nsec)) {
344 nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
345 clock_absolutetime_interval_to_deadline( abstime, &abstime );
346 }
347
348 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
349 }
350
351 int
352 msleep1(
353 void *chan,
354 lck_mtx_t *mtx,
355 int pri,
356 const char *wmsg,
357 u_int64_t abstime)
358 {
359 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
360 }
361
362 int
363 tsleep(
364 void *chan,
365 int pri,
366 const char *wmsg,
367 int timo)
368 {
369 u_int64_t abstime = 0;
370
371 if (timo)
372 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
373 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
374 }
375
376 int
377 tsleep0(
378 void *chan,
379 int pri,
380 const char *wmsg,
381 int timo,
382 int (*continuation)(int))
383 {
384 u_int64_t abstime = 0;
385
386 if (timo)
387 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
388 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
389 }
390
391 int
392 tsleep1(
393 void *chan,
394 int pri,
395 const char *wmsg,
396 u_int64_t abstime,
397 int (*continuation)(int))
398 {
399 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
400 }
401
402 /*
403 * Wake up all processes sleeping on chan.
404 */
405 void
406 wakeup(void *chan)
407 {
408 thread_wakeup((caddr_t)chan);
409 }
410
411 /*
412 * Wake up the first process sleeping on chan.
413 *
414 * Be very sure that the first process is really
415 * the right one to wakeup.
416 */
417 void
418 wakeup_one(caddr_t chan)
419 {
420 thread_wakeup_one((caddr_t)chan);
421 }
422
423 /*
424 * Compute the priority of a process when running in user mode.
425 * Arrange to reschedule if the resulting priority is better
426 * than that of the current process.
427 */
428 void
429 resetpriority(struct proc *p)
430 {
431 (void)task_importance(p->task, -p->p_nice);
432 }
433
/* system load average, fixed-point (FSCALE) — updated by compute_averunnable() */
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),	/* exp(-1/12) */
	(fixpt_t)(0.9834714538216174 * FSCALE),	/* exp(-1/60) */
	(fixpt_t)(0.9944598480048967 * FSCALE),	/* exp(-1/180) */
};
445
446 void
447 compute_averunnable(void *arg)
448 {
449 unsigned int nrun = *(unsigned int *)arg;
450 struct loadavg *avg = &averunnable;
451 int i;
452
453 for (i = 0; i < 3; i++)
454 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
455 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
456 }