/*
 * bsd/kern/kern_synch.c
 * (apple/xnu, commit 156d25a1e643aa22a3637822d335f0a1172eff06)
 */
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * Mach Operating System
32 * Copyright (c) 1987 Carnegie-Mellon University
33 * All rights reserved. The CMU software License Agreement specifies
34 * the terms and conditions for use and redistribution.
35 */
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/proc_internal.h>
40 #include <sys/user.h>
41 #include <sys/file_internal.h>
42 #include <sys/vnode.h>
43 #include <sys/kernel.h>
44
45 #include <machine/spl.h>
46
47 #include <kern/queue.h>
48 #include <sys/lock.h>
49 #include <kern/thread.h>
50 #include <kern/sched_prim.h>
51 #include <kern/ast.h>
52
53 #include <kern/cpu_number.h>
54 #include <vm/vm_kern.h>
55
56 #include <kern/task.h>
57 #include <mach/time_value.h>
58 #include <kern/lock.h>
59
60
61 #if KTRACE
62 #include <sys/uio.h>
63 #include <sys/ktrace.h>
64 #endif
65
/*
 * Continuation routine for _sleep(): runs when a thread that blocked
 * via a continuation (no kernel stack retained) is resumed.  Recovers
 * the sleep parameters _sleep() stashed in the uthread, converts the
 * Mach wait result into a BSD errno, re-takes the caller's mutex when
 * one was saved and PDROP was not requested, then resumes the
 * interrupted syscall through the saved continuation.  Never returns.
 */
static void
_sleep_continue(
	void *parameter,
	wait_result_t wresult)
{
	register struct proc *p = current_proc();
	register thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int error = 0;
	int dropmutex;

	ut = get_bsdthread_info(self);
	/* recover the PCATCH/PDROP flags _sleep() saved before blocking */
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;

	switch (wresult) {
		case THREAD_TIMED_OUT:
			error = EWOULDBLOCK;
			break;
		case THREAD_AWAKENED:
			/*
			 * Posix implies any signal should be delivered
			 * first, regardless of whether awakened due
			 * to receiving event.
			 */
			if (!catch)
				break;
			/* else fall through */
		case THREAD_INTERRUPTED:
			if (catch) {
				if (thread_should_abort(self)) {
					error = EINTR;
				} else if (SHOULDissignal(p,ut)) {
					if (sig = CURSIG(p)) {
						/* ps_sigintr: interrupt (EINTR) vs restart the syscall */
						if (p->p_sigacts->ps_sigintr & sigmask(sig))
							error = EINTR;
						else
							error = ERESTART;
					}
					if (thread_should_abort(self)) {
						error = EINTR;
					}
				} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
					/* due to thread cancel */
					error = EINTR;
				}
			} else
				error = EINTR;
			break;
	}

	/* post an AST so the pending signal is processed on syscall return */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	/* relock the caller's mutex unless PDROP asked us to leave it */
	if (ut->uu_mtx && !dropmutex)
		lck_mtx_lock(ut->uu_mtx);

	unix_syscall_return((*ut->uu_continuation)(error));
}
130
/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * If pri&PCATCH is set, signals will cause sleep
 * to return 1, rather than longjmp.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 *
 * if msleep was the entry point, than we have a mutex to deal with
 *
 * The mutex is unlocked before the caller is blocked, and
 * relocked before msleep returns unless the priority includes the PDROP
 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
 * regardless of whether it actually blocked or not.
 *
 * Returns 0 on normal wakeup, EWOULDBLOCK on timeout, EINTR/ERESTART
 * when interrupted by a signal or thread abort (PCATCH only decides
 * whether we look for signals before/after blocking).
 */

static int
_sleep(
	caddr_t chan,			/* wait channel; may be NULL */
	int pri,			/* priority | PCATCH | PDROP | PTTYBLOCK */
	const char *wmsg,		/* wait message; not referenced here */
	u_int64_t abstime,		/* absolute deadline, 0 == no timeout */
	int (*continuation)(int),	/* continuation to run on wakeup, or none */
	lck_mtx_t *mtx)			/* caller-held mutex, or NULL */
{
	register struct proc *p;
	register thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch = pri & PCATCH;
	int dropmutex = pri & PDROP;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	p->p_priority = pri & PRIMASK;
	p->p_stats->p_ru.ru_nvcsw++;	/* account a voluntary context switch */

	/*
	 * Fast path: with a mutex, a channel and no continuation, the
	 * lck_mtx_sleep* primitives perform the unlock/block/relock
	 * sequence for us.
	 */
	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {

		if (abstime)
			wait_result = lck_mtx_sleep_deadline(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
			    chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		else
			wait_result = lck_mtx_sleep(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
			    chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT);
	}
	else {
		/* slow path: assert the wait ourselves, then drop the mutex */
		if (chan != NULL)
			assert_wait_deadline(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		if (mtx)
			lck_mtx_unlock(mtx);
		if (catch) {
			/* check for a pending signal before actually blocking */
			if (SHOULDissignal(p,ut)) {
				if (sig = CURSIG(p)) {
					/* KERN_FAILURE: someone already woke us; go block normally */
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
						goto block;
					/* if SIGTTOU or SIGTTIN then block till SIGCONT */
					if ((pri & PTTYBLOCK) && ((sig == SIGTTOU) || (sig == SIGTTIN))) {
						p->p_flag |= P_TTYSLEEP;
						/* reset signal bits */
						clear_procsiglist(p, sig);
						assert_wait(&p->p_siglist, THREAD_ABORTSAFE);
						/* assert wait can block and SIGCONT should be checked */
						if (p->p_flag & P_TTYSLEEP) {
							thread_block(THREAD_CONTINUE_NULL);

							if (mtx && !dropmutex)
								lck_mtx_lock(mtx);
						}

						/* return with success */
						error = 0;
						goto out;
					}
					/* ps_sigintr decides EINTR vs restarting the syscall */
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
					if (mtx && !dropmutex)
						lck_mtx_lock(mtx);
					goto out;
				}
			}
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
					goto block;
				error = EINTR;

				if (mtx && !dropmutex)
					lck_mtx_lock(mtx);
				goto out;
			}
		}


block:
		/*
		 * With a continuation, stash the sleep state in the uthread
		 * and block without keeping a kernel stack; _sleep_continue()
		 * finishes the job and this path never returns here.
		 */
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime? 1: 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (mtx && !dropmutex)
			lck_mtx_lock(mtx);
	}

	/* map the Mach wait result onto a BSD errno */
	switch (wait_result) {
		case THREAD_TIMED_OUT:
			error = EWOULDBLOCK;
			break;
		case THREAD_AWAKENED:
			/*
			 * Posix implies any signal should be delivered
			 * first, regardless of whether awakened due
			 * to receiving event.
			 */
			if (!catch)
				break;
			/* else fall through */
		case THREAD_INTERRUPTED:
			if (catch) {
				if (thread_should_abort(self)) {
					error = EINTR;
				} else if (SHOULDissignal(p, ut)) {
					if (sig = CURSIG(p)) {
						if (p->p_sigacts->ps_sigintr & sigmask(sig))
							error = EINTR;
						else
							error = ERESTART;
					}
					if (thread_should_abort(self)) {
						error = EINTR;
					}
				}
			} else
				error = EINTR;
			break;
	}
out:
	/* post an AST so the signal is handled on return to user mode */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (error);
}
295
296 int
297 sleep(
298 void *chan,
299 int pri)
300 {
301 return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
302 }
303
304 int
305 msleep0(
306 void *chan,
307 lck_mtx_t *mtx,
308 int pri,
309 const char *wmsg,
310 int timo,
311 int (*continuation)(int))
312 {
313 u_int64_t abstime = 0;
314
315 if (timo)
316 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
317
318 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
319 }
320
321 int
322 msleep(
323 void *chan,
324 lck_mtx_t *mtx,
325 int pri,
326 const char *wmsg,
327 struct timespec *ts)
328 {
329 u_int64_t abstime = 0;
330
331 if (ts && (ts->tv_sec || ts->tv_nsec)) {
332 nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
333 clock_absolutetime_interval_to_deadline( abstime, &abstime );
334 }
335
336 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
337 }
338
339 int
340 msleep1(
341 void *chan,
342 lck_mtx_t *mtx,
343 int pri,
344 const char *wmsg,
345 u_int64_t abstime)
346 {
347 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
348 }
349
350 int
351 tsleep(
352 void *chan,
353 int pri,
354 const char *wmsg,
355 int timo)
356 {
357 u_int64_t abstime = 0;
358
359 if (timo)
360 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
361 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
362 }
363
364 int
365 tsleep0(
366 void *chan,
367 int pri,
368 const char *wmsg,
369 int timo,
370 int (*continuation)(int))
371 {
372 u_int64_t abstime = 0;
373
374 if (timo)
375 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
376 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
377 }
378
379 int
380 tsleep1(
381 void *chan,
382 int pri,
383 const char *wmsg,
384 u_int64_t abstime,
385 int (*continuation)(int))
386 {
387 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
388 }
389
390 /*
391 * Wake up all processes sleeping on chan.
392 */
393 void
394 wakeup(chan)
395 register void *chan;
396 {
397 thread_wakeup_prim((caddr_t)chan, FALSE, THREAD_AWAKENED);
398 }
399
400 /*
401 * Wake up the first process sleeping on chan.
402 *
403 * Be very sure that the first process is really
404 * the right one to wakeup.
405 */
406 void
407 wakeup_one(chan)
408 register caddr_t chan;
409 {
410 thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED);
411 }
412
413 /*
414 * Compute the priority of a process when running in user mode.
415 * Arrange to reschedule if the resulting priority is better
416 * than that of the current process.
417 */
418 void
419 resetpriority(p)
420 register struct proc *p;
421 {
422 (void)task_importance(p->task, -p->p_nice);
423 }
424
/* system-wide load average state, updated by compute_averunnable() below */
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };		/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 * (fixed-point decay factors, scaled by FSCALE)
 */
static fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),	/* exp(-1/12) */
	(fixpt_t)(0.9834714538216174 * FSCALE),	/* exp(-1/60) */
	(fixpt_t)(0.9944598480048967 * FSCALE),	/* exp(-1/180) */
};
436
437 void
438 compute_averunnable(
439 void *arg)
440 {
441 unsigned int nrun = *(unsigned int *)arg;
442 struct loadavg *avg = &averunnable;
443 register int i;
444
445 for (i = 0; i < 3; i++)
446 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
447 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
448 }