]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kern_synch.c
xnu-792.22.5.tar.gz
[apple/xnu.git] / bsd / kern / kern_synch.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1987 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc_internal.h>
38 #include <sys/user.h>
39 #include <sys/file_internal.h>
40 #include <sys/vnode.h>
41 #include <sys/kernel.h>
42
43 #include <machine/spl.h>
44
45 #include <kern/queue.h>
46 #include <sys/lock.h>
47 #include <kern/thread.h>
48 #include <kern/sched_prim.h>
49 #include <kern/ast.h>
50
51 #include <kern/cpu_number.h>
52 #include <vm/vm_kern.h>
53
54 #include <kern/task.h>
55 #include <mach/time_value.h>
56 #include <kern/lock.h>
57
58
59 #if KTRACE
60 #include <sys/uio.h>
61 #include <sys/ktrace.h>
62 #endif
63
/*
 * Continuation resumed after a thread that entered _sleep() with a
 * continuation function has blocked and been awakened.  Runs on a fresh
 * kernel stack, so all sleep parameters must be recovered from the
 * uthread (saved by _sleep() before blocking): uu_pri holds the original
 * priority/flag word, uu_mtx the caller's mutex, uu_continuation the
 * function to tail-call.  Maps the wakeup reason to an errno, performs
 * the same signal/abort checks as the non-continuation path of _sleep(),
 * then exits via unix_syscall_return() — this function never returns.
 */
static void
_sleep_continue(
	void *parameter,
	wait_result_t wresult)
{
	register struct proc *p = current_proc();
	register thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int error = 0;
	int dropmutex;

	ut = get_bsdthread_info(self);
	/* Recover the PCATCH/PDROP flags from the saved priority word. */
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;

	switch (wresult) {
		case THREAD_TIMED_OUT:
			error = EWOULDBLOCK;
			break;
		case THREAD_AWAKENED:
			/*
			 * Posix implies any signal should be delivered
			 * first, regardless of whether awakened due
			 * to receiving event.
			 */
			if (!catch)
				break;
			/* else fall through */
		case THREAD_INTERRUPTED:
			if (catch) {
				if (thread_should_abort(self)) {
					error = EINTR;
				} else if (SHOULDissignal(p,ut)) {
					/* NOTE: assignment in condition is intentional */
					if (sig = CURSIG(p)) {
						/* SA_RESTART semantics: restart the syscall
						 * unless the handler asked for interruption */
						if (p->p_sigacts->ps_sigintr & sigmask(sig))
							error = EINTR;
						else
							error = ERESTART;
					}
					if (thread_should_abort(self)) {
						error = EINTR;
					}
				} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
					/* due to thread cancel */
					error = EINTR;
				}
			} else
				error = EINTR;
			break;
	}

	/* Arrange for signal delivery / syscall restart on the way out. */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	/* Re-acquire the caller's mutex unless PDROP was specified. */
	if (ut->uu_mtx && !dropmutex)
		lck_mtx_lock(ut->uu_mtx);

	/* Tail-call the saved continuation; does not return. */
	unix_syscall_return((*ut->uu_continuation)(error));
}
128
129 /*
130 * Give up the processor till a wakeup occurs
131 * on chan, at which time the process
132 * enters the scheduling queue at priority pri.
133 * The most important effect of pri is that when
134 * pri<=PZERO a signal cannot disturb the sleep;
135 * if pri>PZERO signals will be processed.
136 * If pri&PCATCH is set, signals will cause sleep
137 * to return 1, rather than longjmp.
138 * Callers of this routine must be prepared for
139 * premature return, and check that the reason for
140 * sleeping has gone away.
141 *
142 * if msleep was the entry point, than we have a mutex to deal with
143 *
144 * The mutex is unlocked before the caller is blocked, and
145 * relocked before msleep returns unless the priority includes the PDROP
146 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
147 * regardless of whether it actually blocked or not.
148 */
149
/*
 * Common implementation behind sleep/tsleep/msleep.
 *
 * chan         wait channel (may be NULL for a pure timed wait)
 * pri          scheduling priority ORed with PCATCH/PDROP/PTTYBLOCK flags
 * wmsg         wait message (unused here beyond the caller's bookkeeping)
 * abstime      absolute deadline, or 0 for no timeout
 * continuation if non-NULL, block with _sleep_continue and never return
 *              to the caller on the blocking path
 * mtx          optional mutex, unlocked across the block and re-acquired
 *              unless PDROP is set
 *
 * Returns 0 on normal wakeup, EWOULDBLOCK on timeout, EINTR/ERESTART
 * when interrupted by a signal or thread abort (PCATCH only).
 */
static int
_sleep(
	caddr_t chan,
	int pri,
	const char *wmsg,
	u_int64_t abstime,
	int (*continuation)(int),
	lck_mtx_t *mtx)
{
	register struct proc *p;
	register thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch = pri & PCATCH;
	int dropmutex = pri & PDROP;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	p->p_priority = pri & PRIMASK;
	p->p_stats->p_ru.ru_nvcsw++;

	/*
	 * Fast path: a plain mutex-protected, non-continuation sleep can be
	 * handled entirely by the lck_mtx_sleep primitives, which perform
	 * the unlock/block/relock sequence atomically with respect to wakeup.
	 */
	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {

		if (abstime)
			wait_result = lck_mtx_sleep_deadline(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
				       chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		else
			wait_result = lck_mtx_sleep(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
				       chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT);
	}
	else {
		/*
		 * Slow path: register the wait first, then drop the mutex, so
		 * a wakeup between unlock and block is not lost.
		 */
		if (chan != NULL)
			assert_wait_deadline(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		if (mtx)
			lck_mtx_unlock(mtx);
		if (catch) {
			/* Check for already-pending signals before blocking. */
			if (SHOULDissignal(p,ut)) {
				/* NOTE: assignment in condition is intentional */
				if (sig = CURSIG(p)) {
					/* If the wait was already satisfied, take the
					 * wakeup rather than the signal. */
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
						goto block;
					/* if SIGTTOU or SIGTTIN then block till SIGCONT */
					if ((pri & PTTYBLOCK) && ((sig == SIGTTOU) || (sig == SIGTTIN))) {
						p->p_flag |= P_TTYSLEEP;
						/* reset signal bits */
						clear_procsiglist(p, sig);
						assert_wait(&p->p_siglist, THREAD_ABORTSAFE);
						/* assert wait can block and SIGCONT should be checked */
						if (p->p_flag & P_TTYSLEEP) {
							thread_block(THREAD_CONTINUE_NULL);

							if (mtx && !dropmutex)
								lck_mtx_lock(mtx);
						}

						/* return with success */
						error = 0;
						goto out;
					}
					/* SA_RESTART semantics for the interrupted syscall */
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
					if (mtx && !dropmutex)
						lck_mtx_lock(mtx);
					goto out;
				}
			}
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
					goto block;
				error = EINTR;

				if (mtx && !dropmutex)
					lck_mtx_lock(mtx);
				goto out;
			}
		}


block:
		/*
		 * Continuation-style block: stash the parameters in the uthread
		 * for _sleep_continue and give up this stack permanently.
		 */
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime? 1: 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (mtx && !dropmutex)
			lck_mtx_lock(mtx);
	}

	/* Map the Mach wait result onto a BSD errno. */
	switch (wait_result) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (!catch)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				/* NOTE: assignment in condition is intentional */
				if (sig = CURSIG(p)) {
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			}
		} else
			error = EINTR;
		break;
	}
out:
	/* Arrange for signal delivery / syscall restart on the way out. */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (error);
}
293
294 int
295 sleep(
296 void *chan,
297 int pri)
298 {
299 return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
300 }
301
302 int
303 msleep0(
304 void *chan,
305 lck_mtx_t *mtx,
306 int pri,
307 const char *wmsg,
308 int timo,
309 int (*continuation)(int))
310 {
311 u_int64_t abstime = 0;
312
313 if (timo)
314 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
315
316 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
317 }
318
319 int
320 msleep(
321 void *chan,
322 lck_mtx_t *mtx,
323 int pri,
324 const char *wmsg,
325 struct timespec *ts)
326 {
327 u_int64_t abstime = 0;
328
329 if (ts && (ts->tv_sec || ts->tv_nsec)) {
330 nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
331 clock_absolutetime_interval_to_deadline( abstime, &abstime );
332 }
333
334 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
335 }
336
337 int
338 msleep1(
339 void *chan,
340 lck_mtx_t *mtx,
341 int pri,
342 const char *wmsg,
343 u_int64_t abstime)
344 {
345 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
346 }
347
348 int
349 tsleep(
350 void *chan,
351 int pri,
352 const char *wmsg,
353 int timo)
354 {
355 u_int64_t abstime = 0;
356
357 if (timo)
358 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
359 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
360 }
361
362 int
363 tsleep0(
364 void *chan,
365 int pri,
366 const char *wmsg,
367 int timo,
368 int (*continuation)(int))
369 {
370 u_int64_t abstime = 0;
371
372 if (timo)
373 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
374 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
375 }
376
377 int
378 tsleep1(
379 void *chan,
380 int pri,
381 const char *wmsg,
382 u_int64_t abstime,
383 int (*continuation)(int))
384 {
385 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
386 }
387
388 /*
389 * Wake up all processes sleeping on chan.
390 */
391 void
392 wakeup(chan)
393 register void *chan;
394 {
395 thread_wakeup_prim((caddr_t)chan, FALSE, THREAD_AWAKENED);
396 }
397
398 /*
399 * Wake up the first process sleeping on chan.
400 *
401 * Be very sure that the first process is really
402 * the right one to wakeup.
403 */
404 void
405 wakeup_one(chan)
406 register caddr_t chan;
407 {
408 thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED);
409 }
410
411 /*
412 * Compute the priority of a process when running in user mode.
413 * Arrange to reschedule if the resulting priority is better
414 * than that of the current process.
415 */
416 void
417 resetpriority(p)
418 register struct proc *p;
419 {
420 (void)task_importance(p->task, -p->p_nice);
421 }
422
/* System-wide load average of runnable processes, in fixed-point
 * (FSCALE) units; updated periodically by compute_averunnable(). */
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 * Each is the per-sample exponential decay factor, in FSCALE units.
 */
static fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),	/* exp(-1/12) */
	(fixpt_t)(0.9834714538216174 * FSCALE),	/* exp(-1/60) */
	(fixpt_t)(0.9944598480048967 * FSCALE),	/* exp(-1/180) */
};
434
435 void
436 compute_averunnable(
437 void *arg)
438 {
439 unsigned int nrun = *(unsigned int *)arg;
440 struct loadavg *avg = &averunnable;
441 register int i;
442
443 for (i = 0; i < 3; i++)
444 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
445 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
446 }