/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved. The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/user.h>
#include <sys/file_internal.h>
#include <sys/vnode.h>
#include <sys/kernel.h>

#include <machine/spl.h>

#include <kern/queue.h>
#include <sys/lock.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/ast.h>

#include <kern/cpu_number.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <mach/time_value.h>
#include <kern/lock.h>


#if KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

static void
_sleep_continue(
	void		*parameter,
	wait_result_t	wresult)
{
	register struct proc *p = current_proc();
	register thread_t self = current_thread();
	struct uthread *ut;
	int sig, catch;
	int error = 0;
	int dropmutex;

	ut = get_bsdthread_info(self);
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;

	switch (wresult) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * POSIX implies any pending signal should be delivered
		 * first, regardless of whether we were awakened by the
		 * event we were waiting for.
		 */
		if (!catch)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			} else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				/* due to thread cancellation */
				error = EINTR;
			}
		} else
			error = EINTR;
		break;
	}

	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	if (ut->uu_mtx && !dropmutex)
		lck_mtx_lock(ut->uu_mtx);

	unix_syscall_return((*ut->uu_continuation)(error));
}

/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * If pri&PCATCH is set, signals will cause sleep
 * to return 1, rather than longjmp.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 *
 * If msleep was the entry point, then we have a mutex to deal with.
 *
 * The mutex is unlocked before the caller is blocked, and
 * relocked before msleep returns unless the priority includes the PDROP
 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
 * regardless of whether it actually blocked or not.
 */
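/*
 * Illustrative usage (a hedged sketch, not part of this file): a typical
 * caller takes its mutex, re-checks its condition in a loop because the
 * sleep may return prematurely, and waits with msleep().  The mutex
 * `giant_lock`, the flag `resource_busy`, and the wait message "reswait"
 * are hypothetical names used only for illustration.
 *
 *	lck_mtx_lock(giant_lock);
 *	while (resource_busy) {
 *		struct timespec ts = { 1, 0 };		// wait at most one second
 *		int err = msleep(&resource_busy, giant_lock,
 *		    PSOCK | PCATCH, "reswait", &ts);
 *		if (err == EINTR || err == ERESTART)
 *			break;				// interrupted by a signal
 *		// EWOULDBLOCK means the timeout expired; loop and re-check
 *	}
 *	resource_busy = 1;
 *	lck_mtx_unlock(giant_lock);
 *
 * If the priority includes PDROP, msleep() returns with the mutex already
 * unlocked, so the caller must not unlock it again.
 */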

static int
_sleep(
	caddr_t		chan,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime,
	int		(*continuation)(int),
	lck_mtx_t	*mtx)
{
	register struct proc *p;
	register thread_t self = current_thread();
	struct uthread *ut;
	int sig, catch = pri & PCATCH;
	int dropmutex = pri & PDROP;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	p->p_priority = pri & PRIMASK;
	p->p_stats->p_ru.ru_nvcsw++;

	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {

		if (abstime)
			wait_result = lck_mtx_sleep_deadline(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
			    chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		else
			wait_result = lck_mtx_sleep(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
			    chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT);
	}
	else {
		if (chan != NULL)
			assert_wait_deadline(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		if (mtx)
			lck_mtx_unlock(mtx);
		if (catch) {
			if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
						goto block;
					/* if SIGTTOU or SIGTTIN then block till SIGCONT */
					if ((pri & PTTYBLOCK) && ((sig == SIGTTOU) || (sig == SIGTTIN))) {
						p->p_flag |= P_TTYSLEEP;
						/* reset signal bits */
						clear_procsiglist(p, sig);
						assert_wait(&p->p_siglist, THREAD_ABORTSAFE);
						/* assert_wait can block, and SIGCONT should be checked again */
						if (p->p_flag & P_TTYSLEEP) {
							thread_block(THREAD_CONTINUE_NULL);

							if (mtx && !dropmutex)
								lck_mtx_lock(mtx);
						}

						/* return with success */
						error = 0;
						goto out;
					}
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
					if (mtx && !dropmutex)
						lck_mtx_lock(mtx);
					goto out;
				}
			}
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
					goto block;
				error = EINTR;

				if (mtx && !dropmutex)
					lck_mtx_lock(mtx);
				goto out;
			}
		}


block:
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime ? 1 : 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (mtx && !dropmutex)
			lck_mtx_lock(mtx);
	}

	switch (wait_result) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * POSIX implies any pending signal should be delivered
		 * first, regardless of whether we were awakened by the
		 * event we were waiting for.
		 */
		if (!catch)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			}
		} else
			error = EINTR;
		break;
	}
out:
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

#if KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (error);
}

int
sleep(
	void	*chan,
	int	pri)
{
	return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
}

int
msleep0(
	void		*chan,
	lck_mtx_t	*mtx,
	int		pri,
	const char	*wmsg,
	int		timo,
	int		(*continuation)(int))
{
	u_int64_t abstime = 0;

	if (timo)
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);

	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
}

int
msleep(
	void		*chan,
	lck_mtx_t	*mtx,
	int		pri,
	const char	*wmsg,
	struct timespec	*ts)
{
	u_int64_t abstime = 0;

	if (ts && (ts->tv_sec || ts->tv_nsec)) {
		nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime);
		clock_absolutetime_interval_to_deadline(abstime, &abstime);
	}

	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
}

int
msleep1(
	void		*chan,
	lck_mtx_t	*mtx,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime)
{
	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
}

int
tsleep(
	void		*chan,
	int		pri,
	const char	*wmsg,
	int		timo)
{
	u_int64_t abstime = 0;

	if (timo)
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
	return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
}

int
tsleep0(
	void		*chan,
	int		pri,
	const char	*wmsg,
	int		timo,
	int		(*continuation)(int))
{
	u_int64_t abstime = 0;

	if (timo)
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
}

int
tsleep1(
	void		*chan,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime,
	int		(*continuation)(int))
{
	return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
}

/*
 * Wake up all processes sleeping on chan.
 */
void
wakeup(chan)
	register void *chan;
{
	thread_wakeup_prim((caddr_t)chan, FALSE, THREAD_AWAKENED);
}

/*
 * Wake up the first process sleeping on chan.
 *
 * Be very sure that the first process is really
 * the right one to wake up.
 */
void
wakeup_one(chan)
	register caddr_t chan;
{
	thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED);
}
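
/*
 * Illustrative pairing (a hedged sketch, not part of this file): the waker
 * updates the shared state under the same mutex the sleeper passed to
 * msleep(), then wakes the channel.  The names `giant_lock` and
 * `resource_busy` are the same hypothetical names used in the msleep()
 * sketch above.
 *
 *	lck_mtx_lock(giant_lock);
 *	resource_busy = 0;
 *	wakeup(&resource_busy);		// wake every thread sleeping on this channel
 *	lck_mtx_unlock(giant_lock);
 *
 * wakeup_one() would instead wake only the first waiter, which is only safe
 * when any single waiter can make progress with the freed resource.
 */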

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	(void)task_importance(p->task, -p->p_nice);
}

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };		/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),	/* exp(-1/12) */
	(fixpt_t)(0.9834714538216174 * FSCALE),	/* exp(-1/60) */
	(fixpt_t)(0.9944598480048967 * FSCALE),	/* exp(-1/180) */
};

void
compute_averunnable(
	void		*arg)
{
	unsigned int nrun = *(unsigned int *)arg;
	struct loadavg *avg = &averunnable;
	register int i;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}
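
/*
 * The loop above maintains a fixed-point exponentially weighted moving
 * average: for each averaging interval T (60 s, 300 s, 900 s) and a 5 second
 * sample period, load = load * exp(-5/T) + nrun * (1 - exp(-5/T)), which is
 * where the exp(-1/12), exp(-1/60) and exp(-1/180) constants come from.
 * A floating-point sketch of the same update (illustrative only; the kernel
 * keeps everything scaled by FSCALE to avoid floating point):
 *
 *	static double load1, load5, load15;
 *
 *	static void
 *	compute_averunnable_fp(unsigned int nrun)
 *	{
 *		double c1  = 0.9200444146293232;	// exp(-1/12)
 *		double c5  = 0.9834714538216174;	// exp(-1/60)
 *		double c15 = 0.9944598480048967;	// exp(-1/180)
 *
 *		load1  = c1  * load1  + nrun * (1.0 - c1);
 *		load5  = c5  * load5  + nrun * (1.0 - c5);
 *		load15 = c15 * load15 + nrun * (1.0 - c15);
 *	}
 *
 * With nrun held constant at N, each average converges to N, which is why
 * avg->ldavg[i] converges to nrun * FSCALE in the fixed-point version.
 */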