/* bsd/kern/kern_synch.c — from Apple xnu-1228.5.18 (BSD sleep/wakeup layer over Mach scheduling primitives) */
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1987 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc_internal.h>
38 #include <sys/user.h>
39 #include <sys/file_internal.h>
40 #include <sys/vnode.h>
41 #include <sys/kernel.h>
42
43 #include <machine/spl.h>
44
45 #include <kern/queue.h>
46 #include <sys/lock.h>
47 #include <kern/thread.h>
48 #include <kern/sched_prim.h>
49 #include <kern/ast.h>
50
51 #include <kern/cpu_number.h>
52 #include <vm/vm_kern.h>
53
54 #include <kern/task.h>
55 #include <mach/time_value.h>
56 #include <kern/lock.h>
57
58 #include <sys/systm.h> /* for unix_syscall_return() */
59 #include <libkern/OSAtomic.h>
60
61 extern boolean_t thread_should_abort(thread_t); /* XXX */
62 extern void compute_averunnable(void *); /* XXX */
63
64
65
/*
 * Continuation routine resumed when a thread that blocked in _sleep()
 * with a continuation wakes up.  The original kernel stack is gone, so
 * the sleep parameters (priority bits, continuation, mutex) are
 * recovered from the uthread fields where _sleep() stashed them, the
 * Mach wait result is translated into an errno, and control returns to
 * user space through the saved continuation.  Does not return.
 */
static void
_sleep_continue( __unused void *parameter, wait_result_t wresult)
{
	struct proc *p = current_proc();
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch;
	int error = 0;
	int dropmutex;

	ut = get_bsdthread_info(self);
	/* _sleep() saved the caller's original priority argument in uu_pri */
	catch = ut->uu_pri & PCATCH;
	dropmutex = ut->uu_pri & PDROP;

	/* map the scheduler's wakeup reason onto a BSD errno */
	switch (wresult) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (!catch)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p,ut)) {
				if ((sig = CURSIG(p)) != 0) {
					/* EINTR if the handler interrupts syscalls, else restart */
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				/* re-check: CURSIG processing may have set the abort */
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			} else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
				/* due to thread cancel */
				error = EINTR;
			}
		} else
			error = EINTR;
		break;
	}

	/* arrange for signal/restart handling on the way back to user mode */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);

	/* re-acquire the caller's mutex unless PDROP asked us not to */
	if (ut->uu_mtx && !dropmutex)
		lck_mtx_lock(ut->uu_mtx);

	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	/* resume the interrupted syscall via the saved continuation */
	unix_syscall_return((*ut->uu_continuation)(error));
	/* NOTREACHED */
}
127
/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<=PZERO a signal cannot disturb the sleep;
 * if pri>PZERO signals will be processed.
 * If pri&PCATCH is set, signals will cause sleep
 * to return 1, rather than longjmp.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 *
 * if msleep was the entry point, than we have a mutex to deal with
 *
 * The mutex is unlocked before the caller is blocked, and
 * relocked before msleep returns unless the priority includes the PDROP
 * flag... if PDROP is specified, _sleep returns with the mutex unlocked
 * regardless of whether it actually blocked or not.
 *
 * Returns 0 on normal wakeup, EWOULDBLOCK on timeout, EINTR/ERESTART
 * when interrupted by a signal or thread abort (PCATCH only).
 * If a continuation is supplied the thread blocks without preserving
 * its kernel stack and resumes in _sleep_continue() — this function
 * then never returns to its caller.
 */

static int
_sleep(
	caddr_t		chan,
	int		pri,
	const char	*wmsg,
	u_int64_t	abstime,
	int		(*continuation)(int),
	lck_mtx_t	*mtx)
{
	struct proc *p;
	thread_t self = current_thread();
	struct uthread * ut;
	int sig, catch = pri & PCATCH;
	int dropmutex = pri & PDROP;
	int wait_result;
	int error = 0;

	ut = get_bsdthread_info(self);

	p = current_proc();
	p->p_priority = pri & PRIMASK;
	/* It can still block in proc_exit() after the teardown. */
	if (p->p_stats != NULL)
		OSIncrementAtomic(&p->p_stats->p_ru.ru_nvcsw);

	/* set wait message & channel */
	ut->uu_wchan = chan;
	ut->uu_wmesg = wmsg ? wmsg : "unknown";

	/*
	 * Simple case: mutex + channel and no continuation.  The lck_mtx
	 * sleep primitives handle the unlock/block/relock sequencing
	 * (and honor LCK_SLEEP_UNLOCK when PDROP was given).
	 */
	if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) {

		if (abstime)
			wait_result = lck_mtx_sleep_deadline(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
			    chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		else
			wait_result = lck_mtx_sleep(mtx, (dropmutex) ? LCK_SLEEP_UNLOCK : 0,
			    chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT);
	}
	else {
		/* assert the wait first, then drop the mutex, so no wakeup is lost */
		if (chan != NULL)
			assert_wait_deadline(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT, abstime);
		if (mtx)
			lck_mtx_unlock(mtx);
		if (catch) {
			/*
			 * A signal may already be pending; if so, try to cancel
			 * the wait and return EINTR/ERESTART without blocking.
			 */
			if (SHOULDissignal(p,ut)) {
				if ((sig = CURSIG(p)) != 0) {
					/* clear_wait failure means someone already woke us: block normally */
					if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
						goto block;
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
					if (mtx && !dropmutex)
						lck_mtx_lock(mtx);
					goto out;
				}
			}
			/* likewise bail out early for a pending thread abort */
			if (thread_should_abort(self)) {
				if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE)
					goto block;
				error = EINTR;

				if (mtx && !dropmutex)
					lck_mtx_lock(mtx);
				goto out;
			}
		}


block:
		/*
		 * Continuation-style block: save everything _sleep_continue()
		 * needs in the uthread and give up the kernel stack.
		 */
		if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) {
			ut->uu_continuation = continuation;
			ut->uu_pri = pri;
			ut->uu_timo = abstime? 1: 0;
			ut->uu_mtx = mtx;
			(void) thread_block(_sleep_continue);
			/* NOTREACHED */
		}

		wait_result = thread_block(THREAD_CONTINUE_NULL);

		if (mtx && !dropmutex)
			lck_mtx_lock(mtx);
	}

	/* translate the wakeup reason into an errno (mirrors _sleep_continue) */
	switch (wait_result) {
	case THREAD_TIMED_OUT:
		error = EWOULDBLOCK;
		break;
	case THREAD_AWAKENED:
		/*
		 * Posix implies any signal should be delivered
		 * first, regardless of whether awakened due
		 * to receiving event.
		 */
		if (!catch)
			break;
		/* else fall through */
	case THREAD_INTERRUPTED:
		if (catch) {
			if (thread_should_abort(self)) {
				error = EINTR;
			} else if (SHOULDissignal(p, ut)) {
				if ((sig = CURSIG(p)) != 0) {
					if (p->p_sigacts->ps_sigintr & sigmask(sig))
						error = EINTR;
					else
						error = ERESTART;
				}
				/* re-check: CURSIG processing may have set the abort */
				if (thread_should_abort(self)) {
					error = EINTR;
				}
			}
		} else
			error = EINTR;
		break;
	}
out:
	/* arrange for signal/restart handling on the way back to user mode */
	if (error == EINTR || error == ERESTART)
		act_set_astbsd(self);
	ut->uu_wchan = NULL;
	ut->uu_wmesg = NULL;

	return (error);
}
274
275 int
276 sleep(
277 void *chan,
278 int pri)
279 {
280 return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0);
281 }
282
283 int
284 msleep0(
285 void *chan,
286 lck_mtx_t *mtx,
287 int pri,
288 const char *wmsg,
289 int timo,
290 int (*continuation)(int))
291 {
292 u_int64_t abstime = 0;
293
294 if (timo)
295 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
296
297 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx);
298 }
299
300 int
301 msleep(
302 void *chan,
303 lck_mtx_t *mtx,
304 int pri,
305 const char *wmsg,
306 struct timespec *ts)
307 {
308 u_int64_t abstime = 0;
309
310 if (ts && (ts->tv_sec || ts->tv_nsec)) {
311 nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime );
312 clock_absolutetime_interval_to_deadline( abstime, &abstime );
313 }
314
315 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
316 }
317
318 int
319 msleep1(
320 void *chan,
321 lck_mtx_t *mtx,
322 int pri,
323 const char *wmsg,
324 u_int64_t abstime)
325 {
326 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx);
327 }
328
329 int
330 tsleep(
331 void *chan,
332 int pri,
333 const char *wmsg,
334 int timo)
335 {
336 u_int64_t abstime = 0;
337
338 if (timo)
339 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
340 return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0);
341 }
342
343 int
344 tsleep0(
345 void *chan,
346 int pri,
347 const char *wmsg,
348 int timo,
349 int (*continuation)(int))
350 {
351 u_int64_t abstime = 0;
352
353 if (timo)
354 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
355 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
356 }
357
358 int
359 tsleep1(
360 void *chan,
361 int pri,
362 const char *wmsg,
363 u_int64_t abstime,
364 int (*continuation)(int))
365 {
366 return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0);
367 }
368
369 /*
370 * Wake up all processes sleeping on chan.
371 */
372 void
373 wakeup(void *chan)
374 {
375 thread_wakeup_prim((caddr_t)chan, FALSE, THREAD_AWAKENED);
376 }
377
378 /*
379 * Wake up the first process sleeping on chan.
380 *
381 * Be very sure that the first process is really
382 * the right one to wakeup.
383 */
384 void
385 wakeup_one(caddr_t chan)
386 {
387 thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED);
388 }
389
390 /*
391 * Compute the priority of a process when running in user mode.
392 * Arrange to reschedule if the resulting priority is better
393 * than that of the current process.
394 */
395 void
396 resetpriority(struct proc *p)
397 {
398 (void)task_importance(p->task, -p->p_nice);
399 }
400
/* System-wide load average state, updated by compute_averunnable(). */
struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 * Fixed-point (FSCALE-scaled) exponential decay factors.
 */
static fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),	/* exp(-1/12) */
	(fixpt_t)(0.9834714538216174 * FSCALE),	/* exp(-1/60) */
	(fixpt_t)(0.9944598480048967 * FSCALE),	/* exp(-1/180) */
};
412
413 void
414 compute_averunnable(void *arg)
415 {
416 unsigned int nrun = *(unsigned int *)arg;
417 struct loadavg *avg = &averunnable;
418 int i;
419
420 for (i = 0; i < 3; i++)
421 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
422 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
423 }