/*
 * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
 * All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Thread-based callout module.
 *
 * HISTORY
 *
 * 10 July 1999 (debo)
 *    Pulled into Mac OS X (microkernel).
 *
 * 3 July 1993 (debo)
 *    Created.
 */
34 | ||
35 | #include <mach/mach_types.h> | |
36 | ||
37 | #include <kern/sched_prim.h> | |
38 | #include <kern/clock.h> | |
39 | #include <kern/task.h> | |
40 | #include <kern/thread.h> | |
41 | ||
42 | #include <kern/thread_call.h> | |
43 | #include <kern/call_entry.h> | |
44 | ||
45 | #include <kern/timer_call.h> | |
46 | ||
47 | #define internal_call_num 768 | |
48 | ||
49 | #define thread_call_thread_min 4 | |
50 | ||
51 | static | |
52 | thread_call_data_t | |
53 | internal_call_storage[internal_call_num]; | |
54 | ||
55 | decl_simple_lock_data(static,thread_call_lock) | |
56 | ||
57 | static | |
58 | timer_call_data_t | |
59 | thread_call_delayed_timers[NCPUS]; | |
60 | ||
61 | static | |
62 | queue_head_t | |
63 | internal_call_free_queue, | |
64 | pending_call_queue, delayed_call_queue; | |
65 | ||
66 | static | |
67 | queue_head_t | |
68 | idle_thread_queue; | |
69 | ||
70 | static | |
71 | thread_t | |
72 | activate_thread; | |
73 | ||
74 | static | |
75 | boolean_t | |
76 | activate_thread_awake; | |
77 | ||
78 | static struct { | |
79 | int pending_num, | |
80 | pending_hiwat; | |
81 | int active_num, | |
82 | active_hiwat; | |
83 | int delayed_num, | |
84 | delayed_hiwat; | |
85 | int idle_thread_num; | |
86 | int thread_num, | |
87 | thread_hiwat, | |
88 | thread_lowat; | |
89 | } thread_calls; | |
90 | ||
91 | static boolean_t | |
92 | thread_call_initialized = FALSE; | |
93 | ||
94 | static __inline__ thread_call_t | |
95 | _internal_call_allocate(void); | |
96 | ||
97 | static __inline__ void | |
98 | _internal_call_release( | |
99 | thread_call_t call | |
100 | ); | |
101 | ||
102 | static __inline__ void | |
103 | _pending_call_enqueue( | |
104 | thread_call_t call | |
105 | ), | |
106 | _pending_call_dequeue( | |
107 | thread_call_t call | |
108 | ), | |
109 | _delayed_call_enqueue( | |
110 | thread_call_t call | |
111 | ), | |
112 | _delayed_call_dequeue( | |
113 | thread_call_t call | |
114 | ); | |
115 | ||
116 | static void __inline__ | |
117 | _set_delayed_call_timer( | |
118 | thread_call_t call | |
119 | ); | |
120 | ||
121 | static boolean_t | |
122 | _remove_from_pending_queue( | |
123 | thread_call_func_t func, | |
124 | thread_call_param_t param0, | |
125 | boolean_t remove_all | |
126 | ), | |
127 | _remove_from_delayed_queue( | |
128 | thread_call_func_t func, | |
129 | thread_call_param_t param0, | |
130 | boolean_t remove_all | |
131 | ); | |
132 | ||
133 | static __inline__ void | |
134 | _call_thread_wake(void); | |
135 | ||
136 | static void | |
137 | _call_thread(void), | |
138 | _activate_thread(void); | |
139 | ||
140 | static void | |
141 | _delayed_call_timer( | |
142 | timer_call_param_t p0, | |
143 | timer_call_param_t p1 | |
144 | ); | |
145 | ||
146 | #define qe(x) ((queue_entry_t)(x)) | |
147 | #define TC(x) ((thread_call_t)(x)) | |
148 | ||

/*
 * Routine:         thread_call_initialize [public]
 *
 * Description:     Initialize this module, called
 *                  early during system initialization.
 *
 * Preconditions:   None.
 *
 * Postconditions:  None.
 */

void
thread_call_initialize(void)
{
    thread_call_t       call;
    spl_t               s;
    int                 i;

    if (thread_call_initialized)
        panic("thread_call_initialize");

    simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);

    s = splsched();
    simple_lock(&thread_call_lock);

    queue_init(&pending_call_queue);
    queue_init(&delayed_call_queue);

    queue_init(&internal_call_free_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[internal_call_num];
            call++) {

        enqueue_tail(&internal_call_free_queue, qe(call));
    }

    for (i = 0; i < NCPUS; i++) {
        timer_call_setup(&thread_call_delayed_timers[i],
                                        _delayed_call_timer, NULL);
    }

    queue_init(&idle_thread_queue);
    thread_calls.thread_lowat = thread_call_thread_min;

    activate_thread_awake = TRUE;
    thread_call_initialized = TRUE;

    simple_unlock(&thread_call_lock);
    splx(s);

    activate_thread = kernel_thread_with_priority(kernel_task,
                                MAXPRI_KERNBAND-2, _activate_thread, TRUE);
}

void
thread_call_setup(
    thread_call_t       call,
    thread_call_func_t  func,
    thread_call_param_t param0
)
{
    call_entry_setup(call, func, param0);
}
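
/*
 * Illustrative sketch (not part of the original module): a client with a
 * statically allocated callout entry initializes it once with
 * thread_call_setup() and arms it later with thread_call_enter() or
 * thread_call_enter1().  The example_* names below are hypothetical.
 */
#if 0   /* example only */
static thread_call_data_t   example_call;

static void
example_handler(
    thread_call_param_t     param0,     /* fixed at setup time */
    thread_call_param_t     param1      /* supplied when armed */
)
{
    /* runs in a kernel callout thread, not at interrupt level */
}

static void
example_init(
    void        *device
)
{
    thread_call_setup(&example_call, example_handler,
                            (thread_call_param_t)device);
}
#endif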
214 | ||
215 | /* | |
216 | * Routine: _internal_call_allocate [private, inline] | |
217 | * | |
218 | * Purpose: Allocate an internal callout entry. | |
219 | * | |
220 | * Preconditions: thread_call_lock held. | |
221 | * | |
222 | * Postconditions: None. | |
223 | */ | |
224 | ||
225 | static __inline__ thread_call_t | |
226 | _internal_call_allocate(void) | |
227 | { | |
228 | thread_call_t call; | |
229 | ||
230 | if (queue_empty(&internal_call_free_queue)) | |
231 | panic("_internal_call_allocate"); | |
232 | ||
233 | call = TC(dequeue_head(&internal_call_free_queue)); | |
234 | ||
235 | return (call); | |
236 | } | |
237 | ||
238 | /* | |
239 | * Routine: _internal_call_release [private, inline] | |
240 | * | |
241 | * Purpose: Release an internal callout entry which | |
242 | * is no longer pending (or delayed). | |
243 | * | |
244 | * Preconditions: thread_call_lock held. | |
245 | * | |
246 | * Postconditions: None. | |
247 | */ | |
248 | ||
249 | static __inline__ | |
250 | void | |
251 | _internal_call_release( | |
252 | thread_call_t call | |
253 | ) | |
254 | { | |
255 | if ( call >= internal_call_storage && | |
256 | call < &internal_call_storage[internal_call_num] ) | |
257 | enqueue_tail(&internal_call_free_queue, qe(call)); | |
258 | } | |
259 | ||
260 | /* | |
261 | * Routine: _pending_call_enqueue [private, inline] | |
262 | * | |
263 | * Purpose: Place an entry at the end of the | |
264 | * pending queue, to be executed soon. | |
265 | * | |
266 | * Preconditions: thread_call_lock held. | |
267 | * | |
268 | * Postconditions: None. | |
269 | */ | |
270 | ||
271 | static __inline__ | |
272 | void | |
273 | _pending_call_enqueue( | |
274 | thread_call_t call | |
275 | ) | |
276 | { | |
277 | enqueue_tail(&pending_call_queue, qe(call)); | |
278 | if (++thread_calls.pending_num > thread_calls.pending_hiwat) | |
279 | thread_calls.pending_hiwat = thread_calls.pending_num; | |
280 | ||
281 | call->state = PENDING; | |
282 | } | |
283 | ||
284 | /* | |
285 | * Routine: _pending_call_dequeue [private, inline] | |
286 | * | |
287 | * Purpose: Remove an entry from the pending queue, | |
288 | * effectively unscheduling it. | |
289 | * | |
290 | * Preconditions: thread_call_lock held. | |
291 | * | |
292 | * Postconditions: None. | |
293 | */ | |
294 | ||
295 | static __inline__ | |
296 | void | |
297 | _pending_call_dequeue( | |
298 | thread_call_t call | |
299 | ) | |
300 | { | |
301 | (void)remque(qe(call)); | |
302 | thread_calls.pending_num--; | |
303 | ||
304 | call->state = IDLE; | |
305 | } | |
306 | ||
307 | /* | |
308 | * Routine: _delayed_call_enqueue [private, inline] | |
309 | * | |
310 | * Purpose: Place an entry on the delayed queue, | |
311 | * after existing entries with an earlier | |
312 | * (or identical) deadline. | |
313 | * | |
314 | * Preconditions: thread_call_lock held. | |
315 | * | |
316 | * Postconditions: None. | |
317 | */ | |
318 | ||
319 | static __inline__ | |
320 | void | |
321 | _delayed_call_enqueue( | |
322 | thread_call_t call | |
323 | ) | |
324 | { | |
325 | thread_call_t current; | |
326 | ||
327 | current = TC(queue_first(&delayed_call_queue)); | |
328 | ||
329 | while (TRUE) { | |
330 | if ( queue_end(&delayed_call_queue, qe(current)) || | |
331 | CMP_ABSOLUTETIME(&call->deadline, | |
332 | ¤t->deadline) < 0 ) { | |
333 | current = TC(queue_prev(qe(current))); | |
334 | break; | |
335 | } | |
336 | ||
337 | current = TC(queue_next(qe(current))); | |
338 | } | |
339 | ||
340 | insque(qe(call), qe(current)); | |
341 | if (++thread_calls.delayed_num > thread_calls.delayed_hiwat) | |
342 | thread_calls.delayed_hiwat = thread_calls.delayed_num; | |
343 | ||
344 | call->state = DELAYED; | |
345 | } | |
346 | ||
347 | /* | |
348 | * Routine: _delayed_call_dequeue [private, inline] | |
349 | * | |
350 | * Purpose: Remove an entry from the delayed queue, | |
351 | * effectively unscheduling it. | |
352 | * | |
353 | * Preconditions: thread_call_lock held. | |
354 | * | |
355 | * Postconditions: None. | |
356 | */ | |
357 | ||
358 | static __inline__ | |
359 | void | |
360 | _delayed_call_dequeue( | |
361 | thread_call_t call | |
362 | ) | |
363 | { | |
364 | (void)remque(qe(call)); | |
365 | thread_calls.delayed_num--; | |
366 | ||
367 | call->state = IDLE; | |
368 | } | |
369 | ||
370 | /* | |
371 | * Routine: _set_delayed_call_timer [private] | |
372 | * | |
373 | * Purpose: Reset the timer so that it | |
374 | * next expires when the entry is due. | |
375 | * | |
376 | * Preconditions: thread_call_lock held. | |
377 | * | |
378 | * Postconditions: None. | |
379 | */ | |
380 | ||
381 | static __inline__ void | |
382 | _set_delayed_call_timer( | |
383 | thread_call_t call | |
384 | ) | |
385 | { | |
386 | timer_call_t timer = &thread_call_delayed_timers[cpu_number()]; | |
387 | ||
388 | timer_call_enter(timer, call->deadline); | |
389 | } | |
390 | ||
391 | /* | |
392 | * Routine: _remove_from_pending_queue [private] | |
393 | * | |
394 | * Purpose: Remove the first (or all) matching | |
395 | * entries from the pending queue, | |
396 | * effectively unscheduling them. | |
397 | * Returns whether any matching entries | |
398 | * were found. | |
399 | * | |
400 | * Preconditions: thread_call_lock held. | |
401 | * | |
402 | * Postconditions: None. | |
403 | */ | |
404 | ||
405 | static | |
406 | boolean_t | |
407 | _remove_from_pending_queue( | |
408 | thread_call_func_t func, | |
409 | thread_call_param_t param0, | |
410 | boolean_t remove_all | |
411 | ) | |
412 | { | |
413 | boolean_t call_removed = FALSE; | |
414 | thread_call_t call; | |
415 | ||
416 | call = TC(queue_first(&pending_call_queue)); | |
417 | ||
418 | while (!queue_end(&pending_call_queue, qe(call))) { | |
419 | if ( call->func == func && | |
420 | call->param0 == param0 ) { | |
421 | thread_call_t next = TC(queue_next(qe(call))); | |
422 | ||
423 | _pending_call_dequeue(call); | |
424 | ||
425 | _internal_call_release(call); | |
426 | ||
427 | call_removed = TRUE; | |
428 | if (!remove_all) | |
429 | break; | |
430 | ||
431 | call = next; | |
432 | } | |
433 | else | |
434 | call = TC(queue_next(qe(call))); | |
435 | } | |
436 | ||
437 | return (call_removed); | |
438 | } | |
439 | ||
440 | /* | |
441 | * Routine: _remove_from_delayed_queue [private] | |
442 | * | |
443 | * Purpose: Remove the first (or all) matching | |
444 | * entries from the delayed queue, | |
445 | * effectively unscheduling them. | |
446 | * Returns whether any matching entries | |
447 | * were found. | |
448 | * | |
449 | * Preconditions: thread_call_lock held. | |
450 | * | |
451 | * Postconditions: None. | |
452 | */ | |
453 | ||
454 | static | |
455 | boolean_t | |
456 | _remove_from_delayed_queue( | |
457 | thread_call_func_t func, | |
458 | thread_call_param_t param0, | |
459 | boolean_t remove_all | |
460 | ) | |
461 | { | |
462 | boolean_t call_removed = FALSE; | |
463 | thread_call_t call; | |
464 | ||
465 | call = TC(queue_first(&delayed_call_queue)); | |
466 | ||
467 | while (!queue_end(&delayed_call_queue, qe(call))) { | |
468 | if ( call->func == func && | |
469 | call->param0 == param0 ) { | |
470 | thread_call_t next = TC(queue_next(qe(call))); | |
471 | ||
472 | _delayed_call_dequeue(call); | |
473 | ||
474 | _internal_call_release(call); | |
475 | ||
476 | call_removed = TRUE; | |
477 | if (!remove_all) | |
478 | break; | |
479 | ||
480 | call = next; | |
481 | } | |
482 | else | |
483 | call = TC(queue_next(qe(call))); | |
484 | } | |
485 | ||
486 | return (call_removed); | |
487 | } | |
488 | ||
489 | /* | |
490 | * Routine: thread_call_func [public] | |
491 | * | |
492 | * Purpose: Schedule a function callout. | |
493 | * Guarantees { function, argument } | |
494 | * uniqueness if unique_call is TRUE. | |
495 | * | |
496 | * Preconditions: Callable from an interrupt context | |
497 | * below splsched. | |
498 | * | |
499 | * Postconditions: None. | |
500 | */ | |
501 | ||
502 | void | |
503 | thread_call_func( | |
504 | thread_call_func_t func, | |
505 | thread_call_param_t param, | |
506 | boolean_t unique_call | |
507 | ) | |
508 | { | |
509 | thread_call_t call; | |
510 | int s; | |
511 | ||
512 | if (!thread_call_initialized) | |
513 | panic("thread_call_func"); | |
514 | ||
515 | s = splsched(); | |
516 | simple_lock(&thread_call_lock); | |
517 | ||
518 | call = TC(queue_first(&pending_call_queue)); | |
519 | ||
520 | while (unique_call && !queue_end(&pending_call_queue, qe(call))) { | |
521 | if ( call->func == func && | |
522 | call->param0 == param ) { | |
523 | break; | |
524 | } | |
525 | ||
526 | call = TC(queue_next(qe(call))); | |
527 | } | |
528 | ||
529 | if (!unique_call || queue_end(&pending_call_queue, qe(call))) { | |
530 | call = _internal_call_allocate(); | |
531 | call->func = func; | |
532 | call->param0 = param; | |
533 | call->param1 = 0; | |
534 | ||
535 | _pending_call_enqueue(call); | |
536 | ||
537 | _call_thread_wake(); | |
538 | } | |
539 | ||
540 | simple_unlock(&thread_call_lock); | |
541 | splx(s); | |
542 | } | |
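
/*
 * Illustrative sketch (not part of the original module): deferring work
 * from an interrupt handler with a one-shot function callout.  Passing
 * TRUE for unique_call collapses duplicate { function, argument }
 * requests that are still pending.  The example_* names are hypothetical.
 */
#if 0   /* example only */
static void example_handler(thread_call_param_t param0,
                            thread_call_param_t param1);

static void
example_interrupt(
    void        *softc
)
{
    /* defer the real work to a callout thread */
    thread_call_func(example_handler, (thread_call_param_t)softc, TRUE);
}
#endif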
543 | ||
544 | /* | |
545 | * Routine: thread_call_func_delayed [public] | |
546 | * | |
547 | * Purpose: Schedule a function callout to | |
548 | * occur at the stated time. | |
549 | * | |
550 | * Preconditions: Callable from an interrupt context | |
551 | * below splsched. | |
552 | * | |
553 | * Postconditions: None. | |
554 | */ | |
555 | ||
556 | void | |
557 | thread_call_func_delayed( | |
558 | thread_call_func_t func, | |
559 | thread_call_param_t param, | |
560 | AbsoluteTime deadline | |
561 | ) | |
562 | { | |
563 | thread_call_t call; | |
564 | int s; | |
565 | ||
566 | if (!thread_call_initialized) | |
567 | panic("thread_call_func_delayed"); | |
568 | ||
569 | s = splsched(); | |
570 | simple_lock(&thread_call_lock); | |
571 | ||
572 | call = _internal_call_allocate(); | |
573 | call->func = func; | |
574 | call->param0 = param; | |
575 | call->param1 = 0; | |
576 | call->deadline = deadline; | |
577 | ||
578 | _delayed_call_enqueue(call); | |
579 | ||
580 | if (queue_first(&delayed_call_queue) == qe(call)) | |
581 | _set_delayed_call_timer(call); | |
582 | ||
583 | simple_unlock(&thread_call_lock); | |
584 | splx(s); | |
585 | } | |
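
/*
 * Illustrative sketch (not part of the original module): scheduling a
 * function callout one second from now.  The deadline is an absolute
 * time; it is derived here with clock_interval_to_deadline(), assuming
 * that routine's usual (interval, scale_factor, result) form and the
 * NSEC_PER_SEC scale constant.  The example_* names are hypothetical.
 */
#if 0   /* example only */
static void example_timeout(thread_call_param_t param0,
                            thread_call_param_t param1);

static void
example_schedule_timeout(
    void        *softc
)
{
    AbsoluteTime        deadline;

    clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
    thread_call_func_delayed(example_timeout,
                            (thread_call_param_t)softc, deadline);
}
#endif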
586 | ||
587 | /* | |
588 | * Routine: thread_call_func_cancel [public] | |
589 | * | |
590 | * Purpose: Unschedule a function callout. | |
591 | * Removes one (or all) | |
592 | * { function, argument } | |
593 | * instance(s) from either (or both) | |
594 | * the pending and the delayed queue, | |
595 | * in that order. Returns a boolean | |
596 | * indicating whether any calls were | |
597 | * cancelled. | |
598 | * | |
599 | * Preconditions: Callable from an interrupt context | |
600 | * below splsched. | |
601 | * | |
602 | * Postconditions: None. | |
603 | */ | |
604 | ||
605 | boolean_t | |
606 | thread_call_func_cancel( | |
607 | thread_call_func_t func, | |
608 | thread_call_param_t param, | |
609 | boolean_t cancel_all | |
610 | ) | |
611 | { | |
612 | boolean_t result; | |
613 | int s; | |
614 | ||
615 | s = splsched(); | |
616 | simple_lock(&thread_call_lock); | |
617 | ||
618 | if (cancel_all) | |
619 | result = _remove_from_pending_queue(func, param, cancel_all) | | |
620 | _remove_from_delayed_queue(func, param, cancel_all); | |
621 | else | |
622 | result = _remove_from_pending_queue(func, param, cancel_all) || | |
623 | _remove_from_delayed_queue(func, param, cancel_all); | |
624 | ||
625 | simple_unlock(&thread_call_lock); | |
626 | splx(s); | |
627 | ||
628 | return (result); | |
629 | } | |
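
/*
 * Illustrative sketch (not part of the original module): cancelling every
 * outstanding instance of the callout scheduled above, e.g. from a detach
 * path.  The result reports whether anything was actually unscheduled.
 * The example_* names are hypothetical.
 */
#if 0   /* example only */
static void example_timeout(thread_call_param_t param0,
                            thread_call_param_t param1);

static void
example_detach(
    void        *softc
)
{
    boolean_t       removed;

    removed = thread_call_func_cancel(example_timeout,
                            (thread_call_param_t)softc, TRUE);
}
#endif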
630 | ||
631 | /* | |
632 | * Routine: thread_call_allocate [public] | |
633 | * | |
634 | * Purpose: Allocate an external callout | |
635 | * entry. | |
636 | * | |
637 | * Preconditions: None. | |
638 | * | |
639 | * Postconditions: None. | |
640 | */ | |
641 | ||
642 | thread_call_t | |
643 | thread_call_allocate( | |
644 | thread_call_func_t func, | |
645 | thread_call_param_t param0 | |
646 | ) | |
647 | { | |
648 | thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t)); | |
649 | ||
650 | call->func = func; | |
651 | call->param0 = param0; | |
652 | call->state = IDLE; | |
653 | ||
654 | return (call); | |
655 | } | |
656 | ||
657 | /* | |
658 | * Routine: thread_call_free [public] | |
659 | * | |
660 | * Purpose: Free an external callout | |
661 | * entry. | |
662 | * | |
663 | * Preconditions: None. | |
664 | * | |
665 | * Postconditions: None. | |
666 | */ | |
667 | ||
668 | boolean_t | |
669 | thread_call_free( | |
670 | thread_call_t call | |
671 | ) | |
672 | { | |
673 | int s; | |
674 | ||
675 | s = splsched(); | |
676 | simple_lock(&thread_call_lock); | |
677 | ||
678 | if (call->state != IDLE) { | |
679 | simple_unlock(&thread_call_lock); | |
680 | splx(s); | |
681 | ||
682 | return (FALSE); | |
683 | } | |
684 | ||
685 | simple_unlock(&thread_call_lock); | |
686 | splx(s); | |
687 | ||
688 | kfree((vm_offset_t)call, sizeof (thread_call_data_t)); | |
689 | ||
690 | return (TRUE); | |
691 | } | |
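
/*
 * Illustrative sketch (not part of the original module): lifetime of an
 * external, caller-owned callout entry.  thread_call_free() refuses to
 * free an entry that is still scheduled, so it is cancelled first.  The
 * example_* names are hypothetical.
 */
#if 0   /* example only */
static void example_handler(thread_call_param_t param0,
                            thread_call_param_t param1);

static void
example_lifecycle(
    void        *context
)
{
    thread_call_t       call;

    call = thread_call_allocate(example_handler,
                            (thread_call_param_t)context);

    (void) thread_call_enter(call);

    /* ... later, on teardown ... */
    (void) thread_call_cancel(call);
    if (!thread_call_free(call))
        panic("example_lifecycle");
}
#endif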
692 | ||
693 | /* | |
694 | * Routine: thread_call_enter [public] | |
695 | * | |
696 | * Purpose: Schedule an external callout | |
697 | * entry to occur "soon". Returns a | |
698 | * boolean indicating whether the call | |
699 | * had been already scheduled. | |
700 | * | |
701 | * Preconditions: Callable from an interrupt context | |
702 | * below splsched. | |
703 | * | |
704 | * Postconditions: None. | |
705 | */ | |
706 | ||
707 | boolean_t | |
708 | thread_call_enter( | |
709 | thread_call_t call | |
710 | ) | |
711 | { | |
712 | boolean_t result = TRUE; | |
713 | int s; | |
714 | ||
715 | s = splsched(); | |
716 | simple_lock(&thread_call_lock); | |
717 | ||
718 | if (call->state != PENDING) { | |
719 | if (call->state == DELAYED) | |
720 | _delayed_call_dequeue(call); | |
721 | else if (call->state == IDLE) | |
722 | result = FALSE; | |
723 | ||
724 | _pending_call_enqueue(call); | |
725 | ||
726 | _call_thread_wake(); | |
727 | } | |
728 | ||
729 | call->param1 = 0; | |
730 | ||
731 | simple_unlock(&thread_call_lock); | |
732 | splx(s); | |
733 | ||
734 | return (result); | |
735 | } | |
736 | ||
737 | boolean_t | |
738 | thread_call_enter1( | |
739 | thread_call_t call, | |
740 | thread_call_param_t param1 | |
741 | ) | |
742 | { | |
743 | boolean_t result = TRUE; | |
744 | int s; | |
745 | ||
746 | s = splsched(); | |
747 | simple_lock(&thread_call_lock); | |
748 | ||
749 | if (call->state != PENDING) { | |
750 | if (call->state == DELAYED) | |
751 | _delayed_call_dequeue(call); | |
752 | else if (call->state == IDLE) | |
753 | result = FALSE; | |
754 | ||
755 | _pending_call_enqueue(call); | |
756 | ||
757 | _call_thread_wake(); | |
758 | } | |
759 | ||
760 | call->param1 = param1; | |
761 | ||
762 | simple_unlock(&thread_call_lock); | |
763 | splx(s); | |
764 | ||
765 | return (result); | |
766 | } | |
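
/*
 * Illustrative sketch (not part of the original module): arming a
 * preallocated entry with a per-invocation parameter.  The return value
 * tells the caller whether the entry was already scheduled.  The
 * example_* names are hypothetical.
 */
#if 0   /* example only */
static boolean_t
example_post_event(
    thread_call_t       call,
    void                *event
)
{
    /* param1 reaches the handler as its second argument */
    return (thread_call_enter1(call, (thread_call_param_t)event));
}
#endif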
767 | ||
768 | /* | |
769 | * Routine: thread_call_enter_delayed [public] | |
770 | * | |
771 | * Purpose: Schedule an external callout | |
772 | * entry to occur at the stated time. | |
773 | * Returns a boolean indicating whether | |
774 | * the call had been already scheduled. | |
775 | * | |
776 | * Preconditions: Callable from an interrupt context | |
777 | * below splsched. | |
778 | * | |
779 | * Postconditions: None. | |
780 | */ | |
781 | ||
782 | boolean_t | |
783 | thread_call_enter_delayed( | |
784 | thread_call_t call, | |
785 | AbsoluteTime deadline | |
786 | ) | |
787 | { | |
788 | boolean_t result = TRUE; | |
789 | int s; | |
790 | ||
791 | s = splsched(); | |
792 | simple_lock(&thread_call_lock); | |
793 | ||
794 | if (call->state == PENDING) | |
795 | _pending_call_dequeue(call); | |
796 | else if (call->state == DELAYED) | |
797 | _delayed_call_dequeue(call); | |
798 | else if (call->state == IDLE) | |
799 | result = FALSE; | |
800 | ||
801 | call->param1 = 0; | |
802 | call->deadline = deadline; | |
803 | ||
804 | _delayed_call_enqueue(call); | |
805 | ||
806 | if (queue_first(&delayed_call_queue) == qe(call)) | |
807 | _set_delayed_call_timer(call); | |
808 | ||
809 | simple_unlock(&thread_call_lock); | |
810 | splx(s); | |
811 | ||
812 | return (result); | |
813 | } | |
814 | ||
815 | boolean_t | |
816 | thread_call_enter1_delayed( | |
817 | thread_call_t call, | |
818 | thread_call_param_t param1, | |
819 | AbsoluteTime deadline | |
820 | ) | |
821 | { | |
822 | boolean_t result = TRUE; | |
823 | int s; | |
824 | ||
825 | s = splsched(); | |
826 | simple_lock(&thread_call_lock); | |
827 | ||
828 | if (call->state == PENDING) | |
829 | _pending_call_dequeue(call); | |
830 | else if (call->state == DELAYED) | |
831 | _delayed_call_dequeue(call); | |
832 | else if (call->state == IDLE) | |
833 | result = FALSE; | |
834 | ||
835 | call->param1 = param1; | |
836 | call->deadline = deadline; | |
837 | ||
838 | _delayed_call_enqueue(call); | |
839 | ||
840 | if (queue_first(&delayed_call_queue) == qe(call)) | |
841 | _set_delayed_call_timer(call); | |
842 | ||
843 | simple_unlock(&thread_call_lock); | |
844 | splx(s); | |
845 | ||
846 | return (result); | |
847 | } | |
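
/*
 * Illustrative sketch (not part of the original module): (re)arming a
 * preallocated entry to fire at an absolute deadline.  Entering a call
 * that is already pending or delayed simply reschedules it.  As above,
 * clock_interval_to_deadline() and NSEC_PER_SEC are assumed.
 */
#if 0   /* example only */
static void
example_rearm(
    thread_call_t       call
)
{
    AbsoluteTime        deadline;

    clock_interval_to_deadline(5, NSEC_PER_SEC, &deadline);
    (void) thread_call_enter_delayed(call, deadline);
}
#endif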
848 | ||
849 | /* | |
850 | * Routine: thread_call_cancel [public] | |
851 | * | |
852 | * Purpose: Unschedule a callout entry. | |
853 | * Returns a boolean indicating | |
854 | * whether the call had actually | |
855 | * been scheduled. | |
856 | * | |
857 | * Preconditions: Callable from an interrupt context | |
858 | * below splsched. | |
859 | * | |
860 | * Postconditions: None. | |
861 | */ | |
862 | ||
863 | boolean_t | |
864 | thread_call_cancel( | |
865 | thread_call_t call | |
866 | ) | |
867 | { | |
868 | boolean_t result = TRUE; | |
869 | int s; | |
870 | ||
871 | s = splsched(); | |
872 | simple_lock(&thread_call_lock); | |
873 | ||
874 | if (call->state == PENDING) | |
875 | _pending_call_dequeue(call); | |
876 | else if (call->state == DELAYED) | |
877 | _delayed_call_dequeue(call); | |
878 | else | |
879 | result = FALSE; | |
880 | ||
881 | simple_unlock(&thread_call_lock); | |
882 | splx(s); | |
883 | ||
884 | return (result); | |
885 | } | |
886 | ||
887 | /* | |
888 | * Routine: thread_call_is_delayed [public] | |
889 | * | |
890 | * Purpose: Returns a boolean indicating | |
891 | * whether a call is currently scheduled | |
892 | * to occur at a later time. Optionally | |
893 | * returns the expiration time. | |
894 | * | |
895 | * Preconditions: Callable from an interrupt context | |
896 | * below splsched. | |
897 | * | |
898 | * Postconditions: None. | |
899 | */ | |
900 | ||
901 | boolean_t | |
902 | thread_call_is_delayed( | |
903 | thread_call_t call, | |
904 | AbsoluteTime *deadline) | |
905 | { | |
906 | boolean_t result = FALSE; | |
907 | int s; | |
908 | ||
909 | s = splsched(); | |
910 | simple_lock(&thread_call_lock); | |
911 | ||
912 | if (call->state == DELAYED) { | |
913 | if (deadline != NULL) | |
914 | *deadline = call->deadline; | |
915 | result = TRUE; | |
916 | } | |
917 | ||
918 | simple_unlock(&thread_call_lock); | |
919 | splx(s); | |
920 | ||
921 | return (result); | |
922 | } | |
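
/*
 * Illustrative sketch (not part of the original module): probing and
 * tearing down a delayed call.  thread_call_is_delayed() optionally
 * reports the pending deadline; thread_call_cancel() returns whether the
 * entry was in fact unscheduled.  example_teardown() is hypothetical.
 */
#if 0   /* example only */
static void
example_teardown(
    thread_call_t       call
)
{
    AbsoluteTime        deadline;

    if (thread_call_is_delayed(call, &deadline)) {
        /* deadline now holds the scheduled expiration time */
    }

    if (!thread_call_cancel(call)) {
        /* the callout was idle, or has already begun executing */
    }
}
#endif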
923 | ||
924 | /* | |
925 | * Routine: _call_thread_wake [private] | |
926 | * | |
927 | * Purpose: Wake a callout thread to service | |
928 | * newly pending callout entries. May wake | |
929 | * the activate thread to either wake or | |
930 | * create additional callout threads. | |
931 | * | |
932 | * Preconditions: thread_call_lock held. | |
933 | * | |
934 | * Postconditions: None. | |
935 | */ | |
936 | ||
937 | static __inline__ | |
938 | void | |
939 | _call_thread_wake(void) | |
940 | { | |
941 | thread_t thread_to_wake; | |
942 | ||
943 | if (!queue_empty(&idle_thread_queue)) { | |
944 | queue_remove_first( | |
945 | &idle_thread_queue, thread_to_wake, thread_t, wait_link); | |
946 | clear_wait(thread_to_wake, THREAD_AWAKENED); | |
947 | thread_calls.idle_thread_num--; | |
948 | } | |
949 | else | |
950 | thread_to_wake = THREAD_NULL; | |
951 | ||
952 | if (!activate_thread_awake && | |
953 | (thread_to_wake == THREAD_NULL || thread_calls.thread_num < | |
954 | (thread_calls.active_num + thread_calls.pending_num))) { | |
955 | clear_wait(activate_thread, THREAD_AWAKENED); | |
956 | activate_thread_awake = TRUE; | |
957 | } | |
958 | } | |
959 | ||
960 | #if defined (__i386__) | |
961 | #define NO_CONTINUATIONS (1) | |
962 | #else | |
963 | #define NO_CONTINUATIONS (0) | |
964 | #endif | |
965 | ||
966 | /* | |
967 | * Routine: _call_thread [private] | |
968 | * | |
969 | * Purpose: Executed by a callout thread. | |
970 | * | |
971 | * Preconditions: None. | |
972 | * | |
973 | * Postconditions: None. | |
974 | */ | |
975 | ||
976 | static | |
977 | void | |
978 | _call_thread_continue(void) | |
979 | { | |
980 | thread_t self = current_thread(); | |
981 | ||
982 | #if NO_CONTINUATIONS | |
983 | loop: | |
984 | #endif | |
985 | (void) splsched(); | |
986 | simple_lock(&thread_call_lock); | |
987 | ||
988 | while (thread_calls.pending_num > 0) { | |
989 | thread_call_t call; | |
990 | thread_call_func_t func; | |
991 | thread_call_param_t param0, param1; | |
992 | ||
993 | call = TC(dequeue_head(&pending_call_queue)); | |
994 | thread_calls.pending_num--; | |
995 | ||
996 | func = call->func; | |
997 | param0 = call->param0; | |
998 | param1 = call->param1; | |
999 | ||
1000 | call->state = IDLE; | |
1001 | ||
1002 | _internal_call_release(call); | |
1003 | ||
1004 | if (++thread_calls.active_num > thread_calls.active_hiwat) | |
1005 | thread_calls.active_hiwat = thread_calls.active_num; | |
1006 | ||
1007 | if (thread_calls.pending_num > 0) | |
1008 | _call_thread_wake(); | |
1009 | ||
1010 | simple_unlock(&thread_call_lock); | |
1011 | (void) spllo(); | |
1012 | ||
1013 | (*func)(param0, param1); | |
1014 | ||
1015 | (void)thread_funnel_set(self->funnel_lock, FALSE); | |
1016 | ||
1017 | (void) splsched(); | |
1018 | simple_lock(&thread_call_lock); | |
1019 | ||
1020 | thread_calls.active_num--; | |
1021 | } | |
1022 | ||
1023 | if ((thread_calls.thread_num - thread_calls.active_num) <= | |
1024 | thread_calls.thread_lowat) { | |
1025 | queue_enter(&idle_thread_queue, self, thread_t, wait_link); | |
1026 | thread_calls.idle_thread_num++; | |
1027 | ||
1028 | assert_wait(&idle_thread_queue, THREAD_INTERRUPTIBLE); | |
1029 | ||
1030 | simple_unlock(&thread_call_lock); | |
1031 | (void) spllo(); | |
1032 | ||
1033 | #if NO_CONTINUATIONS | |
1034 | thread_block((void (*)(void)) 0); | |
1035 | goto loop; | |
1036 | #else | |
1037 | thread_block(_call_thread_continue); | |
1038 | #endif | |
1039 | /* NOTREACHED */ | |
1040 | } | |
1041 | ||
1042 | thread_calls.thread_num--; | |
1043 | ||
1044 | simple_unlock(&thread_call_lock); | |
1045 | (void) spllo(); | |
1046 | ||
1047 | (void) thread_terminate(self->top_act); | |
1048 | /* NOTREACHED */ | |
1049 | } | |
1050 | ||
1051 | static | |
1052 | void | |
1053 | _call_thread(void) | |
1054 | { | |
1055 | thread_t self = current_thread(); | |
1056 | ||
1057 | stack_privilege(self); | |
1058 | ||
1059 | _call_thread_continue(); | |
1060 | /* NOTREACHED */ | |
1061 | } | |
1062 | ||
1063 | /* | |
1064 | * Routine: _activate_thread [private] | |
1065 | * | |
1066 | * Purpose: Executed by the activate thread. | |
1067 | * | |
1068 | * Preconditions: None. | |
1069 | * | |
1070 | * Postconditions: Never terminates. | |
1071 | */ | |
1072 | ||
1073 | static | |
1074 | void | |
1075 | _activate_thread_continue(void) | |
1076 | { | |
1077 | #if NO_CONTINUATIONS | |
1078 | loop: | |
1079 | #endif | |
1080 | (void) splsched(); | |
1081 | simple_lock(&thread_call_lock); | |
1082 | ||
1083 | if (thread_calls.thread_num < | |
1084 | (thread_calls.active_num + thread_calls.pending_num)) { | |
1085 | ||
1086 | if (++thread_calls.thread_num > thread_calls.thread_hiwat) | |
1087 | thread_calls.thread_hiwat = thread_calls.thread_num; | |
1088 | ||
1089 | simple_unlock(&thread_call_lock); | |
1090 | (void) spllo(); | |
1091 | ||
1092 | (void) kernel_thread_with_priority(kernel_task, | |
1093 | MAXPRI_KERNBAND-1, _call_thread, TRUE); | |
1094 | #if NO_CONTINUATIONS | |
1095 | thread_block((void (*)(void)) 0); | |
1096 | goto loop; | |
1097 | #else | |
1098 | thread_block(_activate_thread_continue); | |
1099 | #endif | |
1100 | /* NOTREACHED */ | |
1101 | } | |
1102 | else if (thread_calls.pending_num > 0) { | |
1103 | _call_thread_wake(); | |
1104 | ||
1105 | simple_unlock(&thread_call_lock); | |
1106 | (void) spllo(); | |
1107 | ||
1108 | #if NO_CONTINUATIONS | |
1109 | thread_block((void (*)(void)) 0); | |
1110 | goto loop; | |
1111 | #else | |
1112 | thread_block(_activate_thread_continue); | |
1113 | #endif | |
1114 | /* NOTREACHED */ | |
1115 | } | |
1116 | ||
1117 | assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE); | |
1118 | activate_thread_awake = FALSE; | |
1119 | ||
1120 | simple_unlock(&thread_call_lock); | |
1121 | (void) spllo(); | |
1122 | ||
1123 | #if NO_CONTINUATIONS | |
1124 | thread_block((void (*)(void)) 0); | |
1125 | goto loop; | |
1126 | #else | |
1127 | thread_block(_activate_thread_continue); | |
1128 | #endif | |
1129 | /* NOTREACHED */ | |
1130 | } | |
1131 | ||
1132 | static | |
1133 | void | |
1134 | _activate_thread(void) | |
1135 | { | |
1136 | thread_t self = current_thread(); | |
1137 | ||
1138 | self->vm_privilege = TRUE; | |
1139 | vm_page_free_reserve(2); /* XXX */ | |
1140 | stack_privilege(self); | |
1141 | ||
1142 | _activate_thread_continue(); | |
1143 | /* NOTREACHED */ | |
1144 | } | |
1145 | ||
1146 | static | |
1147 | void | |
1148 | _delayed_call_timer( | |
1149 | timer_call_param_t p0, | |
1150 | timer_call_param_t p1 | |
1151 | ) | |
1152 | { | |
1153 | AbsoluteTime timestamp; | |
1154 | thread_call_t call; | |
1155 | boolean_t new_pending = FALSE; | |
1156 | int s; | |
1157 | ||
1158 | s = splsched(); | |
1159 | simple_lock(&thread_call_lock); | |
1160 | ||
1161 | clock_get_uptime(×tamp); | |
1162 | ||
1163 | call = TC(queue_first(&delayed_call_queue)); | |
1164 | ||
1165 | while (!queue_end(&delayed_call_queue, qe(call))) { | |
1166 | if (CMP_ABSOLUTETIME(&call->deadline, ×tamp) <= 0) { | |
1167 | _delayed_call_dequeue(call); | |
1168 | ||
1169 | _pending_call_enqueue(call); | |
1170 | new_pending = TRUE; | |
1171 | } | |
1172 | else | |
1173 | break; | |
1174 | ||
1175 | call = TC(queue_first(&delayed_call_queue)); | |
1176 | } | |
1177 | ||
1178 | if (!queue_end(&delayed_call_queue, qe(call))) | |
1179 | _set_delayed_call_timer(call); | |
1180 | ||
1181 | if (new_pending) | |
1182 | _call_thread_wake(); | |
1183 | ||
1184 | simple_unlock(&thread_call_lock); | |
1185 | splx(s); | |
1186 | } |