/*
 * apple/xnu.git: osfmk/kern/thread_call.c (xnu-517)
 */
/*
 * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
 * All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Thread-based callout module.
 *
 * HISTORY
 *
 * 10 July 1999 (debo)
 *  Pulled into Mac OS X (microkernel).
 *
 * 3 July 1993 (debo)
 *  Created.
 */

#include <mach/mach_types.h>

#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>

#define internal_call_num       768

#define thread_call_thread_min  4

static
thread_call_data_t
    internal_call_storage[internal_call_num];

decl_simple_lock_data(static,thread_call_lock)

static
timer_call_data_t
    thread_call_delaytimer;

static
queue_head_t
    thread_call_xxx_queue,
    thread_call_pending_queue, thread_call_delayed_queue;

static
struct wait_queue
    call_thread_waitqueue;

static
boolean_t
    activate_thread_awake;

static struct {
    int     pending_num,
            pending_hiwat;
    int     active_num,
            active_hiwat,
            active_lowat;
    int     delayed_num,
            delayed_hiwat;
    int     idle_thread_num;
    int     thread_num,
            thread_hiwat,
            thread_lowat;
} thread_call_vars;

static __inline__ thread_call_t
    _internal_call_allocate(void);

static __inline__ void
_internal_call_release(
    thread_call_t       call
);

static __inline__ void
_pending_call_enqueue(
    thread_call_t       call
),
_pending_call_dequeue(
    thread_call_t       call
),
_delayed_call_enqueue(
    thread_call_t       call
),
_delayed_call_dequeue(
    thread_call_t       call
);

static __inline__ void
_set_delayed_call_timer(
    thread_call_t       call
);

static boolean_t
_remove_from_pending_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all
),
_remove_from_delayed_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all
);

static __inline__ void
    _call_thread_wake(void);

static void
    _call_thread(void),
    _activate_thread(void);

static void
_delayed_call_timer(
    timer_call_param_t  p0,
    timer_call_param_t  p1
);

#define qe(x)   ((queue_entry_t)(x))
#define TC(x)   ((thread_call_t)(x))

/*
 * Routine:     thread_call_initialize      [public]
 *
 * Description: Initialize this module, called
 *              early during system initialization.
 *
 * Preconditions:   None.
 *
 * Postconditions:  None.
 */

void
thread_call_initialize(void)
{
    thread_call_t       call;
    spl_t               s;

    simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);

    s = splsched();
    simple_lock(&thread_call_lock);

    queue_init(&thread_call_pending_queue);
    queue_init(&thread_call_delayed_queue);

    queue_init(&thread_call_xxx_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[internal_call_num];
            call++) {

        enqueue_tail(&thread_call_xxx_queue, qe(call));
    }

    timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);

    wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
    thread_call_vars.thread_lowat = thread_call_thread_min;

    activate_thread_awake = TRUE;

    simple_unlock(&thread_call_lock);
    splx(s);

    kernel_thread_with_priority(_activate_thread, MAXPRI_KERNEL - 2);
}

/*
 * Routine:     thread_call_setup       [public]
 *
 * Purpose:     Initialize a callout entry in
 *              caller-provided storage.
 *
 * Preconditions:   None.
 *
 * Postconditions:  None.
 */

void
thread_call_setup(
    thread_call_t       call,
    thread_call_func_t  func,
    thread_call_param_t param0
)
{
    call_entry_setup(call, func, param0);
}
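
/*
 * Example (illustrative sketch, excluded from compilation): a client
 * typically embeds a thread_call_data_t in its own storage and
 * initializes it once with thread_call_setup().  The my_driver_*
 * names below are hypothetical.
 */
#if 0
static thread_call_data_t   my_driver_call;

static void
my_driver_callout(
    thread_call_param_t     param0,
    thread_call_param_t     param1)
{
    /* Runs in a callout thread at spllo, outside interrupt context. */
}

static void
my_driver_init(void)
{
    thread_call_setup(&my_driver_call, my_driver_callout, NULL);
}
#endif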

/*
 * Routine:     _internal_call_allocate     [private, inline]
 *
 * Purpose:     Allocate an internal callout entry.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__ thread_call_t
_internal_call_allocate(void)
{
    thread_call_t       call;

    if (queue_empty(&thread_call_xxx_queue))
        panic("_internal_call_allocate");

    call = TC(dequeue_head(&thread_call_xxx_queue));

    return (call);
}

/*
 * Routine:     _internal_call_release      [private, inline]
 *
 * Purpose:     Release an internal callout entry which
 *              is no longer pending (or delayed).
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__
void
_internal_call_release(
    thread_call_t       call
)
{
    if (    call >= internal_call_storage                      &&
            call < &internal_call_storage[internal_call_num]       )
        enqueue_head(&thread_call_xxx_queue, qe(call));
}

/*
 * Routine:     _pending_call_enqueue       [private, inline]
 *
 * Purpose:     Place an entry at the end of the
 *              pending queue, to be executed soon.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__
void
_pending_call_enqueue(
    thread_call_t       call
)
{
    enqueue_tail(&thread_call_pending_queue, qe(call));
    if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
        thread_call_vars.pending_hiwat = thread_call_vars.pending_num;

    call->state = PENDING;
}

/*
 * Routine:     _pending_call_dequeue       [private, inline]
 *
 * Purpose:     Remove an entry from the pending queue,
 *              effectively unscheduling it.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__
void
_pending_call_dequeue(
    thread_call_t       call
)
{
    (void)remque(qe(call));
    thread_call_vars.pending_num--;

    call->state = IDLE;
}

/*
 * Routine:     _delayed_call_enqueue       [private, inline]
 *
 * Purpose:     Place an entry on the delayed queue,
 *              after existing entries with an earlier
 *              (or identical) deadline.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__
void
_delayed_call_enqueue(
    thread_call_t       call
)
{
    thread_call_t       current;

    current = TC(queue_first(&thread_call_delayed_queue));

    while (TRUE) {
        if (    queue_end(&thread_call_delayed_queue, qe(current))  ||
                call->deadline < current->deadline      ) {
            /*
             * Insert before `current': back up to the last
             * entry with an earlier or identical deadline,
             * so that equal deadlines fire in FIFO order.
             */
            current = TC(queue_prev(qe(current)));
            break;
        }

        current = TC(queue_next(qe(current)));
    }

    insque(qe(call), qe(current));
    if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
        thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;

    call->state = DELAYED;
}

/*
 * Routine:     _delayed_call_dequeue       [private, inline]
 *
 * Purpose:     Remove an entry from the delayed queue,
 *              effectively unscheduling it.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__
void
_delayed_call_dequeue(
    thread_call_t       call
)
{
    (void)remque(qe(call));
    thread_call_vars.delayed_num--;

    call->state = IDLE;
}

/*
 * Routine:     _set_delayed_call_timer     [private]
 *
 * Purpose:     Reset the timer so that it
 *              next expires when the entry is due.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__ void
_set_delayed_call_timer(
    thread_call_t       call
)
{
    timer_call_enter(&thread_call_delaytimer, call->deadline);
}

/*
 * Routine:     _remove_from_pending_queue      [private]
 *
 * Purpose:     Remove the first (or all) matching
 *              entries from the pending queue,
 *              effectively unscheduling them.
 *              Returns whether any matching entries
 *              were found.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static
boolean_t
_remove_from_pending_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all
)
{
    boolean_t           call_removed = FALSE;
    thread_call_t       call;

    call = TC(queue_first(&thread_call_pending_queue));

    while (!queue_end(&thread_call_pending_queue, qe(call))) {
        if (    call->func == func      &&
                call->param0 == param0      ) {
            thread_call_t   next = TC(queue_next(qe(call)));

            _pending_call_dequeue(call);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

/*
 * Routine:     _remove_from_delayed_queue      [private]
 *
 * Purpose:     Remove the first (or all) matching
 *              entries from the delayed queue,
 *              effectively unscheduling them.
 *              Returns whether any matching entries
 *              were found.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static
boolean_t
_remove_from_delayed_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all
)
{
    boolean_t           call_removed = FALSE;
    thread_call_t       call;

    call = TC(queue_first(&thread_call_delayed_queue));

    while (!queue_end(&thread_call_delayed_queue, qe(call))) {
        if (    call->func == func      &&
                call->param0 == param0      ) {
            thread_call_t   next = TC(queue_next(qe(call)));

            _delayed_call_dequeue(call);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

/*
 * Routine:     thread_call_func        [public]
 *
 * Purpose:     Schedule a function callout.
 *              Guarantees { function, argument }
 *              uniqueness if unique_call is TRUE.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

void
thread_call_func(
    thread_call_func_t      func,
    thread_call_param_t     param,
    boolean_t               unique_call
)
{
    thread_call_t       call;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    call = TC(queue_first(&thread_call_pending_queue));

    while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
        if (    call->func == func      &&
                call->param0 == param       ) {
            break;
        }

        call = TC(queue_next(qe(call)));
    }

    if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
        call = _internal_call_allocate();
        call->func      = func;
        call->param0    = param;
        call->param1    = 0;

        _pending_call_enqueue(call);

        if (thread_call_vars.active_num <= 0)
            _call_thread_wake();
    }

    simple_unlock(&thread_call_lock);
    splx(s);
}
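
/*
 * Example (illustrative sketch, excluded from compilation): deferring
 * work from an interrupt handler to a callout thread.  With unique_call
 * TRUE, a second request is coalesced while an identical
 * { function, argument } pair is still pending.  The my_* names are
 * hypothetical.
 */
#if 0
static void my_intr_work(thread_call_param_t p0, thread_call_param_t p1);

static void
my_intr_handler(void *softc)
{
    /* defer the heavy lifting; runs later at spllo in a callout thread */
    thread_call_func(my_intr_work, (thread_call_param_t)softc, TRUE);
}
#endif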

/*
 * Routine:     thread_call_func_delayed        [public]
 *
 * Purpose:     Schedule a function callout to
 *              occur at the stated time.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

void
thread_call_func_delayed(
    thread_call_func_t      func,
    thread_call_param_t     param,
    uint64_t                deadline
)
{
    thread_call_t       call;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    call = _internal_call_allocate();
    call->func      = func;
    call->param0    = param;
    call->param1    = 0;
    call->deadline  = deadline;

    _delayed_call_enqueue(call);

    if (queue_first(&thread_call_delayed_queue) == qe(call))
        _set_delayed_call_timer(call);

    simple_unlock(&thread_call_lock);
    splx(s);
}
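
/*
 * Example (illustrative sketch, excluded from compilation): the
 * deadline is an absolute uptime value, typically derived with
 * clock_interval_to_deadline().  Here a hypothetical handler is
 * scheduled to run roughly 100 ms from now.
 */
#if 0
static void my_timeout(thread_call_param_t p0, thread_call_param_t p1);

static void
my_schedule_timeout(void *arg)
{
    uint64_t    deadline;

    /* 100 units of 1,000,000 ns each == 100 ms from now */
    clock_interval_to_deadline(100, 1000 * 1000, &deadline);
    thread_call_func_delayed(my_timeout, (thread_call_param_t)arg, deadline);
}
#endif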

/*
 * Routine:     thread_call_func_cancel     [public]
 *
 * Purpose:     Unschedule a function callout.
 *              Removes one (or all)
 *              { function, argument }
 *              instance(s) from either (or both)
 *              the pending and the delayed queue,
 *              in that order.  Returns a boolean
 *              indicating whether any calls were
 *              cancelled.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

boolean_t
thread_call_func_cancel(
    thread_call_func_t      func,
    thread_call_param_t     param,
    boolean_t               cancel_all
)
{
    boolean_t       result;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (cancel_all)
        /* non-short-circuit `|': always search both queues */
        result = _remove_from_pending_queue(func, param, cancel_all) |
                    _remove_from_delayed_queue(func, param, cancel_all);
    else
        result = _remove_from_pending_queue(func, param, cancel_all) ||
                    _remove_from_delayed_queue(func, param, cancel_all);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
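
/*
 * Example (illustrative sketch, excluded from compilation): cancelling
 * the hypothetical timeout scheduled above.  With cancel_all TRUE every
 * matching instance is removed from both queues; with cancel_all FALSE
 * at most one.  A callout that is already executing is not affected.
 */
#if 0
static void
my_cancel_timeout(void *arg)
{
    (void) thread_call_func_cancel(my_timeout, (thread_call_param_t)arg, TRUE);
}
#endif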

/*
 * Routine:     thread_call_allocate        [public]
 *
 * Purpose:     Allocate an external callout
 *              entry.
 *
 * Preconditions:   None.
 *
 * Postconditions:  None.
 */

thread_call_t
thread_call_allocate(
    thread_call_func_t      func,
    thread_call_param_t     param0
)
{
    thread_call_t   call = (void *)kalloc(sizeof (thread_call_data_t));

    call->func      = func;
    call->param0    = param0;
    call->state     = IDLE;

    return (call);
}

/*
 * Routine:     thread_call_free        [public]
 *
 * Purpose:     Free an external callout
 *              entry.
 *
 * Preconditions:   None.
 *
 * Postconditions:  None.
 */

boolean_t
thread_call_free(
    thread_call_t       call
)
{
    spl_t       s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state != IDLE) {
        simple_unlock(&thread_call_lock);
        splx(s);

        return (FALSE);
    }

    simple_unlock(&thread_call_lock);
    splx(s);

    kfree((vm_offset_t)call, sizeof (thread_call_data_t));

    return (TRUE);
}
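
/*
 * Example (illustrative sketch, excluded from compilation): typical
 * lifecycle of an external callout entry.  thread_call_free() refuses
 * an entry that is still pending or delayed, so cancel it first.
 * my_timeout is the hypothetical handler declared above.
 */
#if 0
static void
my_callout_lifecycle(void)
{
    thread_call_t   call;

    call = thread_call_allocate(my_timeout, NULL);

    (void) thread_call_enter(call);         /* run "soon" */

    if (!thread_call_cancel(call)) {
        /* was idle: either never scheduled or already dispatched */
    }

    if (!thread_call_free(call)) {
        /* still scheduled; cancel again and retry */
    }
}
#endif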

/*
 * Routine:     thread_call_enter       [public]
 *
 * Purpose:     Schedule an external callout
 *              entry to occur "soon".  Returns a
 *              boolean indicating whether the call
 *              had been already scheduled.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

boolean_t
thread_call_enter(
    thread_call_t       call
)
{
    boolean_t       result = TRUE;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state != PENDING) {
        if (call->state == DELAYED)
            _delayed_call_dequeue(call);
        else if (call->state == IDLE)
            result = FALSE;

        _pending_call_enqueue(call);

        if (thread_call_vars.active_num <= 0)
            _call_thread_wake();
    }

    call->param1 = 0;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

boolean_t
thread_call_enter1(
    thread_call_t           call,
    thread_call_param_t     param1
)
{
    boolean_t       result = TRUE;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state != PENDING) {
        if (call->state == DELAYED)
            _delayed_call_dequeue(call);
        else if (call->state == IDLE)
            result = FALSE;

        _pending_call_enqueue(call);

        if (thread_call_vars.active_num <= 0)
            _call_thread_wake();
    }

    call->param1 = param1;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

/*
 * Routine:     thread_call_enter_delayed       [public]
 *
 * Purpose:     Schedule an external callout
 *              entry to occur at the stated time.
 *              Returns a boolean indicating whether
 *              the call had been already scheduled.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

boolean_t
thread_call_enter_delayed(
    thread_call_t       call,
    uint64_t            deadline
)
{
    boolean_t       result = TRUE;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state == PENDING)
        _pending_call_dequeue(call);
    else if (call->state == DELAYED)
        _delayed_call_dequeue(call);
    else if (call->state == IDLE)
        result = FALSE;

    call->param1    = 0;
    call->deadline  = deadline;

    _delayed_call_enqueue(call);

    if (queue_first(&thread_call_delayed_queue) == qe(call))
        _set_delayed_call_timer(call);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

boolean_t
thread_call_enter1_delayed(
    thread_call_t           call,
    thread_call_param_t     param1,
    uint64_t                deadline
)
{
    boolean_t       result = TRUE;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state == PENDING)
        _pending_call_dequeue(call);
    else if (call->state == DELAYED)
        _delayed_call_dequeue(call);
    else if (call->state == IDLE)
        result = FALSE;

    call->param1    = param1;
    call->deadline  = deadline;

    _delayed_call_enqueue(call);

    if (queue_first(&thread_call_delayed_queue) == qe(call))
        _set_delayed_call_timer(call);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
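
/*
 * Example (illustrative sketch, excluded from compilation): a callout
 * can re-arm itself from its own handler to run periodically.  The
 * entry is set IDLE before the handler executes, so
 * thread_call_enter_delayed() simply schedules it again.  Computing the
 * deadline from "now" lets a small amount of drift accrue each period.
 */
#if 0
static thread_call_data_t   my_tick_call;

static void
my_tick(
    thread_call_param_t     param0,
    thread_call_param_t     param1)
{
    uint64_t    deadline;

    /* ... periodic work ... */

    /* re-arm for one second (1e9 ns) from now */
    clock_interval_to_deadline(1, 1000 * 1000 * 1000, &deadline);
    (void) thread_call_enter_delayed(&my_tick_call, deadline);
}
#endif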

/*
 * Routine:     thread_call_cancel      [public]
 *
 * Purpose:     Unschedule a callout entry.
 *              Returns a boolean indicating
 *              whether the call had actually
 *              been scheduled.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

boolean_t
thread_call_cancel(
    thread_call_t       call
)
{
    boolean_t       result = TRUE;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state == PENDING)
        _pending_call_dequeue(call);
    else if (call->state == DELAYED)
        _delayed_call_dequeue(call);
    else
        result = FALSE;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

/*
 * Routine:     thread_call_is_delayed      [public]
 *
 * Purpose:     Returns a boolean indicating
 *              whether a call is currently scheduled
 *              to occur at a later time.  Optionally
 *              returns the expiration time.
 *
 * Preconditions:   Callable from an interrupt context
 *                  below splsched.
 *
 * Postconditions:  None.
 */

boolean_t
thread_call_is_delayed(
    thread_call_t   call,
    uint64_t        *deadline)
{
    boolean_t       result = FALSE;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->state == DELAYED) {
        if (deadline != NULL)
            *deadline = call->deadline;
        result = TRUE;
    }

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
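
/*
 * Example (illustrative sketch, excluded from compilation): querying
 * whether a callout is still waiting on the delayed queue, and when
 * it is due to fire.
 */
#if 0
static void
my_report_deadline(thread_call_t call)
{
    uint64_t    deadline;

    if (thread_call_is_delayed(call, &deadline)) {
        /* scheduled; `deadline' holds the absolute uptime value */
    }
}
#endif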

/*
 * Routine:     _call_thread_wake       [private, inline]
 *
 * Purpose:     Wake a callout thread to service
 *              pending callout entries.  May wake
 *              the activate thread in order to
 *              create additional callout threads.
 *
 * Preconditions:   thread_call_lock held.
 *
 * Postconditions:  None.
 */

static __inline__
void
_call_thread_wake(void)
{
    if (wait_queue_wakeup_one(
            &call_thread_waitqueue, &call_thread_waitqueue,
            THREAD_AWAKENED) == KERN_SUCCESS) {
        thread_call_vars.idle_thread_num--;

        if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
            thread_call_vars.active_hiwat = thread_call_vars.active_num;
    }
    else
    if (!activate_thread_awake) {
        thread_wakeup_one(&activate_thread_awake);
        activate_thread_awake = TRUE;
    }
}

/*
 * Routine:     call_thread_block       [private]
 *
 * Purpose:     Hook via thread dispatch on
 *              the occasion of a callout blocking.
 *
 * Preconditions:   splsched.
 *
 * Postconditions:  None.
 */

void
call_thread_block(void)
{
    simple_lock(&thread_call_lock);

    if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
        thread_call_vars.active_lowat = thread_call_vars.active_num;

    if (    thread_call_vars.active_num <= 0   &&
            thread_call_vars.pending_num > 0       )
        _call_thread_wake();

    simple_unlock(&thread_call_lock);
}

/*
 * Routine:     call_thread_unblock     [private]
 *
 * Purpose:     Hook via thread wakeup on
 *              the occasion of a callout unblocking.
 *
 * Preconditions:   splsched.
 *
 * Postconditions:  None.
 */

void
call_thread_unblock(void)
{
    simple_lock(&thread_call_lock);

    if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
        thread_call_vars.active_hiwat = thread_call_vars.active_num;

    simple_unlock(&thread_call_lock);
}

/*
 * Routine:     _call_thread        [private]
 *
 * Purpose:     Executed by a callout thread.
 *
 * Preconditions:   None.
 *
 * Postconditions:  None.
 */

static
void
_call_thread_continue(void)
{
    thread_t        self = current_thread();

    (void) splsched();
    simple_lock(&thread_call_lock);

    self->active_callout = TRUE;

    while (thread_call_vars.pending_num > 0) {
        thread_call_t           call;
        thread_call_func_t      func;
        thread_call_param_t     param0, param1;

        call = TC(dequeue_head(&thread_call_pending_queue));
        thread_call_vars.pending_num--;

        func = call->func;
        param0 = call->param0;
        param1 = call->param1;

        call->state = IDLE;

        _internal_call_release(call);

        simple_unlock(&thread_call_lock);
        (void) spllo();

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
            (int)func, (int)param0, (int)param1, 0, 0);

        (*func)(param0, param1);

        (void)thread_funnel_set(self->funnel_lock, FALSE);

        (void) splsched();
        simple_lock(&thread_call_lock);
    }

    self->active_callout = FALSE;

    if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
        thread_call_vars.active_lowat = thread_call_vars.active_num;

    if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
        thread_call_vars.idle_thread_num++;

        wait_queue_assert_wait(
            &call_thread_waitqueue, &call_thread_waitqueue,
            THREAD_INTERRUPTIBLE);

        simple_unlock(&thread_call_lock);
        (void) spllo();

        thread_block(_call_thread_continue);
        /* NOTREACHED */
    }

    thread_call_vars.thread_num--;

    simple_unlock(&thread_call_lock);
    (void) spllo();

    (void) thread_terminate(self->top_act);
    /* NOTREACHED */
}

static
void
_call_thread(void)
{
    _call_thread_continue();
    /* NOTREACHED */
}

/*
 * Routine:     _activate_thread        [private]
 *
 * Purpose:     Executed by the activate thread.
 *
 * Preconditions:   None.
 *
 * Postconditions:  Never terminates.
 */

static
void
_activate_thread_continue(void)
{
    (void) splsched();
    simple_lock(&thread_call_lock);

    while (     thread_call_vars.active_num <= 0    &&
                thread_call_vars.pending_num > 0        ) {

        if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
            thread_call_vars.active_hiwat = thread_call_vars.active_num;

        if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
            thread_call_vars.thread_hiwat = thread_call_vars.thread_num;

        simple_unlock(&thread_call_lock);
        (void) spllo();

        kernel_thread_with_priority(_call_thread, MAXPRI_KERNEL - 1);

        (void) splsched();
        simple_lock(&thread_call_lock);
    }

    assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
    activate_thread_awake = FALSE;

    simple_unlock(&thread_call_lock);
    (void) spllo();

    thread_block(_activate_thread_continue);
    /* NOTREACHED */
}

static
void
_activate_thread(void)
{
    thread_t    self = current_thread();

    self->vm_privilege = TRUE;
    vm_page_free_reserve(2);    /* XXX */

    _activate_thread_continue();
    /* NOTREACHED */
}

static
void
_delayed_call_timer(
    timer_call_param_t      p0,
    timer_call_param_t      p1
)
{
    uint64_t            timestamp;
    thread_call_t       call;
    boolean_t           new_pending = FALSE;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    clock_get_uptime(&timestamp);

    call = TC(queue_first(&thread_call_delayed_queue));

    while (!queue_end(&thread_call_delayed_queue, qe(call))) {
        if (call->deadline <= timestamp) {
            _delayed_call_dequeue(call);

            _pending_call_enqueue(call);
            new_pending = TRUE;
        }
        else
            break;

        call = TC(queue_first(&thread_call_delayed_queue));
    }

    if (!queue_end(&thread_call_delayed_queue, qe(call)))
        _set_delayed_call_timer(call);

    if (new_pending && thread_call_vars.active_num <= 0)
        _call_thread_wake();

    simple_unlock(&thread_call_lock);
    splx(s);
}