]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/thread_call.c
xnu-517.7.7.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
1 /*
2 * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
3 * All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * The contents of this file constitute Original Code as defined in and
8 * are subject to the Apple Public Source License Version 1.1 (the
9 * "License"). You may not use this file except in compliance with the
10 * License. Please obtain a copy of the License at
11 * http://www.apple.com/publicsource and read it before using this file.
12 *
13 * This Original Code and all software distributed under the License are
14 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
18 * License for the specific language governing rights and limitations
19 * under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Thread-based callout module.
25 *
26 * HISTORY
27 *
28 * 10 July 1999 (debo)
29 * Pulled into Mac OS X (microkernel).
30 *
31 * 3 July 1993 (debo)
32 * Created.
33 */
34
35 #include <mach/mach_types.h>
36
37 #include <kern/sched_prim.h>
38 #include <kern/clock.h>
39 #include <kern/task.h>
40 #include <kern/thread.h>
41
42 #include <kern/thread_call.h>
43 #include <kern/call_entry.h>
44
45 #include <kern/timer_call.h>
46
47 #include <sys/kdebug.h>
48
/* Size of the preallocated pool of internal callout entries. */
#define internal_call_num	768

/* Minimum number of callout threads kept parked and ready. */
#define thread_call_thread_min	4

/* Static pool backing _internal_call_allocate()/_internal_call_release(). */
static
thread_call_data_t
	internal_call_storage[internal_call_num];

/* Single lock protecting every queue and counter in this module. */
decl_simple_lock_data(static,thread_call_lock)

/* Timer armed for the earliest entry on the delayed queue. */
static
timer_call_data_t
	thread_call_delaytimer;

/*
 * xxx_queue:     free list of internal (pool) entries.
 * pending_queue: entries awaiting prompt execution by a callout thread.
 * delayed_queue: entries ordered by ascending deadline.
 */
static
queue_head_t
	thread_call_xxx_queue,
	thread_call_pending_queue, thread_call_delayed_queue;

/* Idle callout threads park here until _call_thread_wake(). */
static
struct wait_queue
	call_thread_waitqueue;

/* TRUE while the activate thread is awake (or has a wakeup posted). */
static
boolean_t
	activate_thread_awake;

/* Module statistics; *_hiwat / *_lowat track historical extremes. */
static struct {
	int		pending_num,
			pending_hiwat;
	int		active_num,		/* callout threads currently running calls */
			active_hiwat,
			active_lowat;
	int		delayed_num,
			delayed_hiwat;
	int		idle_thread_num;	/* threads parked on call_thread_waitqueue */
	int		thread_num,		/* total callout threads in existence */
			thread_hiwat,
			thread_lowat;
} thread_call_vars;

static __inline__ thread_call_t
	_internal_call_allocate(void);

static __inline__ void
_internal_call_release(
	thread_call_t		call
);

static __inline__ void
_pending_call_enqueue(
	thread_call_t		call
),
_pending_call_dequeue(
	thread_call_t		call
),
_delayed_call_enqueue(
	thread_call_t		call
),
_delayed_call_dequeue(
	thread_call_t		call
);

static void __inline__
_set_delayed_call_timer(
	thread_call_t		call
);

static boolean_t
_remove_from_pending_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t				remove_all
),
_remove_from_delayed_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t				remove_all
);

static __inline__ void
	_call_thread_wake(void);

static void
	_call_thread(void),
	_activate_thread(void);

static void
_delayed_call_timer(
	timer_call_param_t		p0,
	timer_call_param_t		p1
);

/* Cast helpers between queue entries and callout entries. */
#define qe(x)		((queue_entry_t)(x))
#define	TC(x)		((thread_call_t)(x))
144
145 /*
146 * Routine: thread_call_initialize [public]
147 *
148 * Description: Initialize this module, called
149 * early during system initialization.
150 *
151 * Preconditions: None.
152 *
153 * Postconditions: None.
154 */
155
void
thread_call_initialize(void)
{
	thread_call_t		call;
	spl_t				s;

	simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&thread_call_pending_queue);
	queue_init(&thread_call_delayed_queue);

	/* Seed the free list with every entry of the static pool. */
	queue_init(&thread_call_xxx_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_num];
			call++) {

		enqueue_tail(&thread_call_xxx_queue, qe(call));
	}

	timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);

	wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
	thread_call_vars.thread_lowat = thread_call_thread_min;

	/* The activate thread starts out running, so mark it awake. */
	activate_thread_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	/* Spawn the activate thread, which creates callout threads on demand. */
	kernel_thread_with_priority(_activate_thread, MAXPRI_KERNEL - 2);
}
191
/*
 * Initialize a caller-supplied callout entry with its function and
 * first parameter; the entry starts out idle (not on any queue).
 */
void
thread_call_setup(
	thread_call_t			call,
	thread_call_func_t		func,
	thread_call_param_t		param0
)
{
	call_entry_setup(call, func, param0);
}
201
202 /*
203 * Routine: _internal_call_allocate [private, inline]
204 *
205 * Purpose: Allocate an internal callout entry.
206 *
207 * Preconditions: thread_call_lock held.
208 *
209 * Postconditions: None.
210 */
211
212 static __inline__ thread_call_t
213 _internal_call_allocate(void)
214 {
215 thread_call_t call;
216
217 if (queue_empty(&thread_call_xxx_queue))
218 panic("_internal_call_allocate");
219
220 call = TC(dequeue_head(&thread_call_xxx_queue));
221
222 return (call);
223 }
224
225 /*
226 * Routine: _internal_call_release [private, inline]
227 *
228 * Purpose: Release an internal callout entry which
229 * is no longer pending (or delayed).
230 *
231 * Preconditions: thread_call_lock held.
232 *
233 * Postconditions: None.
234 */
235
236 static __inline__
237 void
238 _internal_call_release(
239 thread_call_t call
240 )
241 {
242 if ( call >= internal_call_storage &&
243 call < &internal_call_storage[internal_call_num] )
244 enqueue_head(&thread_call_xxx_queue, qe(call));
245 }
246
247 /*
248 * Routine: _pending_call_enqueue [private, inline]
249 *
250 * Purpose: Place an entry at the end of the
251 * pending queue, to be executed soon.
252 *
253 * Preconditions: thread_call_lock held.
254 *
255 * Postconditions: None.
256 */
257
258 static __inline__
259 void
260 _pending_call_enqueue(
261 thread_call_t call
262 )
263 {
264 enqueue_tail(&thread_call_pending_queue, qe(call));
265 if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
266 thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
267
268 call->state = PENDING;
269 }
270
271 /*
272 * Routine: _pending_call_dequeue [private, inline]
273 *
274 * Purpose: Remove an entry from the pending queue,
275 * effectively unscheduling it.
276 *
277 * Preconditions: thread_call_lock held.
278 *
279 * Postconditions: None.
280 */
281
282 static __inline__
283 void
284 _pending_call_dequeue(
285 thread_call_t call
286 )
287 {
288 (void)remque(qe(call));
289 thread_call_vars.pending_num--;
290
291 call->state = IDLE;
292 }
293
294 /*
295 * Routine: _delayed_call_enqueue [private, inline]
296 *
297 * Purpose: Place an entry on the delayed queue,
298 * after existing entries with an earlier
299 * (or identical) deadline.
300 *
301 * Preconditions: thread_call_lock held.
302 *
303 * Postconditions: None.
304 */
305
static __inline__
void
_delayed_call_enqueue(
	thread_call_t		call
)
{
	thread_call_t	current;

	/*
	 * Walk the deadline-ordered queue until we reach its end or the
	 * first entry with a strictly later deadline; stepping back one
	 * entry gives the insertion point (ties go after existing entries,
	 * preserving FIFO order among equal deadlines).
	 */
	current = TC(queue_first(&thread_call_delayed_queue));

	while (TRUE) {
		if (	queue_end(&thread_call_delayed_queue, qe(current))	||
				call->deadline < current->deadline			) {
			current = TC(queue_prev(qe(current)));
			break;
		}

		current = TC(queue_next(qe(current)));
	}

	/* Insert after `current` (which may be the queue head itself). */
	insque(qe(call), qe(current));
	if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
		thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;

	call->state = DELAYED;
}
332
333 /*
334 * Routine: _delayed_call_dequeue [private, inline]
335 *
336 * Purpose: Remove an entry from the delayed queue,
337 * effectively unscheduling it.
338 *
339 * Preconditions: thread_call_lock held.
340 *
341 * Postconditions: None.
342 */
343
344 static __inline__
345 void
346 _delayed_call_dequeue(
347 thread_call_t call
348 )
349 {
350 (void)remque(qe(call));
351 thread_call_vars.delayed_num--;
352
353 call->state = IDLE;
354 }
355
356 /*
357 * Routine: _set_delayed_call_timer [private]
358 *
359 * Purpose: Reset the timer so that it
360 * next expires when the entry is due.
361 *
362 * Preconditions: thread_call_lock held.
363 *
364 * Postconditions: None.
365 */
366
/*
 * (Re)arm the module timer to fire at `call`'s deadline; `call` is
 * expected to be the earliest entry on the delayed queue.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call
)
{
	timer_call_enter(&thread_call_delaytimer, call->deadline);
}
374
375 /*
376 * Routine: _remove_from_pending_queue [private]
377 *
378 * Purpose: Remove the first (or all) matching
379 * entries from the pending queue,
380 * effectively unscheduling them.
381 * Returns whether any matching entries
382 * were found.
383 *
384 * Preconditions: thread_call_lock held.
385 *
386 * Postconditions: None.
387 */
388
static
boolean_t
_remove_from_pending_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t				remove_all
)
{
	boolean_t				call_removed = FALSE;
	thread_call_t			call;

	call = TC(queue_first(&thread_call_pending_queue));

	while (!queue_end(&thread_call_pending_queue, qe(call))) {
		if (	call->func		== func			&&
				call->param0	== param0			) {
			/* Capture the successor before unlinking `call`. */
			thread_call_t	next = TC(queue_next(qe(call)));

			_pending_call_dequeue(call);

			/* Return pool entries to the free list (no-op otherwise). */
			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
423
424 /*
425 * Routine: _remove_from_delayed_queue [private]
426 *
427 * Purpose: Remove the first (or all) matching
428 * entries from the delayed queue,
429 * effectively unscheduling them.
430 * Returns whether any matching entries
431 * were found.
432 *
433 * Preconditions: thread_call_lock held.
434 *
435 * Postconditions: None.
436 */
437
static
boolean_t
_remove_from_delayed_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t				remove_all
)
{
	boolean_t				call_removed = FALSE;
	thread_call_t			call;

	call = TC(queue_first(&thread_call_delayed_queue));

	while (!queue_end(&thread_call_delayed_queue, qe(call))) {
		if (	call->func		== func			&&
				call->param0	== param0			) {
			/* Capture the successor before unlinking `call`. */
			thread_call_t	next = TC(queue_next(qe(call)));

			_delayed_call_dequeue(call);

			/* Return pool entries to the free list (no-op otherwise). */
			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
472
473 /*
474 * Routine: thread_call_func [public]
475 *
476 * Purpose: Schedule a function callout.
477 * Guarantees { function, argument }
478 * uniqueness if unique_call is TRUE.
479 *
480 * Preconditions: Callable from an interrupt context
481 * below splsched.
482 *
483 * Postconditions: None.
484 */
485
void
thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t				unique_call
)
{
	thread_call_t		call;
	spl_t				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/*
	 * When unique_call is requested, scan the pending queue for an
	 * existing { func, param } entry; finding one suppresses the new
	 * enqueue below.
	 */
	call = TC(queue_first(&thread_call_pending_queue));

	while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
		if (	call->func		== func			&&
				call->param0	== param			) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
		/* Grab a pool entry (panics if the pool is exhausted). */
		call = _internal_call_allocate();
		call->func		= func;
		call->param0	= param;
		call->param1	= 0;

		_pending_call_enqueue(call);

		/* No active callout thread: wake (or create) one. */
		if (thread_call_vars.active_num <= 0)
			_call_thread_wake();
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}
525
526 /*
527 * Routine: thread_call_func_delayed [public]
528 *
529 * Purpose: Schedule a function callout to
530 * occur at the stated time.
531 *
532 * Preconditions: Callable from an interrupt context
533 * below splsched.
534 *
535 * Postconditions: None.
536 */
537
void
thread_call_func_delayed(
	thread_call_func_t		func,
	thread_call_param_t		param,
	uint64_t				deadline
)
{
	thread_call_t		call;
	spl_t				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Grab a pool entry (panics if the pool is exhausted). */
	call = _internal_call_allocate();
	call->func		= func;
	call->param0	= param;
	call->param1	= 0;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	/* Only rearm the timer if this entry is now the earliest deadline. */
	if (queue_first(&thread_call_delayed_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);
}
565
566 /*
567 * Routine: thread_call_func_cancel [public]
568 *
569 * Purpose: Unschedule a function callout.
570 * Removes one (or all)
571 * { function, argument }
572 * instance(s) from either (or both)
573 * the pending and the delayed queue,
574 * in that order. Returns a boolean
575 * indicating whether any calls were
576 * cancelled.
577 *
578 * Preconditions: Callable from an interrupt context
579 * below splsched.
580 *
581 * Postconditions: None.
582 */
583
584 boolean_t
585 thread_call_func_cancel(
586 thread_call_func_t func,
587 thread_call_param_t param,
588 boolean_t cancel_all
589 )
590 {
591 boolean_t result;
592 spl_t s;
593
594 s = splsched();
595 simple_lock(&thread_call_lock);
596
597 if (cancel_all)
598 result = _remove_from_pending_queue(func, param, cancel_all) |
599 _remove_from_delayed_queue(func, param, cancel_all);
600 else
601 result = _remove_from_pending_queue(func, param, cancel_all) ||
602 _remove_from_delayed_queue(func, param, cancel_all);
603
604 simple_unlock(&thread_call_lock);
605 splx(s);
606
607 return (result);
608 }
609
610 /*
611 * Routine: thread_call_allocate [public]
612 *
613 * Purpose: Allocate an external callout
614 * entry.
615 *
616 * Preconditions: None.
617 *
618 * Postconditions: None.
619 */
620
621 thread_call_t
622 thread_call_allocate(
623 thread_call_func_t func,
624 thread_call_param_t param0
625 )
626 {
627 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
628
629 call->func = func;
630 call->param0 = param0;
631 call->state = IDLE;
632
633 return (call);
634 }
635
636 /*
637 * Routine: thread_call_free [public]
638 *
639 * Purpose: Free an external callout
640 * entry.
641 *
642 * Preconditions: None.
643 *
644 * Postconditions: None.
645 */
646
647 boolean_t
648 thread_call_free(
649 thread_call_t call
650 )
651 {
652 spl_t s;
653
654 s = splsched();
655 simple_lock(&thread_call_lock);
656
657 if (call->state != IDLE) {
658 simple_unlock(&thread_call_lock);
659 splx(s);
660
661 return (FALSE);
662 }
663
664 simple_unlock(&thread_call_lock);
665 splx(s);
666
667 kfree((vm_offset_t)call, sizeof (thread_call_data_t));
668
669 return (TRUE);
670 }
671
672 /*
673 * Routine: thread_call_enter [public]
674 *
675 * Purpose: Schedule an external callout
676 * entry to occur "soon". Returns a
677 * boolean indicating whether the call
678 * had been already scheduled.
679 *
680 * Preconditions: Callable from an interrupt context
681 * below splsched.
682 *
683 * Postconditions: None.
684 */
685
boolean_t
thread_call_enter(
	thread_call_t		call
)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Already pending: nothing to move; just report TRUE. */
	if (call->state != PENDING) {
		/* A delayed entry migrates to the pending queue. */
		if (call->state == DELAYED)
			_delayed_call_dequeue(call);
		else if (call->state == IDLE)
			result = FALSE;		/* was not scheduled at all */

		_pending_call_enqueue(call);

		/* No active callout thread: wake (or create) one. */
		if (thread_call_vars.active_num <= 0)
			_call_thread_wake();
	}

	/* This variant passes no second parameter. */
	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
716
/*
 * Identical to thread_call_enter() except that param1 is supplied by
 * the caller instead of being zeroed.
 */
boolean_t
thread_call_enter1(
	thread_call_t			call,
	thread_call_param_t		param1
)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state != PENDING) {
		/* A delayed entry migrates to the pending queue. */
		if (call->state == DELAYED)
			_delayed_call_dequeue(call);
		else if (call->state == IDLE)
			result = FALSE;		/* was not scheduled at all */

		_pending_call_enqueue(call);

		/* No active callout thread: wake (or create) one. */
		if (thread_call_vars.active_num <= 0)
			_call_thread_wake();
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
748
749 /*
750 * Routine: thread_call_enter_delayed [public]
751 *
752 * Purpose: Schedule an external callout
753 * entry to occur at the stated time.
754 * Returns a boolean indicating whether
755 * the call had been already scheduled.
756 *
757 * Preconditions: Callable from an interrupt context
758 * below splsched.
759 *
760 * Postconditions: None.
761 */
762
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t			deadline
)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Unhook from whichever queue the entry currently occupies. */
	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else if (call->state == IDLE)
		result = FALSE;		/* was not scheduled at all */

	/* This variant passes no second parameter. */
	call->param1	= 0;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	/* Only rearm the timer if this entry is now the earliest deadline. */
	if (queue_first(&thread_call_delayed_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
795
/*
 * Identical to thread_call_enter_delayed() except that param1 is
 * supplied by the caller instead of being zeroed.
 */
boolean_t
thread_call_enter1_delayed(
	thread_call_t			call,
	thread_call_param_t		param1,
	uint64_t				deadline
)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Unhook from whichever queue the entry currently occupies. */
	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else if (call->state == IDLE)
		result = FALSE;		/* was not scheduled at all */

	call->param1	= param1;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	/* Only rearm the timer if this entry is now the earliest deadline. */
	if (queue_first(&thread_call_delayed_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
829
830 /*
831 * Routine: thread_call_cancel [public]
832 *
833 * Purpose: Unschedule a callout entry.
834 * Returns a boolean indicating
835 * whether the call had actually
836 * been scheduled.
837 *
838 * Preconditions: Callable from an interrupt context
839 * below splsched.
840 *
841 * Postconditions: None.
842 */
843
844 boolean_t
845 thread_call_cancel(
846 thread_call_t call
847 )
848 {
849 boolean_t result = TRUE;
850 spl_t s;
851
852 s = splsched();
853 simple_lock(&thread_call_lock);
854
855 if (call->state == PENDING)
856 _pending_call_dequeue(call);
857 else if (call->state == DELAYED)
858 _delayed_call_dequeue(call);
859 else
860 result = FALSE;
861
862 simple_unlock(&thread_call_lock);
863 splx(s);
864
865 return (result);
866 }
867
868 /*
869 * Routine: thread_call_is_delayed [public]
870 *
871 * Purpose: Returns a boolean indicating
872 * whether a call is currently scheduled
873 * to occur at a later time. Optionally
874 * returns the expiration time.
875 *
876 * Preconditions: Callable from an interrupt context
877 * below splsched.
878 *
879 * Postconditions: None.
880 */
881
882 boolean_t
883 thread_call_is_delayed(
884 thread_call_t call,
885 uint64_t *deadline)
886 {
887 boolean_t result = FALSE;
888 spl_t s;
889
890 s = splsched();
891 simple_lock(&thread_call_lock);
892
893 if (call->state == DELAYED) {
894 if (deadline != NULL)
895 *deadline = call->deadline;
896 result = TRUE;
897 }
898
899 simple_unlock(&thread_call_lock);
900 splx(s);
901
902 return (result);
903 }
904
905 /*
906 * Routine: _call_thread_wake [private, inline]
907 *
908 * Purpose: Wake a callout thread to service
909 * pending callout entries. May wake
910 * the activate thread in order to
911 * create additional callout threads.
912 *
913 * Preconditions: thread_call_lock held.
914 *
915 * Postconditions: None.
916 */
917
static __inline__
void
_call_thread_wake(void)
{
	/*
	 * Prefer waking a parked callout thread; on success, adjust the
	 * idle/active accounting here on its behalf.
	 */
	if (wait_queue_wakeup_one(
			&call_thread_waitqueue, &call_thread_waitqueue,
										THREAD_AWAKENED) == KERN_SUCCESS) {
		thread_call_vars.idle_thread_num--;

		if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
			thread_call_vars.active_hiwat = thread_call_vars.active_num;
	}
	else
	/*
	 * No idle thread available: prod the activate thread so it can
	 * spawn additional callout threads.
	 */
	if (!activate_thread_awake) {
		thread_wakeup_one(&activate_thread_awake);
		activate_thread_awake = TRUE;
	}
}
936
937 /*
938 * Routine: call_thread_block [private]
939 *
940 * Purpose: Hook via thread dispatch on
941 * the occasion of a callout blocking.
942 *
943 * Preconditions: splsched.
944 *
945 * Postconditions: None.
946 */
947
void
call_thread_block(void)
{
	simple_lock(&thread_call_lock);

	/* One fewer active callout thread; track the low water mark. */
	if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
		thread_call_vars.active_lowat = thread_call_vars.active_num;

	/*
	 * If this was the last active thread and work remains pending,
	 * bring another thread on line to keep callouts flowing.
	 */
	if (	thread_call_vars.active_num	<= 0	&&
			thread_call_vars.pending_num	> 0		)
		_call_thread_wake();

	simple_unlock(&thread_call_lock);
}
962
963 /*
964 * Routine: call_thread_unblock [private]
965 *
966 * Purpose: Hook via thread wakeup on
967 * the occasion of a callout unblocking.
968 *
969 * Preconditions: splsched.
970 *
971 * Postconditions: None.
972 */
973
974 void
975 call_thread_unblock(void)
976 {
977 simple_lock(&thread_call_lock);
978
979 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
980 thread_call_vars.active_hiwat = thread_call_vars.active_num;
981
982 simple_unlock(&thread_call_lock);
983 }
984
985 /*
986 * Routine: _call_thread [private]
987 *
988 * Purpose: Executed by a callout thread.
989 *
990 * Preconditions: None.
991 *
992 * Postconditions: None.
993 */
994
/*
 * Body of a callout thread: drain the pending queue, then either park
 * on the wait queue (if the idle pool is below thread_lowat) or
 * terminate.  Runs as a continuation, so it never returns normally.
 */
static
void
_call_thread_continue(void)
{
	thread_t		self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	self->active_callout = TRUE;

	while (thread_call_vars.pending_num > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&thread_call_pending_queue));
		thread_call_vars.pending_num--;

		/*
		 * Snapshot the invocation before releasing the entry: once
		 * state goes IDLE and the lock drops, the entry may be
		 * reused or freed.
		 */
		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->state = IDLE;

		_internal_call_release(call);

		/* Invoke the callout unlocked and at low spl. */
		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				(int)func, (int)param0, (int)param1, 0, 0);

		(*func)(param0, param1);

		/* Drop any funnel the callout may have left held. */
		(void)thread_funnel_set(self->funnel_lock, FALSE);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	self->active_callout = FALSE;

	if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
		thread_call_vars.active_lowat = thread_call_vars.active_num;

	/* Keep a reserve of idle threads parked; otherwise terminate. */
	if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
		thread_call_vars.idle_thread_num++;

		wait_queue_assert_wait(
			&call_thread_waitqueue, &call_thread_waitqueue,
													THREAD_INTERRUPTIBLE);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		/* Block; resume at the top of this routine when woken. */
		thread_block(_call_thread_continue);
		/* NOTREACHED */
	}

	thread_call_vars.thread_num--;

	simple_unlock(&thread_call_lock);
	(void) spllo();

	(void) thread_terminate(self->top_act);
	/* NOTREACHED */
}
1064
/* Entry point for a newly created callout thread; never returns. */
static
void
_call_thread(void)
{
	_call_thread_continue();
	/* NOTREACHED */
}
1072
1073 /*
1074 * Routine: _activate_thread [private]
1075 *
1076 * Purpose: Executed by the activate thread.
1077 *
1078 * Preconditions: None.
1079 *
1080 * Postconditions: Never terminates.
1081 */
1082
/*
 * Body of the activate thread: while work is pending and no callout
 * thread is active, spawn new callout threads; then sleep until
 * _call_thread_wake() posts a wakeup.  Runs as a continuation.
 */
static
void
_activate_thread_continue(void)
{
	(void) splsched();
	simple_lock(&thread_call_lock);

	while (		thread_call_vars.active_num <= 0	&&
				thread_call_vars.pending_num > 0		) {

		/*
		 * Account the thread as active before it exists so a second
		 * iteration doesn't over-spawn; track high water marks.
		 */
		if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
			thread_call_vars.active_hiwat = thread_call_vars.active_num;

		if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
			thread_call_vars.thread_hiwat = thread_call_vars.thread_num;

		/* Thread creation may block: drop the lock and spl first. */
		simple_unlock(&thread_call_lock);
		(void) spllo();

		kernel_thread_with_priority(_call_thread, MAXPRI_KERNEL - 1);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	/* Sleep until _call_thread_wake() needs more threads. */
	assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
	activate_thread_awake = FALSE;

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block(_activate_thread_continue);
	/* NOTREACHED */
}
1117
/*
 * Entry point for the activate thread: grant itself VM privilege so
 * thread creation can proceed under memory pressure, then enter the
 * service loop.  Never returns.
 */
static
void
_activate_thread(void)
{
	thread_t	self = current_thread();

	self->vm_privilege = TRUE;
	vm_page_free_reserve(2);	/* XXX */

	_activate_thread_continue();
	/* NOTREACHED */
}
1130
/*
 * Timer expiration handler: move every delayed entry whose deadline
 * has passed onto the pending queue, rearm the timer for the next
 * deadline (if any), and wake a callout thread when new work arrived.
 * Parameters p0/p1 are unused.
 */
static
void
_delayed_call_timer(
	timer_call_param_t		p0,
	timer_call_param_t		p1
)
{
	uint64_t			timestamp;
	thread_call_t		call;
	boolean_t			new_pending = FALSE;
	spl_t				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	clock_get_uptime(&timestamp);

	call = TC(queue_first(&thread_call_delayed_queue));

	/* The queue is deadline-ordered: stop at the first future entry. */
	while (!queue_end(&thread_call_delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_delayed_call_dequeue(call);

			_pending_call_enqueue(call);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&thread_call_delayed_queue));
	}

	/* Rearm for the earliest remaining deadline, if any. */
	if (!queue_end(&thread_call_delayed_queue, qe(call)))
		_set_delayed_call_timer(call);

	if (new_pending && thread_call_vars.active_num <= 0)
		_call_thread_wake();

	simple_unlock(&thread_call_lock);
	splx(s);
}