]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_call.c
xnu-344.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
3 * All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * The contents of this file constitute Original Code as defined in and
8 * are subject to the Apple Public Source License Version 1.1 (the
9 * "License"). You may not use this file except in compliance with the
10 * License. Please obtain a copy of the License at
11 * http://www.apple.com/publicsource and read it before using this file.
12 *
13 * This Original Code and all software distributed under the License are
14 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
18 * License for the specific language governing rights and limitations
19 * under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * Thread-based callout module.
25 *
26 * HISTORY
27 *
28 * 10 July 1999 (debo)
29 * Pulled into Mac OS X (microkernel).
30 *
31 * 3 July 1993 (debo)
32 * Created.
33 */
34
35#include <mach/mach_types.h>
36
37#include <kern/sched_prim.h>
38#include <kern/clock.h>
39#include <kern/task.h>
40#include <kern/thread.h>
41
42#include <kern/thread_call.h>
43#include <kern/call_entry.h>
44
45#include <kern/timer_call.h>
46
/*
 *	Module-wide state.  Every queue and counter below is protected by
 *	thread_call_lock, which is always taken at splsched().
 */

#define internal_call_num	768		/* preallocated "internal" entries */

#define thread_call_thread_min	4	/* low-water mark for idle callout threads */

/* Backing storage for internal (module-owned) callout entries. */
static
thread_call_data_t
	internal_call_storage[internal_call_num];

/* Guards all module state; taken at splsched(). */
decl_simple_lock_data(static,thread_call_lock)

/* Single hardware timer driving the delayed-call queue. */
static
timer_call_data_t
	thread_call_delayed_timer;

static
queue_head_t
	internal_call_free_queue,			/* free internal entries */
	pending_call_queue, delayed_call_queue;	/* scheduled work */

/* Idle callout threads park here until _call_thread_wake(). */
static
struct wait_queue
	call_thread_idle_queue;

/* Thread that spawns additional callout threads on demand. */
static
thread_t
	activate_thread;

static
boolean_t
	activate_thread_awake;

/* Counters and high/low water marks; thread_call_lock protected. */
static struct {
	int		pending_num,
			pending_hiwat;
	int		active_num,
			active_hiwat,
			active_lowat;
	int		delayed_num,
			delayed_hiwat;
	int		idle_thread_num;
	int		thread_num,
			thread_hiwat,
			thread_lowat;
} thread_calls;

static boolean_t
	thread_call_initialized = FALSE;

/* Forward declarations for the private helpers below. */
static __inline__ thread_call_t
	_internal_call_allocate(void);

static __inline__ void
_internal_call_release(
	thread_call_t		call
);

static __inline__ void
_pending_call_enqueue(
	thread_call_t		call
),
_pending_call_dequeue(
	thread_call_t		call
),
_delayed_call_enqueue(
	thread_call_t		call
),
_delayed_call_dequeue(
	thread_call_t		call
);

static void __inline__
_set_delayed_call_timer(
	thread_call_t		call
);

static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all
),
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all
);

static __inline__ void
	_call_thread_wake(void);

static void
	_call_thread(void),
	_activate_thread(void);

static void
_delayed_call_timer(
	timer_call_param_t		p0,
	timer_call_param_t		p1
);

/* Convenience casts between queue entries and callout entries. */
#define qe(x)	((queue_entry_t)(x))
#define TC(x)	((thread_call_t)(x))

/*
 *	Routine:	thread_call_initialize [public]
 *
 *	Description:	Initialize this module, called
 *			early during system initialization.
 *
 *	Preconditions:	None.
 *
 *	Postconditions:	None.
 */

void
thread_call_initialize(void)
{
	thread_call_t		call;
	spl_t			s;

	/* Must only run once. */
	if (thread_call_initialized)
		panic("thread_call_initialize");

	simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&pending_call_queue);
	queue_init(&delayed_call_queue);

	/* Thread every preallocated internal entry onto the free list. */
	queue_init(&internal_call_free_queue);
	for (
	    	call = internal_call_storage;
			call < &internal_call_storage[internal_call_num];
						call++) {

		enqueue_tail(&internal_call_free_queue, qe(call));
	}

	timer_call_setup(&thread_call_delayed_timer, _delayed_call_timer, NULL);

	wait_queue_init(&call_thread_idle_queue, SYNC_POLICY_FIFO);
	thread_calls.thread_lowat = thread_call_thread_min;

	/* Marked awake so _call_thread_wake() won't clear_wait() it before
	   it has ever blocked. */
	activate_thread_awake = TRUE;
	thread_call_initialized = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	/* Spawn the activate thread, which creates callout threads on demand. */
	activate_thread = kernel_thread_with_priority(
						kernel_task, MAXPRI_KERNEL - 2,
										_activate_thread, TRUE, TRUE);
}
202
/*
 *	Routine:	thread_call_setup [public]
 *
 *	Purpose:	Initialize a caller-supplied callout
 *			entry with its function and first argument.
 */
void
thread_call_setup(
	thread_call_t			call,
	thread_call_func_t		func,
	thread_call_param_t		param0
)
{
	call_entry_setup(call, func, param0);
}
212
213/*
214 * Routine: _internal_call_allocate [private, inline]
215 *
216 * Purpose: Allocate an internal callout entry.
217 *
218 * Preconditions: thread_call_lock held.
219 *
220 * Postconditions: None.
221 */
222
223static __inline__ thread_call_t
224_internal_call_allocate(void)
225{
226 thread_call_t call;
227
228 if (queue_empty(&internal_call_free_queue))
229 panic("_internal_call_allocate");
230
231 call = TC(dequeue_head(&internal_call_free_queue));
232
233 return (call);
234}
235
236/*
237 * Routine: _internal_call_release [private, inline]
238 *
239 * Purpose: Release an internal callout entry which
240 * is no longer pending (or delayed).
241 *
242 * Preconditions: thread_call_lock held.
243 *
244 * Postconditions: None.
245 */
246
247static __inline__
248void
249_internal_call_release(
250 thread_call_t call
251)
252{
253 if ( call >= internal_call_storage &&
254 call < &internal_call_storage[internal_call_num] )
255 enqueue_tail(&internal_call_free_queue, qe(call));
256}
257
258/*
259 * Routine: _pending_call_enqueue [private, inline]
260 *
261 * Purpose: Place an entry at the end of the
262 * pending queue, to be executed soon.
263 *
264 * Preconditions: thread_call_lock held.
265 *
266 * Postconditions: None.
267 */
268
269static __inline__
270void
271_pending_call_enqueue(
272 thread_call_t call
273)
274{
275 enqueue_tail(&pending_call_queue, qe(call));
276 if (++thread_calls.pending_num > thread_calls.pending_hiwat)
277 thread_calls.pending_hiwat = thread_calls.pending_num;
278
279 call->state = PENDING;
280}
281
282/*
283 * Routine: _pending_call_dequeue [private, inline]
284 *
285 * Purpose: Remove an entry from the pending queue,
286 * effectively unscheduling it.
287 *
288 * Preconditions: thread_call_lock held.
289 *
290 * Postconditions: None.
291 */
292
293static __inline__
294void
295_pending_call_dequeue(
296 thread_call_t call
297)
298{
299 (void)remque(qe(call));
300 thread_calls.pending_num--;
301
302 call->state = IDLE;
303}
304
/*
 *	Routine:	_delayed_call_enqueue [private, inline]
 *
 *	Purpose:	Place an entry on the delayed queue,
 *			after existing entries with an earlier
 *			(or identical) deadline.
 *
 *	Preconditions:	thread_call_lock held.
 *
 *	Postconditions:	None.
 */

static __inline__
void
_delayed_call_enqueue(
	thread_call_t		call
)
{
	thread_call_t		current;

	/* Walk forward until we pass the queue end or find the first entry
	   with a strictly later deadline; insert before it (FIFO among
	   equal deadlines). */
	current = TC(queue_first(&delayed_call_queue));

	while (TRUE) {
		if (	queue_end(&delayed_call_queue, qe(current))	||
					call->deadline < current->deadline		) {
			/* Step back one so insque() places us before `current`. */
			current = TC(queue_prev(qe(current)));
			break;
		}

		current = TC(queue_next(qe(current)));
	}

	insque(qe(call), qe(current));
	if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
		thread_calls.delayed_hiwat = thread_calls.delayed_num;

	call->state = DELAYED;
}
343
344/*
345 * Routine: _delayed_call_dequeue [private, inline]
346 *
347 * Purpose: Remove an entry from the delayed queue,
348 * effectively unscheduling it.
349 *
350 * Preconditions: thread_call_lock held.
351 *
352 * Postconditions: None.
353 */
354
355static __inline__
356void
357_delayed_call_dequeue(
358 thread_call_t call
359)
360{
361 (void)remque(qe(call));
362 thread_calls.delayed_num--;
363
364 call->state = IDLE;
365}
366
/*
 *	Routine:	_set_delayed_call_timer [private]
 *
 *	Purpose:	Reset the timer so that it
 *			next expires when the entry is due.
 *
 *	Preconditions:	thread_call_lock held.
 *
 *	Postconditions:	None.
 */

static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call
)
{
	/* Callers pass the head of the delayed queue, so the timer always
	   tracks the earliest deadline. */
	timer_call_enter(&thread_call_delayed_timer, call->deadline);
}
385
/*
 *	Routine:	_remove_from_pending_queue [private]
 *
 *	Purpose:	Remove the first (or all) matching
 *			entries from the pending queue,
 *			effectively unscheduling them.
 *			Returns whether any matching entries
 *			were found.
 *
 *	Preconditions:	thread_call_lock held.
 *
 *	Postconditions:	None.
 */

static
boolean_t
_remove_from_pending_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t				remove_all
)
{
	boolean_t		call_removed = FALSE;
	thread_call_t	call;

	call = TC(queue_first(&pending_call_queue));

	while (!queue_end(&pending_call_queue, qe(call))) {
		if (	call->func == func			&&
				call->param0 == param0			) {
			/* Capture the successor before dequeue invalidates
			   this entry's links. */
			thread_call_t	next = TC(queue_next(qe(call)));

			_pending_call_dequeue(call);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
434
/*
 *	Routine:	_remove_from_delayed_queue [private]
 *
 *	Purpose:	Remove the first (or all) matching
 *			entries from the delayed queue,
 *			effectively unscheduling them.
 *			Returns whether any matching entries
 *			were found.
 *
 *	Preconditions:	thread_call_lock held.
 *
 *	Postconditions:	None.
 */

static
boolean_t
_remove_from_delayed_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t				remove_all
)
{
	boolean_t		call_removed = FALSE;
	thread_call_t	call;

	call = TC(queue_first(&delayed_call_queue));

	while (!queue_end(&delayed_call_queue, qe(call))) {
		if (	call->func == func			&&
				call->param0 == param0			) {
			/* Capture the successor before dequeue invalidates
			   this entry's links. */
			thread_call_t	next = TC(queue_next(qe(call)));

			_delayed_call_dequeue(call);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
483
/*
 *	Routine:	thread_call_func [public]
 *
 *	Purpose:	Schedule a function callout.
 *			Guarantees { function, argument }
 *			uniqueness if unique_call is TRUE.
 *
 *	Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 *	Postconditions:	None.
 */

void
thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t				unique_call
)
{
	thread_call_t		call;
	int					s;

	if (!thread_call_initialized)
		panic("thread_call_func");

	s = splsched();
	simple_lock(&thread_call_lock);

	/* When uniqueness is requested, scan the pending queue for an
	   existing { func, param } entry. */
	call = TC(queue_first(&pending_call_queue));

	while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
		if (	call->func == func			&&
				call->param0 == param			) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	/* Enqueue a fresh internal entry unless a duplicate was found. */
	if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func		= func;
		call->param0	= param;
		call->param1	= 0;

		_pending_call_enqueue(call);

		/* Wake a callout thread if none is currently running work. */
		if (thread_calls.active_num <= 0)
			_call_thread_wake();
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}
539
/*
 *	Routine:	thread_call_func_delayed [public]
 *
 *	Purpose:	Schedule a function callout to
 *			occur at the stated time.
 *
 *	Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 *	Postconditions:	None.
 */

void
thread_call_func_delayed(
	thread_call_func_t		func,
	thread_call_param_t		param,
	uint64_t				deadline
)
{
	thread_call_t		call;
	int					s;

	if (!thread_call_initialized)
		panic("thread_call_func_delayed");

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func		= func;
	call->param0	= param;
	call->param1	= 0;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	/* If this entry became the earliest deadline, rearm the timer. */
	if (queue_first(&delayed_call_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);
}
582
/*
 *	Routine:	thread_call_func_cancel [public]
 *
 *	Purpose:	Unschedule a function callout.
 *			Removes one (or all)
 *			{ function, argument }
 *			instance(s) from either (or both)
 *			the pending and the delayed queue,
 *			in that order.  Returns a boolean
 *			indicating whether any calls were
 *			cancelled.
 *
 *	Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 *	Postconditions:	None.
 */

boolean_t
thread_call_func_cancel(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t				cancel_all
)
{
	boolean_t			result;
	int					s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Non-short-circuiting `|` is deliberate here: when cancelling all,
	   both queues must be purged even if the first removal succeeds. */
	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
						_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
						_remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
626
/*
 *	Routine:	thread_call_allocate [public]
 *
 *	Purpose:	Allocate an external callout
 *			entry.
 *
 *	Preconditions:	None.
 *
 *	Postconditions:	None.
 */

thread_call_t
thread_call_allocate(
	thread_call_func_t		func,
	thread_call_param_t		param0
)
{
	/* NOTE(review): the kalloc() result is not checked before being
	   dereferenced — presumably kalloc panics or cannot fail in this
	   context; confirm. */
	thread_call_t		call = (void *)kalloc(sizeof (thread_call_data_t));

	call->func		= func;
	call->param0	= param0;
	call->state		= IDLE;

	return (call);
}
652
/*
 *	Routine:	thread_call_free [public]
 *
 *	Purpose:	Free an external callout
 *			entry.  Fails (returns FALSE) if the
 *			entry is still scheduled.
 *
 *	Preconditions:	None.
 *
 *	Postconditions:	None.
 */

boolean_t
thread_call_free(
	thread_call_t		call
)
{
	int		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Refuse to free an entry that is still pending or delayed. */
	if (call->state != IDLE) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	kfree((vm_offset_t)call, sizeof (thread_call_data_t));

	return (TRUE);
}
688
/*
 *	Routine:	thread_call_enter [public]
 *
 *	Purpose:	Schedule an external callout
 *			entry to occur "soon".  Returns a
 *			boolean indicating whether the call
 *			had been already scheduled.
 *
 *	Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 *	Postconditions:	None.
 */

boolean_t
thread_call_enter(
	thread_call_t		call
)
{
	boolean_t		result = TRUE;
	int				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state != PENDING) {
		/* Migrate from the delayed queue if necessary; an IDLE entry
		   was not previously scheduled, so report FALSE. */
		if (call->state == DELAYED)
			_delayed_call_dequeue(call);
		else if (call->state == IDLE)
			result = FALSE;

		_pending_call_enqueue(call);

		/* Wake a callout thread if none is currently running work. */
		if (thread_calls.active_num <= 0)
			_call_thread_wake();
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
733
/*
 *	Routine:	thread_call_enter1 [public]
 *
 *	Purpose:	As thread_call_enter(), but also supplies
 *			the second callout argument (param1).
 *			Returns whether the call was already scheduled.
 */
boolean_t
thread_call_enter1(
	thread_call_t			call,
	thread_call_param_t		param1
)
{
	boolean_t		result = TRUE;
	int				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state != PENDING) {
		/* Migrate from the delayed queue if necessary; an IDLE entry
		   was not previously scheduled, so report FALSE. */
		if (call->state == DELAYED)
			_delayed_call_dequeue(call);
		else if (call->state == IDLE)
			result = FALSE;

		_pending_call_enqueue(call);

		/* Wake a callout thread if none is currently running work. */
		if (thread_calls.active_num <= 0)
			_call_thread_wake();
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
765
/*
 *	Routine:	thread_call_enter_delayed [public]
 *
 *	Purpose:	Schedule an external callout
 *			entry to occur at the stated time.
 *			Returns a boolean indicating whether
 *			the call had been already scheduled.
 *
 *	Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 *	Postconditions:	None.
 */

boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t			deadline
)
{
	boolean_t		result = TRUE;
	int				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Unlink from whichever queue the entry currently occupies. */
	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else if (call->state == IDLE)
		result = FALSE;

	call->param1	= 0;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	/* If this entry became the earliest deadline, rearm the timer. */
	if (queue_first(&delayed_call_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
812
/*
 *	Routine:	thread_call_enter1_delayed [public]
 *
 *	Purpose:	As thread_call_enter_delayed(), but also
 *			supplies the second callout argument (param1).
 *			Returns whether the call was already scheduled.
 */
boolean_t
thread_call_enter1_delayed(
	thread_call_t			call,
	thread_call_param_t		param1,
	uint64_t				deadline
)
{
	boolean_t		result = TRUE;
	int				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Unlink from whichever queue the entry currently occupies. */
	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else if (call->state == IDLE)
		result = FALSE;

	call->param1	= param1;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	/* If this entry became the earliest deadline, rearm the timer. */
	if (queue_first(&delayed_call_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
846
847/*
848 * Routine: thread_call_cancel [public]
849 *
850 * Purpose: Unschedule a callout entry.
851 * Returns a boolean indicating
852 * whether the call had actually
853 * been scheduled.
854 *
855 * Preconditions: Callable from an interrupt context
856 * below splsched.
857 *
858 * Postconditions: None.
859 */
860
861boolean_t
862thread_call_cancel(
863 thread_call_t call
864)
865{
866 boolean_t result = TRUE;
867 int s;
868
869 s = splsched();
870 simple_lock(&thread_call_lock);
871
872 if (call->state == PENDING)
873 _pending_call_dequeue(call);
874 else if (call->state == DELAYED)
875 _delayed_call_dequeue(call);
876 else
877 result = FALSE;
878
879 simple_unlock(&thread_call_lock);
880 splx(s);
881
882 return (result);
883}
884
885/*
886 * Routine: thread_call_is_delayed [public]
887 *
888 * Purpose: Returns a boolean indicating
889 * whether a call is currently scheduled
890 * to occur at a later time. Optionally
891 * returns the expiration time.
892 *
893 * Preconditions: Callable from an interrupt context
894 * below splsched.
895 *
896 * Postconditions: None.
897 */
898
899boolean_t
900thread_call_is_delayed(
901 thread_call_t call,
0b4e3aa0 902 uint64_t *deadline)
1c79356b
A
903{
904 boolean_t result = FALSE;
905 int s;
906
907 s = splsched();
908 simple_lock(&thread_call_lock);
909
910 if (call->state == DELAYED) {
911 if (deadline != NULL)
912 *deadline = call->deadline;
913 result = TRUE;
914 }
915
916 simple_unlock(&thread_call_lock);
917 splx(s);
918
919 return (result);
920}
921
922/*
9bccf70c 923 * Routine: _call_thread_wake [private, inline]
1c79356b
A
924 *
925 * Purpose: Wake a callout thread to service
9bccf70c
A
926 * pending callout entries. May wake
927 * the activate thread in order to
1c79356b
A
928 * create additional callout threads.
929 *
930 * Preconditions: thread_call_lock held.
931 *
932 * Postconditions: None.
933 */
934
935static __inline__
936void
937_call_thread_wake(void)
938{
9bccf70c
A
939 if (wait_queue_wakeup_one(
940 &call_thread_idle_queue, &call_thread_idle_queue,
941 THREAD_AWAKENED) == KERN_SUCCESS) {
1c79356b 942 thread_calls.idle_thread_num--;
9bccf70c
A
943
944 if (++thread_calls.active_num > thread_calls.active_hiwat)
945 thread_calls.active_hiwat = thread_calls.active_num;
1c79356b
A
946 }
947 else
9bccf70c 948 if (!activate_thread_awake) {
1c79356b
A
949 clear_wait(activate_thread, THREAD_AWAKENED);
950 activate_thread_awake = TRUE;
951 }
952}
953
9bccf70c
A
954/*
955 * Routine: call_thread_block [private]
956 *
957 * Purpose: Hook via thread dispatch on
958 * the occasion of a callout blocking.
959 *
960 * Preconditions: splsched.
961 *
962 * Postconditions: None.
963 */
964
965void
966call_thread_block(void)
967{
968 simple_lock(&thread_call_lock);
969
970 if (--thread_calls.active_num < thread_calls.active_lowat)
971 thread_calls.active_lowat = thread_calls.active_num;
972
973 if ( thread_calls.active_num <= 0 &&
974 thread_calls.pending_num > 0 )
975 _call_thread_wake();
976
977 simple_unlock(&thread_call_lock);
978}
979
/*
 *	Routine:	call_thread_unblock [private]
 *
 *	Purpose:	Hook via thread wakeup on
 *			the occasion of a callout unblocking.
 *
 *	Preconditions:	splsched.
 *
 *	Postconditions:	None.
 */

void
call_thread_unblock(void)
{
	simple_lock(&thread_call_lock);

	/* The thread is running callouts again; account for it. */
	if (++thread_calls.active_num > thread_calls.active_hiwat)
		thread_calls.active_hiwat = thread_calls.active_num;

	simple_unlock(&thread_call_lock);
}
1c79356b
A
1001
/*
 *	Routine:	_call_thread [private]
 *
 *	Purpose:	Executed by a callout thread.  Drains the
 *			pending queue, then either parks on the idle
 *			wait queue (via continuation) or terminates
 *			if enough idle threads already exist.
 *
 *	Preconditions:	None.
 *
 *	Postconditions:	None.
 */

static
void
_call_thread_continue(void)
{
	thread_t		self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	self->active_callout = TRUE;

	/* Drain the pending queue, dropping the lock around each callout. */
	while (thread_calls.pending_num > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&pending_call_queue));
		thread_calls.pending_num--;

		/* Copy out the callout fields and release the entry before
		   unlocking, so it can be reused or freed while we run it. */
		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->state = IDLE;

		_internal_call_release(call);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		(*func)(param0, param1);

		/* Drop any funnel the callout may have left held. */
		(void)thread_funnel_set(self->funnel_lock, FALSE);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	self->active_callout = FALSE;

	if (--thread_calls.active_num < thread_calls.active_lowat)
		thread_calls.active_lowat = thread_calls.active_num;

	/* Park on the idle queue if we are below the idle low-water mark;
	   the continuation restarts this function on wakeup. */
	if (thread_calls.idle_thread_num < thread_calls.thread_lowat) {
		thread_calls.idle_thread_num++;

		wait_queue_assert_wait(
			&call_thread_idle_queue, &call_thread_idle_queue,
												THREAD_INTERRUPTIBLE);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		thread_block(_call_thread_continue);
		/* NOTREACHED */
	}

	/* Surplus thread: exit instead of idling. */
	thread_calls.thread_num--;

	simple_unlock(&thread_call_lock);
	(void) spllo();

	(void) thread_terminate(self->top_act);
	/* NOTREACHED */
}
1077
/* Entry point for a callout thread: acquire stack privilege, then run
   the main loop (which never returns). */
static
void
_call_thread(void)
{
	thread_t		self = current_thread();

	stack_privilege(self);

	_call_thread_continue();
	/* NOTREACHED */
}
1089
/*
 *	Routine:	_activate_thread [private]
 *
 *	Purpose:	Executed by the activate thread.  Spawns new
 *			callout threads whenever work is pending but
 *			no thread is active, then sleeps (via
 *			continuation) until prodded by
 *			_call_thread_wake().
 *
 *	Preconditions:	None.
 *
 *	Postconditions:	Never terminates.
 */

static
void
_activate_thread_continue(void)
{
	(void) splsched();
	simple_lock(&thread_call_lock);

	/* Create callout threads while work is pending and nobody is
	   servicing it. */
	while (	thread_calls.active_num <= 0	&&
			thread_calls.pending_num > 0		) {

		/* Account for the new thread before dropping the lock, so
		   concurrent wakeups see it as active. */
		if (++thread_calls.active_num > thread_calls.active_hiwat)
			thread_calls.active_hiwat = thread_calls.active_num;

		if (++thread_calls.thread_num > thread_calls.thread_hiwat)
			thread_calls.thread_hiwat = thread_calls.thread_num;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		(void) kernel_thread_with_priority(
						kernel_task, MAXPRI_KERNEL - 1,
										_call_thread, TRUE, TRUE);
		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	/* Sleep until _call_thread_wake() clears our wait. */
	assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
	activate_thread_awake = FALSE;

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block(_activate_thread_continue);
	/* NOTREACHED */
}
1135
/* Entry point for the activate thread: take VM and stack privileges
   (it must make progress even under memory pressure), then run the
   main loop (which never returns). */
static
void
_activate_thread(void)
{
	thread_t		self = current_thread();

	self->vm_privilege = TRUE;
	vm_page_free_reserve(2);	/* XXX */
	stack_privilege(self);

	_activate_thread_continue();
	/* NOTREACHED */
}
1149
/*
 *	Routine:	_delayed_call_timer [private]
 *
 *	Purpose:	Timer expiration handler.  Moves every
 *			delayed entry whose deadline has passed onto
 *			the pending queue, rearms the timer for the
 *			next deadline, and wakes a callout thread if
 *			new work appeared.  p0/p1 are unused (set up
 *			as NULL in thread_call_initialize).
 */
static
void
_delayed_call_timer(
	timer_call_param_t		p0,
	timer_call_param_t		p1
)
{
	uint64_t			timestamp;
	thread_call_t		call;
	boolean_t			new_pending = FALSE;
	int					s;

	s = splsched();
	simple_lock(&thread_call_lock);

	clock_get_uptime(&timestamp);

	/* The delayed queue is deadline-ordered, so we can stop at the
	   first entry that is still in the future. */
	call = TC(queue_first(&delayed_call_queue));

	while (!queue_end(&delayed_call_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_delayed_call_dequeue(call);

			_pending_call_enqueue(call);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&delayed_call_queue));
	}

	/* Rearm for the next (still-future) deadline, if any remain. */
	if (!queue_end(&delayed_call_queue, qe(call)))
		_set_delayed_call_timer(call);

	if (new_pending && thread_calls.active_num <= 0)
		_call_thread_wake();

	simple_unlock(&thread_call_lock);
	splx(s);
}