]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_call.c
xnu-344.21.74.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc.
3 * All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
d7e50217 7 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 8 *
d7e50217
A
9 * This file contains Original Code and/or Modifications of Original Code
10 * as defined in and that are subject to the Apple Public Source License
11 * Version 2.0 (the 'License'). You may not use this file except in
12 * compliance with the License. Please obtain a copy of the License at
13 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * file.
15 *
16 * The Original Code and all software distributed under the License are
17 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
18 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
19 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
d7e50217
A
20 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
21 * Please see the License for the specific language governing rights and
22 * limitations under the License.
1c79356b
A
23 *
24 * @APPLE_LICENSE_HEADER_END@
25 */
26/*
27 * Thread-based callout module.
28 *
29 * HISTORY
30 *
31 * 10 July 1999 (debo)
32 * Pulled into Mac OS X (microkernel).
33 *
34 * 3 July 1993 (debo)
35 * Created.
36 */
37
38#include <mach/mach_types.h>
39
40#include <kern/sched_prim.h>
41#include <kern/clock.h>
42#include <kern/task.h>
43#include <kern/thread.h>
44
45#include <kern/thread_call.h>
46#include <kern/call_entry.h>
47
48#include <kern/timer_call.h>
49
50#define internal_call_num 768
51
52#define thread_call_thread_min 4
53
54static
55thread_call_data_t
56 internal_call_storage[internal_call_num];
57
58decl_simple_lock_data(static,thread_call_lock)
59
60static
61timer_call_data_t
9bccf70c 62 thread_call_delayed_timer;
1c79356b
A
63
64static
65queue_head_t
66 internal_call_free_queue,
67 pending_call_queue, delayed_call_queue;
68
69static
9bccf70c
A
70struct wait_queue
71 call_thread_idle_queue;
1c79356b
A
72
73static
74thread_t
75 activate_thread;
76
77static
78boolean_t
79 activate_thread_awake;
80
81static struct {
82 int pending_num,
83 pending_hiwat;
84 int active_num,
9bccf70c
A
85 active_hiwat,
86 active_lowat;
1c79356b
A
87 int delayed_num,
88 delayed_hiwat;
89 int idle_thread_num;
90 int thread_num,
91 thread_hiwat,
92 thread_lowat;
93} thread_calls;
94
95static boolean_t
96 thread_call_initialized = FALSE;
97
98static __inline__ thread_call_t
99 _internal_call_allocate(void);
100
101static __inline__ void
102_internal_call_release(
103 thread_call_t call
104);
105
106static __inline__ void
107_pending_call_enqueue(
108 thread_call_t call
109),
110_pending_call_dequeue(
111 thread_call_t call
112),
113_delayed_call_enqueue(
114 thread_call_t call
115),
116_delayed_call_dequeue(
117 thread_call_t call
118);
119
120static void __inline__
121_set_delayed_call_timer(
122 thread_call_t call
123);
124
125static boolean_t
126_remove_from_pending_queue(
127 thread_call_func_t func,
128 thread_call_param_t param0,
129 boolean_t remove_all
130),
131_remove_from_delayed_queue(
132 thread_call_func_t func,
133 thread_call_param_t param0,
134 boolean_t remove_all
135);
136
137static __inline__ void
138 _call_thread_wake(void);
139
140static void
141 _call_thread(void),
142 _activate_thread(void);
143
144static void
145_delayed_call_timer(
146 timer_call_param_t p0,
147 timer_call_param_t p1
148);
149
150#define qe(x) ((queue_entry_t)(x))
151#define TC(x) ((thread_call_t)(x))
152
153/*
154 * Routine: thread_call_initialize [public]
155 *
156 * Description: Initialize this module, called
157 * early during system initialization.
158 *
159 * Preconditions: None.
160 *
161 * Postconditions: None.
162 */
163
164void
165thread_call_initialize(void)
166{
167 thread_call_t call;
168 spl_t s;
1c79356b
A
169
170 if (thread_call_initialized)
171 panic("thread_call_initialize");
172
173 simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER);
174
175 s = splsched();
176 simple_lock(&thread_call_lock);
177
178 queue_init(&pending_call_queue);
179 queue_init(&delayed_call_queue);
180
181 queue_init(&internal_call_free_queue);
182 for (
183 call = internal_call_storage;
184 call < &internal_call_storage[internal_call_num];
185 call++) {
186
187 enqueue_tail(&internal_call_free_queue, qe(call));
188 }
189
9bccf70c 190 timer_call_setup(&thread_call_delayed_timer, _delayed_call_timer, NULL);
1c79356b 191
9bccf70c 192 wait_queue_init(&call_thread_idle_queue, SYNC_POLICY_FIFO);
1c79356b
A
193 thread_calls.thread_lowat = thread_call_thread_min;
194
195 activate_thread_awake = TRUE;
196 thread_call_initialized = TRUE;
197
198 simple_unlock(&thread_call_lock);
199 splx(s);
200
0b4e3aa0
A
201 activate_thread = kernel_thread_with_priority(
202 kernel_task, MAXPRI_KERNEL - 2,
203 _activate_thread, TRUE, TRUE);
1c79356b
A
204}
205
206void
207thread_call_setup(
208 thread_call_t call,
209 thread_call_func_t func,
210 thread_call_param_t param0
211)
212{
213 call_entry_setup(call, func, param0);
214}
215
216/*
217 * Routine: _internal_call_allocate [private, inline]
218 *
219 * Purpose: Allocate an internal callout entry.
220 *
221 * Preconditions: thread_call_lock held.
222 *
223 * Postconditions: None.
224 */
225
226static __inline__ thread_call_t
227_internal_call_allocate(void)
228{
229 thread_call_t call;
230
231 if (queue_empty(&internal_call_free_queue))
232 panic("_internal_call_allocate");
233
234 call = TC(dequeue_head(&internal_call_free_queue));
235
236 return (call);
237}
238
239/*
240 * Routine: _internal_call_release [private, inline]
241 *
242 * Purpose: Release an internal callout entry which
243 * is no longer pending (or delayed).
244 *
245 * Preconditions: thread_call_lock held.
246 *
247 * Postconditions: None.
248 */
249
250static __inline__
251void
252_internal_call_release(
253 thread_call_t call
254)
255{
256 if ( call >= internal_call_storage &&
257 call < &internal_call_storage[internal_call_num] )
258 enqueue_tail(&internal_call_free_queue, qe(call));
259}
260
261/*
262 * Routine: _pending_call_enqueue [private, inline]
263 *
264 * Purpose: Place an entry at the end of the
265 * pending queue, to be executed soon.
266 *
267 * Preconditions: thread_call_lock held.
268 *
269 * Postconditions: None.
270 */
271
272static __inline__
273void
274_pending_call_enqueue(
275 thread_call_t call
276)
277{
278 enqueue_tail(&pending_call_queue, qe(call));
279 if (++thread_calls.pending_num > thread_calls.pending_hiwat)
280 thread_calls.pending_hiwat = thread_calls.pending_num;
281
282 call->state = PENDING;
283}
284
285/*
286 * Routine: _pending_call_dequeue [private, inline]
287 *
288 * Purpose: Remove an entry from the pending queue,
289 * effectively unscheduling it.
290 *
291 * Preconditions: thread_call_lock held.
292 *
293 * Postconditions: None.
294 */
295
296static __inline__
297void
298_pending_call_dequeue(
299 thread_call_t call
300)
301{
302 (void)remque(qe(call));
303 thread_calls.pending_num--;
304
305 call->state = IDLE;
306}
307
308/*
309 * Routine: _delayed_call_enqueue [private, inline]
310 *
311 * Purpose: Place an entry on the delayed queue,
312 * after existing entries with an earlier
313 * (or identical) deadline.
314 *
315 * Preconditions: thread_call_lock held.
316 *
317 * Postconditions: None.
318 */
319
320static __inline__
321void
322_delayed_call_enqueue(
323 thread_call_t call
324)
325{
326 thread_call_t current;
327
328 current = TC(queue_first(&delayed_call_queue));
329
330 while (TRUE) {
0b4e3aa0
A
331 if ( queue_end(&delayed_call_queue, qe(current)) ||
332 call->deadline < current->deadline ) {
1c79356b
A
333 current = TC(queue_prev(qe(current)));
334 break;
335 }
336
337 current = TC(queue_next(qe(current)));
338 }
339
340 insque(qe(call), qe(current));
341 if (++thread_calls.delayed_num > thread_calls.delayed_hiwat)
342 thread_calls.delayed_hiwat = thread_calls.delayed_num;
343
344 call->state = DELAYED;
345}
346
347/*
348 * Routine: _delayed_call_dequeue [private, inline]
349 *
350 * Purpose: Remove an entry from the delayed queue,
351 * effectively unscheduling it.
352 *
353 * Preconditions: thread_call_lock held.
354 *
355 * Postconditions: None.
356 */
357
358static __inline__
359void
360_delayed_call_dequeue(
361 thread_call_t call
362)
363{
364 (void)remque(qe(call));
365 thread_calls.delayed_num--;
366
367 call->state = IDLE;
368}
369
370/*
371 * Routine: _set_delayed_call_timer [private]
372 *
373 * Purpose: Reset the timer so that it
374 * next expires when the entry is due.
375 *
376 * Preconditions: thread_call_lock held.
377 *
378 * Postconditions: None.
379 */
380
381static __inline__ void
382_set_delayed_call_timer(
383 thread_call_t call
384)
385{
9bccf70c 386 timer_call_enter(&thread_call_delayed_timer, call->deadline);
1c79356b
A
387}
388
389/*
390 * Routine: _remove_from_pending_queue [private]
391 *
392 * Purpose: Remove the first (or all) matching
393 * entries from the pending queue,
394 * effectively unscheduling them.
395 * Returns whether any matching entries
396 * were found.
397 *
398 * Preconditions: thread_call_lock held.
399 *
400 * Postconditions: None.
401 */
402
403static
404boolean_t
405_remove_from_pending_queue(
406 thread_call_func_t func,
407 thread_call_param_t param0,
408 boolean_t remove_all
409)
410{
411 boolean_t call_removed = FALSE;
412 thread_call_t call;
413
414 call = TC(queue_first(&pending_call_queue));
415
416 while (!queue_end(&pending_call_queue, qe(call))) {
417 if ( call->func == func &&
418 call->param0 == param0 ) {
419 thread_call_t next = TC(queue_next(qe(call)));
420
421 _pending_call_dequeue(call);
422
423 _internal_call_release(call);
424
425 call_removed = TRUE;
426 if (!remove_all)
427 break;
428
429 call = next;
430 }
431 else
432 call = TC(queue_next(qe(call)));
433 }
434
435 return (call_removed);
436}
437
438/*
439 * Routine: _remove_from_delayed_queue [private]
440 *
441 * Purpose: Remove the first (or all) matching
442 * entries from the delayed queue,
443 * effectively unscheduling them.
444 * Returns whether any matching entries
445 * were found.
446 *
447 * Preconditions: thread_call_lock held.
448 *
449 * Postconditions: None.
450 */
451
452static
453boolean_t
454_remove_from_delayed_queue(
455 thread_call_func_t func,
456 thread_call_param_t param0,
457 boolean_t remove_all
458)
459{
460 boolean_t call_removed = FALSE;
461 thread_call_t call;
462
463 call = TC(queue_first(&delayed_call_queue));
464
465 while (!queue_end(&delayed_call_queue, qe(call))) {
466 if ( call->func == func &&
467 call->param0 == param0 ) {
468 thread_call_t next = TC(queue_next(qe(call)));
469
470 _delayed_call_dequeue(call);
471
472 _internal_call_release(call);
473
474 call_removed = TRUE;
475 if (!remove_all)
476 break;
477
478 call = next;
479 }
480 else
481 call = TC(queue_next(qe(call)));
482 }
483
484 return (call_removed);
485}
486
487/*
488 * Routine: thread_call_func [public]
489 *
490 * Purpose: Schedule a function callout.
491 * Guarantees { function, argument }
492 * uniqueness if unique_call is TRUE.
493 *
494 * Preconditions: Callable from an interrupt context
495 * below splsched.
496 *
497 * Postconditions: None.
498 */
499
500void
501thread_call_func(
502 thread_call_func_t func,
503 thread_call_param_t param,
504 boolean_t unique_call
505)
506{
507 thread_call_t call;
508 int s;
509
510 if (!thread_call_initialized)
511 panic("thread_call_func");
512
513 s = splsched();
514 simple_lock(&thread_call_lock);
515
516 call = TC(queue_first(&pending_call_queue));
517
518 while (unique_call && !queue_end(&pending_call_queue, qe(call))) {
519 if ( call->func == func &&
520 call->param0 == param ) {
521 break;
522 }
523
524 call = TC(queue_next(qe(call)));
525 }
526
527 if (!unique_call || queue_end(&pending_call_queue, qe(call))) {
528 call = _internal_call_allocate();
529 call->func = func;
530 call->param0 = param;
531 call->param1 = 0;
532
533 _pending_call_enqueue(call);
534
9bccf70c
A
535 if (thread_calls.active_num <= 0)
536 _call_thread_wake();
1c79356b
A
537 }
538
539 simple_unlock(&thread_call_lock);
540 splx(s);
541}
542
543/*
544 * Routine: thread_call_func_delayed [public]
545 *
546 * Purpose: Schedule a function callout to
547 * occur at the stated time.
548 *
549 * Preconditions: Callable from an interrupt context
550 * below splsched.
551 *
552 * Postconditions: None.
553 */
554
555void
556thread_call_func_delayed(
557 thread_call_func_t func,
558 thread_call_param_t param,
0b4e3aa0 559 uint64_t deadline
1c79356b
A
560)
561{
562 thread_call_t call;
563 int s;
564
565 if (!thread_call_initialized)
566 panic("thread_call_func_delayed");
567
568 s = splsched();
569 simple_lock(&thread_call_lock);
570
571 call = _internal_call_allocate();
572 call->func = func;
573 call->param0 = param;
574 call->param1 = 0;
575 call->deadline = deadline;
576
577 _delayed_call_enqueue(call);
578
579 if (queue_first(&delayed_call_queue) == qe(call))
580 _set_delayed_call_timer(call);
581
582 simple_unlock(&thread_call_lock);
583 splx(s);
584}
585
586/*
587 * Routine: thread_call_func_cancel [public]
588 *
589 * Purpose: Unschedule a function callout.
590 * Removes one (or all)
591 * { function, argument }
592 * instance(s) from either (or both)
593 * the pending and the delayed queue,
594 * in that order. Returns a boolean
595 * indicating whether any calls were
596 * cancelled.
597 *
598 * Preconditions: Callable from an interrupt context
599 * below splsched.
600 *
601 * Postconditions: None.
602 */
603
604boolean_t
605thread_call_func_cancel(
606 thread_call_func_t func,
607 thread_call_param_t param,
608 boolean_t cancel_all
609)
610{
611 boolean_t result;
612 int s;
613
614 s = splsched();
615 simple_lock(&thread_call_lock);
616
617 if (cancel_all)
618 result = _remove_from_pending_queue(func, param, cancel_all) |
619 _remove_from_delayed_queue(func, param, cancel_all);
620 else
621 result = _remove_from_pending_queue(func, param, cancel_all) ||
622 _remove_from_delayed_queue(func, param, cancel_all);
623
624 simple_unlock(&thread_call_lock);
625 splx(s);
626
627 return (result);
628}
629
630/*
631 * Routine: thread_call_allocate [public]
632 *
633 * Purpose: Allocate an external callout
634 * entry.
635 *
636 * Preconditions: None.
637 *
638 * Postconditions: None.
639 */
640
641thread_call_t
642thread_call_allocate(
643 thread_call_func_t func,
644 thread_call_param_t param0
645)
646{
647 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
648
649 call->func = func;
650 call->param0 = param0;
651 call->state = IDLE;
652
653 return (call);
654}
655
656/*
657 * Routine: thread_call_free [public]
658 *
659 * Purpose: Free an external callout
660 * entry.
661 *
662 * Preconditions: None.
663 *
664 * Postconditions: None.
665 */
666
667boolean_t
668thread_call_free(
669 thread_call_t call
670)
671{
672 int s;
673
674 s = splsched();
675 simple_lock(&thread_call_lock);
676
677 if (call->state != IDLE) {
678 simple_unlock(&thread_call_lock);
679 splx(s);
680
681 return (FALSE);
682 }
683
684 simple_unlock(&thread_call_lock);
685 splx(s);
686
687 kfree((vm_offset_t)call, sizeof (thread_call_data_t));
688
689 return (TRUE);
690}
691
692/*
693 * Routine: thread_call_enter [public]
694 *
695 * Purpose: Schedule an external callout
696 * entry to occur "soon". Returns a
697 * boolean indicating whether the call
698 * had been already scheduled.
699 *
700 * Preconditions: Callable from an interrupt context
701 * below splsched.
702 *
703 * Postconditions: None.
704 */
705
706boolean_t
707thread_call_enter(
708 thread_call_t call
709)
710{
711 boolean_t result = TRUE;
712 int s;
713
714 s = splsched();
715 simple_lock(&thread_call_lock);
716
717 if (call->state != PENDING) {
718 if (call->state == DELAYED)
719 _delayed_call_dequeue(call);
720 else if (call->state == IDLE)
721 result = FALSE;
722
723 _pending_call_enqueue(call);
9bccf70c
A
724
725 if (thread_calls.active_num <= 0)
726 _call_thread_wake();
1c79356b
A
727 }
728
729 call->param1 = 0;
730
731 simple_unlock(&thread_call_lock);
732 splx(s);
733
734 return (result);
735}
736
737boolean_t
738thread_call_enter1(
739 thread_call_t call,
740 thread_call_param_t param1
741)
742{
743 boolean_t result = TRUE;
744 int s;
745
746 s = splsched();
747 simple_lock(&thread_call_lock);
748
749 if (call->state != PENDING) {
750 if (call->state == DELAYED)
751 _delayed_call_dequeue(call);
752 else if (call->state == IDLE)
753 result = FALSE;
754
755 _pending_call_enqueue(call);
756
9bccf70c
A
757 if (thread_calls.active_num <= 0)
758 _call_thread_wake();
1c79356b
A
759 }
760
761 call->param1 = param1;
762
763 simple_unlock(&thread_call_lock);
764 splx(s);
765
766 return (result);
767}
768
769/*
770 * Routine: thread_call_enter_delayed [public]
771 *
772 * Purpose: Schedule an external callout
773 * entry to occur at the stated time.
774 * Returns a boolean indicating whether
775 * the call had been already scheduled.
776 *
777 * Preconditions: Callable from an interrupt context
778 * below splsched.
779 *
780 * Postconditions: None.
781 */
782
783boolean_t
784thread_call_enter_delayed(
785 thread_call_t call,
0b4e3aa0 786 uint64_t deadline
1c79356b
A
787)
788{
789 boolean_t result = TRUE;
790 int s;
791
792 s = splsched();
793 simple_lock(&thread_call_lock);
794
795 if (call->state == PENDING)
796 _pending_call_dequeue(call);
797 else if (call->state == DELAYED)
798 _delayed_call_dequeue(call);
799 else if (call->state == IDLE)
800 result = FALSE;
801
802 call->param1 = 0;
803 call->deadline = deadline;
804
805 _delayed_call_enqueue(call);
806
807 if (queue_first(&delayed_call_queue) == qe(call))
808 _set_delayed_call_timer(call);
809
810 simple_unlock(&thread_call_lock);
811 splx(s);
812
813 return (result);
814}
815
816boolean_t
817thread_call_enter1_delayed(
818 thread_call_t call,
819 thread_call_param_t param1,
0b4e3aa0 820 uint64_t deadline
1c79356b
A
821)
822{
823 boolean_t result = TRUE;
824 int s;
825
826 s = splsched();
827 simple_lock(&thread_call_lock);
828
829 if (call->state == PENDING)
830 _pending_call_dequeue(call);
831 else if (call->state == DELAYED)
832 _delayed_call_dequeue(call);
833 else if (call->state == IDLE)
834 result = FALSE;
835
836 call->param1 = param1;
837 call->deadline = deadline;
838
839 _delayed_call_enqueue(call);
840
841 if (queue_first(&delayed_call_queue) == qe(call))
842 _set_delayed_call_timer(call);
843
844 simple_unlock(&thread_call_lock);
845 splx(s);
846
847 return (result);
848}
849
850/*
851 * Routine: thread_call_cancel [public]
852 *
853 * Purpose: Unschedule a callout entry.
854 * Returns a boolean indicating
855 * whether the call had actually
856 * been scheduled.
857 *
858 * Preconditions: Callable from an interrupt context
859 * below splsched.
860 *
861 * Postconditions: None.
862 */
863
864boolean_t
865thread_call_cancel(
866 thread_call_t call
867)
868{
869 boolean_t result = TRUE;
870 int s;
871
872 s = splsched();
873 simple_lock(&thread_call_lock);
874
875 if (call->state == PENDING)
876 _pending_call_dequeue(call);
877 else if (call->state == DELAYED)
878 _delayed_call_dequeue(call);
879 else
880 result = FALSE;
881
882 simple_unlock(&thread_call_lock);
883 splx(s);
884
885 return (result);
886}
887
888/*
889 * Routine: thread_call_is_delayed [public]
890 *
891 * Purpose: Returns a boolean indicating
892 * whether a call is currently scheduled
893 * to occur at a later time. Optionally
894 * returns the expiration time.
895 *
896 * Preconditions: Callable from an interrupt context
897 * below splsched.
898 *
899 * Postconditions: None.
900 */
901
902boolean_t
903thread_call_is_delayed(
904 thread_call_t call,
0b4e3aa0 905 uint64_t *deadline)
1c79356b
A
906{
907 boolean_t result = FALSE;
908 int s;
909
910 s = splsched();
911 simple_lock(&thread_call_lock);
912
913 if (call->state == DELAYED) {
914 if (deadline != NULL)
915 *deadline = call->deadline;
916 result = TRUE;
917 }
918
919 simple_unlock(&thread_call_lock);
920 splx(s);
921
922 return (result);
923}
924
925/*
9bccf70c 926 * Routine: _call_thread_wake [private, inline]
1c79356b
A
927 *
928 * Purpose: Wake a callout thread to service
9bccf70c
A
929 * pending callout entries. May wake
930 * the activate thread in order to
1c79356b
A
931 * create additional callout threads.
932 *
933 * Preconditions: thread_call_lock held.
934 *
935 * Postconditions: None.
936 */
937
938static __inline__
939void
940_call_thread_wake(void)
941{
9bccf70c
A
942 if (wait_queue_wakeup_one(
943 &call_thread_idle_queue, &call_thread_idle_queue,
944 THREAD_AWAKENED) == KERN_SUCCESS) {
1c79356b 945 thread_calls.idle_thread_num--;
9bccf70c
A
946
947 if (++thread_calls.active_num > thread_calls.active_hiwat)
948 thread_calls.active_hiwat = thread_calls.active_num;
1c79356b
A
949 }
950 else
9bccf70c 951 if (!activate_thread_awake) {
1c79356b
A
952 clear_wait(activate_thread, THREAD_AWAKENED);
953 activate_thread_awake = TRUE;
954 }
955}
956
9bccf70c
A
957/*
958 * Routine: call_thread_block [private]
959 *
960 * Purpose: Hook via thread dispatch on
961 * the occasion of a callout blocking.
962 *
963 * Preconditions: splsched.
964 *
965 * Postconditions: None.
966 */
967
968void
969call_thread_block(void)
970{
971 simple_lock(&thread_call_lock);
972
973 if (--thread_calls.active_num < thread_calls.active_lowat)
974 thread_calls.active_lowat = thread_calls.active_num;
975
976 if ( thread_calls.active_num <= 0 &&
977 thread_calls.pending_num > 0 )
978 _call_thread_wake();
979
980 simple_unlock(&thread_call_lock);
981}
982
983/*
984 * Routine: call_thread_unblock [private]
985 *
986 * Purpose: Hook via thread wakeup on
987 * the occasion of a callout unblocking.
988 *
989 * Preconditions: splsched.
990 *
991 * Postconditions: None.
992 */
993
994void
995call_thread_unblock(void)
996{
997 simple_lock(&thread_call_lock);
998
999 if (++thread_calls.active_num > thread_calls.active_hiwat)
1000 thread_calls.active_hiwat = thread_calls.active_num;
1001
1002 simple_unlock(&thread_call_lock);
1003}
1c79356b
A
1004
1005/*
1006 * Routine: _call_thread [private]
1007 *
1008 * Purpose: Executed by a callout thread.
1009 *
1010 * Preconditions: None.
1011 *
1012 * Postconditions: None.
1013 */
1014
1015static
1016void
1017_call_thread_continue(void)
1018{
1019 thread_t self = current_thread();
1020
1c79356b
A
1021 (void) splsched();
1022 simple_lock(&thread_call_lock);
1023
9bccf70c
A
1024 self->active_callout = TRUE;
1025
1c79356b
A
1026 while (thread_calls.pending_num > 0) {
1027 thread_call_t call;
1028 thread_call_func_t func;
1029 thread_call_param_t param0, param1;
1030
1031 call = TC(dequeue_head(&pending_call_queue));
1032 thread_calls.pending_num--;
1033
1034 func = call->func;
1035 param0 = call->param0;
1036 param1 = call->param1;
1037
1038 call->state = IDLE;
1039
1040 _internal_call_release(call);
1041
1c79356b
A
1042 simple_unlock(&thread_call_lock);
1043 (void) spllo();
1044
1045 (*func)(param0, param1);
1046
1047 (void)thread_funnel_set(self->funnel_lock, FALSE);
1048
1049 (void) splsched();
1050 simple_lock(&thread_call_lock);
1c79356b 1051 }
9bccf70c
A
1052
1053 self->active_callout = FALSE;
1054
1055 if (--thread_calls.active_num < thread_calls.active_lowat)
1056 thread_calls.active_lowat = thread_calls.active_num;
1c79356b 1057
9bccf70c 1058 if (thread_calls.idle_thread_num < thread_calls.thread_lowat) {
1c79356b
A
1059 thread_calls.idle_thread_num++;
1060
9bccf70c
A
1061 wait_queue_assert_wait(
1062 &call_thread_idle_queue, &call_thread_idle_queue,
1063 THREAD_INTERRUPTIBLE);
1c79356b
A
1064
1065 simple_unlock(&thread_call_lock);
1066 (void) spllo();
1067
1c79356b 1068 thread_block(_call_thread_continue);
1c79356b
A
1069 /* NOTREACHED */
1070 }
1071
1072 thread_calls.thread_num--;
1073
1074 simple_unlock(&thread_call_lock);
1075 (void) spllo();
1076
1077 (void) thread_terminate(self->top_act);
1078 /* NOTREACHED */
1079}
1080
/*
 *	Routine:	_call_thread  [private]
 *
 *	Purpose:	Entry point for a newly created callout
 *		thread; takes stack privilege then enters the
 *		service loop.  Never returns.
 */
static
void
_call_thread(void)
{
	stack_privilege(current_thread());

	_call_thread_continue();
	/* NOTREACHED */
}
1092
1093/*
1094 * Routine: _activate_thread [private]
1095 *
1096 * Purpose: Executed by the activate thread.
1097 *
1098 * Preconditions: None.
1099 *
1100 * Postconditions: Never terminates.
1101 */
1102
1103static
1104void
1105_activate_thread_continue(void)
1106{
1c79356b
A
1107 (void) splsched();
1108 simple_lock(&thread_call_lock);
1109
9bccf70c
A
1110 while ( thread_calls.active_num <= 0 &&
1111 thread_calls.pending_num > 0 ) {
1112
1113 if (++thread_calls.active_num > thread_calls.active_hiwat)
1114 thread_calls.active_hiwat = thread_calls.active_num;
1c79356b
A
1115
1116 if (++thread_calls.thread_num > thread_calls.thread_hiwat)
1117 thread_calls.thread_hiwat = thread_calls.thread_num;
1118
1119 simple_unlock(&thread_call_lock);
1120 (void) spllo();
1121
0b4e3aa0
A
1122 (void) kernel_thread_with_priority(
1123 kernel_task, MAXPRI_KERNEL - 1,
1124 _call_thread, TRUE, TRUE);
9bccf70c
A
1125 (void) splsched();
1126 simple_lock(&thread_call_lock);
1c79356b 1127 }
1c79356b
A
1128
1129 assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
1130 activate_thread_awake = FALSE;
1131
1132 simple_unlock(&thread_call_lock);
1133 (void) spllo();
1134
1c79356b 1135 thread_block(_activate_thread_continue);
1c79356b
A
1136 /* NOTREACHED */
1137}
1138
1139static
1140void
1141_activate_thread(void)
1142{
1143 thread_t self = current_thread();
1144
1145 self->vm_privilege = TRUE;
1146 vm_page_free_reserve(2); /* XXX */
1147 stack_privilege(self);
1148
1149 _activate_thread_continue();
1150 /* NOTREACHED */
1151}
1152
1153static
1154void
1155_delayed_call_timer(
1156 timer_call_param_t p0,
1157 timer_call_param_t p1
1158)
1159{
0b4e3aa0 1160 uint64_t timestamp;
1c79356b
A
1161 thread_call_t call;
1162 boolean_t new_pending = FALSE;
1163 int s;
1164
1165 s = splsched();
1166 simple_lock(&thread_call_lock);
1167
1168 clock_get_uptime(&timestamp);
1169
1170 call = TC(queue_first(&delayed_call_queue));
1171
1172 while (!queue_end(&delayed_call_queue, qe(call))) {
0b4e3aa0 1173 if (call->deadline <= timestamp) {
1c79356b
A
1174 _delayed_call_dequeue(call);
1175
1176 _pending_call_enqueue(call);
1177 new_pending = TRUE;
1178 }
1179 else
1180 break;
1181
1182 call = TC(queue_first(&delayed_call_queue));
1183 }
1184
1185 if (!queue_end(&delayed_call_queue, qe(call)))
1186 _set_delayed_call_timer(call);
1187
9bccf70c 1188 if (new_pending && thread_calls.active_num <= 0)
1c79356b
A
1189 _call_thread_wake();
1190
1191 simple_unlock(&thread_call_lock);
1192 splx(s);
1193}