/*
 * Copyright (c) 1993-1995, 1999-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>

#define internal_call_num	768

#define thread_call_thread_min	4

static
thread_call_data_t
	internal_call_storage[internal_call_num];

decl_simple_lock_data(static,thread_call_lock)

static
timer_call_data_t
	thread_call_delaytimer;

static
queue_head_t
	thread_call_xxx_queue,
	thread_call_pending_queue, thread_call_delayed_queue;

static
struct wait_queue
	call_thread_waitqueue;

static
boolean_t
	activate_thread_awake;

static struct {
	int	pending_num,
		pending_hiwat;
	int	active_num,
		active_hiwat,
		active_lowat;
	int	delayed_num,
		delayed_hiwat;
	int	idle_thread_num;
	int	thread_num,
		thread_hiwat,
		thread_lowat;
} thread_call_vars;

static __inline__ thread_call_t
	_internal_call_allocate(void);

static __inline__ void
_internal_call_release(
	thread_call_t		call
);

static __inline__ void
_pending_call_enqueue(
	thread_call_t		call
),
_pending_call_dequeue(
	thread_call_t		call
),
_delayed_call_enqueue(
	thread_call_t		call
),
_delayed_call_dequeue(
	thread_call_t		call
);

static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call
);

static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all
),
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all
);

static inline void
	_call_thread_wake(void);

static void
	_call_thread(void),
	_activate_thread(void);

static void
_delayed_call_timer(
	timer_call_param_t	p0,
	timer_call_param_t	p1
);

#define qe(x)	((queue_entry_t)(x))
#define TC(x)	((thread_call_t)(x))

/*
 * Routine:	thread_call_initialize [public]
 *
 * Description:	Initialize this module, called
 *		early during system initialization.
 *
 * Preconditions:	None.
 *
 * Postconditions:	None.
 */

void
thread_call_initialize(void)
{
	kern_return_t		result;
	thread_t		thread;
	thread_call_t		call;
	spl_t			s;

	simple_lock_init(&thread_call_lock, 0);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&thread_call_pending_queue);
	queue_init(&thread_call_delayed_queue);

	queue_init(&thread_call_xxx_queue);
	for (
	    call = internal_call_storage;
	    call < &internal_call_storage[internal_call_num];
	    call++) {

		enqueue_tail(&thread_call_xxx_queue, qe(call));
	}

	timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);

	wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
	thread_call_vars.thread_lowat = thread_call_thread_min;

	activate_thread_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}

void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0
)
{
	call_entry_setup(call, func, param0);
}
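
/*
 * Example: a client typically embeds a thread_call_data_t in its own
 * state and initializes it once with thread_call_setup().  A minimal
 * sketch follows; my_softc, my_callback and my_attach are hypothetical
 * names, not part of this file.
 */
#if 0	/* illustrative sketch only */
struct my_softc {
	thread_call_data_t	my_call;
};

static void
my_callback(
	thread_call_param_t	param0,
	thread_call_param_t	param1)
{
	struct my_softc	*sc = (struct my_softc *)param0;

	/* runs on a callout thread; param1 is supplied per-invocation */
	(void) sc;
	(void) param1;
}

static void
my_attach(struct my_softc *sc)
{
	thread_call_setup(&sc->my_call, my_callback, sc);
}
#endif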

/*
 * Routine:	_internal_call_allocate [private, inline]
 *
 * Purpose:	Allocate an internal callout entry.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_xxx_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_xxx_queue));

	return (call);
}

/*
 * Routine:	_internal_call_release [private, inline]
 *
 * Purpose:	Release an internal callout entry which
 *		is no longer pending (or delayed).
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__
void
_internal_call_release(
	thread_call_t		call
)
{
	if (	call >= internal_call_storage	&&
		call < &internal_call_storage[internal_call_num]	)
		enqueue_head(&thread_call_xxx_queue, qe(call));
}

/*
 * Routine:	_pending_call_enqueue [private, inline]
 *
 * Purpose:	Place an entry at the end of the
 *		pending queue, to be executed soon.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__
void
_pending_call_enqueue(
	thread_call_t		call
)
{
	enqueue_tail(&thread_call_pending_queue, qe(call));
	if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
		thread_call_vars.pending_hiwat = thread_call_vars.pending_num;

	call->state = PENDING;
}

/*
 * Routine:	_pending_call_dequeue [private, inline]
 *
 * Purpose:	Remove an entry from the pending queue,
 *		effectively unscheduling it.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__
void
_pending_call_dequeue(
	thread_call_t		call
)
{
	(void)remque(qe(call));
	thread_call_vars.pending_num--;

	call->state = IDLE;
}

/*
 * Routine:	_delayed_call_enqueue [private, inline]
 *
 * Purpose:	Place an entry on the delayed queue,
 *		after existing entries with an earlier
 *		(or identical) deadline.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__
void
_delayed_call_enqueue(
	thread_call_t		call
)
{
	thread_call_t		current;

	current = TC(queue_first(&thread_call_delayed_queue));

	while (TRUE) {
		if (	queue_end(&thread_call_delayed_queue, qe(current))	||
			call->deadline < current->deadline	) {
			current = TC(queue_prev(qe(current)));
			break;
		}

		current = TC(queue_next(qe(current)));
	}

	insque(qe(call), qe(current));
	if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
		thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;

	call->state = DELAYED;
}

/*
 * Routine:	_delayed_call_dequeue [private, inline]
 *
 * Purpose:	Remove an entry from the delayed queue,
 *		effectively unscheduling it.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__
void
_delayed_call_dequeue(
	thread_call_t		call
)
{
	(void)remque(qe(call));
	thread_call_vars.delayed_num--;

	call->state = IDLE;
}

/*
 * Routine:	_set_delayed_call_timer [private]
 *
 * Purpose:	Reset the timer so that it
 *		next expires when the entry is due.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call
)
{
	timer_call_enter(&thread_call_delaytimer, call->deadline);
}

/*
 * Routine:	_remove_from_pending_queue [private]
 *
 * Purpose:	Remove the first (or all) matching
 *		entries from the pending queue,
 *		effectively unscheduling them.
 *		Returns whether any matching entries
 *		were found.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static
boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all
)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;

	call = TC(queue_first(&thread_call_pending_queue));

	while (!queue_end(&thread_call_pending_queue, qe(call))) {
		if (	call->func	== func		&&
			call->param0	== param0	) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_pending_call_dequeue(call);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 * Routine:	_remove_from_delayed_queue [private]
 *
 * Purpose:	Remove the first (or all) matching
 *		entries from the delayed queue,
 *		effectively unscheduling them.
 *		Returns whether any matching entries
 *		were found.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static
boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all
)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;

	call = TC(queue_first(&thread_call_delayed_queue));

	while (!queue_end(&thread_call_delayed_queue, qe(call))) {
		if (	call->func	== func		&&
			call->param0	== param0	) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_delayed_call_dequeue(call);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 * Routine:	thread_call_func [public]
 *
 * Purpose:	Schedule a function callout.
 *		Guarantees { function, argument }
 *		uniqueness if unique_call is TRUE.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call
)
{
	thread_call_t		call;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = TC(queue_first(&thread_call_pending_queue));

	while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
		if (	call->func	== func		&&
			call->param0	== param	) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func	= func;
		call->param0	= param;
		call->param1	= NULL;

		_pending_call_enqueue(call);

		if (thread_call_vars.active_num <= 0)
			_call_thread_wake();
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}
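
/*
 * Example: scheduling a one-shot callout from interrupt context.  With
 * unique_call TRUE, a request whose { function, argument } pair is
 * already pending is coalesced rather than queued twice.  A sketch,
 * re-using the hypothetical names from the earlier sketch:
 */
#if 0	/* illustrative sketch only */
static void
my_intr(struct my_softc *sc)
{
	/* run my_callback(sc, NULL) soon, on a callout thread */
	thread_call_func(my_callback, (thread_call_param_t)sc, TRUE);
}
#endif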

/*
 * Routine:	thread_call_func_delayed [public]
 *
 * Purpose:	Schedule a function callout to
 *		occur at the stated time.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline
)
{
	thread_call_t		call;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func	= func;
	call->param0	= param;
	call->param1	= 0;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	if (queue_first(&thread_call_delayed_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);
}
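
/*
 * Example: the deadline is an absolute time in the uptime timebase, so
 * a caller normally derives it from a relative interval.  A sketch that
 * defers my_callback by one second (hypothetical names as above):
 */
#if 0	/* illustrative sketch only */
static void
my_defer(struct my_softc *sc)
{
	uint64_t	deadline;

	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	thread_call_func_delayed(my_callback, (thread_call_param_t)sc, deadline);
}
#endif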

/*
 * Routine:	thread_call_func_cancel [public]
 *
 * Purpose:	Unschedule a function callout.
 *		Removes one (or all)
 *		{ function, argument }
 *		instance(s) from either (or both)
 *		the pending and the delayed queue,
 *		in that order. Returns a boolean
 *		indicating whether any calls were
 *		cancelled.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all
)
{
	boolean_t		result;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
				_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
				_remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 * Routine:	thread_call_allocate [public]
 *
 * Purpose:	Allocate an external callout
 *		entry.
 *
 * Preconditions:	None.
 *
 * Postconditions:	None.
 */

thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0
)
{
	thread_call_t	call = (void *)kalloc(sizeof (thread_call_data_t));

	call->func	= func;
	call->param0	= param0;
	call->state	= IDLE;

	return (call);
}

/*
 * Routine:	thread_call_free [public]
 *
 * Purpose:	Free an external callout
 *		entry.
 *
 * Preconditions:	None.
 *
 * Postconditions:	None.
 */

boolean_t
thread_call_free(
	thread_call_t		call
)
{
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state != IDLE) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	kfree(call, sizeof (thread_call_data_t));

	return (TRUE);
}
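
/*
 * Example: external entries pair thread_call_allocate() with
 * thread_call_free(); because thread_call_free() refuses an entry that
 * is still pending or delayed, teardown cancels first.  A sketch
 * (my_call, my_start and my_stop are hypothetical):
 */
#if 0	/* illustrative sketch only */
static thread_call_t	my_call;

static void
my_start(struct my_softc *sc)
{
	my_call = thread_call_allocate(my_callback, (thread_call_param_t)sc);
}

static void
my_stop(void)
{
	(void) thread_call_cancel(my_call);

	if (!thread_call_free(my_call))
		panic("my_stop");
	my_call = NULL;
}
#endif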

/*
 * Routine:	thread_call_enter [public]
 *
 * Purpose:	Schedule an external callout
 *		entry to occur "soon". Returns a
 *		boolean indicating whether the call
 *		had been already scheduled.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

boolean_t
thread_call_enter(
	thread_call_t		call
)
{
	boolean_t	result = TRUE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state != PENDING) {
		if (call->state == DELAYED)
			_delayed_call_dequeue(call);
		else if (call->state == IDLE)
			result = FALSE;

		_pending_call_enqueue(call);

		if (thread_call_vars.active_num <= 0)
			_call_thread_wake();
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1
)
{
	boolean_t	result = TRUE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state != PENDING) {
		if (call->state == DELAYED)
			_delayed_call_dequeue(call);
		else if (call->state == IDLE)
			result = FALSE;

		_pending_call_enqueue(call);

		if (thread_call_vars.active_num <= 0)
			_call_thread_wake();
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 * Routine:	thread_call_enter_delayed [public]
 *
 * Purpose:	Schedule an external callout
 *		entry to occur at the stated time.
 *		Returns a boolean indicating whether
 *		the call had been already scheduled.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline
)
{
	boolean_t	result = TRUE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else if (call->state == IDLE)
		result = FALSE;

	call->param1	= 0;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	if (queue_first(&thread_call_delayed_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline
)
{
	boolean_t	result = TRUE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else if (call->state == IDLE)
		result = FALSE;

	call->param1	= param1;
	call->deadline	= deadline;

	_delayed_call_enqueue(call);

	if (queue_first(&thread_call_delayed_queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
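
/*
 * Example: re-arming a periodic external callout from its own handler;
 * the boolean result reports whether the entry was already scheduled.
 * A sketch re-using the hypothetical my_call from the earlier sketch:
 */
#if 0	/* illustrative sketch only */
static void
my_periodic(
	__unused thread_call_param_t	param0,
	__unused thread_call_param_t	param1)
{
	uint64_t	deadline;

	/* periodic work would go here; then re-arm one second out */
	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	(void) thread_call_enter_delayed(my_call, deadline);
}
#endif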

/*
 * Routine:	thread_call_cancel [public]
 *
 * Purpose:	Unschedule a callout entry.
 *		Returns a boolean indicating
 *		whether the call had actually
 *		been scheduled.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

boolean_t
thread_call_cancel(
	thread_call_t		call
)
{
	boolean_t	result = TRUE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state == PENDING)
		_pending_call_dequeue(call);
	else if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else
		result = FALSE;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 * Routine:	thread_call_is_delayed [public]
 *
 * Purpose:	Returns a boolean indicating
 *		whether a call is currently scheduled
 *		to occur at a later time. Optionally
 *		returns the expiration time.
 *
 * Preconditions:	Callable from an interrupt context
 *			below splsched.
 *
 * Postconditions:	None.
 */

boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t	result = FALSE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->state == DELAYED) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
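
/*
 * Example: computing the time remaining before a delayed entry fires;
 * the deadline comes back in the same uptime timebase used to schedule
 * it.  A sketch (my_time_remaining is a hypothetical helper):
 */
#if 0	/* illustrative sketch only */
static uint64_t
my_time_remaining(thread_call_t call)
{
	uint64_t	deadline, now;

	if (!thread_call_is_delayed(call, &deadline))
		return (0);

	clock_get_uptime(&now);
	return ((deadline > now)? (deadline - now): 0);
}
#endif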

/*
 * Routine:	_call_thread_wake [private, inline]
 *
 * Purpose:	Wake a callout thread to service
 *		pending callout entries. May wake
 *		the activate thread in order to
 *		create additional callout threads.
 *
 * Preconditions:	thread_call_lock held.
 *
 * Postconditions:	None.
 */

static inline void
_call_thread_wake(void)
{
	if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
		thread_call_vars.idle_thread_num--;

		if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
			thread_call_vars.active_hiwat = thread_call_vars.active_num;
	}
	else
	if (!activate_thread_awake) {
		thread_wakeup_one(&activate_thread_awake);
		activate_thread_awake = TRUE;
	}
}

/*
 * sched_call_thread:
 *
 * Call out invoked by the scheduler.
 */

static void
sched_call_thread(
	int		type,
__unused	thread_t	thread)
{
	simple_lock(&thread_call_lock);

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
			thread_call_vars.active_lowat = thread_call_vars.active_num;

		if (	thread_call_vars.active_num <= 0	&&
			thread_call_vars.pending_num > 0	)
			_call_thread_wake();
		break;

	case SCHED_CALL_UNBLOCK:
		if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
			thread_call_vars.active_hiwat = thread_call_vars.active_num;
		break;
	}

	simple_unlock(&thread_call_lock);
}

/*
 * Routine:	_call_thread [private]
 *
 * Purpose:	Executed by a callout thread.
 *
 * Preconditions:	None.
 *
 * Postconditions:	None.
 */

static
void
_call_thread_continue(void)
{
	thread_t	self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	thread_sched_call(self, sched_call_thread);

	while (thread_call_vars.pending_num > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&thread_call_pending_queue));
		thread_call_vars.pending_num--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->state = IDLE;

		_internal_call_release(call);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
			(int)func, (int)param0, (int)param1, 0, 0);

		(*func)(param0, param1);

		(void)thread_funnel_set(self->funnel_lock, FALSE);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_sched_call(self, NULL);

	if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
		thread_call_vars.active_lowat = thread_call_vars.active_num;

	if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
		thread_call_vars.idle_thread_num++;

		wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		thread_block((thread_continue_t)_call_thread_continue);
		/* NOTREACHED */
	}

	thread_call_vars.thread_num--;

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}

static
void
_call_thread(void)
{
	_call_thread_continue();
	/* NOTREACHED */
}

/*
 * Routine:	_activate_thread [private]
 *
 * Purpose:	Executed by the activate thread.
 *
 * Preconditions:	None.
 *
 * Postconditions:	Never terminates.
 */

static
void
_activate_thread_continue(void)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	simple_lock(&thread_call_lock);

	while (	thread_call_vars.active_num <= 0	&&
		thread_call_vars.pending_num > 0	) {

		if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
			thread_call_vars.active_hiwat = thread_call_vars.active_num;

		if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
			thread_call_vars.thread_hiwat = thread_call_vars.thread_num;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
		if (result != KERN_SUCCESS)
			panic("activate_thread");

		thread_deallocate(thread);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
	activate_thread_awake = FALSE;

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block((thread_continue_t)_activate_thread_continue);
	/* NOTREACHED */
}

static
void
_activate_thread(void)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	_activate_thread_continue();
	/* NOTREACHED */
}

static
void
_delayed_call_timer(
	__unused timer_call_param_t	p0,
	__unused timer_call_param_t	p1
)
{
	uint64_t	timestamp;
	thread_call_t	call;
	boolean_t	new_pending = FALSE;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	clock_get_uptime(&timestamp);

	call = TC(queue_first(&thread_call_delayed_queue));

	while (!queue_end(&thread_call_delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_delayed_call_dequeue(call);

			_pending_call_enqueue(call);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&thread_call_delayed_queue));
	}

	if (!queue_end(&thread_call_delayed_queue, qe(call)))
		_set_delayed_call_timer(call);

	if (new_pending && thread_call_vars.active_num <= 0)
		_call_thread_wake();

	simple_unlock(&thread_call_lock);
	splx(s);
}