/*
 * osfmk/kern/thread_call.c  (Apple XNU, from xnu-792.21.3)
 */
/*
 * Copyright (c) 1993-1995, 1999-2005 Apple Computer, Inc.
 * All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29
30 #include <mach/mach_types.h>
31 #include <mach/thread_act.h>
32
33 #include <kern/kern_types.h>
34 #include <kern/kalloc.h>
35 #include <kern/sched_prim.h>
36 #include <kern/clock.h>
37 #include <kern/task.h>
38 #include <kern/thread.h>
39 #include <kern/wait_queue.h>
40
41 #include <vm/vm_pageout.h>
42
43 #include <kern/thread_call.h>
44 #include <kern/call_entry.h>
45
46 #include <kern/timer_call.h>
47
48 #include <sys/kdebug.h>
49
50 #define internal_call_num 768
51
52 #define thread_call_thread_min 4
53
54 static
55 thread_call_data_t
56 internal_call_storage[internal_call_num];
57
58 decl_simple_lock_data(static,thread_call_lock)
59
60 static
61 timer_call_data_t
62 thread_call_delaytimer;
63
64 static
65 queue_head_t
66 thread_call_xxx_queue,
67 thread_call_pending_queue, thread_call_delayed_queue;
68
69 static
70 struct wait_queue
71 call_thread_waitqueue;
72
73 static
74 boolean_t
75 activate_thread_awake;
76
77 static struct {
78 int pending_num,
79 pending_hiwat;
80 int active_num,
81 active_hiwat,
82 active_lowat;
83 int delayed_num,
84 delayed_hiwat;
85 int idle_thread_num;
86 int thread_num,
87 thread_hiwat,
88 thread_lowat;
89 } thread_call_vars;
90
91 static __inline__ thread_call_t
92 _internal_call_allocate(void);
93
94 static __inline__ void
95 _internal_call_release(
96 thread_call_t call
97 );
98
99 static __inline__ void
100 _pending_call_enqueue(
101 thread_call_t call
102 ),
103 _pending_call_dequeue(
104 thread_call_t call
105 ),
106 _delayed_call_enqueue(
107 thread_call_t call
108 ),
109 _delayed_call_dequeue(
110 thread_call_t call
111 );
112
113 static __inline__ void
114 _set_delayed_call_timer(
115 thread_call_t call
116 );
117
118 static boolean_t
119 _remove_from_pending_queue(
120 thread_call_func_t func,
121 thread_call_param_t param0,
122 boolean_t remove_all
123 ),
124 _remove_from_delayed_queue(
125 thread_call_func_t func,
126 thread_call_param_t param0,
127 boolean_t remove_all
128 );
129
130 static __inline__ void
131 _call_thread_wake(void);
132
133 static void
134 _call_thread(void),
135 _activate_thread(void);
136
137 static void
138 _delayed_call_timer(
139 timer_call_param_t p0,
140 timer_call_param_t p1
141 );
142
143 #define qe(x) ((queue_entry_t)(x))
144 #define TC(x) ((thread_call_t)(x))
145
146 /*
147 * Routine: thread_call_initialize [public]
148 *
149 * Description: Initialize this module, called
150 * early during system initialization.
151 *
152 * Preconditions: None.
153 *
154 * Postconditions: None.
155 */
156
157 void
158 thread_call_initialize(void)
159 {
160 kern_return_t result;
161 thread_t thread;
162 thread_call_t call;
163 spl_t s;
164
165 simple_lock_init(&thread_call_lock, 0);
166
167 s = splsched();
168 simple_lock(&thread_call_lock);
169
170 queue_init(&thread_call_pending_queue);
171 queue_init(&thread_call_delayed_queue);
172
173 queue_init(&thread_call_xxx_queue);
174 for (
175 call = internal_call_storage;
176 call < &internal_call_storage[internal_call_num];
177 call++) {
178
179 enqueue_tail(&thread_call_xxx_queue, qe(call));
180 }
181
182 timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
183
184 wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
185 thread_call_vars.thread_lowat = thread_call_thread_min;
186
187 activate_thread_awake = TRUE;
188
189 simple_unlock(&thread_call_lock);
190 splx(s);
191
192 result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
193 if (result != KERN_SUCCESS)
194 panic("thread_call_initialize");
195
196 thread_deallocate(thread);
197 }
198
199 void
200 thread_call_setup(
201 thread_call_t call,
202 thread_call_func_t func,
203 thread_call_param_t param0
204 )
205 {
206 call_entry_setup(call, func, param0);
207 }
208
209 /*
210 * Routine: _internal_call_allocate [private, inline]
211 *
212 * Purpose: Allocate an internal callout entry.
213 *
214 * Preconditions: thread_call_lock held.
215 *
216 * Postconditions: None.
217 */
218
219 static __inline__ thread_call_t
220 _internal_call_allocate(void)
221 {
222 thread_call_t call;
223
224 if (queue_empty(&thread_call_xxx_queue))
225 panic("_internal_call_allocate");
226
227 call = TC(dequeue_head(&thread_call_xxx_queue));
228
229 return (call);
230 }
231
232 /*
233 * Routine: _internal_call_release [private, inline]
234 *
235 * Purpose: Release an internal callout entry which
236 * is no longer pending (or delayed).
237 *
238 * Preconditions: thread_call_lock held.
239 *
240 * Postconditions: None.
241 */
242
243 static __inline__
244 void
245 _internal_call_release(
246 thread_call_t call
247 )
248 {
249 if ( call >= internal_call_storage &&
250 call < &internal_call_storage[internal_call_num] )
251 enqueue_head(&thread_call_xxx_queue, qe(call));
252 }
253
254 /*
255 * Routine: _pending_call_enqueue [private, inline]
256 *
257 * Purpose: Place an entry at the end of the
258 * pending queue, to be executed soon.
259 *
260 * Preconditions: thread_call_lock held.
261 *
262 * Postconditions: None.
263 */
264
265 static __inline__
266 void
267 _pending_call_enqueue(
268 thread_call_t call
269 )
270 {
271 enqueue_tail(&thread_call_pending_queue, qe(call));
272 if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
273 thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
274
275 call->state = PENDING;
276 }
277
278 /*
279 * Routine: _pending_call_dequeue [private, inline]
280 *
281 * Purpose: Remove an entry from the pending queue,
282 * effectively unscheduling it.
283 *
284 * Preconditions: thread_call_lock held.
285 *
286 * Postconditions: None.
287 */
288
289 static __inline__
290 void
291 _pending_call_dequeue(
292 thread_call_t call
293 )
294 {
295 (void)remque(qe(call));
296 thread_call_vars.pending_num--;
297
298 call->state = IDLE;
299 }
300
301 /*
302 * Routine: _delayed_call_enqueue [private, inline]
303 *
304 * Purpose: Place an entry on the delayed queue,
305 * after existing entries with an earlier
306 * (or identical) deadline.
307 *
308 * Preconditions: thread_call_lock held.
309 *
310 * Postconditions: None.
311 */
312
313 static __inline__
314 void
315 _delayed_call_enqueue(
316 thread_call_t call
317 )
318 {
319 thread_call_t current;
320
321 current = TC(queue_first(&thread_call_delayed_queue));
322
323 while (TRUE) {
324 if ( queue_end(&thread_call_delayed_queue, qe(current)) ||
325 call->deadline < current->deadline ) {
326 current = TC(queue_prev(qe(current)));
327 break;
328 }
329
330 current = TC(queue_next(qe(current)));
331 }
332
333 insque(qe(call), qe(current));
334 if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
335 thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
336
337 call->state = DELAYED;
338 }
339
340 /*
341 * Routine: _delayed_call_dequeue [private, inline]
342 *
343 * Purpose: Remove an entry from the delayed queue,
344 * effectively unscheduling it.
345 *
346 * Preconditions: thread_call_lock held.
347 *
348 * Postconditions: None.
349 */
350
351 static __inline__
352 void
353 _delayed_call_dequeue(
354 thread_call_t call
355 )
356 {
357 (void)remque(qe(call));
358 thread_call_vars.delayed_num--;
359
360 call->state = IDLE;
361 }
362
363 /*
364 * Routine: _set_delayed_call_timer [private]
365 *
366 * Purpose: Reset the timer so that it
367 * next expires when the entry is due.
368 *
369 * Preconditions: thread_call_lock held.
370 *
371 * Postconditions: None.
372 */
373
374 static __inline__ void
375 _set_delayed_call_timer(
376 thread_call_t call
377 )
378 {
379 timer_call_enter(&thread_call_delaytimer, call->deadline);
380 }
381
382 /*
383 * Routine: _remove_from_pending_queue [private]
384 *
385 * Purpose: Remove the first (or all) matching
386 * entries from the pending queue,
387 * effectively unscheduling them.
388 * Returns whether any matching entries
389 * were found.
390 *
391 * Preconditions: thread_call_lock held.
392 *
393 * Postconditions: None.
394 */
395
396 static
397 boolean_t
398 _remove_from_pending_queue(
399 thread_call_func_t func,
400 thread_call_param_t param0,
401 boolean_t remove_all
402 )
403 {
404 boolean_t call_removed = FALSE;
405 thread_call_t call;
406
407 call = TC(queue_first(&thread_call_pending_queue));
408
409 while (!queue_end(&thread_call_pending_queue, qe(call))) {
410 if ( call->func == func &&
411 call->param0 == param0 ) {
412 thread_call_t next = TC(queue_next(qe(call)));
413
414 _pending_call_dequeue(call);
415
416 _internal_call_release(call);
417
418 call_removed = TRUE;
419 if (!remove_all)
420 break;
421
422 call = next;
423 }
424 else
425 call = TC(queue_next(qe(call)));
426 }
427
428 return (call_removed);
429 }
430
431 /*
432 * Routine: _remove_from_delayed_queue [private]
433 *
434 * Purpose: Remove the first (or all) matching
435 * entries from the delayed queue,
436 * effectively unscheduling them.
437 * Returns whether any matching entries
438 * were found.
439 *
440 * Preconditions: thread_call_lock held.
441 *
442 * Postconditions: None.
443 */
444
445 static
446 boolean_t
447 _remove_from_delayed_queue(
448 thread_call_func_t func,
449 thread_call_param_t param0,
450 boolean_t remove_all
451 )
452 {
453 boolean_t call_removed = FALSE;
454 thread_call_t call;
455
456 call = TC(queue_first(&thread_call_delayed_queue));
457
458 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
459 if ( call->func == func &&
460 call->param0 == param0 ) {
461 thread_call_t next = TC(queue_next(qe(call)));
462
463 _delayed_call_dequeue(call);
464
465 _internal_call_release(call);
466
467 call_removed = TRUE;
468 if (!remove_all)
469 break;
470
471 call = next;
472 }
473 else
474 call = TC(queue_next(qe(call)));
475 }
476
477 return (call_removed);
478 }
479
480 /*
481 * Routine: thread_call_func [public]
482 *
483 * Purpose: Schedule a function callout.
484 * Guarantees { function, argument }
485 * uniqueness if unique_call is TRUE.
486 *
487 * Preconditions: Callable from an interrupt context
488 * below splsched.
489 *
490 * Postconditions: None.
491 */
492
493 void
494 thread_call_func(
495 thread_call_func_t func,
496 thread_call_param_t param,
497 boolean_t unique_call
498 )
499 {
500 thread_call_t call;
501 spl_t s;
502
503 s = splsched();
504 simple_lock(&thread_call_lock);
505
506 call = TC(queue_first(&thread_call_pending_queue));
507
508 while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
509 if ( call->func == func &&
510 call->param0 == param ) {
511 break;
512 }
513
514 call = TC(queue_next(qe(call)));
515 }
516
517 if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
518 call = _internal_call_allocate();
519 call->func = func;
520 call->param0 = param;
521 call->param1 = 0;
522
523 _pending_call_enqueue(call);
524
525 if (thread_call_vars.active_num <= 0)
526 _call_thread_wake();
527 }
528
529 simple_unlock(&thread_call_lock);
530 splx(s);
531 }
532
533 /*
534 * Routine: thread_call_func_delayed [public]
535 *
536 * Purpose: Schedule a function callout to
537 * occur at the stated time.
538 *
539 * Preconditions: Callable from an interrupt context
540 * below splsched.
541 *
542 * Postconditions: None.
543 */
544
545 void
546 thread_call_func_delayed(
547 thread_call_func_t func,
548 thread_call_param_t param,
549 uint64_t deadline
550 )
551 {
552 thread_call_t call;
553 spl_t s;
554
555 s = splsched();
556 simple_lock(&thread_call_lock);
557
558 call = _internal_call_allocate();
559 call->func = func;
560 call->param0 = param;
561 call->param1 = 0;
562 call->deadline = deadline;
563
564 _delayed_call_enqueue(call);
565
566 if (queue_first(&thread_call_delayed_queue) == qe(call))
567 _set_delayed_call_timer(call);
568
569 simple_unlock(&thread_call_lock);
570 splx(s);
571 }
572
573 /*
574 * Routine: thread_call_func_cancel [public]
575 *
576 * Purpose: Unschedule a function callout.
577 * Removes one (or all)
578 * { function, argument }
579 * instance(s) from either (or both)
580 * the pending and the delayed queue,
581 * in that order. Returns a boolean
582 * indicating whether any calls were
583 * cancelled.
584 *
585 * Preconditions: Callable from an interrupt context
586 * below splsched.
587 *
588 * Postconditions: None.
589 */
590
591 boolean_t
592 thread_call_func_cancel(
593 thread_call_func_t func,
594 thread_call_param_t param,
595 boolean_t cancel_all
596 )
597 {
598 boolean_t result;
599 spl_t s;
600
601 s = splsched();
602 simple_lock(&thread_call_lock);
603
604 if (cancel_all)
605 result = _remove_from_pending_queue(func, param, cancel_all) |
606 _remove_from_delayed_queue(func, param, cancel_all);
607 else
608 result = _remove_from_pending_queue(func, param, cancel_all) ||
609 _remove_from_delayed_queue(func, param, cancel_all);
610
611 simple_unlock(&thread_call_lock);
612 splx(s);
613
614 return (result);
615 }
616
617 /*
618 * Routine: thread_call_allocate [public]
619 *
620 * Purpose: Allocate an external callout
621 * entry.
622 *
623 * Preconditions: None.
624 *
625 * Postconditions: None.
626 */
627
628 thread_call_t
629 thread_call_allocate(
630 thread_call_func_t func,
631 thread_call_param_t param0
632 )
633 {
634 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
635
636 call->func = func;
637 call->param0 = param0;
638 call->state = IDLE;
639
640 return (call);
641 }
642
643 /*
644 * Routine: thread_call_free [public]
645 *
646 * Purpose: Free an external callout
647 * entry.
648 *
649 * Preconditions: None.
650 *
651 * Postconditions: None.
652 */
653
654 boolean_t
655 thread_call_free(
656 thread_call_t call
657 )
658 {
659 spl_t s;
660
661 s = splsched();
662 simple_lock(&thread_call_lock);
663
664 if (call->state != IDLE) {
665 simple_unlock(&thread_call_lock);
666 splx(s);
667
668 return (FALSE);
669 }
670
671 simple_unlock(&thread_call_lock);
672 splx(s);
673
674 kfree(call, sizeof (thread_call_data_t));
675
676 return (TRUE);
677 }
678
679 /*
680 * Routine: thread_call_enter [public]
681 *
682 * Purpose: Schedule an external callout
683 * entry to occur "soon". Returns a
684 * boolean indicating whether the call
685 * had been already scheduled.
686 *
687 * Preconditions: Callable from an interrupt context
688 * below splsched.
689 *
690 * Postconditions: None.
691 */
692
693 boolean_t
694 thread_call_enter(
695 thread_call_t call
696 )
697 {
698 boolean_t result = TRUE;
699 spl_t s;
700
701 s = splsched();
702 simple_lock(&thread_call_lock);
703
704 if (call->state != PENDING) {
705 if (call->state == DELAYED)
706 _delayed_call_dequeue(call);
707 else if (call->state == IDLE)
708 result = FALSE;
709
710 _pending_call_enqueue(call);
711
712 if (thread_call_vars.active_num <= 0)
713 _call_thread_wake();
714 }
715
716 call->param1 = 0;
717
718 simple_unlock(&thread_call_lock);
719 splx(s);
720
721 return (result);
722 }
723
724 boolean_t
725 thread_call_enter1(
726 thread_call_t call,
727 thread_call_param_t param1
728 )
729 {
730 boolean_t result = TRUE;
731 spl_t s;
732
733 s = splsched();
734 simple_lock(&thread_call_lock);
735
736 if (call->state != PENDING) {
737 if (call->state == DELAYED)
738 _delayed_call_dequeue(call);
739 else if (call->state == IDLE)
740 result = FALSE;
741
742 _pending_call_enqueue(call);
743
744 if (thread_call_vars.active_num <= 0)
745 _call_thread_wake();
746 }
747
748 call->param1 = param1;
749
750 simple_unlock(&thread_call_lock);
751 splx(s);
752
753 return (result);
754 }
755
756 /*
757 * Routine: thread_call_enter_delayed [public]
758 *
759 * Purpose: Schedule an external callout
760 * entry to occur at the stated time.
761 * Returns a boolean indicating whether
762 * the call had been already scheduled.
763 *
764 * Preconditions: Callable from an interrupt context
765 * below splsched.
766 *
767 * Postconditions: None.
768 */
769
770 boolean_t
771 thread_call_enter_delayed(
772 thread_call_t call,
773 uint64_t deadline
774 )
775 {
776 boolean_t result = TRUE;
777 spl_t s;
778
779 s = splsched();
780 simple_lock(&thread_call_lock);
781
782 if (call->state == PENDING)
783 _pending_call_dequeue(call);
784 else if (call->state == DELAYED)
785 _delayed_call_dequeue(call);
786 else if (call->state == IDLE)
787 result = FALSE;
788
789 call->param1 = 0;
790 call->deadline = deadline;
791
792 _delayed_call_enqueue(call);
793
794 if (queue_first(&thread_call_delayed_queue) == qe(call))
795 _set_delayed_call_timer(call);
796
797 simple_unlock(&thread_call_lock);
798 splx(s);
799
800 return (result);
801 }
802
803 boolean_t
804 thread_call_enter1_delayed(
805 thread_call_t call,
806 thread_call_param_t param1,
807 uint64_t deadline
808 )
809 {
810 boolean_t result = TRUE;
811 spl_t s;
812
813 s = splsched();
814 simple_lock(&thread_call_lock);
815
816 if (call->state == PENDING)
817 _pending_call_dequeue(call);
818 else if (call->state == DELAYED)
819 _delayed_call_dequeue(call);
820 else if (call->state == IDLE)
821 result = FALSE;
822
823 call->param1 = param1;
824 call->deadline = deadline;
825
826 _delayed_call_enqueue(call);
827
828 if (queue_first(&thread_call_delayed_queue) == qe(call))
829 _set_delayed_call_timer(call);
830
831 simple_unlock(&thread_call_lock);
832 splx(s);
833
834 return (result);
835 }
836
837 /*
838 * Routine: thread_call_cancel [public]
839 *
840 * Purpose: Unschedule a callout entry.
841 * Returns a boolean indicating
842 * whether the call had actually
843 * been scheduled.
844 *
845 * Preconditions: Callable from an interrupt context
846 * below splsched.
847 *
848 * Postconditions: None.
849 */
850
851 boolean_t
852 thread_call_cancel(
853 thread_call_t call
854 )
855 {
856 boolean_t result = TRUE;
857 spl_t s;
858
859 s = splsched();
860 simple_lock(&thread_call_lock);
861
862 if (call->state == PENDING)
863 _pending_call_dequeue(call);
864 else if (call->state == DELAYED)
865 _delayed_call_dequeue(call);
866 else
867 result = FALSE;
868
869 simple_unlock(&thread_call_lock);
870 splx(s);
871
872 return (result);
873 }
874
875 /*
876 * Routine: thread_call_is_delayed [public]
877 *
878 * Purpose: Returns a boolean indicating
879 * whether a call is currently scheduled
880 * to occur at a later time. Optionally
881 * returns the expiration time.
882 *
883 * Preconditions: Callable from an interrupt context
884 * below splsched.
885 *
886 * Postconditions: None.
887 */
888
889 boolean_t
890 thread_call_is_delayed(
891 thread_call_t call,
892 uint64_t *deadline)
893 {
894 boolean_t result = FALSE;
895 spl_t s;
896
897 s = splsched();
898 simple_lock(&thread_call_lock);
899
900 if (call->state == DELAYED) {
901 if (deadline != NULL)
902 *deadline = call->deadline;
903 result = TRUE;
904 }
905
906 simple_unlock(&thread_call_lock);
907 splx(s);
908
909 return (result);
910 }
911
912 /*
913 * Routine: _call_thread_wake [private, inline]
914 *
915 * Purpose: Wake a callout thread to service
916 * pending callout entries. May wake
917 * the activate thread in order to
918 * create additional callout threads.
919 *
920 * Preconditions: thread_call_lock held.
921 *
922 * Postconditions: None.
923 */
924
925 static __inline__
926 void
927 _call_thread_wake(void)
928 {
929 if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
930 thread_call_vars.idle_thread_num--;
931
932 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
933 thread_call_vars.active_hiwat = thread_call_vars.active_num;
934 }
935 else
936 if (!activate_thread_awake) {
937 thread_wakeup_one(&activate_thread_awake);
938 activate_thread_awake = TRUE;
939 }
940 }
941
942 /*
943 * Routine: call_thread_block [private]
944 *
945 * Purpose: Hook via thread dispatch on
946 * the occasion of a callout blocking.
947 *
948 * Preconditions: splsched.
949 *
950 * Postconditions: None.
951 */
952
953 void
954 call_thread_block(void)
955 {
956 simple_lock(&thread_call_lock);
957
958 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
959 thread_call_vars.active_lowat = thread_call_vars.active_num;
960
961 if ( thread_call_vars.active_num <= 0 &&
962 thread_call_vars.pending_num > 0 )
963 _call_thread_wake();
964
965 simple_unlock(&thread_call_lock);
966 }
967
968 /*
969 * Routine: call_thread_unblock [private]
970 *
971 * Purpose: Hook via thread wakeup on
972 * the occasion of a callout unblocking.
973 *
974 * Preconditions: splsched.
975 *
976 * Postconditions: None.
977 */
978
979 void
980 call_thread_unblock(void)
981 {
982 simple_lock(&thread_call_lock);
983
984 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
985 thread_call_vars.active_hiwat = thread_call_vars.active_num;
986
987 simple_unlock(&thread_call_lock);
988 }
989
990 /*
991 * Routine: _call_thread [private]
992 *
993 * Purpose: Executed by a callout thread.
994 *
995 * Preconditions: None.
996 *
997 * Postconditions: None.
998 */
999
1000 static
1001 void
1002 _call_thread_continue(void)
1003 {
1004 thread_t self = current_thread();
1005
1006 (void) splsched();
1007 simple_lock(&thread_call_lock);
1008
1009 self->options |= TH_OPT_CALLOUT;
1010
1011 while (thread_call_vars.pending_num > 0) {
1012 thread_call_t call;
1013 thread_call_func_t func;
1014 thread_call_param_t param0, param1;
1015
1016 call = TC(dequeue_head(&thread_call_pending_queue));
1017 thread_call_vars.pending_num--;
1018
1019 func = call->func;
1020 param0 = call->param0;
1021 param1 = call->param1;
1022
1023 call->state = IDLE;
1024
1025 _internal_call_release(call);
1026
1027 simple_unlock(&thread_call_lock);
1028 (void) spllo();
1029
1030 KERNEL_DEBUG_CONSTANT(
1031 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
1032 (int)func, (int)param0, (int)param1, 0, 0);
1033
1034 (*func)(param0, param1);
1035
1036 (void)thread_funnel_set(self->funnel_lock, FALSE);
1037
1038 (void) splsched();
1039 simple_lock(&thread_call_lock);
1040 }
1041
1042 self->options &= ~TH_OPT_CALLOUT;
1043
1044 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
1045 thread_call_vars.active_lowat = thread_call_vars.active_num;
1046
1047 if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
1048 thread_call_vars.idle_thread_num++;
1049
1050 wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
1051
1052 simple_unlock(&thread_call_lock);
1053 (void) spllo();
1054
1055 thread_block((thread_continue_t)_call_thread_continue);
1056 /* NOTREACHED */
1057 }
1058
1059 thread_call_vars.thread_num--;
1060
1061 simple_unlock(&thread_call_lock);
1062 (void) spllo();
1063
1064 thread_terminate(self);
1065 /* NOTREACHED */
1066 }
1067
1068 static
1069 void
1070 _call_thread(void)
1071 {
1072 _call_thread_continue();
1073 /* NOTREACHED */
1074 }
1075
1076 /*
1077 * Routine: _activate_thread [private]
1078 *
1079 * Purpose: Executed by the activate thread.
1080 *
1081 * Preconditions: None.
1082 *
1083 * Postconditions: Never terminates.
1084 */
1085
1086 static
1087 void
1088 _activate_thread_continue(void)
1089 {
1090 kern_return_t result;
1091 thread_t thread;
1092
1093 (void) splsched();
1094 simple_lock(&thread_call_lock);
1095
1096 while ( thread_call_vars.active_num <= 0 &&
1097 thread_call_vars.pending_num > 0 ) {
1098
1099 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
1100 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1101
1102 if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
1103 thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
1104
1105 simple_unlock(&thread_call_lock);
1106 (void) spllo();
1107
1108 result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
1109 if (result != KERN_SUCCESS)
1110 panic("activate_thread");
1111
1112 thread_deallocate(thread);
1113
1114 (void) splsched();
1115 simple_lock(&thread_call_lock);
1116 }
1117
1118 assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
1119 activate_thread_awake = FALSE;
1120
1121 simple_unlock(&thread_call_lock);
1122 (void) spllo();
1123
1124 thread_block((thread_continue_t)_activate_thread_continue);
1125 /* NOTREACHED */
1126 }
1127
1128 static
1129 void
1130 _activate_thread(void)
1131 {
1132 thread_t self = current_thread();
1133
1134 self->options |= TH_OPT_VMPRIV;
1135 vm_page_free_reserve(2); /* XXX */
1136
1137 _activate_thread_continue();
1138 /* NOTREACHED */
1139 }
1140
1141 static
1142 void
1143 _delayed_call_timer(
1144 __unused timer_call_param_t p0,
1145 __unused timer_call_param_t p1
1146 )
1147 {
1148 uint64_t timestamp;
1149 thread_call_t call;
1150 boolean_t new_pending = FALSE;
1151 spl_t s;
1152
1153 s = splsched();
1154 simple_lock(&thread_call_lock);
1155
1156 clock_get_uptime(&timestamp);
1157
1158 call = TC(queue_first(&thread_call_delayed_queue));
1159
1160 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
1161 if (call->deadline <= timestamp) {
1162 _delayed_call_dequeue(call);
1163
1164 _pending_call_enqueue(call);
1165 new_pending = TRUE;
1166 }
1167 else
1168 break;
1169
1170 call = TC(queue_first(&thread_call_delayed_queue));
1171 }
1172
1173 if (!queue_end(&thread_call_delayed_queue, qe(call)))
1174 _set_delayed_call_timer(call);
1175
1176 if (new_pending && thread_call_vars.active_num <= 0)
1177 _call_thread_wake();
1178
1179 simple_unlock(&thread_call_lock);
1180 splx(s);
1181 }