]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_call.c
xnu-792.25.20.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
CommitLineData
/*
 * Copyright (c) 1993-1995, 1999-2005 Apple Computer, Inc.
 * All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
24#include <mach/mach_types.h>
91447636 25#include <mach/thread_act.h>
1c79356b 26
91447636
A
27#include <kern/kern_types.h>
28#include <kern/kalloc.h>
1c79356b
A
29#include <kern/sched_prim.h>
30#include <kern/clock.h>
31#include <kern/task.h>
32#include <kern/thread.h>
91447636
A
33#include <kern/wait_queue.h>
34
35#include <vm/vm_pageout.h>
1c79356b
A
36
37#include <kern/thread_call.h>
38#include <kern/call_entry.h>
39
40#include <kern/timer_call.h>
41
55e303ae
A
42#include <sys/kdebug.h>
43
1c79356b
A
44#define internal_call_num 768
45
46#define thread_call_thread_min 4
47
48static
49thread_call_data_t
50 internal_call_storage[internal_call_num];
51
52decl_simple_lock_data(static,thread_call_lock)
53
54static
55timer_call_data_t
55e303ae 56 thread_call_delaytimer;
1c79356b
A
57
58static
59queue_head_t
55e303ae
A
60 thread_call_xxx_queue,
61 thread_call_pending_queue, thread_call_delayed_queue;
1c79356b
A
62
63static
9bccf70c 64struct wait_queue
55e303ae 65 call_thread_waitqueue;
1c79356b
A
66
67static
68boolean_t
69 activate_thread_awake;
70
71static struct {
72 int pending_num,
73 pending_hiwat;
74 int active_num,
9bccf70c
A
75 active_hiwat,
76 active_lowat;
1c79356b
A
77 int delayed_num,
78 delayed_hiwat;
79 int idle_thread_num;
80 int thread_num,
81 thread_hiwat,
82 thread_lowat;
55e303ae 83} thread_call_vars;
1c79356b
A
84
85static __inline__ thread_call_t
86 _internal_call_allocate(void);
87
88static __inline__ void
89_internal_call_release(
90 thread_call_t call
91);
92
93static __inline__ void
94_pending_call_enqueue(
95 thread_call_t call
96),
97_pending_call_dequeue(
98 thread_call_t call
99),
100_delayed_call_enqueue(
101 thread_call_t call
102),
103_delayed_call_dequeue(
104 thread_call_t call
105);
106
91447636 107static __inline__ void
1c79356b
A
108_set_delayed_call_timer(
109 thread_call_t call
110);
111
112static boolean_t
113_remove_from_pending_queue(
114 thread_call_func_t func,
115 thread_call_param_t param0,
116 boolean_t remove_all
117),
118_remove_from_delayed_queue(
119 thread_call_func_t func,
120 thread_call_param_t param0,
121 boolean_t remove_all
122);
123
124static __inline__ void
125 _call_thread_wake(void);
126
127static void
128 _call_thread(void),
129 _activate_thread(void);
130
131static void
132_delayed_call_timer(
133 timer_call_param_t p0,
134 timer_call_param_t p1
135);
136
137#define qe(x) ((queue_entry_t)(x))
138#define TC(x) ((thread_call_t)(x))
139
140/*
141 * Routine: thread_call_initialize [public]
142 *
143 * Description: Initialize this module, called
144 * early during system initialization.
145 *
146 * Preconditions: None.
147 *
148 * Postconditions: None.
149 */
150
151void
152thread_call_initialize(void)
153{
91447636
A
154 kern_return_t result;
155 thread_t thread;
156 thread_call_t call;
157 spl_t s;
1c79356b 158
91447636 159 simple_lock_init(&thread_call_lock, 0);
1c79356b
A
160
161 s = splsched();
162 simple_lock(&thread_call_lock);
163
55e303ae
A
164 queue_init(&thread_call_pending_queue);
165 queue_init(&thread_call_delayed_queue);
1c79356b 166
55e303ae 167 queue_init(&thread_call_xxx_queue);
1c79356b
A
168 for (
169 call = internal_call_storage;
170 call < &internal_call_storage[internal_call_num];
171 call++) {
172
55e303ae 173 enqueue_tail(&thread_call_xxx_queue, qe(call));
1c79356b
A
174 }
175
55e303ae 176 timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
1c79356b 177
55e303ae
A
178 wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
179 thread_call_vars.thread_lowat = thread_call_thread_min;
1c79356b
A
180
181 activate_thread_awake = TRUE;
1c79356b
A
182
183 simple_unlock(&thread_call_lock);
184 splx(s);
185
91447636
A
186 result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
187 if (result != KERN_SUCCESS)
188 panic("thread_call_initialize");
189
190 thread_deallocate(thread);
1c79356b
A
191}
192
193void
194thread_call_setup(
195 thread_call_t call,
196 thread_call_func_t func,
197 thread_call_param_t param0
198)
199{
200 call_entry_setup(call, func, param0);
201}
202
203/*
204 * Routine: _internal_call_allocate [private, inline]
205 *
206 * Purpose: Allocate an internal callout entry.
207 *
208 * Preconditions: thread_call_lock held.
209 *
210 * Postconditions: None.
211 */
212
213static __inline__ thread_call_t
214_internal_call_allocate(void)
215{
216 thread_call_t call;
217
55e303ae 218 if (queue_empty(&thread_call_xxx_queue))
1c79356b
A
219 panic("_internal_call_allocate");
220
55e303ae 221 call = TC(dequeue_head(&thread_call_xxx_queue));
1c79356b
A
222
223 return (call);
224}
225
226/*
227 * Routine: _internal_call_release [private, inline]
228 *
229 * Purpose: Release an internal callout entry which
230 * is no longer pending (or delayed).
231 *
232 * Preconditions: thread_call_lock held.
233 *
234 * Postconditions: None.
235 */
236
237static __inline__
238void
239_internal_call_release(
240 thread_call_t call
241)
242{
243 if ( call >= internal_call_storage &&
244 call < &internal_call_storage[internal_call_num] )
55e303ae 245 enqueue_head(&thread_call_xxx_queue, qe(call));
1c79356b
A
246}
247
248/*
249 * Routine: _pending_call_enqueue [private, inline]
250 *
251 * Purpose: Place an entry at the end of the
252 * pending queue, to be executed soon.
253 *
254 * Preconditions: thread_call_lock held.
255 *
256 * Postconditions: None.
257 */
258
259static __inline__
260void
261_pending_call_enqueue(
262 thread_call_t call
263)
264{
55e303ae
A
265 enqueue_tail(&thread_call_pending_queue, qe(call));
266 if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
267 thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
1c79356b
A
268
269 call->state = PENDING;
270}
271
272/*
273 * Routine: _pending_call_dequeue [private, inline]
274 *
275 * Purpose: Remove an entry from the pending queue,
276 * effectively unscheduling it.
277 *
278 * Preconditions: thread_call_lock held.
279 *
280 * Postconditions: None.
281 */
282
283static __inline__
284void
285_pending_call_dequeue(
286 thread_call_t call
287)
288{
289 (void)remque(qe(call));
55e303ae 290 thread_call_vars.pending_num--;
1c79356b
A
291
292 call->state = IDLE;
293}
294
295/*
296 * Routine: _delayed_call_enqueue [private, inline]
297 *
298 * Purpose: Place an entry on the delayed queue,
299 * after existing entries with an earlier
300 * (or identical) deadline.
301 *
302 * Preconditions: thread_call_lock held.
303 *
304 * Postconditions: None.
305 */
306
307static __inline__
308void
309_delayed_call_enqueue(
310 thread_call_t call
311)
312{
313 thread_call_t current;
314
55e303ae 315 current = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
316
317 while (TRUE) {
55e303ae 318 if ( queue_end(&thread_call_delayed_queue, qe(current)) ||
0b4e3aa0 319 call->deadline < current->deadline ) {
1c79356b
A
320 current = TC(queue_prev(qe(current)));
321 break;
322 }
323
324 current = TC(queue_next(qe(current)));
325 }
326
327 insque(qe(call), qe(current));
55e303ae
A
328 if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
329 thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
1c79356b
A
330
331 call->state = DELAYED;
332}
333
334/*
335 * Routine: _delayed_call_dequeue [private, inline]
336 *
337 * Purpose: Remove an entry from the delayed queue,
338 * effectively unscheduling it.
339 *
340 * Preconditions: thread_call_lock held.
341 *
342 * Postconditions: None.
343 */
344
345static __inline__
346void
347_delayed_call_dequeue(
348 thread_call_t call
349)
350{
351 (void)remque(qe(call));
55e303ae 352 thread_call_vars.delayed_num--;
1c79356b
A
353
354 call->state = IDLE;
355}
356
357/*
358 * Routine: _set_delayed_call_timer [private]
359 *
360 * Purpose: Reset the timer so that it
361 * next expires when the entry is due.
362 *
363 * Preconditions: thread_call_lock held.
364 *
365 * Postconditions: None.
366 */
367
368static __inline__ void
369_set_delayed_call_timer(
370 thread_call_t call
371)
372{
55e303ae 373 timer_call_enter(&thread_call_delaytimer, call->deadline);
1c79356b
A
374}
375
376/*
377 * Routine: _remove_from_pending_queue [private]
378 *
379 * Purpose: Remove the first (or all) matching
380 * entries from the pending queue,
381 * effectively unscheduling them.
382 * Returns whether any matching entries
383 * were found.
384 *
385 * Preconditions: thread_call_lock held.
386 *
387 * Postconditions: None.
388 */
389
390static
391boolean_t
392_remove_from_pending_queue(
393 thread_call_func_t func,
394 thread_call_param_t param0,
395 boolean_t remove_all
396)
397{
398 boolean_t call_removed = FALSE;
399 thread_call_t call;
400
55e303ae 401 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 402
55e303ae 403 while (!queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
404 if ( call->func == func &&
405 call->param0 == param0 ) {
406 thread_call_t next = TC(queue_next(qe(call)));
407
408 _pending_call_dequeue(call);
409
410 _internal_call_release(call);
411
412 call_removed = TRUE;
413 if (!remove_all)
414 break;
415
416 call = next;
417 }
418 else
419 call = TC(queue_next(qe(call)));
420 }
421
422 return (call_removed);
423}
424
425/*
426 * Routine: _remove_from_delayed_queue [private]
427 *
428 * Purpose: Remove the first (or all) matching
429 * entries from the delayed queue,
430 * effectively unscheduling them.
431 * Returns whether any matching entries
432 * were found.
433 *
434 * Preconditions: thread_call_lock held.
435 *
436 * Postconditions: None.
437 */
438
439static
440boolean_t
441_remove_from_delayed_queue(
442 thread_call_func_t func,
443 thread_call_param_t param0,
444 boolean_t remove_all
445)
446{
447 boolean_t call_removed = FALSE;
448 thread_call_t call;
449
55e303ae 450 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 451
55e303ae 452 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
1c79356b
A
453 if ( call->func == func &&
454 call->param0 == param0 ) {
455 thread_call_t next = TC(queue_next(qe(call)));
456
457 _delayed_call_dequeue(call);
458
459 _internal_call_release(call);
460
461 call_removed = TRUE;
462 if (!remove_all)
463 break;
464
465 call = next;
466 }
467 else
468 call = TC(queue_next(qe(call)));
469 }
470
471 return (call_removed);
472}
473
474/*
475 * Routine: thread_call_func [public]
476 *
477 * Purpose: Schedule a function callout.
478 * Guarantees { function, argument }
479 * uniqueness if unique_call is TRUE.
480 *
481 * Preconditions: Callable from an interrupt context
482 * below splsched.
483 *
484 * Postconditions: None.
485 */
486
487void
488thread_call_func(
489 thread_call_func_t func,
490 thread_call_param_t param,
491 boolean_t unique_call
492)
493{
494 thread_call_t call;
55e303ae 495 spl_t s;
1c79356b 496
1c79356b
A
497 s = splsched();
498 simple_lock(&thread_call_lock);
499
55e303ae 500 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 501
55e303ae 502 while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
503 if ( call->func == func &&
504 call->param0 == param ) {
505 break;
506 }
507
508 call = TC(queue_next(qe(call)));
509 }
510
55e303ae 511 if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
512 call = _internal_call_allocate();
513 call->func = func;
514 call->param0 = param;
515 call->param1 = 0;
516
517 _pending_call_enqueue(call);
518
55e303ae 519 if (thread_call_vars.active_num <= 0)
9bccf70c 520 _call_thread_wake();
1c79356b
A
521 }
522
523 simple_unlock(&thread_call_lock);
524 splx(s);
525}
526
527/*
528 * Routine: thread_call_func_delayed [public]
529 *
530 * Purpose: Schedule a function callout to
531 * occur at the stated time.
532 *
533 * Preconditions: Callable from an interrupt context
534 * below splsched.
535 *
536 * Postconditions: None.
537 */
538
539void
540thread_call_func_delayed(
541 thread_call_func_t func,
542 thread_call_param_t param,
0b4e3aa0 543 uint64_t deadline
1c79356b
A
544)
545{
546 thread_call_t call;
55e303ae 547 spl_t s;
1c79356b 548
1c79356b
A
549 s = splsched();
550 simple_lock(&thread_call_lock);
551
552 call = _internal_call_allocate();
553 call->func = func;
554 call->param0 = param;
555 call->param1 = 0;
556 call->deadline = deadline;
557
558 _delayed_call_enqueue(call);
559
55e303ae 560 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
561 _set_delayed_call_timer(call);
562
563 simple_unlock(&thread_call_lock);
564 splx(s);
565}
566
567/*
568 * Routine: thread_call_func_cancel [public]
569 *
570 * Purpose: Unschedule a function callout.
571 * Removes one (or all)
572 * { function, argument }
573 * instance(s) from either (or both)
574 * the pending and the delayed queue,
575 * in that order. Returns a boolean
576 * indicating whether any calls were
577 * cancelled.
578 *
579 * Preconditions: Callable from an interrupt context
580 * below splsched.
581 *
582 * Postconditions: None.
583 */
584
585boolean_t
586thread_call_func_cancel(
587 thread_call_func_t func,
588 thread_call_param_t param,
589 boolean_t cancel_all
590)
591{
592 boolean_t result;
55e303ae 593 spl_t s;
1c79356b
A
594
595 s = splsched();
596 simple_lock(&thread_call_lock);
597
598 if (cancel_all)
599 result = _remove_from_pending_queue(func, param, cancel_all) |
600 _remove_from_delayed_queue(func, param, cancel_all);
601 else
602 result = _remove_from_pending_queue(func, param, cancel_all) ||
603 _remove_from_delayed_queue(func, param, cancel_all);
604
605 simple_unlock(&thread_call_lock);
606 splx(s);
607
608 return (result);
609}
610
611/*
612 * Routine: thread_call_allocate [public]
613 *
614 * Purpose: Allocate an external callout
615 * entry.
616 *
617 * Preconditions: None.
618 *
619 * Postconditions: None.
620 */
621
622thread_call_t
623thread_call_allocate(
624 thread_call_func_t func,
625 thread_call_param_t param0
626)
627{
628 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
629
630 call->func = func;
631 call->param0 = param0;
632 call->state = IDLE;
633
634 return (call);
635}
636
637/*
638 * Routine: thread_call_free [public]
639 *
640 * Purpose: Free an external callout
641 * entry.
642 *
643 * Preconditions: None.
644 *
645 * Postconditions: None.
646 */
647
648boolean_t
649thread_call_free(
650 thread_call_t call
651)
652{
55e303ae 653 spl_t s;
1c79356b
A
654
655 s = splsched();
656 simple_lock(&thread_call_lock);
657
658 if (call->state != IDLE) {
659 simple_unlock(&thread_call_lock);
660 splx(s);
661
662 return (FALSE);
663 }
664
665 simple_unlock(&thread_call_lock);
666 splx(s);
667
91447636 668 kfree(call, sizeof (thread_call_data_t));
1c79356b
A
669
670 return (TRUE);
671}
672
673/*
674 * Routine: thread_call_enter [public]
675 *
676 * Purpose: Schedule an external callout
677 * entry to occur "soon". Returns a
678 * boolean indicating whether the call
679 * had been already scheduled.
680 *
681 * Preconditions: Callable from an interrupt context
682 * below splsched.
683 *
684 * Postconditions: None.
685 */
686
687boolean_t
688thread_call_enter(
689 thread_call_t call
690)
691{
692 boolean_t result = TRUE;
55e303ae 693 spl_t s;
1c79356b
A
694
695 s = splsched();
696 simple_lock(&thread_call_lock);
697
698 if (call->state != PENDING) {
699 if (call->state == DELAYED)
700 _delayed_call_dequeue(call);
701 else if (call->state == IDLE)
702 result = FALSE;
703
704 _pending_call_enqueue(call);
9bccf70c 705
55e303ae 706 if (thread_call_vars.active_num <= 0)
9bccf70c 707 _call_thread_wake();
1c79356b
A
708 }
709
710 call->param1 = 0;
711
712 simple_unlock(&thread_call_lock);
713 splx(s);
714
715 return (result);
716}
717
718boolean_t
719thread_call_enter1(
720 thread_call_t call,
721 thread_call_param_t param1
722)
723{
724 boolean_t result = TRUE;
55e303ae 725 spl_t s;
1c79356b
A
726
727 s = splsched();
728 simple_lock(&thread_call_lock);
729
730 if (call->state != PENDING) {
731 if (call->state == DELAYED)
732 _delayed_call_dequeue(call);
733 else if (call->state == IDLE)
734 result = FALSE;
735
736 _pending_call_enqueue(call);
737
55e303ae 738 if (thread_call_vars.active_num <= 0)
9bccf70c 739 _call_thread_wake();
1c79356b
A
740 }
741
742 call->param1 = param1;
743
744 simple_unlock(&thread_call_lock);
745 splx(s);
746
747 return (result);
748}
749
750/*
751 * Routine: thread_call_enter_delayed [public]
752 *
753 * Purpose: Schedule an external callout
754 * entry to occur at the stated time.
755 * Returns a boolean indicating whether
756 * the call had been already scheduled.
757 *
758 * Preconditions: Callable from an interrupt context
759 * below splsched.
760 *
761 * Postconditions: None.
762 */
763
764boolean_t
765thread_call_enter_delayed(
766 thread_call_t call,
0b4e3aa0 767 uint64_t deadline
1c79356b
A
768)
769{
770 boolean_t result = TRUE;
55e303ae 771 spl_t s;
1c79356b
A
772
773 s = splsched();
774 simple_lock(&thread_call_lock);
775
776 if (call->state == PENDING)
777 _pending_call_dequeue(call);
778 else if (call->state == DELAYED)
779 _delayed_call_dequeue(call);
780 else if (call->state == IDLE)
781 result = FALSE;
782
783 call->param1 = 0;
784 call->deadline = deadline;
785
786 _delayed_call_enqueue(call);
787
55e303ae 788 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
789 _set_delayed_call_timer(call);
790
791 simple_unlock(&thread_call_lock);
792 splx(s);
793
794 return (result);
795}
796
797boolean_t
798thread_call_enter1_delayed(
799 thread_call_t call,
800 thread_call_param_t param1,
0b4e3aa0 801 uint64_t deadline
1c79356b
A
802)
803{
804 boolean_t result = TRUE;
55e303ae 805 spl_t s;
1c79356b
A
806
807 s = splsched();
808 simple_lock(&thread_call_lock);
809
810 if (call->state == PENDING)
811 _pending_call_dequeue(call);
812 else if (call->state == DELAYED)
813 _delayed_call_dequeue(call);
814 else if (call->state == IDLE)
815 result = FALSE;
816
817 call->param1 = param1;
818 call->deadline = deadline;
819
820 _delayed_call_enqueue(call);
821
55e303ae 822 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
823 _set_delayed_call_timer(call);
824
825 simple_unlock(&thread_call_lock);
826 splx(s);
827
828 return (result);
829}
830
831/*
832 * Routine: thread_call_cancel [public]
833 *
834 * Purpose: Unschedule a callout entry.
835 * Returns a boolean indicating
836 * whether the call had actually
837 * been scheduled.
838 *
839 * Preconditions: Callable from an interrupt context
840 * below splsched.
841 *
842 * Postconditions: None.
843 */
844
845boolean_t
846thread_call_cancel(
847 thread_call_t call
848)
849{
850 boolean_t result = TRUE;
55e303ae 851 spl_t s;
1c79356b
A
852
853 s = splsched();
854 simple_lock(&thread_call_lock);
855
856 if (call->state == PENDING)
857 _pending_call_dequeue(call);
858 else if (call->state == DELAYED)
859 _delayed_call_dequeue(call);
860 else
861 result = FALSE;
862
863 simple_unlock(&thread_call_lock);
864 splx(s);
865
866 return (result);
867}
868
869/*
870 * Routine: thread_call_is_delayed [public]
871 *
872 * Purpose: Returns a boolean indicating
873 * whether a call is currently scheduled
874 * to occur at a later time. Optionally
875 * returns the expiration time.
876 *
877 * Preconditions: Callable from an interrupt context
878 * below splsched.
879 *
880 * Postconditions: None.
881 */
882
883boolean_t
884thread_call_is_delayed(
885 thread_call_t call,
0b4e3aa0 886 uint64_t *deadline)
1c79356b
A
887{
888 boolean_t result = FALSE;
55e303ae 889 spl_t s;
1c79356b
A
890
891 s = splsched();
892 simple_lock(&thread_call_lock);
893
894 if (call->state == DELAYED) {
895 if (deadline != NULL)
896 *deadline = call->deadline;
897 result = TRUE;
898 }
899
900 simple_unlock(&thread_call_lock);
901 splx(s);
902
903 return (result);
904}
905
906/*
9bccf70c 907 * Routine: _call_thread_wake [private, inline]
1c79356b
A
908 *
909 * Purpose: Wake a callout thread to service
9bccf70c
A
910 * pending callout entries. May wake
911 * the activate thread in order to
1c79356b
A
912 * create additional callout threads.
913 *
914 * Preconditions: thread_call_lock held.
915 *
916 * Postconditions: None.
917 */
918
919static __inline__
920void
921_call_thread_wake(void)
922{
91447636 923 if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
55e303ae 924 thread_call_vars.idle_thread_num--;
9bccf70c 925
55e303ae
A
926 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
927 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b
A
928 }
929 else
9bccf70c 930 if (!activate_thread_awake) {
55e303ae 931 thread_wakeup_one(&activate_thread_awake);
1c79356b
A
932 activate_thread_awake = TRUE;
933 }
934}
935
9bccf70c
A
936/*
937 * Routine: call_thread_block [private]
938 *
939 * Purpose: Hook via thread dispatch on
940 * the occasion of a callout blocking.
941 *
942 * Preconditions: splsched.
943 *
944 * Postconditions: None.
945 */
946
947void
948call_thread_block(void)
949{
950 simple_lock(&thread_call_lock);
951
55e303ae
A
952 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
953 thread_call_vars.active_lowat = thread_call_vars.active_num;
9bccf70c 954
55e303ae
A
955 if ( thread_call_vars.active_num <= 0 &&
956 thread_call_vars.pending_num > 0 )
9bccf70c
A
957 _call_thread_wake();
958
959 simple_unlock(&thread_call_lock);
960}
961
962/*
963 * Routine: call_thread_unblock [private]
964 *
965 * Purpose: Hook via thread wakeup on
966 * the occasion of a callout unblocking.
967 *
968 * Preconditions: splsched.
969 *
970 * Postconditions: None.
971 */
972
973void
974call_thread_unblock(void)
975{
976 simple_lock(&thread_call_lock);
977
55e303ae
A
978 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
979 thread_call_vars.active_hiwat = thread_call_vars.active_num;
9bccf70c
A
980
981 simple_unlock(&thread_call_lock);
982}
1c79356b
A
983
984/*
985 * Routine: _call_thread [private]
986 *
987 * Purpose: Executed by a callout thread.
988 *
989 * Preconditions: None.
990 *
991 * Postconditions: None.
992 */
993
994static
995void
996_call_thread_continue(void)
997{
998 thread_t self = current_thread();
999
1c79356b
A
1000 (void) splsched();
1001 simple_lock(&thread_call_lock);
1002
91447636 1003 self->options |= TH_OPT_CALLOUT;
9bccf70c 1004
55e303ae 1005 while (thread_call_vars.pending_num > 0) {
1c79356b
A
1006 thread_call_t call;
1007 thread_call_func_t func;
1008 thread_call_param_t param0, param1;
1009
55e303ae
A
1010 call = TC(dequeue_head(&thread_call_pending_queue));
1011 thread_call_vars.pending_num--;
1c79356b
A
1012
1013 func = call->func;
1014 param0 = call->param0;
1015 param1 = call->param1;
1016
1017 call->state = IDLE;
1018
1019 _internal_call_release(call);
1020
1c79356b
A
1021 simple_unlock(&thread_call_lock);
1022 (void) spllo();
1023
55e303ae
A
1024 KERNEL_DEBUG_CONSTANT(
1025 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
1026 (int)func, (int)param0, (int)param1, 0, 0);
1027
1c79356b
A
1028 (*func)(param0, param1);
1029
1030 (void)thread_funnel_set(self->funnel_lock, FALSE);
1031
1032 (void) splsched();
1033 simple_lock(&thread_call_lock);
1c79356b 1034 }
9bccf70c 1035
91447636 1036 self->options &= ~TH_OPT_CALLOUT;
9bccf70c 1037
55e303ae
A
1038 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
1039 thread_call_vars.active_lowat = thread_call_vars.active_num;
1c79356b 1040
55e303ae
A
1041 if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
1042 thread_call_vars.idle_thread_num++;
1c79356b 1043
91447636 1044 wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
1c79356b
A
1045
1046 simple_unlock(&thread_call_lock);
1047 (void) spllo();
1048
91447636 1049 thread_block((thread_continue_t)_call_thread_continue);
1c79356b
A
1050 /* NOTREACHED */
1051 }
1052
55e303ae 1053 thread_call_vars.thread_num--;
1c79356b
A
1054
1055 simple_unlock(&thread_call_lock);
1056 (void) spllo();
1057
91447636 1058 thread_terminate(self);
1c79356b
A
1059 /* NOTREACHED */
1060}
1061
/* Entry point for a freshly created callout thread. */
static
void
_call_thread(void)
{
	_call_thread_continue();
	/* NOTREACHED */
}
1069
1070/*
1071 * Routine: _activate_thread [private]
1072 *
1073 * Purpose: Executed by the activate thread.
1074 *
1075 * Preconditions: None.
1076 *
1077 * Postconditions: Never terminates.
1078 */
1079
1080static
1081void
1082_activate_thread_continue(void)
1083{
91447636
A
1084 kern_return_t result;
1085 thread_t thread;
1086
1c79356b
A
1087 (void) splsched();
1088 simple_lock(&thread_call_lock);
1089
55e303ae
A
1090 while ( thread_call_vars.active_num <= 0 &&
1091 thread_call_vars.pending_num > 0 ) {
9bccf70c 1092
55e303ae
A
1093 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
1094 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b 1095
55e303ae
A
1096 if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
1097 thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
1c79356b
A
1098
1099 simple_unlock(&thread_call_lock);
1100 (void) spllo();
1101
91447636
A
1102 result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
1103 if (result != KERN_SUCCESS)
1104 panic("activate_thread");
1105
1106 thread_deallocate(thread);
55e303ae 1107
9bccf70c
A
1108 (void) splsched();
1109 simple_lock(&thread_call_lock);
1c79356b 1110 }
1c79356b
A
1111
1112 assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
1113 activate_thread_awake = FALSE;
1114
1115 simple_unlock(&thread_call_lock);
1116 (void) spllo();
1117
91447636 1118 thread_block((thread_continue_t)_activate_thread_continue);
1c79356b
A
1119 /* NOTREACHED */
1120}
1121
1122static
1123void
1124_activate_thread(void)
1125{
55e303ae 1126 thread_t self = current_thread();
1c79356b 1127
91447636 1128 self->options |= TH_OPT_VMPRIV;
1c79356b 1129 vm_page_free_reserve(2); /* XXX */
1c79356b
A
1130
1131 _activate_thread_continue();
1132 /* NOTREACHED */
1133}
1134
1135static
1136void
1137_delayed_call_timer(
91447636
A
1138 __unused timer_call_param_t p0,
1139 __unused timer_call_param_t p1
1c79356b
A
1140)
1141{
0b4e3aa0 1142 uint64_t timestamp;
1c79356b
A
1143 thread_call_t call;
1144 boolean_t new_pending = FALSE;
55e303ae 1145 spl_t s;
1c79356b
A
1146
1147 s = splsched();
1148 simple_lock(&thread_call_lock);
1149
1150 clock_get_uptime(&timestamp);
1151
55e303ae 1152 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 1153
55e303ae 1154 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
0b4e3aa0 1155 if (call->deadline <= timestamp) {
1c79356b
A
1156 _delayed_call_dequeue(call);
1157
1158 _pending_call_enqueue(call);
1159 new_pending = TRUE;
1160 }
1161 else
1162 break;
1163
55e303ae 1164 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
1165 }
1166
55e303ae 1167 if (!queue_end(&thread_call_delayed_queue, qe(call)))
1c79356b
A
1168 _set_delayed_call_timer(call);
1169
55e303ae 1170 if (new_pending && thread_call_vars.active_num <= 0)
1c79356b
A
1171 _call_thread_wake();
1172
1173 simple_unlock(&thread_call_lock);
1174 splx(s);
1175}