]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_call.c
xnu-1228.9.59.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
CommitLineData
1c79356b 1/*
2d21ac55 2 * Copyright (c) 1993-1995, 1999-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
1c79356b
A
28
29#include <mach/mach_types.h>
91447636 30#include <mach/thread_act.h>
1c79356b 31
91447636
A
32#include <kern/kern_types.h>
33#include <kern/kalloc.h>
1c79356b
A
34#include <kern/sched_prim.h>
35#include <kern/clock.h>
36#include <kern/task.h>
37#include <kern/thread.h>
91447636
A
38#include <kern/wait_queue.h>
39
40#include <vm/vm_pageout.h>
1c79356b
A
41
42#include <kern/thread_call.h>
43#include <kern/call_entry.h>
44
45#include <kern/timer_call.h>
46
55e303ae
A
47#include <sys/kdebug.h>
48
1c79356b
A
49#define internal_call_num 768
50
51#define thread_call_thread_min 4
52
53static
54thread_call_data_t
55 internal_call_storage[internal_call_num];
56
57decl_simple_lock_data(static,thread_call_lock)
58
59static
60timer_call_data_t
55e303ae 61 thread_call_delaytimer;
1c79356b
A
62
63static
64queue_head_t
55e303ae
A
65 thread_call_xxx_queue,
66 thread_call_pending_queue, thread_call_delayed_queue;
1c79356b
A
67
68static
9bccf70c 69struct wait_queue
55e303ae 70 call_thread_waitqueue;
1c79356b
A
71
72static
73boolean_t
74 activate_thread_awake;
75
76static struct {
77 int pending_num,
78 pending_hiwat;
79 int active_num,
9bccf70c
A
80 active_hiwat,
81 active_lowat;
1c79356b
A
82 int delayed_num,
83 delayed_hiwat;
84 int idle_thread_num;
85 int thread_num,
86 thread_hiwat,
87 thread_lowat;
55e303ae 88} thread_call_vars;
1c79356b
A
89
90static __inline__ thread_call_t
91 _internal_call_allocate(void);
92
93static __inline__ void
94_internal_call_release(
95 thread_call_t call
96);
97
98static __inline__ void
99_pending_call_enqueue(
100 thread_call_t call
101),
102_pending_call_dequeue(
103 thread_call_t call
104),
105_delayed_call_enqueue(
106 thread_call_t call
107),
108_delayed_call_dequeue(
109 thread_call_t call
110);
111
91447636 112static __inline__ void
1c79356b
A
113_set_delayed_call_timer(
114 thread_call_t call
115);
116
117static boolean_t
118_remove_from_pending_queue(
119 thread_call_func_t func,
120 thread_call_param_t param0,
121 boolean_t remove_all
122),
123_remove_from_delayed_queue(
124 thread_call_func_t func,
125 thread_call_param_t param0,
126 boolean_t remove_all
127);
128
2d21ac55 129static inline void
1c79356b
A
130 _call_thread_wake(void);
131
132static void
133 _call_thread(void),
134 _activate_thread(void);
135
136static void
137_delayed_call_timer(
138 timer_call_param_t p0,
139 timer_call_param_t p1
140);
141
142#define qe(x) ((queue_entry_t)(x))
143#define TC(x) ((thread_call_t)(x))
144
145/*
146 * Routine: thread_call_initialize [public]
147 *
148 * Description: Initialize this module, called
149 * early during system initialization.
150 *
151 * Preconditions: None.
152 *
153 * Postconditions: None.
154 */
155
156void
157thread_call_initialize(void)
158{
91447636
A
159 kern_return_t result;
160 thread_t thread;
161 thread_call_t call;
162 spl_t s;
1c79356b 163
91447636 164 simple_lock_init(&thread_call_lock, 0);
1c79356b
A
165
166 s = splsched();
167 simple_lock(&thread_call_lock);
168
55e303ae
A
169 queue_init(&thread_call_pending_queue);
170 queue_init(&thread_call_delayed_queue);
1c79356b 171
55e303ae 172 queue_init(&thread_call_xxx_queue);
1c79356b
A
173 for (
174 call = internal_call_storage;
175 call < &internal_call_storage[internal_call_num];
176 call++) {
177
55e303ae 178 enqueue_tail(&thread_call_xxx_queue, qe(call));
1c79356b
A
179 }
180
55e303ae 181 timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
1c79356b 182
55e303ae
A
183 wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
184 thread_call_vars.thread_lowat = thread_call_thread_min;
1c79356b
A
185
186 activate_thread_awake = TRUE;
1c79356b
A
187
188 simple_unlock(&thread_call_lock);
189 splx(s);
190
91447636
A
191 result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
192 if (result != KERN_SUCCESS)
193 panic("thread_call_initialize");
194
195 thread_deallocate(thread);
1c79356b
A
196}
197
198void
199thread_call_setup(
200 thread_call_t call,
201 thread_call_func_t func,
202 thread_call_param_t param0
203)
204{
205 call_entry_setup(call, func, param0);
206}
207
208/*
209 * Routine: _internal_call_allocate [private, inline]
210 *
211 * Purpose: Allocate an internal callout entry.
212 *
213 * Preconditions: thread_call_lock held.
214 *
215 * Postconditions: None.
216 */
217
218static __inline__ thread_call_t
219_internal_call_allocate(void)
220{
221 thread_call_t call;
222
55e303ae 223 if (queue_empty(&thread_call_xxx_queue))
1c79356b
A
224 panic("_internal_call_allocate");
225
55e303ae 226 call = TC(dequeue_head(&thread_call_xxx_queue));
1c79356b
A
227
228 return (call);
229}
230
231/*
232 * Routine: _internal_call_release [private, inline]
233 *
234 * Purpose: Release an internal callout entry which
235 * is no longer pending (or delayed).
236 *
237 * Preconditions: thread_call_lock held.
238 *
239 * Postconditions: None.
240 */
241
242static __inline__
243void
244_internal_call_release(
245 thread_call_t call
246)
247{
248 if ( call >= internal_call_storage &&
249 call < &internal_call_storage[internal_call_num] )
55e303ae 250 enqueue_head(&thread_call_xxx_queue, qe(call));
1c79356b
A
251}
252
253/*
254 * Routine: _pending_call_enqueue [private, inline]
255 *
256 * Purpose: Place an entry at the end of the
257 * pending queue, to be executed soon.
258 *
259 * Preconditions: thread_call_lock held.
260 *
261 * Postconditions: None.
262 */
263
264static __inline__
265void
266_pending_call_enqueue(
267 thread_call_t call
268)
269{
55e303ae
A
270 enqueue_tail(&thread_call_pending_queue, qe(call));
271 if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
272 thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
1c79356b
A
273
274 call->state = PENDING;
275}
276
277/*
278 * Routine: _pending_call_dequeue [private, inline]
279 *
280 * Purpose: Remove an entry from the pending queue,
281 * effectively unscheduling it.
282 *
283 * Preconditions: thread_call_lock held.
284 *
285 * Postconditions: None.
286 */
287
288static __inline__
289void
290_pending_call_dequeue(
291 thread_call_t call
292)
293{
294 (void)remque(qe(call));
55e303ae 295 thread_call_vars.pending_num--;
1c79356b
A
296
297 call->state = IDLE;
298}
299
300/*
301 * Routine: _delayed_call_enqueue [private, inline]
302 *
303 * Purpose: Place an entry on the delayed queue,
304 * after existing entries with an earlier
305 * (or identical) deadline.
306 *
307 * Preconditions: thread_call_lock held.
308 *
309 * Postconditions: None.
310 */
311
312static __inline__
313void
314_delayed_call_enqueue(
315 thread_call_t call
316)
317{
318 thread_call_t current;
319
55e303ae 320 current = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
321
322 while (TRUE) {
55e303ae 323 if ( queue_end(&thread_call_delayed_queue, qe(current)) ||
0b4e3aa0 324 call->deadline < current->deadline ) {
1c79356b
A
325 current = TC(queue_prev(qe(current)));
326 break;
327 }
328
329 current = TC(queue_next(qe(current)));
330 }
331
332 insque(qe(call), qe(current));
55e303ae
A
333 if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
334 thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
1c79356b
A
335
336 call->state = DELAYED;
337}
338
339/*
340 * Routine: _delayed_call_dequeue [private, inline]
341 *
342 * Purpose: Remove an entry from the delayed queue,
343 * effectively unscheduling it.
344 *
345 * Preconditions: thread_call_lock held.
346 *
347 * Postconditions: None.
348 */
349
350static __inline__
351void
352_delayed_call_dequeue(
353 thread_call_t call
354)
355{
356 (void)remque(qe(call));
55e303ae 357 thread_call_vars.delayed_num--;
1c79356b
A
358
359 call->state = IDLE;
360}
361
362/*
363 * Routine: _set_delayed_call_timer [private]
364 *
365 * Purpose: Reset the timer so that it
366 * next expires when the entry is due.
367 *
368 * Preconditions: thread_call_lock held.
369 *
370 * Postconditions: None.
371 */
372
373static __inline__ void
374_set_delayed_call_timer(
375 thread_call_t call
376)
377{
55e303ae 378 timer_call_enter(&thread_call_delaytimer, call->deadline);
1c79356b
A
379}
380
381/*
382 * Routine: _remove_from_pending_queue [private]
383 *
384 * Purpose: Remove the first (or all) matching
385 * entries from the pending queue,
386 * effectively unscheduling them.
387 * Returns whether any matching entries
388 * were found.
389 *
390 * Preconditions: thread_call_lock held.
391 *
392 * Postconditions: None.
393 */
394
395static
396boolean_t
397_remove_from_pending_queue(
398 thread_call_func_t func,
399 thread_call_param_t param0,
400 boolean_t remove_all
401)
402{
403 boolean_t call_removed = FALSE;
404 thread_call_t call;
405
55e303ae 406 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 407
55e303ae 408 while (!queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
409 if ( call->func == func &&
410 call->param0 == param0 ) {
411 thread_call_t next = TC(queue_next(qe(call)));
412
413 _pending_call_dequeue(call);
414
415 _internal_call_release(call);
416
417 call_removed = TRUE;
418 if (!remove_all)
419 break;
420
421 call = next;
422 }
423 else
424 call = TC(queue_next(qe(call)));
425 }
426
427 return (call_removed);
428}
429
430/*
431 * Routine: _remove_from_delayed_queue [private]
432 *
433 * Purpose: Remove the first (or all) matching
434 * entries from the delayed queue,
435 * effectively unscheduling them.
436 * Returns whether any matching entries
437 * were found.
438 *
439 * Preconditions: thread_call_lock held.
440 *
441 * Postconditions: None.
442 */
443
444static
445boolean_t
446_remove_from_delayed_queue(
447 thread_call_func_t func,
448 thread_call_param_t param0,
449 boolean_t remove_all
450)
451{
452 boolean_t call_removed = FALSE;
453 thread_call_t call;
454
55e303ae 455 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 456
55e303ae 457 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
1c79356b
A
458 if ( call->func == func &&
459 call->param0 == param0 ) {
460 thread_call_t next = TC(queue_next(qe(call)));
461
462 _delayed_call_dequeue(call);
463
464 _internal_call_release(call);
465
466 call_removed = TRUE;
467 if (!remove_all)
468 break;
469
470 call = next;
471 }
472 else
473 call = TC(queue_next(qe(call)));
474 }
475
476 return (call_removed);
477}
478
479/*
480 * Routine: thread_call_func [public]
481 *
482 * Purpose: Schedule a function callout.
483 * Guarantees { function, argument }
484 * uniqueness if unique_call is TRUE.
485 *
486 * Preconditions: Callable from an interrupt context
487 * below splsched.
488 *
489 * Postconditions: None.
490 */
491
492void
493thread_call_func(
494 thread_call_func_t func,
495 thread_call_param_t param,
496 boolean_t unique_call
497)
498{
499 thread_call_t call;
55e303ae 500 spl_t s;
1c79356b 501
1c79356b
A
502 s = splsched();
503 simple_lock(&thread_call_lock);
504
55e303ae 505 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 506
55e303ae 507 while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
508 if ( call->func == func &&
509 call->param0 == param ) {
510 break;
511 }
512
513 call = TC(queue_next(qe(call)));
514 }
515
55e303ae 516 if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
517 call = _internal_call_allocate();
518 call->func = func;
519 call->param0 = param;
2d21ac55 520 call->param1 = NULL;
1c79356b
A
521
522 _pending_call_enqueue(call);
523
55e303ae 524 if (thread_call_vars.active_num <= 0)
9bccf70c 525 _call_thread_wake();
1c79356b
A
526 }
527
528 simple_unlock(&thread_call_lock);
529 splx(s);
530}
531
532/*
533 * Routine: thread_call_func_delayed [public]
534 *
535 * Purpose: Schedule a function callout to
536 * occur at the stated time.
537 *
538 * Preconditions: Callable from an interrupt context
539 * below splsched.
540 *
541 * Postconditions: None.
542 */
543
544void
545thread_call_func_delayed(
546 thread_call_func_t func,
547 thread_call_param_t param,
0b4e3aa0 548 uint64_t deadline
1c79356b
A
549)
550{
551 thread_call_t call;
55e303ae 552 spl_t s;
1c79356b 553
1c79356b
A
554 s = splsched();
555 simple_lock(&thread_call_lock);
556
557 call = _internal_call_allocate();
558 call->func = func;
559 call->param0 = param;
560 call->param1 = 0;
561 call->deadline = deadline;
562
563 _delayed_call_enqueue(call);
564
55e303ae 565 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
566 _set_delayed_call_timer(call);
567
568 simple_unlock(&thread_call_lock);
569 splx(s);
570}
571
572/*
573 * Routine: thread_call_func_cancel [public]
574 *
575 * Purpose: Unschedule a function callout.
576 * Removes one (or all)
577 * { function, argument }
578 * instance(s) from either (or both)
579 * the pending and the delayed queue,
580 * in that order. Returns a boolean
581 * indicating whether any calls were
582 * cancelled.
583 *
584 * Preconditions: Callable from an interrupt context
585 * below splsched.
586 *
587 * Postconditions: None.
588 */
589
590boolean_t
591thread_call_func_cancel(
592 thread_call_func_t func,
593 thread_call_param_t param,
594 boolean_t cancel_all
595)
596{
597 boolean_t result;
55e303ae 598 spl_t s;
1c79356b
A
599
600 s = splsched();
601 simple_lock(&thread_call_lock);
602
603 if (cancel_all)
604 result = _remove_from_pending_queue(func, param, cancel_all) |
605 _remove_from_delayed_queue(func, param, cancel_all);
606 else
607 result = _remove_from_pending_queue(func, param, cancel_all) ||
608 _remove_from_delayed_queue(func, param, cancel_all);
609
610 simple_unlock(&thread_call_lock);
611 splx(s);
612
613 return (result);
614}
615
616/*
617 * Routine: thread_call_allocate [public]
618 *
619 * Purpose: Allocate an external callout
620 * entry.
621 *
622 * Preconditions: None.
623 *
624 * Postconditions: None.
625 */
626
627thread_call_t
628thread_call_allocate(
629 thread_call_func_t func,
630 thread_call_param_t param0
631)
632{
633 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
634
635 call->func = func;
636 call->param0 = param0;
637 call->state = IDLE;
638
639 return (call);
640}
641
642/*
643 * Routine: thread_call_free [public]
644 *
645 * Purpose: Free an external callout
646 * entry.
647 *
648 * Preconditions: None.
649 *
650 * Postconditions: None.
651 */
652
653boolean_t
654thread_call_free(
655 thread_call_t call
656)
657{
55e303ae 658 spl_t s;
1c79356b
A
659
660 s = splsched();
661 simple_lock(&thread_call_lock);
662
663 if (call->state != IDLE) {
664 simple_unlock(&thread_call_lock);
665 splx(s);
666
667 return (FALSE);
668 }
669
670 simple_unlock(&thread_call_lock);
671 splx(s);
672
91447636 673 kfree(call, sizeof (thread_call_data_t));
1c79356b
A
674
675 return (TRUE);
676}
677
678/*
679 * Routine: thread_call_enter [public]
680 *
681 * Purpose: Schedule an external callout
682 * entry to occur "soon". Returns a
683 * boolean indicating whether the call
684 * had been already scheduled.
685 *
686 * Preconditions: Callable from an interrupt context
687 * below splsched.
688 *
689 * Postconditions: None.
690 */
691
692boolean_t
693thread_call_enter(
694 thread_call_t call
695)
696{
697 boolean_t result = TRUE;
55e303ae 698 spl_t s;
1c79356b
A
699
700 s = splsched();
701 simple_lock(&thread_call_lock);
702
703 if (call->state != PENDING) {
704 if (call->state == DELAYED)
705 _delayed_call_dequeue(call);
706 else if (call->state == IDLE)
707 result = FALSE;
708
709 _pending_call_enqueue(call);
9bccf70c 710
55e303ae 711 if (thread_call_vars.active_num <= 0)
9bccf70c 712 _call_thread_wake();
1c79356b
A
713 }
714
715 call->param1 = 0;
716
717 simple_unlock(&thread_call_lock);
718 splx(s);
719
720 return (result);
721}
722
723boolean_t
724thread_call_enter1(
725 thread_call_t call,
726 thread_call_param_t param1
727)
728{
729 boolean_t result = TRUE;
55e303ae 730 spl_t s;
1c79356b
A
731
732 s = splsched();
733 simple_lock(&thread_call_lock);
734
735 if (call->state != PENDING) {
736 if (call->state == DELAYED)
737 _delayed_call_dequeue(call);
738 else if (call->state == IDLE)
739 result = FALSE;
740
741 _pending_call_enqueue(call);
742
55e303ae 743 if (thread_call_vars.active_num <= 0)
9bccf70c 744 _call_thread_wake();
1c79356b
A
745 }
746
747 call->param1 = param1;
748
749 simple_unlock(&thread_call_lock);
750 splx(s);
751
752 return (result);
753}
754
755/*
756 * Routine: thread_call_enter_delayed [public]
757 *
758 * Purpose: Schedule an external callout
759 * entry to occur at the stated time.
760 * Returns a boolean indicating whether
761 * the call had been already scheduled.
762 *
763 * Preconditions: Callable from an interrupt context
764 * below splsched.
765 *
766 * Postconditions: None.
767 */
768
769boolean_t
770thread_call_enter_delayed(
771 thread_call_t call,
0b4e3aa0 772 uint64_t deadline
1c79356b
A
773)
774{
775 boolean_t result = TRUE;
55e303ae 776 spl_t s;
1c79356b
A
777
778 s = splsched();
779 simple_lock(&thread_call_lock);
780
781 if (call->state == PENDING)
782 _pending_call_dequeue(call);
783 else if (call->state == DELAYED)
784 _delayed_call_dequeue(call);
785 else if (call->state == IDLE)
786 result = FALSE;
787
788 call->param1 = 0;
789 call->deadline = deadline;
790
791 _delayed_call_enqueue(call);
792
55e303ae 793 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
794 _set_delayed_call_timer(call);
795
796 simple_unlock(&thread_call_lock);
797 splx(s);
798
799 return (result);
800}
801
802boolean_t
803thread_call_enter1_delayed(
804 thread_call_t call,
805 thread_call_param_t param1,
0b4e3aa0 806 uint64_t deadline
1c79356b
A
807)
808{
809 boolean_t result = TRUE;
55e303ae 810 spl_t s;
1c79356b
A
811
812 s = splsched();
813 simple_lock(&thread_call_lock);
814
815 if (call->state == PENDING)
816 _pending_call_dequeue(call);
817 else if (call->state == DELAYED)
818 _delayed_call_dequeue(call);
819 else if (call->state == IDLE)
820 result = FALSE;
821
822 call->param1 = param1;
823 call->deadline = deadline;
824
825 _delayed_call_enqueue(call);
826
55e303ae 827 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
828 _set_delayed_call_timer(call);
829
830 simple_unlock(&thread_call_lock);
831 splx(s);
832
833 return (result);
834}
835
836/*
837 * Routine: thread_call_cancel [public]
838 *
839 * Purpose: Unschedule a callout entry.
840 * Returns a boolean indicating
841 * whether the call had actually
842 * been scheduled.
843 *
844 * Preconditions: Callable from an interrupt context
845 * below splsched.
846 *
847 * Postconditions: None.
848 */
849
850boolean_t
851thread_call_cancel(
852 thread_call_t call
853)
854{
855 boolean_t result = TRUE;
55e303ae 856 spl_t s;
1c79356b
A
857
858 s = splsched();
859 simple_lock(&thread_call_lock);
860
861 if (call->state == PENDING)
862 _pending_call_dequeue(call);
863 else if (call->state == DELAYED)
864 _delayed_call_dequeue(call);
865 else
866 result = FALSE;
867
868 simple_unlock(&thread_call_lock);
869 splx(s);
870
871 return (result);
872}
873
874/*
875 * Routine: thread_call_is_delayed [public]
876 *
877 * Purpose: Returns a boolean indicating
878 * whether a call is currently scheduled
879 * to occur at a later time. Optionally
880 * returns the expiration time.
881 *
882 * Preconditions: Callable from an interrupt context
883 * below splsched.
884 *
885 * Postconditions: None.
886 */
887
888boolean_t
889thread_call_is_delayed(
890 thread_call_t call,
0b4e3aa0 891 uint64_t *deadline)
1c79356b
A
892{
893 boolean_t result = FALSE;
55e303ae 894 spl_t s;
1c79356b
A
895
896 s = splsched();
897 simple_lock(&thread_call_lock);
898
899 if (call->state == DELAYED) {
900 if (deadline != NULL)
901 *deadline = call->deadline;
902 result = TRUE;
903 }
904
905 simple_unlock(&thread_call_lock);
906 splx(s);
907
908 return (result);
909}
910
911/*
9bccf70c 912 * Routine: _call_thread_wake [private, inline]
1c79356b
A
913 *
914 * Purpose: Wake a callout thread to service
9bccf70c
A
915 * pending callout entries. May wake
916 * the activate thread in order to
1c79356b
A
917 * create additional callout threads.
918 *
919 * Preconditions: thread_call_lock held.
920 *
921 * Postconditions: None.
922 */
923
2d21ac55 924static inline void
1c79356b
A
925_call_thread_wake(void)
926{
91447636 927 if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
55e303ae 928 thread_call_vars.idle_thread_num--;
9bccf70c 929
55e303ae
A
930 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
931 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b
A
932 }
933 else
9bccf70c 934 if (!activate_thread_awake) {
55e303ae 935 thread_wakeup_one(&activate_thread_awake);
1c79356b
A
936 activate_thread_awake = TRUE;
937 }
938}
939
9bccf70c 940/*
2d21ac55 941 * sched_call_thread:
9bccf70c 942 *
2d21ac55 943 * Call out invoked by the scheduler.
9bccf70c
A
944 */
945
2d21ac55
A
946static void
947sched_call_thread(
948 int type,
949__unused thread_t thread)
9bccf70c
A
950{
951 simple_lock(&thread_call_lock);
952
2d21ac55 953 switch (type) {
9bccf70c 954
2d21ac55
A
955 case SCHED_CALL_BLOCK:
956 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
957 thread_call_vars.active_lowat = thread_call_vars.active_num;
9bccf70c 958
2d21ac55
A
959 if ( thread_call_vars.active_num <= 0 &&
960 thread_call_vars.pending_num > 0 )
961 _call_thread_wake();
962 break;
9bccf70c 963
2d21ac55
A
964 case SCHED_CALL_UNBLOCK:
965 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
966 thread_call_vars.active_hiwat = thread_call_vars.active_num;
967 break;
968 }
9bccf70c
A
969
970 simple_unlock(&thread_call_lock);
971}
1c79356b
A
972
973/*
974 * Routine: _call_thread [private]
975 *
976 * Purpose: Executed by a callout thread.
977 *
978 * Preconditions: None.
979 *
980 * Postconditions: None.
981 */
982
983static
984void
985_call_thread_continue(void)
986{
987 thread_t self = current_thread();
988
1c79356b
A
989 (void) splsched();
990 simple_lock(&thread_call_lock);
991
2d21ac55 992 thread_sched_call(self, sched_call_thread);
9bccf70c 993
55e303ae 994 while (thread_call_vars.pending_num > 0) {
1c79356b
A
995 thread_call_t call;
996 thread_call_func_t func;
997 thread_call_param_t param0, param1;
998
55e303ae
A
999 call = TC(dequeue_head(&thread_call_pending_queue));
1000 thread_call_vars.pending_num--;
1c79356b
A
1001
1002 func = call->func;
1003 param0 = call->param0;
1004 param1 = call->param1;
1005
1006 call->state = IDLE;
1007
1008 _internal_call_release(call);
1009
1c79356b
A
1010 simple_unlock(&thread_call_lock);
1011 (void) spllo();
1012
55e303ae
A
1013 KERNEL_DEBUG_CONSTANT(
1014 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
1015 (int)func, (int)param0, (int)param1, 0, 0);
1016
1c79356b
A
1017 (*func)(param0, param1);
1018
1019 (void)thread_funnel_set(self->funnel_lock, FALSE);
1020
1021 (void) splsched();
1022 simple_lock(&thread_call_lock);
1c79356b 1023 }
9bccf70c 1024
2d21ac55 1025 thread_sched_call(self, NULL);
9bccf70c 1026
55e303ae
A
1027 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
1028 thread_call_vars.active_lowat = thread_call_vars.active_num;
1c79356b 1029
55e303ae
A
1030 if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
1031 thread_call_vars.idle_thread_num++;
1c79356b 1032
91447636 1033 wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
1c79356b
A
1034
1035 simple_unlock(&thread_call_lock);
1036 (void) spllo();
1037
91447636 1038 thread_block((thread_continue_t)_call_thread_continue);
1c79356b
A
1039 /* NOTREACHED */
1040 }
1041
55e303ae 1042 thread_call_vars.thread_num--;
1c79356b
A
1043
1044 simple_unlock(&thread_call_lock);
1045 (void) spllo();
1046
91447636 1047 thread_terminate(self);
1c79356b
A
1048 /* NOTREACHED */
1049}
1050
/* Entry point for a newly created callout thread. */
static void
_call_thread(void)
{
	_call_thread_continue();
	/* NOTREACHED */
}
1058
1059/*
1060 * Routine: _activate_thread [private]
1061 *
1062 * Purpose: Executed by the activate thread.
1063 *
1064 * Preconditions: None.
1065 *
1066 * Postconditions: Never terminates.
1067 */
1068
1069static
1070void
1071_activate_thread_continue(void)
1072{
91447636
A
1073 kern_return_t result;
1074 thread_t thread;
1075
1c79356b
A
1076 (void) splsched();
1077 simple_lock(&thread_call_lock);
1078
55e303ae
A
1079 while ( thread_call_vars.active_num <= 0 &&
1080 thread_call_vars.pending_num > 0 ) {
9bccf70c 1081
55e303ae
A
1082 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
1083 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b 1084
55e303ae
A
1085 if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
1086 thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
1c79356b
A
1087
1088 simple_unlock(&thread_call_lock);
1089 (void) spllo();
1090
91447636
A
1091 result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
1092 if (result != KERN_SUCCESS)
1093 panic("activate_thread");
1094
1095 thread_deallocate(thread);
55e303ae 1096
9bccf70c
A
1097 (void) splsched();
1098 simple_lock(&thread_call_lock);
1c79356b 1099 }
1c79356b
A
1100
1101 assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
1102 activate_thread_awake = FALSE;
1103
1104 simple_unlock(&thread_call_lock);
1105 (void) spllo();
1106
91447636 1107 thread_block((thread_continue_t)_activate_thread_continue);
1c79356b
A
1108 /* NOTREACHED */
1109}
1110
1111static
1112void
1113_activate_thread(void)
1114{
55e303ae 1115 thread_t self = current_thread();
1c79356b 1116
91447636 1117 self->options |= TH_OPT_VMPRIV;
1c79356b 1118 vm_page_free_reserve(2); /* XXX */
1c79356b
A
1119
1120 _activate_thread_continue();
1121 /* NOTREACHED */
1122}
1123
1124static
1125void
1126_delayed_call_timer(
91447636
A
1127 __unused timer_call_param_t p0,
1128 __unused timer_call_param_t p1
1c79356b
A
1129)
1130{
0b4e3aa0 1131 uint64_t timestamp;
1c79356b
A
1132 thread_call_t call;
1133 boolean_t new_pending = FALSE;
55e303ae 1134 spl_t s;
1c79356b
A
1135
1136 s = splsched();
1137 simple_lock(&thread_call_lock);
1138
1139 clock_get_uptime(&timestamp);
1140
55e303ae 1141 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 1142
55e303ae 1143 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
0b4e3aa0 1144 if (call->deadline <= timestamp) {
1c79356b
A
1145 _delayed_call_dequeue(call);
1146
1147 _pending_call_enqueue(call);
1148 new_pending = TRUE;
1149 }
1150 else
1151 break;
1152
55e303ae 1153 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
1154 }
1155
55e303ae 1156 if (!queue_end(&thread_call_delayed_queue, qe(call)))
1c79356b
A
1157 _set_delayed_call_timer(call);
1158
55e303ae 1159 if (new_pending && thread_call_vars.active_num <= 0)
1c79356b
A
1160 _call_thread_wake();
1161
1162 simple_unlock(&thread_call_lock);
1163 splx(s);
1164}