/*
 * osfmk/kern/thread_call.c — thread-based callout module
 * (source: xnu-792.12.6)
 */
1c79356b 1/*
91447636 2 * Copyright (c) 1993-1995, 1999-2005 Apple Computer, Inc.
1c79356b
A
3 * All rights reserved.
4 *
8ad349bb 5 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
1c79356b 6 *
8ad349bb
A
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the
11 * License may not be used to create, or enable the creation or
12 * redistribution of, unlawful or unlicensed copies of an Apple operating
13 * system, or to circumvent, violate, or enable the circumvention or
14 * violation of, any terms of an Apple operating system software license
15 * agreement.
16 *
17 * Please obtain a copy of the License at
18 * http://www.opensource.apple.com/apsl/ and read it before using this
19 * file.
20 *
21 * The Original Code and all software distributed under the License are
22 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
23 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
24 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
26 * Please see the License for the specific language governing rights and
27 * limitations under the License.
28 *
29 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
1c79356b 30 */
1c79356b
A
31
32#include <mach/mach_types.h>
91447636 33#include <mach/thread_act.h>
1c79356b 34
91447636
A
35#include <kern/kern_types.h>
36#include <kern/kalloc.h>
1c79356b
A
37#include <kern/sched_prim.h>
38#include <kern/clock.h>
39#include <kern/task.h>
40#include <kern/thread.h>
91447636
A
41#include <kern/wait_queue.h>
42
43#include <vm/vm_pageout.h>
1c79356b
A
44
45#include <kern/thread_call.h>
46#include <kern/call_entry.h>
47
48#include <kern/timer_call.h>
49
55e303ae
A
50#include <sys/kdebug.h>
51
1c79356b
A
52#define internal_call_num 768
53
54#define thread_call_thread_min 4
55
56static
57thread_call_data_t
58 internal_call_storage[internal_call_num];
59
60decl_simple_lock_data(static,thread_call_lock)
61
62static
63timer_call_data_t
55e303ae 64 thread_call_delaytimer;
1c79356b
A
65
66static
67queue_head_t
55e303ae
A
68 thread_call_xxx_queue,
69 thread_call_pending_queue, thread_call_delayed_queue;
1c79356b
A
70
71static
9bccf70c 72struct wait_queue
55e303ae 73 call_thread_waitqueue;
1c79356b
A
74
75static
76boolean_t
77 activate_thread_awake;
78
79static struct {
80 int pending_num,
81 pending_hiwat;
82 int active_num,
9bccf70c
A
83 active_hiwat,
84 active_lowat;
1c79356b
A
85 int delayed_num,
86 delayed_hiwat;
87 int idle_thread_num;
88 int thread_num,
89 thread_hiwat,
90 thread_lowat;
55e303ae 91} thread_call_vars;
1c79356b
A
92
93static __inline__ thread_call_t
94 _internal_call_allocate(void);
95
96static __inline__ void
97_internal_call_release(
98 thread_call_t call
99);
100
101static __inline__ void
102_pending_call_enqueue(
103 thread_call_t call
104),
105_pending_call_dequeue(
106 thread_call_t call
107),
108_delayed_call_enqueue(
109 thread_call_t call
110),
111_delayed_call_dequeue(
112 thread_call_t call
113);
114
91447636 115static __inline__ void
1c79356b
A
116_set_delayed_call_timer(
117 thread_call_t call
118);
119
120static boolean_t
121_remove_from_pending_queue(
122 thread_call_func_t func,
123 thread_call_param_t param0,
124 boolean_t remove_all
125),
126_remove_from_delayed_queue(
127 thread_call_func_t func,
128 thread_call_param_t param0,
129 boolean_t remove_all
130);
131
132static __inline__ void
133 _call_thread_wake(void);
134
135static void
136 _call_thread(void),
137 _activate_thread(void);
138
139static void
140_delayed_call_timer(
141 timer_call_param_t p0,
142 timer_call_param_t p1
143);
144
145#define qe(x) ((queue_entry_t)(x))
146#define TC(x) ((thread_call_t)(x))
147
148/*
149 * Routine: thread_call_initialize [public]
150 *
151 * Description: Initialize this module, called
152 * early during system initialization.
153 *
154 * Preconditions: None.
155 *
156 * Postconditions: None.
157 */
158
159void
160thread_call_initialize(void)
161{
91447636
A
162 kern_return_t result;
163 thread_t thread;
164 thread_call_t call;
165 spl_t s;
1c79356b 166
91447636 167 simple_lock_init(&thread_call_lock, 0);
1c79356b
A
168
169 s = splsched();
170 simple_lock(&thread_call_lock);
171
55e303ae
A
172 queue_init(&thread_call_pending_queue);
173 queue_init(&thread_call_delayed_queue);
1c79356b 174
55e303ae 175 queue_init(&thread_call_xxx_queue);
1c79356b
A
176 for (
177 call = internal_call_storage;
178 call < &internal_call_storage[internal_call_num];
179 call++) {
180
55e303ae 181 enqueue_tail(&thread_call_xxx_queue, qe(call));
1c79356b
A
182 }
183
55e303ae 184 timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
1c79356b 185
55e303ae
A
186 wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
187 thread_call_vars.thread_lowat = thread_call_thread_min;
1c79356b
A
188
189 activate_thread_awake = TRUE;
1c79356b
A
190
191 simple_unlock(&thread_call_lock);
192 splx(s);
193
91447636
A
194 result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
195 if (result != KERN_SUCCESS)
196 panic("thread_call_initialize");
197
198 thread_deallocate(thread);
1c79356b
A
199}
200
201void
202thread_call_setup(
203 thread_call_t call,
204 thread_call_func_t func,
205 thread_call_param_t param0
206)
207{
208 call_entry_setup(call, func, param0);
209}
210
211/*
212 * Routine: _internal_call_allocate [private, inline]
213 *
214 * Purpose: Allocate an internal callout entry.
215 *
216 * Preconditions: thread_call_lock held.
217 *
218 * Postconditions: None.
219 */
220
221static __inline__ thread_call_t
222_internal_call_allocate(void)
223{
224 thread_call_t call;
225
55e303ae 226 if (queue_empty(&thread_call_xxx_queue))
1c79356b
A
227 panic("_internal_call_allocate");
228
55e303ae 229 call = TC(dequeue_head(&thread_call_xxx_queue));
1c79356b
A
230
231 return (call);
232}
233
234/*
235 * Routine: _internal_call_release [private, inline]
236 *
237 * Purpose: Release an internal callout entry which
238 * is no longer pending (or delayed).
239 *
240 * Preconditions: thread_call_lock held.
241 *
242 * Postconditions: None.
243 */
244
245static __inline__
246void
247_internal_call_release(
248 thread_call_t call
249)
250{
251 if ( call >= internal_call_storage &&
252 call < &internal_call_storage[internal_call_num] )
55e303ae 253 enqueue_head(&thread_call_xxx_queue, qe(call));
1c79356b
A
254}
255
256/*
257 * Routine: _pending_call_enqueue [private, inline]
258 *
259 * Purpose: Place an entry at the end of the
260 * pending queue, to be executed soon.
261 *
262 * Preconditions: thread_call_lock held.
263 *
264 * Postconditions: None.
265 */
266
267static __inline__
268void
269_pending_call_enqueue(
270 thread_call_t call
271)
272{
55e303ae
A
273 enqueue_tail(&thread_call_pending_queue, qe(call));
274 if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
275 thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
1c79356b
A
276
277 call->state = PENDING;
278}
279
280/*
281 * Routine: _pending_call_dequeue [private, inline]
282 *
283 * Purpose: Remove an entry from the pending queue,
284 * effectively unscheduling it.
285 *
286 * Preconditions: thread_call_lock held.
287 *
288 * Postconditions: None.
289 */
290
291static __inline__
292void
293_pending_call_dequeue(
294 thread_call_t call
295)
296{
297 (void)remque(qe(call));
55e303ae 298 thread_call_vars.pending_num--;
1c79356b
A
299
300 call->state = IDLE;
301}
302
303/*
304 * Routine: _delayed_call_enqueue [private, inline]
305 *
306 * Purpose: Place an entry on the delayed queue,
307 * after existing entries with an earlier
308 * (or identical) deadline.
309 *
310 * Preconditions: thread_call_lock held.
311 *
312 * Postconditions: None.
313 */
314
315static __inline__
316void
317_delayed_call_enqueue(
318 thread_call_t call
319)
320{
321 thread_call_t current;
322
55e303ae 323 current = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
324
325 while (TRUE) {
55e303ae 326 if ( queue_end(&thread_call_delayed_queue, qe(current)) ||
0b4e3aa0 327 call->deadline < current->deadline ) {
1c79356b
A
328 current = TC(queue_prev(qe(current)));
329 break;
330 }
331
332 current = TC(queue_next(qe(current)));
333 }
334
335 insque(qe(call), qe(current));
55e303ae
A
336 if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
337 thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
1c79356b
A
338
339 call->state = DELAYED;
340}
341
342/*
343 * Routine: _delayed_call_dequeue [private, inline]
344 *
345 * Purpose: Remove an entry from the delayed queue,
346 * effectively unscheduling it.
347 *
348 * Preconditions: thread_call_lock held.
349 *
350 * Postconditions: None.
351 */
352
353static __inline__
354void
355_delayed_call_dequeue(
356 thread_call_t call
357)
358{
359 (void)remque(qe(call));
55e303ae 360 thread_call_vars.delayed_num--;
1c79356b
A
361
362 call->state = IDLE;
363}
364
365/*
366 * Routine: _set_delayed_call_timer [private]
367 *
368 * Purpose: Reset the timer so that it
369 * next expires when the entry is due.
370 *
371 * Preconditions: thread_call_lock held.
372 *
373 * Postconditions: None.
374 */
375
376static __inline__ void
377_set_delayed_call_timer(
378 thread_call_t call
379)
380{
55e303ae 381 timer_call_enter(&thread_call_delaytimer, call->deadline);
1c79356b
A
382}
383
384/*
385 * Routine: _remove_from_pending_queue [private]
386 *
387 * Purpose: Remove the first (or all) matching
388 * entries from the pending queue,
389 * effectively unscheduling them.
390 * Returns whether any matching entries
391 * were found.
392 *
393 * Preconditions: thread_call_lock held.
394 *
395 * Postconditions: None.
396 */
397
398static
399boolean_t
400_remove_from_pending_queue(
401 thread_call_func_t func,
402 thread_call_param_t param0,
403 boolean_t remove_all
404)
405{
406 boolean_t call_removed = FALSE;
407 thread_call_t call;
408
55e303ae 409 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 410
55e303ae 411 while (!queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
412 if ( call->func == func &&
413 call->param0 == param0 ) {
414 thread_call_t next = TC(queue_next(qe(call)));
415
416 _pending_call_dequeue(call);
417
418 _internal_call_release(call);
419
420 call_removed = TRUE;
421 if (!remove_all)
422 break;
423
424 call = next;
425 }
426 else
427 call = TC(queue_next(qe(call)));
428 }
429
430 return (call_removed);
431}
432
433/*
434 * Routine: _remove_from_delayed_queue [private]
435 *
436 * Purpose: Remove the first (or all) matching
437 * entries from the delayed queue,
438 * effectively unscheduling them.
439 * Returns whether any matching entries
440 * were found.
441 *
442 * Preconditions: thread_call_lock held.
443 *
444 * Postconditions: None.
445 */
446
447static
448boolean_t
449_remove_from_delayed_queue(
450 thread_call_func_t func,
451 thread_call_param_t param0,
452 boolean_t remove_all
453)
454{
455 boolean_t call_removed = FALSE;
456 thread_call_t call;
457
55e303ae 458 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 459
55e303ae 460 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
1c79356b
A
461 if ( call->func == func &&
462 call->param0 == param0 ) {
463 thread_call_t next = TC(queue_next(qe(call)));
464
465 _delayed_call_dequeue(call);
466
467 _internal_call_release(call);
468
469 call_removed = TRUE;
470 if (!remove_all)
471 break;
472
473 call = next;
474 }
475 else
476 call = TC(queue_next(qe(call)));
477 }
478
479 return (call_removed);
480}
481
482/*
483 * Routine: thread_call_func [public]
484 *
485 * Purpose: Schedule a function callout.
486 * Guarantees { function, argument }
487 * uniqueness if unique_call is TRUE.
488 *
489 * Preconditions: Callable from an interrupt context
490 * below splsched.
491 *
492 * Postconditions: None.
493 */
494
495void
496thread_call_func(
497 thread_call_func_t func,
498 thread_call_param_t param,
499 boolean_t unique_call
500)
501{
502 thread_call_t call;
55e303ae 503 spl_t s;
1c79356b 504
1c79356b
A
505 s = splsched();
506 simple_lock(&thread_call_lock);
507
55e303ae 508 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 509
55e303ae 510 while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
511 if ( call->func == func &&
512 call->param0 == param ) {
513 break;
514 }
515
516 call = TC(queue_next(qe(call)));
517 }
518
55e303ae 519 if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
520 call = _internal_call_allocate();
521 call->func = func;
522 call->param0 = param;
523 call->param1 = 0;
524
525 _pending_call_enqueue(call);
526
55e303ae 527 if (thread_call_vars.active_num <= 0)
9bccf70c 528 _call_thread_wake();
1c79356b
A
529 }
530
531 simple_unlock(&thread_call_lock);
532 splx(s);
533}
534
535/*
536 * Routine: thread_call_func_delayed [public]
537 *
538 * Purpose: Schedule a function callout to
539 * occur at the stated time.
540 *
541 * Preconditions: Callable from an interrupt context
542 * below splsched.
543 *
544 * Postconditions: None.
545 */
546
547void
548thread_call_func_delayed(
549 thread_call_func_t func,
550 thread_call_param_t param,
0b4e3aa0 551 uint64_t deadline
1c79356b
A
552)
553{
554 thread_call_t call;
55e303ae 555 spl_t s;
1c79356b 556
1c79356b
A
557 s = splsched();
558 simple_lock(&thread_call_lock);
559
560 call = _internal_call_allocate();
561 call->func = func;
562 call->param0 = param;
563 call->param1 = 0;
564 call->deadline = deadline;
565
566 _delayed_call_enqueue(call);
567
55e303ae 568 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
569 _set_delayed_call_timer(call);
570
571 simple_unlock(&thread_call_lock);
572 splx(s);
573}
574
575/*
576 * Routine: thread_call_func_cancel [public]
577 *
578 * Purpose: Unschedule a function callout.
579 * Removes one (or all)
580 * { function, argument }
581 * instance(s) from either (or both)
582 * the pending and the delayed queue,
583 * in that order. Returns a boolean
584 * indicating whether any calls were
585 * cancelled.
586 *
587 * Preconditions: Callable from an interrupt context
588 * below splsched.
589 *
590 * Postconditions: None.
591 */
592
593boolean_t
594thread_call_func_cancel(
595 thread_call_func_t func,
596 thread_call_param_t param,
597 boolean_t cancel_all
598)
599{
600 boolean_t result;
55e303ae 601 spl_t s;
1c79356b
A
602
603 s = splsched();
604 simple_lock(&thread_call_lock);
605
606 if (cancel_all)
607 result = _remove_from_pending_queue(func, param, cancel_all) |
608 _remove_from_delayed_queue(func, param, cancel_all);
609 else
610 result = _remove_from_pending_queue(func, param, cancel_all) ||
611 _remove_from_delayed_queue(func, param, cancel_all);
612
613 simple_unlock(&thread_call_lock);
614 splx(s);
615
616 return (result);
617}
618
619/*
620 * Routine: thread_call_allocate [public]
621 *
622 * Purpose: Allocate an external callout
623 * entry.
624 *
625 * Preconditions: None.
626 *
627 * Postconditions: None.
628 */
629
630thread_call_t
631thread_call_allocate(
632 thread_call_func_t func,
633 thread_call_param_t param0
634)
635{
636 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
637
638 call->func = func;
639 call->param0 = param0;
640 call->state = IDLE;
641
642 return (call);
643}
644
645/*
646 * Routine: thread_call_free [public]
647 *
648 * Purpose: Free an external callout
649 * entry.
650 *
651 * Preconditions: None.
652 *
653 * Postconditions: None.
654 */
655
656boolean_t
657thread_call_free(
658 thread_call_t call
659)
660{
55e303ae 661 spl_t s;
1c79356b
A
662
663 s = splsched();
664 simple_lock(&thread_call_lock);
665
666 if (call->state != IDLE) {
667 simple_unlock(&thread_call_lock);
668 splx(s);
669
670 return (FALSE);
671 }
672
673 simple_unlock(&thread_call_lock);
674 splx(s);
675
91447636 676 kfree(call, sizeof (thread_call_data_t));
1c79356b
A
677
678 return (TRUE);
679}
680
681/*
682 * Routine: thread_call_enter [public]
683 *
684 * Purpose: Schedule an external callout
685 * entry to occur "soon". Returns a
686 * boolean indicating whether the call
687 * had been already scheduled.
688 *
689 * Preconditions: Callable from an interrupt context
690 * below splsched.
691 *
692 * Postconditions: None.
693 */
694
695boolean_t
696thread_call_enter(
697 thread_call_t call
698)
699{
700 boolean_t result = TRUE;
55e303ae 701 spl_t s;
1c79356b
A
702
703 s = splsched();
704 simple_lock(&thread_call_lock);
705
706 if (call->state != PENDING) {
707 if (call->state == DELAYED)
708 _delayed_call_dequeue(call);
709 else if (call->state == IDLE)
710 result = FALSE;
711
712 _pending_call_enqueue(call);
9bccf70c 713
55e303ae 714 if (thread_call_vars.active_num <= 0)
9bccf70c 715 _call_thread_wake();
1c79356b
A
716 }
717
718 call->param1 = 0;
719
720 simple_unlock(&thread_call_lock);
721 splx(s);
722
723 return (result);
724}
725
726boolean_t
727thread_call_enter1(
728 thread_call_t call,
729 thread_call_param_t param1
730)
731{
732 boolean_t result = TRUE;
55e303ae 733 spl_t s;
1c79356b
A
734
735 s = splsched();
736 simple_lock(&thread_call_lock);
737
738 if (call->state != PENDING) {
739 if (call->state == DELAYED)
740 _delayed_call_dequeue(call);
741 else if (call->state == IDLE)
742 result = FALSE;
743
744 _pending_call_enqueue(call);
745
55e303ae 746 if (thread_call_vars.active_num <= 0)
9bccf70c 747 _call_thread_wake();
1c79356b
A
748 }
749
750 call->param1 = param1;
751
752 simple_unlock(&thread_call_lock);
753 splx(s);
754
755 return (result);
756}
757
758/*
759 * Routine: thread_call_enter_delayed [public]
760 *
761 * Purpose: Schedule an external callout
762 * entry to occur at the stated time.
763 * Returns a boolean indicating whether
764 * the call had been already scheduled.
765 *
766 * Preconditions: Callable from an interrupt context
767 * below splsched.
768 *
769 * Postconditions: None.
770 */
771
772boolean_t
773thread_call_enter_delayed(
774 thread_call_t call,
0b4e3aa0 775 uint64_t deadline
1c79356b
A
776)
777{
778 boolean_t result = TRUE;
55e303ae 779 spl_t s;
1c79356b
A
780
781 s = splsched();
782 simple_lock(&thread_call_lock);
783
784 if (call->state == PENDING)
785 _pending_call_dequeue(call);
786 else if (call->state == DELAYED)
787 _delayed_call_dequeue(call);
788 else if (call->state == IDLE)
789 result = FALSE;
790
791 call->param1 = 0;
792 call->deadline = deadline;
793
794 _delayed_call_enqueue(call);
795
55e303ae 796 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
797 _set_delayed_call_timer(call);
798
799 simple_unlock(&thread_call_lock);
800 splx(s);
801
802 return (result);
803}
804
805boolean_t
806thread_call_enter1_delayed(
807 thread_call_t call,
808 thread_call_param_t param1,
0b4e3aa0 809 uint64_t deadline
1c79356b
A
810)
811{
812 boolean_t result = TRUE;
55e303ae 813 spl_t s;
1c79356b
A
814
815 s = splsched();
816 simple_lock(&thread_call_lock);
817
818 if (call->state == PENDING)
819 _pending_call_dequeue(call);
820 else if (call->state == DELAYED)
821 _delayed_call_dequeue(call);
822 else if (call->state == IDLE)
823 result = FALSE;
824
825 call->param1 = param1;
826 call->deadline = deadline;
827
828 _delayed_call_enqueue(call);
829
55e303ae 830 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
831 _set_delayed_call_timer(call);
832
833 simple_unlock(&thread_call_lock);
834 splx(s);
835
836 return (result);
837}
838
839/*
840 * Routine: thread_call_cancel [public]
841 *
842 * Purpose: Unschedule a callout entry.
843 * Returns a boolean indicating
844 * whether the call had actually
845 * been scheduled.
846 *
847 * Preconditions: Callable from an interrupt context
848 * below splsched.
849 *
850 * Postconditions: None.
851 */
852
853boolean_t
854thread_call_cancel(
855 thread_call_t call
856)
857{
858 boolean_t result = TRUE;
55e303ae 859 spl_t s;
1c79356b
A
860
861 s = splsched();
862 simple_lock(&thread_call_lock);
863
864 if (call->state == PENDING)
865 _pending_call_dequeue(call);
866 else if (call->state == DELAYED)
867 _delayed_call_dequeue(call);
868 else
869 result = FALSE;
870
871 simple_unlock(&thread_call_lock);
872 splx(s);
873
874 return (result);
875}
876
877/*
878 * Routine: thread_call_is_delayed [public]
879 *
880 * Purpose: Returns a boolean indicating
881 * whether a call is currently scheduled
882 * to occur at a later time. Optionally
883 * returns the expiration time.
884 *
885 * Preconditions: Callable from an interrupt context
886 * below splsched.
887 *
888 * Postconditions: None.
889 */
890
891boolean_t
892thread_call_is_delayed(
893 thread_call_t call,
0b4e3aa0 894 uint64_t *deadline)
1c79356b
A
895{
896 boolean_t result = FALSE;
55e303ae 897 spl_t s;
1c79356b
A
898
899 s = splsched();
900 simple_lock(&thread_call_lock);
901
902 if (call->state == DELAYED) {
903 if (deadline != NULL)
904 *deadline = call->deadline;
905 result = TRUE;
906 }
907
908 simple_unlock(&thread_call_lock);
909 splx(s);
910
911 return (result);
912}
913
914/*
9bccf70c 915 * Routine: _call_thread_wake [private, inline]
1c79356b
A
916 *
917 * Purpose: Wake a callout thread to service
9bccf70c
A
918 * pending callout entries. May wake
919 * the activate thread in order to
1c79356b
A
920 * create additional callout threads.
921 *
922 * Preconditions: thread_call_lock held.
923 *
924 * Postconditions: None.
925 */
926
927static __inline__
928void
929_call_thread_wake(void)
930{
91447636 931 if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
55e303ae 932 thread_call_vars.idle_thread_num--;
9bccf70c 933
55e303ae
A
934 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
935 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b
A
936 }
937 else
9bccf70c 938 if (!activate_thread_awake) {
55e303ae 939 thread_wakeup_one(&activate_thread_awake);
1c79356b
A
940 activate_thread_awake = TRUE;
941 }
942}
943
9bccf70c
A
944/*
945 * Routine: call_thread_block [private]
946 *
947 * Purpose: Hook via thread dispatch on
948 * the occasion of a callout blocking.
949 *
950 * Preconditions: splsched.
951 *
952 * Postconditions: None.
953 */
954
955void
956call_thread_block(void)
957{
958 simple_lock(&thread_call_lock);
959
55e303ae
A
960 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
961 thread_call_vars.active_lowat = thread_call_vars.active_num;
9bccf70c 962
55e303ae
A
963 if ( thread_call_vars.active_num <= 0 &&
964 thread_call_vars.pending_num > 0 )
9bccf70c
A
965 _call_thread_wake();
966
967 simple_unlock(&thread_call_lock);
968}
969
970/*
971 * Routine: call_thread_unblock [private]
972 *
973 * Purpose: Hook via thread wakeup on
974 * the occasion of a callout unblocking.
975 *
976 * Preconditions: splsched.
977 *
978 * Postconditions: None.
979 */
980
981void
982call_thread_unblock(void)
983{
984 simple_lock(&thread_call_lock);
985
55e303ae
A
986 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
987 thread_call_vars.active_hiwat = thread_call_vars.active_num;
9bccf70c
A
988
989 simple_unlock(&thread_call_lock);
990}
1c79356b
A
991
992/*
993 * Routine: _call_thread [private]
994 *
995 * Purpose: Executed by a callout thread.
996 *
997 * Preconditions: None.
998 *
999 * Postconditions: None.
1000 */
1001
1002static
1003void
1004_call_thread_continue(void)
1005{
1006 thread_t self = current_thread();
1007
1c79356b
A
1008 (void) splsched();
1009 simple_lock(&thread_call_lock);
1010
91447636 1011 self->options |= TH_OPT_CALLOUT;
9bccf70c 1012
55e303ae 1013 while (thread_call_vars.pending_num > 0) {
1c79356b
A
1014 thread_call_t call;
1015 thread_call_func_t func;
1016 thread_call_param_t param0, param1;
1017
55e303ae
A
1018 call = TC(dequeue_head(&thread_call_pending_queue));
1019 thread_call_vars.pending_num--;
1c79356b
A
1020
1021 func = call->func;
1022 param0 = call->param0;
1023 param1 = call->param1;
1024
1025 call->state = IDLE;
1026
1027 _internal_call_release(call);
1028
1c79356b
A
1029 simple_unlock(&thread_call_lock);
1030 (void) spllo();
1031
55e303ae
A
1032 KERNEL_DEBUG_CONSTANT(
1033 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
1034 (int)func, (int)param0, (int)param1, 0, 0);
1035
1c79356b
A
1036 (*func)(param0, param1);
1037
1038 (void)thread_funnel_set(self->funnel_lock, FALSE);
1039
1040 (void) splsched();
1041 simple_lock(&thread_call_lock);
1c79356b 1042 }
9bccf70c 1043
91447636 1044 self->options &= ~TH_OPT_CALLOUT;
9bccf70c 1045
55e303ae
A
1046 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
1047 thread_call_vars.active_lowat = thread_call_vars.active_num;
1c79356b 1048
55e303ae
A
1049 if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
1050 thread_call_vars.idle_thread_num++;
1c79356b 1051
91447636 1052 wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
1c79356b
A
1053
1054 simple_unlock(&thread_call_lock);
1055 (void) spllo();
1056
91447636 1057 thread_block((thread_continue_t)_call_thread_continue);
1c79356b
A
1058 /* NOTREACHED */
1059 }
1060
55e303ae 1061 thread_call_vars.thread_num--;
1c79356b
A
1062
1063 simple_unlock(&thread_call_lock);
1064 (void) spllo();
1065
91447636 1066 thread_terminate(self);
1c79356b
A
1067 /* NOTREACHED */
1068}
1069
/* Entry point for a freshly-created callout thread. */
static
void
_call_thread(void)
{
	_call_thread_continue();
	/* NOTREACHED */
}
1077
1078/*
1079 * Routine: _activate_thread [private]
1080 *
1081 * Purpose: Executed by the activate thread.
1082 *
1083 * Preconditions: None.
1084 *
1085 * Postconditions: Never terminates.
1086 */
1087
1088static
1089void
1090_activate_thread_continue(void)
1091{
91447636
A
1092 kern_return_t result;
1093 thread_t thread;
1094
1c79356b
A
1095 (void) splsched();
1096 simple_lock(&thread_call_lock);
1097
55e303ae
A
1098 while ( thread_call_vars.active_num <= 0 &&
1099 thread_call_vars.pending_num > 0 ) {
9bccf70c 1100
55e303ae
A
1101 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
1102 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b 1103
55e303ae
A
1104 if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
1105 thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
1c79356b
A
1106
1107 simple_unlock(&thread_call_lock);
1108 (void) spllo();
1109
91447636
A
1110 result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
1111 if (result != KERN_SUCCESS)
1112 panic("activate_thread");
1113
1114 thread_deallocate(thread);
55e303ae 1115
9bccf70c
A
1116 (void) splsched();
1117 simple_lock(&thread_call_lock);
1c79356b 1118 }
1c79356b
A
1119
1120 assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
1121 activate_thread_awake = FALSE;
1122
1123 simple_unlock(&thread_call_lock);
1124 (void) spllo();
1125
91447636 1126 thread_block((thread_continue_t)_activate_thread_continue);
1c79356b
A
1127 /* NOTREACHED */
1128}
1129
1130static
1131void
1132_activate_thread(void)
1133{
55e303ae 1134 thread_t self = current_thread();
1c79356b 1135
91447636 1136 self->options |= TH_OPT_VMPRIV;
1c79356b 1137 vm_page_free_reserve(2); /* XXX */
1c79356b
A
1138
1139 _activate_thread_continue();
1140 /* NOTREACHED */
1141}
1142
1143static
1144void
1145_delayed_call_timer(
91447636
A
1146 __unused timer_call_param_t p0,
1147 __unused timer_call_param_t p1
1c79356b
A
1148)
1149{
0b4e3aa0 1150 uint64_t timestamp;
1c79356b
A
1151 thread_call_t call;
1152 boolean_t new_pending = FALSE;
55e303ae 1153 spl_t s;
1c79356b
A
1154
1155 s = splsched();
1156 simple_lock(&thread_call_lock);
1157
1158 clock_get_uptime(&timestamp);
1159
55e303ae 1160 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 1161
55e303ae 1162 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
0b4e3aa0 1163 if (call->deadline <= timestamp) {
1c79356b
A
1164 _delayed_call_dequeue(call);
1165
1166 _pending_call_enqueue(call);
1167 new_pending = TRUE;
1168 }
1169 else
1170 break;
1171
55e303ae 1172 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
1173 }
1174
55e303ae 1175 if (!queue_end(&thread_call_delayed_queue, qe(call)))
1c79356b
A
1176 _set_delayed_call_timer(call);
1177
55e303ae 1178 if (new_pending && thread_call_vars.active_num <= 0)
1c79356b
A
1179 _call_thread_wake();
1180
1181 simple_unlock(&thread_call_lock);
1182 splx(s);
1183}