/*
 * osfmk/kern/thread_call.c — Apple xnu-792.21.3
 * (recovered from a git-blame capture; blame annotations stripped)
 */
1c79356b 1/*
91447636 2 * Copyright (c) 1993-1995, 1999-2005 Apple Computer, Inc.
1c79356b
A
3 * All rights reserved.
4 *
8f6c56a5 5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 6 *
8f6c56a5
A
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
8ad349bb 25 * limitations under the License.
8f6c56a5
A
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 28 */
1c79356b
A
29
30#include <mach/mach_types.h>
91447636 31#include <mach/thread_act.h>
1c79356b 32
91447636
A
33#include <kern/kern_types.h>
34#include <kern/kalloc.h>
1c79356b
A
35#include <kern/sched_prim.h>
36#include <kern/clock.h>
37#include <kern/task.h>
38#include <kern/thread.h>
91447636
A
39#include <kern/wait_queue.h>
40
41#include <vm/vm_pageout.h>
1c79356b
A
42
43#include <kern/thread_call.h>
44#include <kern/call_entry.h>
45
46#include <kern/timer_call.h>
47
55e303ae
A
48#include <sys/kdebug.h>
49
1c79356b
A
50#define internal_call_num 768
51
52#define thread_call_thread_min 4
53
54static
55thread_call_data_t
56 internal_call_storage[internal_call_num];
57
58decl_simple_lock_data(static,thread_call_lock)
59
60static
61timer_call_data_t
55e303ae 62 thread_call_delaytimer;
1c79356b
A
63
64static
65queue_head_t
55e303ae
A
66 thread_call_xxx_queue,
67 thread_call_pending_queue, thread_call_delayed_queue;
1c79356b
A
68
69static
9bccf70c 70struct wait_queue
55e303ae 71 call_thread_waitqueue;
1c79356b
A
72
73static
74boolean_t
75 activate_thread_awake;
76
77static struct {
78 int pending_num,
79 pending_hiwat;
80 int active_num,
9bccf70c
A
81 active_hiwat,
82 active_lowat;
1c79356b
A
83 int delayed_num,
84 delayed_hiwat;
85 int idle_thread_num;
86 int thread_num,
87 thread_hiwat,
88 thread_lowat;
55e303ae 89} thread_call_vars;
1c79356b
A
90
91static __inline__ thread_call_t
92 _internal_call_allocate(void);
93
94static __inline__ void
95_internal_call_release(
96 thread_call_t call
97);
98
99static __inline__ void
100_pending_call_enqueue(
101 thread_call_t call
102),
103_pending_call_dequeue(
104 thread_call_t call
105),
106_delayed_call_enqueue(
107 thread_call_t call
108),
109_delayed_call_dequeue(
110 thread_call_t call
111);
112
91447636 113static __inline__ void
1c79356b
A
114_set_delayed_call_timer(
115 thread_call_t call
116);
117
118static boolean_t
119_remove_from_pending_queue(
120 thread_call_func_t func,
121 thread_call_param_t param0,
122 boolean_t remove_all
123),
124_remove_from_delayed_queue(
125 thread_call_func_t func,
126 thread_call_param_t param0,
127 boolean_t remove_all
128);
129
130static __inline__ void
131 _call_thread_wake(void);
132
133static void
134 _call_thread(void),
135 _activate_thread(void);
136
137static void
138_delayed_call_timer(
139 timer_call_param_t p0,
140 timer_call_param_t p1
141);
142
143#define qe(x) ((queue_entry_t)(x))
144#define TC(x) ((thread_call_t)(x))
145
146/*
147 * Routine: thread_call_initialize [public]
148 *
149 * Description: Initialize this module, called
150 * early during system initialization.
151 *
152 * Preconditions: None.
153 *
154 * Postconditions: None.
155 */
156
157void
158thread_call_initialize(void)
159{
91447636
A
160 kern_return_t result;
161 thread_t thread;
162 thread_call_t call;
163 spl_t s;
1c79356b 164
91447636 165 simple_lock_init(&thread_call_lock, 0);
1c79356b
A
166
167 s = splsched();
168 simple_lock(&thread_call_lock);
169
55e303ae
A
170 queue_init(&thread_call_pending_queue);
171 queue_init(&thread_call_delayed_queue);
1c79356b 172
55e303ae 173 queue_init(&thread_call_xxx_queue);
1c79356b
A
174 for (
175 call = internal_call_storage;
176 call < &internal_call_storage[internal_call_num];
177 call++) {
178
55e303ae 179 enqueue_tail(&thread_call_xxx_queue, qe(call));
1c79356b
A
180 }
181
55e303ae 182 timer_call_setup(&thread_call_delaytimer, _delayed_call_timer, NULL);
1c79356b 183
55e303ae
A
184 wait_queue_init(&call_thread_waitqueue, SYNC_POLICY_FIFO);
185 thread_call_vars.thread_lowat = thread_call_thread_min;
1c79356b
A
186
187 activate_thread_awake = TRUE;
1c79356b
A
188
189 simple_unlock(&thread_call_lock);
190 splx(s);
191
91447636
A
192 result = kernel_thread_start_priority((thread_continue_t)_activate_thread, NULL, MAXPRI_KERNEL - 2, &thread);
193 if (result != KERN_SUCCESS)
194 panic("thread_call_initialize");
195
196 thread_deallocate(thread);
1c79356b
A
197}
198
199void
200thread_call_setup(
201 thread_call_t call,
202 thread_call_func_t func,
203 thread_call_param_t param0
204)
205{
206 call_entry_setup(call, func, param0);
207}
208
209/*
210 * Routine: _internal_call_allocate [private, inline]
211 *
212 * Purpose: Allocate an internal callout entry.
213 *
214 * Preconditions: thread_call_lock held.
215 *
216 * Postconditions: None.
217 */
218
219static __inline__ thread_call_t
220_internal_call_allocate(void)
221{
222 thread_call_t call;
223
55e303ae 224 if (queue_empty(&thread_call_xxx_queue))
1c79356b
A
225 panic("_internal_call_allocate");
226
55e303ae 227 call = TC(dequeue_head(&thread_call_xxx_queue));
1c79356b
A
228
229 return (call);
230}
231
232/*
233 * Routine: _internal_call_release [private, inline]
234 *
235 * Purpose: Release an internal callout entry which
236 * is no longer pending (or delayed).
237 *
238 * Preconditions: thread_call_lock held.
239 *
240 * Postconditions: None.
241 */
242
243static __inline__
244void
245_internal_call_release(
246 thread_call_t call
247)
248{
249 if ( call >= internal_call_storage &&
250 call < &internal_call_storage[internal_call_num] )
55e303ae 251 enqueue_head(&thread_call_xxx_queue, qe(call));
1c79356b
A
252}
253
254/*
255 * Routine: _pending_call_enqueue [private, inline]
256 *
257 * Purpose: Place an entry at the end of the
258 * pending queue, to be executed soon.
259 *
260 * Preconditions: thread_call_lock held.
261 *
262 * Postconditions: None.
263 */
264
265static __inline__
266void
267_pending_call_enqueue(
268 thread_call_t call
269)
270{
55e303ae
A
271 enqueue_tail(&thread_call_pending_queue, qe(call));
272 if (++thread_call_vars.pending_num > thread_call_vars.pending_hiwat)
273 thread_call_vars.pending_hiwat = thread_call_vars.pending_num;
1c79356b
A
274
275 call->state = PENDING;
276}
277
278/*
279 * Routine: _pending_call_dequeue [private, inline]
280 *
281 * Purpose: Remove an entry from the pending queue,
282 * effectively unscheduling it.
283 *
284 * Preconditions: thread_call_lock held.
285 *
286 * Postconditions: None.
287 */
288
289static __inline__
290void
291_pending_call_dequeue(
292 thread_call_t call
293)
294{
295 (void)remque(qe(call));
55e303ae 296 thread_call_vars.pending_num--;
1c79356b
A
297
298 call->state = IDLE;
299}
300
301/*
302 * Routine: _delayed_call_enqueue [private, inline]
303 *
304 * Purpose: Place an entry on the delayed queue,
305 * after existing entries with an earlier
306 * (or identical) deadline.
307 *
308 * Preconditions: thread_call_lock held.
309 *
310 * Postconditions: None.
311 */
312
313static __inline__
314void
315_delayed_call_enqueue(
316 thread_call_t call
317)
318{
319 thread_call_t current;
320
55e303ae 321 current = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
322
323 while (TRUE) {
55e303ae 324 if ( queue_end(&thread_call_delayed_queue, qe(current)) ||
0b4e3aa0 325 call->deadline < current->deadline ) {
1c79356b
A
326 current = TC(queue_prev(qe(current)));
327 break;
328 }
329
330 current = TC(queue_next(qe(current)));
331 }
332
333 insque(qe(call), qe(current));
55e303ae
A
334 if (++thread_call_vars.delayed_num > thread_call_vars.delayed_hiwat)
335 thread_call_vars.delayed_hiwat = thread_call_vars.delayed_num;
1c79356b
A
336
337 call->state = DELAYED;
338}
339
340/*
341 * Routine: _delayed_call_dequeue [private, inline]
342 *
343 * Purpose: Remove an entry from the delayed queue,
344 * effectively unscheduling it.
345 *
346 * Preconditions: thread_call_lock held.
347 *
348 * Postconditions: None.
349 */
350
351static __inline__
352void
353_delayed_call_dequeue(
354 thread_call_t call
355)
356{
357 (void)remque(qe(call));
55e303ae 358 thread_call_vars.delayed_num--;
1c79356b
A
359
360 call->state = IDLE;
361}
362
363/*
364 * Routine: _set_delayed_call_timer [private]
365 *
366 * Purpose: Reset the timer so that it
367 * next expires when the entry is due.
368 *
369 * Preconditions: thread_call_lock held.
370 *
371 * Postconditions: None.
372 */
373
374static __inline__ void
375_set_delayed_call_timer(
376 thread_call_t call
377)
378{
55e303ae 379 timer_call_enter(&thread_call_delaytimer, call->deadline);
1c79356b
A
380}
381
382/*
383 * Routine: _remove_from_pending_queue [private]
384 *
385 * Purpose: Remove the first (or all) matching
386 * entries from the pending queue,
387 * effectively unscheduling them.
388 * Returns whether any matching entries
389 * were found.
390 *
391 * Preconditions: thread_call_lock held.
392 *
393 * Postconditions: None.
394 */
395
396static
397boolean_t
398_remove_from_pending_queue(
399 thread_call_func_t func,
400 thread_call_param_t param0,
401 boolean_t remove_all
402)
403{
404 boolean_t call_removed = FALSE;
405 thread_call_t call;
406
55e303ae 407 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 408
55e303ae 409 while (!queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
410 if ( call->func == func &&
411 call->param0 == param0 ) {
412 thread_call_t next = TC(queue_next(qe(call)));
413
414 _pending_call_dequeue(call);
415
416 _internal_call_release(call);
417
418 call_removed = TRUE;
419 if (!remove_all)
420 break;
421
422 call = next;
423 }
424 else
425 call = TC(queue_next(qe(call)));
426 }
427
428 return (call_removed);
429}
430
431/*
432 * Routine: _remove_from_delayed_queue [private]
433 *
434 * Purpose: Remove the first (or all) matching
435 * entries from the delayed queue,
436 * effectively unscheduling them.
437 * Returns whether any matching entries
438 * were found.
439 *
440 * Preconditions: thread_call_lock held.
441 *
442 * Postconditions: None.
443 */
444
445static
446boolean_t
447_remove_from_delayed_queue(
448 thread_call_func_t func,
449 thread_call_param_t param0,
450 boolean_t remove_all
451)
452{
453 boolean_t call_removed = FALSE;
454 thread_call_t call;
455
55e303ae 456 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 457
55e303ae 458 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
1c79356b
A
459 if ( call->func == func &&
460 call->param0 == param0 ) {
461 thread_call_t next = TC(queue_next(qe(call)));
462
463 _delayed_call_dequeue(call);
464
465 _internal_call_release(call);
466
467 call_removed = TRUE;
468 if (!remove_all)
469 break;
470
471 call = next;
472 }
473 else
474 call = TC(queue_next(qe(call)));
475 }
476
477 return (call_removed);
478}
479
480/*
481 * Routine: thread_call_func [public]
482 *
483 * Purpose: Schedule a function callout.
484 * Guarantees { function, argument }
485 * uniqueness if unique_call is TRUE.
486 *
487 * Preconditions: Callable from an interrupt context
488 * below splsched.
489 *
490 * Postconditions: None.
491 */
492
493void
494thread_call_func(
495 thread_call_func_t func,
496 thread_call_param_t param,
497 boolean_t unique_call
498)
499{
500 thread_call_t call;
55e303ae 501 spl_t s;
1c79356b 502
1c79356b
A
503 s = splsched();
504 simple_lock(&thread_call_lock);
505
55e303ae 506 call = TC(queue_first(&thread_call_pending_queue));
1c79356b 507
55e303ae 508 while (unique_call && !queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
509 if ( call->func == func &&
510 call->param0 == param ) {
511 break;
512 }
513
514 call = TC(queue_next(qe(call)));
515 }
516
55e303ae 517 if (!unique_call || queue_end(&thread_call_pending_queue, qe(call))) {
1c79356b
A
518 call = _internal_call_allocate();
519 call->func = func;
520 call->param0 = param;
521 call->param1 = 0;
522
523 _pending_call_enqueue(call);
524
55e303ae 525 if (thread_call_vars.active_num <= 0)
9bccf70c 526 _call_thread_wake();
1c79356b
A
527 }
528
529 simple_unlock(&thread_call_lock);
530 splx(s);
531}
532
533/*
534 * Routine: thread_call_func_delayed [public]
535 *
536 * Purpose: Schedule a function callout to
537 * occur at the stated time.
538 *
539 * Preconditions: Callable from an interrupt context
540 * below splsched.
541 *
542 * Postconditions: None.
543 */
544
545void
546thread_call_func_delayed(
547 thread_call_func_t func,
548 thread_call_param_t param,
0b4e3aa0 549 uint64_t deadline
1c79356b
A
550)
551{
552 thread_call_t call;
55e303ae 553 spl_t s;
1c79356b 554
1c79356b
A
555 s = splsched();
556 simple_lock(&thread_call_lock);
557
558 call = _internal_call_allocate();
559 call->func = func;
560 call->param0 = param;
561 call->param1 = 0;
562 call->deadline = deadline;
563
564 _delayed_call_enqueue(call);
565
55e303ae 566 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
567 _set_delayed_call_timer(call);
568
569 simple_unlock(&thread_call_lock);
570 splx(s);
571}
572
573/*
574 * Routine: thread_call_func_cancel [public]
575 *
576 * Purpose: Unschedule a function callout.
577 * Removes one (or all)
578 * { function, argument }
579 * instance(s) from either (or both)
580 * the pending and the delayed queue,
581 * in that order. Returns a boolean
582 * indicating whether any calls were
583 * cancelled.
584 *
585 * Preconditions: Callable from an interrupt context
586 * below splsched.
587 *
588 * Postconditions: None.
589 */
590
591boolean_t
592thread_call_func_cancel(
593 thread_call_func_t func,
594 thread_call_param_t param,
595 boolean_t cancel_all
596)
597{
598 boolean_t result;
55e303ae 599 spl_t s;
1c79356b
A
600
601 s = splsched();
602 simple_lock(&thread_call_lock);
603
604 if (cancel_all)
605 result = _remove_from_pending_queue(func, param, cancel_all) |
606 _remove_from_delayed_queue(func, param, cancel_all);
607 else
608 result = _remove_from_pending_queue(func, param, cancel_all) ||
609 _remove_from_delayed_queue(func, param, cancel_all);
610
611 simple_unlock(&thread_call_lock);
612 splx(s);
613
614 return (result);
615}
616
617/*
618 * Routine: thread_call_allocate [public]
619 *
620 * Purpose: Allocate an external callout
621 * entry.
622 *
623 * Preconditions: None.
624 *
625 * Postconditions: None.
626 */
627
628thread_call_t
629thread_call_allocate(
630 thread_call_func_t func,
631 thread_call_param_t param0
632)
633{
634 thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t));
635
636 call->func = func;
637 call->param0 = param0;
638 call->state = IDLE;
639
640 return (call);
641}
642
643/*
644 * Routine: thread_call_free [public]
645 *
646 * Purpose: Free an external callout
647 * entry.
648 *
649 * Preconditions: None.
650 *
651 * Postconditions: None.
652 */
653
654boolean_t
655thread_call_free(
656 thread_call_t call
657)
658{
55e303ae 659 spl_t s;
1c79356b
A
660
661 s = splsched();
662 simple_lock(&thread_call_lock);
663
664 if (call->state != IDLE) {
665 simple_unlock(&thread_call_lock);
666 splx(s);
667
668 return (FALSE);
669 }
670
671 simple_unlock(&thread_call_lock);
672 splx(s);
673
91447636 674 kfree(call, sizeof (thread_call_data_t));
1c79356b
A
675
676 return (TRUE);
677}
678
679/*
680 * Routine: thread_call_enter [public]
681 *
682 * Purpose: Schedule an external callout
683 * entry to occur "soon". Returns a
684 * boolean indicating whether the call
685 * had been already scheduled.
686 *
687 * Preconditions: Callable from an interrupt context
688 * below splsched.
689 *
690 * Postconditions: None.
691 */
692
693boolean_t
694thread_call_enter(
695 thread_call_t call
696)
697{
698 boolean_t result = TRUE;
55e303ae 699 spl_t s;
1c79356b
A
700
701 s = splsched();
702 simple_lock(&thread_call_lock);
703
704 if (call->state != PENDING) {
705 if (call->state == DELAYED)
706 _delayed_call_dequeue(call);
707 else if (call->state == IDLE)
708 result = FALSE;
709
710 _pending_call_enqueue(call);
9bccf70c 711
55e303ae 712 if (thread_call_vars.active_num <= 0)
9bccf70c 713 _call_thread_wake();
1c79356b
A
714 }
715
716 call->param1 = 0;
717
718 simple_unlock(&thread_call_lock);
719 splx(s);
720
721 return (result);
722}
723
724boolean_t
725thread_call_enter1(
726 thread_call_t call,
727 thread_call_param_t param1
728)
729{
730 boolean_t result = TRUE;
55e303ae 731 spl_t s;
1c79356b
A
732
733 s = splsched();
734 simple_lock(&thread_call_lock);
735
736 if (call->state != PENDING) {
737 if (call->state == DELAYED)
738 _delayed_call_dequeue(call);
739 else if (call->state == IDLE)
740 result = FALSE;
741
742 _pending_call_enqueue(call);
743
55e303ae 744 if (thread_call_vars.active_num <= 0)
9bccf70c 745 _call_thread_wake();
1c79356b
A
746 }
747
748 call->param1 = param1;
749
750 simple_unlock(&thread_call_lock);
751 splx(s);
752
753 return (result);
754}
755
756/*
757 * Routine: thread_call_enter_delayed [public]
758 *
759 * Purpose: Schedule an external callout
760 * entry to occur at the stated time.
761 * Returns a boolean indicating whether
762 * the call had been already scheduled.
763 *
764 * Preconditions: Callable from an interrupt context
765 * below splsched.
766 *
767 * Postconditions: None.
768 */
769
770boolean_t
771thread_call_enter_delayed(
772 thread_call_t call,
0b4e3aa0 773 uint64_t deadline
1c79356b
A
774)
775{
776 boolean_t result = TRUE;
55e303ae 777 spl_t s;
1c79356b
A
778
779 s = splsched();
780 simple_lock(&thread_call_lock);
781
782 if (call->state == PENDING)
783 _pending_call_dequeue(call);
784 else if (call->state == DELAYED)
785 _delayed_call_dequeue(call);
786 else if (call->state == IDLE)
787 result = FALSE;
788
789 call->param1 = 0;
790 call->deadline = deadline;
791
792 _delayed_call_enqueue(call);
793
55e303ae 794 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
795 _set_delayed_call_timer(call);
796
797 simple_unlock(&thread_call_lock);
798 splx(s);
799
800 return (result);
801}
802
803boolean_t
804thread_call_enter1_delayed(
805 thread_call_t call,
806 thread_call_param_t param1,
0b4e3aa0 807 uint64_t deadline
1c79356b
A
808)
809{
810 boolean_t result = TRUE;
55e303ae 811 spl_t s;
1c79356b
A
812
813 s = splsched();
814 simple_lock(&thread_call_lock);
815
816 if (call->state == PENDING)
817 _pending_call_dequeue(call);
818 else if (call->state == DELAYED)
819 _delayed_call_dequeue(call);
820 else if (call->state == IDLE)
821 result = FALSE;
822
823 call->param1 = param1;
824 call->deadline = deadline;
825
826 _delayed_call_enqueue(call);
827
55e303ae 828 if (queue_first(&thread_call_delayed_queue) == qe(call))
1c79356b
A
829 _set_delayed_call_timer(call);
830
831 simple_unlock(&thread_call_lock);
832 splx(s);
833
834 return (result);
835}
836
837/*
838 * Routine: thread_call_cancel [public]
839 *
840 * Purpose: Unschedule a callout entry.
841 * Returns a boolean indicating
842 * whether the call had actually
843 * been scheduled.
844 *
845 * Preconditions: Callable from an interrupt context
846 * below splsched.
847 *
848 * Postconditions: None.
849 */
850
851boolean_t
852thread_call_cancel(
853 thread_call_t call
854)
855{
856 boolean_t result = TRUE;
55e303ae 857 spl_t s;
1c79356b
A
858
859 s = splsched();
860 simple_lock(&thread_call_lock);
861
862 if (call->state == PENDING)
863 _pending_call_dequeue(call);
864 else if (call->state == DELAYED)
865 _delayed_call_dequeue(call);
866 else
867 result = FALSE;
868
869 simple_unlock(&thread_call_lock);
870 splx(s);
871
872 return (result);
873}
874
875/*
876 * Routine: thread_call_is_delayed [public]
877 *
878 * Purpose: Returns a boolean indicating
879 * whether a call is currently scheduled
880 * to occur at a later time. Optionally
881 * returns the expiration time.
882 *
883 * Preconditions: Callable from an interrupt context
884 * below splsched.
885 *
886 * Postconditions: None.
887 */
888
889boolean_t
890thread_call_is_delayed(
891 thread_call_t call,
0b4e3aa0 892 uint64_t *deadline)
1c79356b
A
893{
894 boolean_t result = FALSE;
55e303ae 895 spl_t s;
1c79356b
A
896
897 s = splsched();
898 simple_lock(&thread_call_lock);
899
900 if (call->state == DELAYED) {
901 if (deadline != NULL)
902 *deadline = call->deadline;
903 result = TRUE;
904 }
905
906 simple_unlock(&thread_call_lock);
907 splx(s);
908
909 return (result);
910}
911
912/*
9bccf70c 913 * Routine: _call_thread_wake [private, inline]
1c79356b
A
914 *
915 * Purpose: Wake a callout thread to service
9bccf70c
A
916 * pending callout entries. May wake
917 * the activate thread in order to
1c79356b
A
918 * create additional callout threads.
919 *
920 * Preconditions: thread_call_lock held.
921 *
922 * Postconditions: None.
923 */
924
925static __inline__
926void
927_call_thread_wake(void)
928{
91447636 929 if (wait_queue_wakeup_one(&call_thread_waitqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
55e303ae 930 thread_call_vars.idle_thread_num--;
9bccf70c 931
55e303ae
A
932 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
933 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b
A
934 }
935 else
9bccf70c 936 if (!activate_thread_awake) {
55e303ae 937 thread_wakeup_one(&activate_thread_awake);
1c79356b
A
938 activate_thread_awake = TRUE;
939 }
940}
941
9bccf70c
A
942/*
943 * Routine: call_thread_block [private]
944 *
945 * Purpose: Hook via thread dispatch on
946 * the occasion of a callout blocking.
947 *
948 * Preconditions: splsched.
949 *
950 * Postconditions: None.
951 */
952
953void
954call_thread_block(void)
955{
956 simple_lock(&thread_call_lock);
957
55e303ae
A
958 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
959 thread_call_vars.active_lowat = thread_call_vars.active_num;
9bccf70c 960
55e303ae
A
961 if ( thread_call_vars.active_num <= 0 &&
962 thread_call_vars.pending_num > 0 )
9bccf70c
A
963 _call_thread_wake();
964
965 simple_unlock(&thread_call_lock);
966}
967
968/*
969 * Routine: call_thread_unblock [private]
970 *
971 * Purpose: Hook via thread wakeup on
972 * the occasion of a callout unblocking.
973 *
974 * Preconditions: splsched.
975 *
976 * Postconditions: None.
977 */
978
979void
980call_thread_unblock(void)
981{
982 simple_lock(&thread_call_lock);
983
55e303ae
A
984 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
985 thread_call_vars.active_hiwat = thread_call_vars.active_num;
9bccf70c
A
986
987 simple_unlock(&thread_call_lock);
988}
1c79356b
A
989
990/*
991 * Routine: _call_thread [private]
992 *
993 * Purpose: Executed by a callout thread.
994 *
995 * Preconditions: None.
996 *
997 * Postconditions: None.
998 */
999
1000static
1001void
1002_call_thread_continue(void)
1003{
1004 thread_t self = current_thread();
1005
1c79356b
A
1006 (void) splsched();
1007 simple_lock(&thread_call_lock);
1008
91447636 1009 self->options |= TH_OPT_CALLOUT;
9bccf70c 1010
55e303ae 1011 while (thread_call_vars.pending_num > 0) {
1c79356b
A
1012 thread_call_t call;
1013 thread_call_func_t func;
1014 thread_call_param_t param0, param1;
1015
55e303ae
A
1016 call = TC(dequeue_head(&thread_call_pending_queue));
1017 thread_call_vars.pending_num--;
1c79356b
A
1018
1019 func = call->func;
1020 param0 = call->param0;
1021 param1 = call->param1;
1022
1023 call->state = IDLE;
1024
1025 _internal_call_release(call);
1026
1c79356b
A
1027 simple_unlock(&thread_call_lock);
1028 (void) spllo();
1029
55e303ae
A
1030 KERNEL_DEBUG_CONSTANT(
1031 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
1032 (int)func, (int)param0, (int)param1, 0, 0);
1033
1c79356b
A
1034 (*func)(param0, param1);
1035
1036 (void)thread_funnel_set(self->funnel_lock, FALSE);
1037
1038 (void) splsched();
1039 simple_lock(&thread_call_lock);
1c79356b 1040 }
9bccf70c 1041
91447636 1042 self->options &= ~TH_OPT_CALLOUT;
9bccf70c 1043
55e303ae
A
1044 if (--thread_call_vars.active_num < thread_call_vars.active_lowat)
1045 thread_call_vars.active_lowat = thread_call_vars.active_num;
1c79356b 1046
55e303ae
A
1047 if (thread_call_vars.idle_thread_num < thread_call_vars.thread_lowat) {
1048 thread_call_vars.idle_thread_num++;
1c79356b 1049
91447636 1050 wait_queue_assert_wait(&call_thread_waitqueue, NULL, THREAD_UNINT, 0);
1c79356b
A
1051
1052 simple_unlock(&thread_call_lock);
1053 (void) spllo();
1054
91447636 1055 thread_block((thread_continue_t)_call_thread_continue);
1c79356b
A
1056 /* NOTREACHED */
1057 }
1058
55e303ae 1059 thread_call_vars.thread_num--;
1c79356b
A
1060
1061 simple_unlock(&thread_call_lock);
1062 (void) spllo();
1063
91447636 1064 thread_terminate(self);
1c79356b
A
1065 /* NOTREACHED */
1066}
1067
1068static
1069void
1070_call_thread(void)
1071{
1c79356b
A
1072 _call_thread_continue();
1073 /* NOTREACHED */
1074}
1075
1076/*
1077 * Routine: _activate_thread [private]
1078 *
1079 * Purpose: Executed by the activate thread.
1080 *
1081 * Preconditions: None.
1082 *
1083 * Postconditions: Never terminates.
1084 */
1085
1086static
1087void
1088_activate_thread_continue(void)
1089{
91447636
A
1090 kern_return_t result;
1091 thread_t thread;
1092
1c79356b
A
1093 (void) splsched();
1094 simple_lock(&thread_call_lock);
1095
55e303ae
A
1096 while ( thread_call_vars.active_num <= 0 &&
1097 thread_call_vars.pending_num > 0 ) {
9bccf70c 1098
55e303ae
A
1099 if (++thread_call_vars.active_num > thread_call_vars.active_hiwat)
1100 thread_call_vars.active_hiwat = thread_call_vars.active_num;
1c79356b 1101
55e303ae
A
1102 if (++thread_call_vars.thread_num > thread_call_vars.thread_hiwat)
1103 thread_call_vars.thread_hiwat = thread_call_vars.thread_num;
1c79356b
A
1104
1105 simple_unlock(&thread_call_lock);
1106 (void) spllo();
1107
91447636
A
1108 result = kernel_thread_start_priority((thread_continue_t)_call_thread, NULL, MAXPRI_KERNEL - 1, &thread);
1109 if (result != KERN_SUCCESS)
1110 panic("activate_thread");
1111
1112 thread_deallocate(thread);
55e303ae 1113
9bccf70c
A
1114 (void) splsched();
1115 simple_lock(&thread_call_lock);
1c79356b 1116 }
1c79356b
A
1117
1118 assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE);
1119 activate_thread_awake = FALSE;
1120
1121 simple_unlock(&thread_call_lock);
1122 (void) spllo();
1123
91447636 1124 thread_block((thread_continue_t)_activate_thread_continue);
1c79356b
A
1125 /* NOTREACHED */
1126}
1127
1128static
1129void
1130_activate_thread(void)
1131{
55e303ae 1132 thread_t self = current_thread();
1c79356b 1133
91447636 1134 self->options |= TH_OPT_VMPRIV;
1c79356b 1135 vm_page_free_reserve(2); /* XXX */
1c79356b
A
1136
1137 _activate_thread_continue();
1138 /* NOTREACHED */
1139}
1140
1141static
1142void
1143_delayed_call_timer(
91447636
A
1144 __unused timer_call_param_t p0,
1145 __unused timer_call_param_t p1
1c79356b
A
1146)
1147{
0b4e3aa0 1148 uint64_t timestamp;
1c79356b
A
1149 thread_call_t call;
1150 boolean_t new_pending = FALSE;
55e303ae 1151 spl_t s;
1c79356b
A
1152
1153 s = splsched();
1154 simple_lock(&thread_call_lock);
1155
1156 clock_get_uptime(&timestamp);
1157
55e303ae 1158 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b 1159
55e303ae 1160 while (!queue_end(&thread_call_delayed_queue, qe(call))) {
0b4e3aa0 1161 if (call->deadline <= timestamp) {
1c79356b
A
1162 _delayed_call_dequeue(call);
1163
1164 _pending_call_enqueue(call);
1165 new_pending = TRUE;
1166 }
1167 else
1168 break;
1169
55e303ae 1170 call = TC(queue_first(&thread_call_delayed_queue));
1c79356b
A
1171 }
1172
55e303ae 1173 if (!queue_end(&thread_call_delayed_queue, qe(call)))
1c79356b
A
1174 _set_delayed_call_timer(call);
1175
55e303ae 1176 if (new_pending && thread_call_vars.active_num <= 0)
1c79356b
A
1177 _call_thread_wake();
1178
1179 simple_unlock(&thread_call_lock);
1180 splx(s);
1181}