/* apple/xnu (xnu-1504.9.26): osfmk/kern/thread_call.c */
/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>

decl_simple_lock_data(static,thread_call_lock)

static zone_t               thread_call_zone;

struct thread_call_group {
    queue_head_t            pending_queue;
    uint32_t                pending_count;

    queue_head_t            delayed_queue;

    timer_call_data_t       delayed_timer;

    struct wait_queue       idle_wqueue;
    struct wait_queue       daemon_wqueue;
    uint32_t                idle_count, active_count;
};

typedef struct thread_call_group    *thread_call_group_t;

static struct thread_call_group     thread_call_group0;

static boolean_t            thread_call_daemon_awake;

#define thread_call_thread_min      4

#define internal_call_count         768

static thread_call_data_t   internal_call_storage[internal_call_count];
static queue_head_t         thread_call_internal_queue;

static __inline__ thread_call_t     _internal_call_allocate(void);

static __inline__ void      _internal_call_release(
                                thread_call_t           call);

static __inline__ boolean_t _pending_call_enqueue(
                                thread_call_t           call,
                                thread_call_group_t     group),
                            _delayed_call_enqueue(
                                thread_call_t           call,
                                thread_call_group_t     group,
                                uint64_t                deadline),
                            _call_dequeue(
                                thread_call_t           call,
                                thread_call_group_t     group);

static __inline__ void      thread_call_wake(
                                thread_call_group_t     group);

static __inline__ void      _set_delayed_call_timer(
                                thread_call_t           call,
                                thread_call_group_t     group);

static boolean_t            _remove_from_pending_queue(
                                thread_call_func_t      func,
                                thread_call_param_t     param0,
                                boolean_t               remove_all),
                            _remove_from_delayed_queue(
                                thread_call_func_t      func,
                                thread_call_param_t     param0,
                                boolean_t               remove_all);

static void                 thread_call_daemon(
                                thread_call_group_t     group),
                            thread_call_thread(
                                thread_call_group_t     group);

static void                 thread_call_delayed_timer(
                                timer_call_param_t      p0,
                                timer_call_param_t      p1);

#define qe(x)       ((queue_entry_t)(x))
#define TC(x)       ((thread_call_t)(x))

/*
 *  thread_call_initialize:
 *
 *  Initialize this module, called
 *  early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;
    kern_return_t           result;
    thread_t                thread;
    int                     i;
    spl_t                   s;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    simple_lock_init(&thread_call_lock, 0);

    s = splsched();
    simple_lock(&thread_call_lock);

    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queue);

    timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

    wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
    wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

    queue_init(&thread_call_internal_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[internal_call_count];
            call++) {

        enqueue_tail(&thread_call_internal_queue, qe(call));
    }

    thread_call_daemon_awake = TRUE;

    simple_unlock(&thread_call_lock);
    splx(s);

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}

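/*
 *  thread_call_setup:
 *
 *  Initialize a callout entry in caller-provided
 *  storage with the given function and argument.
 */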
void
thread_call_setup(
    thread_call_t           call,
    thread_call_func_t      func,
    thread_call_param_t     param0)
{
    call_entry_setup(call, func, param0);
}
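
/*
 * Illustrative sketch (not part of the original source): how a client
 * might use thread_call_setup() with caller-provided storage and then
 * enqueue the entry with thread_call_enter().  The identifiers
 * example_call, example_func and example_init are hypothetical.
 */
#if 0   /* example only */
static thread_call_data_t   example_call;

static void
example_func(thread_call_param_t param0, thread_call_param_t param1)
{
    /* runs later on a thread-call worker thread */
    (void) param0; (void) param1;
}

static void
example_init(void)
{
    /* set up the preallocated entry once... */
    thread_call_setup(&example_call, example_func, NULL);

    /* ...then enqueue it whenever the work is needed */
    (void) thread_call_enter(&example_call);
}
#endif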

/*
 *  _internal_call_allocate:
 *
 *  Allocate an internal callout entry.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
    thread_call_t       call;

    if (queue_empty(&thread_call_internal_queue))
        panic("_internal_call_allocate");

    call = TC(dequeue_head(&thread_call_internal_queue));

    return (call);
}

/*
 *  _internal_call_release:
 *
 *  Release an internal callout entry which
 *  is no longer pending (or delayed).
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t       call)
{
    if ( call >= internal_call_storage &&
         call < &internal_call_storage[internal_call_count] )
        enqueue_head(&thread_call_internal_queue, qe(call));
}

/*
 *  _pending_call_enqueue:
 *
 *  Place an entry at the end of the
 *  pending queue, to be executed soon.
 *
 *  Returns TRUE if the entry was already
 *  on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
    thread_call_t       call,
    thread_call_group_t group)
{
    queue_t     old_queue;

    old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

    group->pending_count++;

    return (old_queue != NULL);
}

/*
 *  _delayed_call_enqueue:
 *
 *  Place an entry on the delayed queue,
 *  after existing entries with an earlier
 *  (or identical) deadline.
 *
 *  Returns TRUE if the entry was already
 *  on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
    thread_call_t       call,
    thread_call_group_t group,
    uint64_t            deadline)
{
    queue_t     old_queue;

    old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

    if (old_queue == &group->pending_queue)
        group->pending_count--;

    return (old_queue != NULL);
}

/*
 *  _call_dequeue:
 *
 *  Remove an entry from a queue.
 *
 *  Returns TRUE if the entry was on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t       call,
    thread_call_group_t group)
{
    queue_t     old_queue;

    old_queue = call_entry_dequeue(call);

    if (old_queue == &group->pending_queue)
        group->pending_count--;

    return (old_queue != NULL);
}

/*
 *  _set_delayed_call_timer:
 *
 *  Reset the timer so that it
 *  next expires when the entry is due.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
    thread_call_t       call,
    thread_call_group_t group)
{
    timer_call_enter(&group->delayed_timer, call->deadline);
}

/*
 *  _remove_from_pending_queue:
 *
 *  Remove the first (or all) matching
 *  entries from the pending queue.
 *
 *  Returns TRUE if any matching entries
 *  were found.
 *
 *  Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all)
{
    boolean_t           call_removed = FALSE;
    thread_call_t       call;
    thread_call_group_t group = &thread_call_group0;

    call = TC(queue_first(&group->pending_queue));

    while (!queue_end(&group->pending_queue, qe(call))) {
        if ( call->func == func &&
             call->param0 == param0 ) {
            thread_call_t   next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

/*
 *  _remove_from_delayed_queue:
 *
 *  Remove the first (or all) matching
 *  entries from the delayed queue.
 *
 *  Returns TRUE if any matching entries
 *  were found.
 *
 *  Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all)
{
    boolean_t           call_removed = FALSE;
    thread_call_t       call;
    thread_call_group_t group = &thread_call_group0;

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if ( call->func == func &&
             call->param0 == param0 ) {
            thread_call_t   next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

#ifndef __LP64__

/*
 *  thread_call_func:
 *
 *  Enqueue a function callout.
 *
 *  Guarantees { function, argument }
 *  uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t  func,
    thread_call_param_t param,
    boolean_t           unique_call)
{
    thread_call_t       call;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    call = TC(queue_first(&group->pending_queue));

    while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
        if ( call->func == func &&
             call->param0 == param ) {
            break;
        }

        call = TC(queue_next(qe(call)));
    }

    if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
        call = _internal_call_allocate();
        call->func = func;
        call->param0 = param;
        call->param1 = NULL;

        _pending_call_enqueue(call, group);

        if (group->active_count == 0)
            thread_call_wake(group);
    }

    simple_unlock(&thread_call_lock);
    splx(s);
}

#endif /* __LP64__ */

/*
 *  thread_call_func_delayed:
 *
 *  Enqueue a function callout to
 *  occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t  func,
    thread_call_param_t param,
    uint64_t            deadline)
{
    thread_call_t       call;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    call = _internal_call_allocate();
    call->func = func;
    call->param0 = param;
    call->param1 = 0;

    _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    simple_unlock(&thread_call_lock);
    splx(s);
}

/*
 *  thread_call_func_cancel:
 *
 *  Dequeue a function callout.
 *
 *  Removes one (or all) { function, argument }
 *  instance(s) from either (or both)
 *  the pending and the delayed queue,
 *  in that order.
 *
 *  Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t  func,
    thread_call_param_t param,
    boolean_t           cancel_all)
{
    boolean_t   result;
    spl_t       s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (cancel_all)
        result = _remove_from_pending_queue(func, param, cancel_all) |
                    _remove_from_delayed_queue(func, param, cancel_all);
    else
        result = _remove_from_pending_queue(func, param, cancel_all) ||
                    _remove_from_delayed_queue(func, param, cancel_all);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
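
/*
 * Illustrative sketch (not part of the original source): scheduling a
 * function callout roughly 100 ms in the future with
 * thread_call_func_delayed(), then cancelling the first matching
 * { function, argument } instance.  example_func is hypothetical;
 * clock_interval_to_deadline() and NSEC_PER_MSEC are assumed to come
 * from <kern/clock.h> and <mach/clock_types.h>.
 */
#if 0   /* example only */
static void
example_schedule_and_cancel(void)
{
    uint64_t            deadline;
    thread_call_param_t arg = (thread_call_param_t) 0;

    /* deadline = now + 100 ms, expressed in absolute-time units */
    clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
    thread_call_func_delayed(example_func, arg, deadline);

    /* remove one matching pending or delayed instance, if any */
    (void) thread_call_func_cancel(example_func, arg, FALSE);
}
#endif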

/*
 *  thread_call_allocate:
 *
 *  Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t  func,
    thread_call_param_t param0)
{
    thread_call_t   call = zalloc(thread_call_zone);

    call_entry_setup(call, func, param0);

    return (call);
}

/*
 *  thread_call_free:
 *
 *  Free a callout entry.
 */
boolean_t
thread_call_free(
    thread_call_t   call)
{
    spl_t   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue != NULL) {
        simple_unlock(&thread_call_lock);
        splx(s);

        return (FALSE);
    }

    simple_unlock(&thread_call_lock);
    splx(s);

    zfree(thread_call_zone, call);

    return (TRUE);
}

/*
 *  thread_call_enter:
 *
 *  Enqueue a callout entry to occur "soon".
 *
 *  Returns TRUE if the call was
 *  already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t   call)
{
    boolean_t           result = TRUE;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);

        if (group->active_count == 0)
            thread_call_wake(group);
    }

    call->param1 = 0;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

boolean_t
thread_call_enter1(
    thread_call_t       call,
    thread_call_param_t param1)
{
    boolean_t           result = TRUE;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);

        if (group->active_count == 0)
            thread_call_wake(group);
    }

    call->param1 = param1;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

/*
 *  thread_call_enter_delayed:
 *
 *  Enqueue a callout entry to occur
 *  at the stated time.
 *
 *  Returns TRUE if the call was
 *  already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t   call,
    uint64_t        deadline)
{
    boolean_t           result = TRUE;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->param1 = 0;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

boolean_t
thread_call_enter1_delayed(
    thread_call_t       call,
    thread_call_param_t param1,
    uint64_t            deadline)
{
    boolean_t           result = TRUE;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->param1 = param1;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

/*
 *  thread_call_cancel:
 *
 *  Dequeue a callout entry.
 *
 *  Returns TRUE if the call was
 *  on a queue.
 */
boolean_t
thread_call_cancel(
    thread_call_t   call)
{
    boolean_t           result;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    result = _call_dequeue(call, group);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
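
/*
 * Illustrative sketch (not part of the original source): typical
 * lifecycle of a dynamically allocated callout entry.  example_func
 * is hypothetical; clock_interval_to_deadline() and NSEC_PER_SEC are
 * assumed to come from <kern/clock.h> and <mach/clock_types.h>.
 */
#if 0   /* example only */
static void
example_lifecycle(void)
{
    thread_call_t   call;
    uint64_t        deadline;

    call = thread_call_allocate(example_func, (thread_call_param_t) 0);

    /* fire roughly five seconds from now */
    clock_interval_to_deadline(5, NSEC_PER_SEC, &deadline);
    (void) thread_call_enter_delayed(call, deadline);

    /* later: dequeue the entry if it has not yet run */
    (void) thread_call_cancel(call);

    /* freeing succeeds only when the entry is not on a queue */
    if (!thread_call_free(call)) {
        /* still enqueued; cancel again and retry */
    }
}
#endif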

#ifndef __LP64__

/*
 *  thread_call_is_delayed:
 *
 *  Returns TRUE if the call is
 *  currently on a delayed queue.
 *
 *  Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
    thread_call_t   call,
    uint64_t        *deadline)
{
    boolean_t           result = FALSE;
    thread_call_group_t group = &thread_call_group0;
    spl_t               s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue == &group->delayed_queue) {
        if (deadline != NULL)
            *deadline = call->deadline;
        result = TRUE;
    }

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

#endif /* __LP64__ */

/*
 *  thread_call_wake:
 *
 *  Wake a call thread to service
 *  pending call entries.  May wake
 *  the daemon thread in order to
 *  create additional call threads.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
    thread_call_group_t group)
{
    if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
        group->idle_count--; group->active_count++;
    }
    else
    if (!thread_call_daemon_awake) {
        thread_call_daemon_awake = TRUE;
        wait_queue_wakeup_one(&group->daemon_wqueue, NULL, THREAD_AWAKENED);
    }
}

/*
 *  sched_call_thread:
 *
 *  Call out invoked by the scheduler.
 */
static void
sched_call_thread(
    int                 type,
    __unused thread_t   thread)
{
    thread_call_group_t group = &thread_call_group0;

    simple_lock(&thread_call_lock);

    switch (type) {

    case SCHED_CALL_BLOCK:
        if (--group->active_count == 0 && group->pending_count > 0)
            thread_call_wake(group);
        break;

    case SCHED_CALL_UNBLOCK:
        group->active_count++;
        break;
    }

    simple_unlock(&thread_call_lock);
}

/*
 *  thread_call_thread:
 */
static void
thread_call_thread(
    thread_call_group_t group)
{
    thread_t    self = current_thread();

    (void) splsched();
    simple_lock(&thread_call_lock);

    thread_sched_call(self, sched_call_thread);

    while (group->pending_count > 0) {
        thread_call_t           call;
        thread_call_func_t      func;
        thread_call_param_t     param0, param1;

        call = TC(dequeue_head(&group->pending_queue));
        group->pending_count--;

        func = call->func;
        param0 = call->param0;
        param1 = call->param1;

        call->queue = NULL;

        _internal_call_release(call);

        simple_unlock(&thread_call_lock);
        (void) spllo();

        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
                func, param0, param1, 0, 0);

        (*func)(param0, param1);

        (void)thread_funnel_set(self->funnel_lock, FALSE);     /* XXX */

        (void) splsched();
        simple_lock(&thread_call_lock);
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    if (group->idle_count < thread_call_thread_min) {
        group->idle_count++;

        wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

        simple_unlock(&thread_call_lock);
        (void) spllo();

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
        /* NOTREACHED */
    }

    simple_unlock(&thread_call_lock);
    (void) spllo();

    thread_terminate(self);
    /* NOTREACHED */
}

/*
 *  thread_call_daemon:
 */
static void
thread_call_daemon_continue(
    thread_call_group_t group)
{
    kern_return_t   result;
    thread_t        thread;

    (void) splsched();
    simple_lock(&thread_call_lock);

    while (group->active_count == 0 && group->pending_count > 0) {
        group->active_count++;

        simple_unlock(&thread_call_lock);
        (void) spllo();

        result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
        if (result != KERN_SUCCESS)
            panic("thread_call_daemon");

        thread_deallocate(thread);

        (void) splsched();
        simple_lock(&thread_call_lock);
    }

    thread_call_daemon_awake = FALSE;
    wait_queue_assert_wait(&group->daemon_wqueue, NULL, THREAD_UNINT, 0);

    simple_unlock(&thread_call_lock);
    (void) spllo();

    thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
    /* NOTREACHED */
}

static void
thread_call_daemon(
    thread_call_group_t group)
{
    thread_t    self = current_thread();

    self->options |= TH_OPT_VMPRIV;
    vm_page_free_reserve(2);    /* XXX */

    thread_call_daemon_continue(group);
    /* NOTREACHED */
}

static void
thread_call_delayed_timer(
    timer_call_param_t              p0,
    __unused timer_call_param_t     p1
)
{
    thread_call_t       call;
    thread_call_group_t group = p0;
    boolean_t           new_pending = FALSE;
    uint64_t            timestamp;

    simple_lock(&thread_call_lock);

    timestamp = mach_absolute_time();

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->deadline <= timestamp) {
            _pending_call_enqueue(call, group);
            new_pending = TRUE;
        }
        else
            break;

        call = TC(queue_first(&group->delayed_queue));
    }

    if (!queue_end(&group->delayed_queue, qe(call)))
        _set_delayed_call_timer(call, group);

    if (new_pending && group->active_count == 0)
        thread_call_wake(group);

    simple_unlock(&thread_call_lock);
}