/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>

decl_simple_lock_data(static,thread_call_lock)

static zone_t			thread_call_zone;

struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	struct wait_queue	daemon_wqueue;
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

static struct thread_call_group		thread_call_group0;

static boolean_t			thread_call_daemon_awake;

#define thread_call_thread_min		4

#define internal_call_count		768

static thread_call_data_t	internal_call_storage[internal_call_count];
static queue_head_t		thread_call_internal_queue;

static __inline__ thread_call_t	_internal_call_allocate(void);

static __inline__ void		_internal_call_release(
					thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group),
				_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline),
				_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t		_remove_from_pending_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all),
				_remove_from_delayed_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static void			thread_call_daemon(
					thread_call_group_t	group),
				thread_call_thread(
					thread_call_group_t	group);

static void			thread_call_delayed_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))

/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	kern_return_t		result;
	thread_t		thread;
	int			i;
	spl_t			s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");

	simple_lock_init(&thread_call_lock, 0);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}

void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	call_entry_setup(call, func, param0);
}
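
/*
 *	Illustrative usage sketch (editor's addition, not part of the
 *	original source): a caller typically embeds a thread_call_data_t
 *	in its own state, initializes it once with thread_call_setup(),
 *	and later schedules it with thread_call_enter().  The names
 *	my_driver_work and my_softc below are hypothetical.
 *
 *		static thread_call_data_t	my_call;
 *
 *		thread_call_setup(&my_call, my_driver_work, my_softc);
 *		...
 *		thread_call_enter(&my_call);	// runs "soon" on a callout thread
 */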

/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}

/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (	call >= internal_call_storage		&&
			call < &internal_call_storage[internal_call_count]	)
		enqueue_head(&thread_call_internal_queue, qe(call));
}

/*
 *	_pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t			old_queue;

	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

	group->pending_count++;

	return (old_queue != NULL);
}

/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_t			old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t			old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->deadline);
}

/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
				call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (	call->func == func		&&
				call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
				call->param0 == param		) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}

#endif	/* __LP64__ */

/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func = func;
	call->param0 = param;
	call->param1 = 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);
}
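
/*
 *	Illustrative usage sketch (editor's addition, not part of the
 *	original source): the deadline is an absolute mach time value,
 *	usually computed from a relative interval with
 *	clock_interval_to_deadline() from <kern/clock.h>.  The names
 *	my_func and my_arg are hypothetical.
 *
 *		uint64_t	deadline;
 *
 *		// fire my_func(my_arg, 0) roughly one second from now
 *		clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *		thread_call_func_delayed(my_func, my_arg, deadline);
 */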

/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 *
 *	Note that the cancel_all case uses the
 *	non-short-circuiting bitwise OR so that
 *	both queues are always purged; the single
 *	cancel case stops at the first match.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all)
{
	boolean_t		result;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
				_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
				_remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 *	thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	thread_call_t		call = zalloc(thread_call_zone);

	call_entry_setup(call, func, param0);

	return (call);
}

/*
 *	thread_call_free:
 *
 *	Free a callout entry.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != NULL) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}
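
/*
 *	Illustrative lifecycle sketch (editor's addition, not part of the
 *	original source): dynamically allocated callouts pair
 *	thread_call_allocate() with thread_call_free().  thread_call_free()
 *	returns FALSE while the call is still on a queue, so cancel first.
 *	my_func and my_arg are hypothetical.
 *
 *		thread_call_t	call = thread_call_allocate(my_func, my_arg);
 *
 *		thread_call_enter(call);
 *		...
 *		(void) thread_call_cancel(call);
 *		(void) thread_call_free(call);
 */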

/*
 *	thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
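
/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): one way to build an absolute deadline from a relative
 *	delay is nanoseconds_to_absolutetime() from <kern/clock.h>,
 *	e.g. roughly 100 ms from now.  my_param1 is hypothetical.
 *
 *		uint64_t	abstime, deadline;
 *
 *		nanoseconds_to_absolutetime(100 * NSEC_PER_MSEC, &abstime);
 *		deadline = mach_absolute_time() + abstime;
 *		thread_call_enter1_delayed(call, my_param1, deadline);
 */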

/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _call_dequeue(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
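
/*
 *	Note (editor's addition): in this version thread_call_cancel()
 *	only dequeues a pending or delayed entry; it does not wait for a
 *	callout that has already begun executing.  A FALSE return means
 *	the call was not on a queue and may currently be running, so
 *	callers must synchronize with an in-flight callout themselves.
 */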

#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

#endif	/* __LP64__ */

/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
		group->idle_count--; group->active_count++;
	}
	else
	if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		wait_queue_wakeup_one(&group->daemon_wqueue, NULL, THREAD_AWAKENED);
	}
}

/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.
 */
static void
sched_call_thread(
	int			type,
	__unused thread_t	thread)
{
	thread_call_group_t	group = &thread_call_group0;

	simple_lock(&thread_call_lock);

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	simple_unlock(&thread_call_lock);
}

/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
			func, param0, param1, 0, 0);

		(*func)(param0, param1);

		(void)thread_funnel_set(self->funnel_lock, FALSE);	/* XXX */

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}

/*
 *	thread_call_daemon:
 */
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	simple_lock(&thread_call_lock);

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&group->daemon_wqueue, NULL, THREAD_UNINT, 0);

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}

static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}

static void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1
)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	boolean_t		new_pending = FALSE;
	uint64_t		timestamp;

	simple_lock(&thread_call_lock);

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	simple_unlock(&thread_call_lock);
}