/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>
decl_simple_lock_data(static,thread_call_lock)

static zone_t			thread_call_zone;

struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

static struct thread_call_group		thread_call_group0;

static boolean_t		thread_call_daemon_awake;

#define thread_call_thread_min	4

#define internal_call_count	768

static thread_call_data_t	internal_call_storage[internal_call_count];
static queue_head_t		thread_call_internal_queue;

static __inline__ thread_call_t	_internal_call_allocate(void);

static __inline__ void		_internal_call_release(
					thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ boolean_t	_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline);

static __inline__ boolean_t	_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t		_remove_from_pending_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static boolean_t		_remove_from_delayed_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static void			thread_call_daemon(
					thread_call_group_t	group);

static void			thread_call_thread(
					thread_call_group_t	group);

static void			thread_call_delayed_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

#define qe(x)	((queue_entry_t)(x))
#define TC(x)	((thread_call_t)(x))

/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	kern_return_t		result;
	thread_t		thread;
	int			i;
	spl_t			s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");

	simple_lock_init(&thread_call_lock, 0);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	     call < &internal_call_storage[internal_call_count];
	     call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}

/*
 * thread_call_setup:
 *
 * Initialize a callout entry provided
 * by the caller.
 */
void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	call_entry_setup(call, func, param0);
}
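
/*
 * Usage sketch (illustrative; not part of the original file): a client
 * that owns its callout storage initializes it once with
 * thread_call_setup() and arms it later.  The names example_call and
 * example_func below are hypothetical.
 *
 *	static thread_call_data_t	example_call;
 *
 *	static void
 *	example_func(thread_call_param_t param0, thread_call_param_t param1)
 *	{
 *		(deferred work runs here, on a callout thread)
 *	}
 *
 *	thread_call_setup(&example_call, example_func, NULL);
 *	thread_call_enter(&example_call);
 */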

/*
 * _internal_call_allocate:
 *
 * Allocate an internal callout entry.
 *
 * Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t	call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}

/*
 * _internal_call_release:
 *
 * Release an internal callout entry which
 * is no longer pending (or delayed).
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (call >= internal_call_storage &&
	    call < &internal_call_storage[internal_call_count])
		enqueue_head(&thread_call_internal_queue, qe(call));
}

/*
 * _pending_call_enqueue:
 *
 * Place an entry at the end of the
 * pending queue, to be executed soon.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t		old_queue;

	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

	group->pending_count++;

	return (old_queue != NULL);
}

/*
 * _delayed_call_enqueue:
 *
 * Place an entry on the delayed queue,
 * after existing entries with an earlier
 * (or identical) deadline.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_t		old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 * _call_dequeue:
 *
 * Remove an entry from a queue.
 *
 * Returns TRUE if the entry was on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t		old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 * _set_delayed_call_timer:
 *
 * Reset the timer so that it
 * next expires when the entry is due.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->deadline);
}

/*
 * _remove_from_pending_queue:
 *
 * Remove the first (or all) matching
 * entries from the pending queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func &&
		    call->param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 * _remove_from_delayed_queue:
 *
 * Remove the first (or all) matching
 * entries from the delayed queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->func == func &&
		    call->param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

#ifndef	__LP64__

/*
 * thread_call_func:
 *
 * Enqueue a function callout.
 *
 * Guarantees { function, argument }
 * uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func &&
		    call->param0 == param) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}

#endif	/* __LP64__ */

/*
 * thread_call_func_delayed:
 *
 * Enqueue a function callout to
 * occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func = func;
	call->param0 = param;
	call->param1 = 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);
}
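
/*
 * Usage sketch (illustrative; not part of the original file): deadlines
 * are absolute times on the mach_absolute_time() timebase, typically
 * derived with clock_interval_to_deadline().  example_func is the
 * hypothetical callout from the sketch above.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *	thread_call_func_delayed(example_func, NULL, deadline);
 */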

/*
 * thread_call_func_cancel:
 *
 * Dequeue a function callout.
 *
 * Removes one (or all) { function, argument }
 * instance(s) from either (or both)
 * the pending and the delayed queue,
 * in that order.
 *
 * Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all)
{
	boolean_t	result;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
			 _remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
			 _remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
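
/*
 * Continuing the sketch (illustrative; not part of the original file):
 * a queued { function, argument } pair can be cancelled before it
 * fires; passing TRUE removes every matching instance from both the
 * pending and delayed queues.
 *
 *	if (thread_call_func_cancel(example_func, NULL, TRUE))
 *		(at least one queued instance was removed before running)
 */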

/*
 * thread_call_allocate:
 *
 * Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	thread_call_t	call = zalloc(thread_call_zone);

	call_entry_setup(call, func, param0);

	return (call);
}

/*
 * thread_call_free:
 *
 * Free a callout entry.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t	s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != NULL) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}
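
/*
 * Lifecycle sketch (illustrative; not part of the original file): a
 * heap-allocated callout is created once and then entered, cancelled,
 * and finally freed; thread_call_free() refuses (returns FALSE) while
 * the entry is still on a queue.  example_func and example_context are
 * hypothetical.
 *
 *	thread_call_t	tc;
 *
 *	tc = thread_call_allocate(example_func, NULL);
 *	thread_call_enter1(tc, (thread_call_param_t)example_context);
 *	...
 *	thread_call_cancel(tc);
 *	if (!thread_call_free(tc))
 *		(entry was still queued; cancel it before freeing)
 */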

/*
 * thread_call_enter:
 *
 * Enqueue a callout entry to occur "soon".
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 * thread_call_enter1:
 *
 * As above, but also set the second
 * parameter for this invocation.
 */
boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 * thread_call_enter_delayed:
 *
 * Enqueue a callout entry to occur
 * at the stated time.
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

/*
 * thread_call_enter1_delayed:
 *
 * As above, but also set the second
 * parameter for this invocation.
 */
boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
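
/*
 * Periodic pattern sketch (illustrative; not part of the original file):
 * a callout that re-arms itself from within its own function acts as a
 * simple periodic timer.  example_call and example_func are the
 * hypothetical names from the earlier sketch.
 *
 *	static void
 *	example_func(thread_call_param_t param0, thread_call_param_t param1)
 *	{
 *		uint64_t	deadline;
 *
 *		(do the periodic work)
 *
 *		clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *		thread_call_enter_delayed(&example_call, deadline);
 *	}
 */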

/*
 * thread_call_cancel:
 *
 * Dequeue a callout entry.
 *
 * Returns TRUE if the call was
 * on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _call_dequeue(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

#ifndef	__LP64__

/*
 * thread_call_is_delayed:
 *
 * Returns TRUE if the call is
 * currently on a delayed queue.
 *
 * Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

#endif	/* __LP64__ */

/*
 * thread_call_wake:
 *
 * Wake a call thread to service
 * pending call entries.  May wake
 * the daemon thread in order to
 * create additional call threads.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	if (group->idle_count > 0 &&
	    wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
		group->idle_count--;
		group->active_count++;
	}
	else
	if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		thread_wakeup_one(&thread_call_daemon_awake);
	}
}

/*
 * sched_call_thread:
 *
 * Call out invoked by the scheduler.
 */
static void
sched_call_thread(
	int			type,
	__unused thread_t	thread)
{
	thread_call_group_t	group = &thread_call_group0;

	simple_lock(&thread_call_lock);

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	simple_unlock(&thread_call_lock);
}

/*
 * thread_call_thread:
 *
 * Service entries on the pending queue,
 * then either wait for more work or
 * terminate if enough idle threads exist.
 */
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
			func, param0, param1, 0, 0);

		(*func)(param0, param1);

		(void)thread_funnel_set(self->funnel_lock, FALSE);	/* XXX */

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}

/*
 * thread_call_daemon:
 *
 * Kernel thread which creates additional
 * call threads whenever work is pending
 * but no call thread is active.
 */
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	simple_lock(&thread_call_lock);

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_call_daemon_awake = FALSE;
	assert_wait(&thread_call_daemon_awake, THREAD_UNINT);

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}

static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}

/*
 * thread_call_delayed_timer:
 *
 * Timer callout; move entries whose
 * deadlines have passed from the delayed
 * queue to the pending queue, then re-arm
 * the timer for the next deadline.
 */
static void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	boolean_t		new_pending = FALSE;
	uint64_t		timestamp;

	simple_lock(&thread_call_lock);

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	simple_unlock(&thread_call_lock);
}