[apple/xnu.git] / osfmk / kern / thread_call.c (commit 7ae31523c600e427db88d68e10d3083403288c97)
1 /*
2 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/thread_act.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/zalloc.h>
34 #include <kern/sched_prim.h>
35 #include <kern/clock.h>
36 #include <kern/task.h>
37 #include <kern/thread.h>
38 #include <kern/wait_queue.h>
39
40 #include <vm/vm_pageout.h>
41
42 #include <kern/thread_call.h>
43 #include <kern/call_entry.h>
44
45 #include <kern/timer_call.h>
46
47 #include <sys/kdebug.h>
48
49 decl_simple_lock_data(static,thread_call_lock)
50
51 static zone_t thread_call_zone;
52
53 struct thread_call_group {
54 queue_head_t pending_queue;
55 uint32_t pending_count;
56
57 queue_head_t delayed_queue;
58
59 timer_call_data_t delayed_timer;
60
61 struct wait_queue idle_wqueue;
62 uint32_t idle_count, active_count;
63 };
64
65 typedef struct thread_call_group *thread_call_group_t;
66
67 static struct thread_call_group thread_call_group0;
68
69 static boolean_t thread_call_daemon_awake;
70
71 #define thread_call_thread_min 4
72
73 #define internal_call_count 768
74
75 static thread_call_data_t internal_call_storage[internal_call_count];
76 static queue_head_t thread_call_internal_queue;
77
78 static __inline__ thread_call_t _internal_call_allocate(void);
79
80 static __inline__ void _internal_call_release(
81 thread_call_t call);
82
83 static __inline__ boolean_t _pending_call_enqueue(
84 thread_call_t call,
85 thread_call_group_t group),
86 _delayed_call_enqueue(
87 thread_call_t call,
88 thread_call_group_t group,
89 uint64_t deadline),
90 _call_dequeue(
91 thread_call_t call,
92 thread_call_group_t group);
93
94 static __inline__ void thread_call_wake(
95 thread_call_group_t group);
96
97 static __inline__ void _set_delayed_call_timer(
98 thread_call_t call,
99 thread_call_group_t group);
100
101 static boolean_t _remove_from_pending_queue(
102 thread_call_func_t func,
103 thread_call_param_t param0,
104 boolean_t remove_all),
105 _remove_from_delayed_queue(
106 thread_call_func_t func,
107 thread_call_param_t param0,
108 boolean_t remove_all);
109
110 static void thread_call_daemon(
111 thread_call_group_t group),
112 thread_call_thread(
113 thread_call_group_t group);
114
115 static void thread_call_delayed_timer(
116 timer_call_param_t p0,
117 timer_call_param_t p1);
118
119 #define qe(x) ((queue_entry_t)(x))
120 #define TC(x) ((thread_call_t)(x))
121
122 /*
123 * thread_call_initialize:
124 *
125 * Initialize this module, called
126 * early during system initialization.
127 */
128 void
129 thread_call_initialize(void)
130 {
131 thread_call_t call;
132 thread_call_group_t group = &thread_call_group0;
133 kern_return_t result;
134 thread_t thread;
135 int i;
136 spl_t s;
137
138 i = sizeof (thread_call_data_t);
139 thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
140
141 simple_lock_init(&thread_call_lock, 0);
142
143 s = splsched();
144 simple_lock(&thread_call_lock);
145
146 queue_init(&group->pending_queue);
147 queue_init(&group->delayed_queue);
148
149 timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
150
151 wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
152
153 queue_init(&thread_call_internal_queue);
154 for (
155 call = internal_call_storage;
156 call < &internal_call_storage[internal_call_count];
157 call++) {
158
159 enqueue_tail(&thread_call_internal_queue, qe(call));
160 }
161
162 thread_call_daemon_awake = TRUE;
163
164 simple_unlock(&thread_call_lock);
165 splx(s);
166
167 result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
168 if (result != KERN_SUCCESS)
169 panic("thread_call_initialize");
170
171 thread_deallocate(thread);
172 }
173
174 void
175 thread_call_setup(
176 thread_call_t call,
177 thread_call_func_t func,
178 thread_call_param_t param0)
179 {
180 call_entry_setup(call, func, param0);
181 }
182
183 /*
184 * _internal_call_allocate:
185 *
186 * Allocate an internal callout entry.
187 *
188 * Called with thread_call_lock held.
189 */
190 static __inline__ thread_call_t
191 _internal_call_allocate(void)
192 {
193 thread_call_t call;
194
195 if (queue_empty(&thread_call_internal_queue))
196 panic("_internal_call_allocate");
197
198 call = TC(dequeue_head(&thread_call_internal_queue));
199
200 return (call);
201 }
202
203 /*
204 * _internal_call_release:
205 *
206 * Release an internal callout entry which
207 * is no longer pending (or delayed).
208 *
209 * Called with thread_call_lock held.
210 */
211 static __inline__ void
212 _internal_call_release(
213 thread_call_t call)
214 {
215 if ( call >= internal_call_storage &&
216 call < &internal_call_storage[internal_call_count] )
217 enqueue_head(&thread_call_internal_queue, qe(call));
218 }
219
220 /*
221 * _pending_call_enqueue:
222 *
223 * Place an entry at the end of the
224 * pending queue, to be executed soon.
225 *
226 * Returns TRUE if the entry was already
227 * on a queue.
228 *
229 * Called with thread_call_lock held.
230 */
231 static __inline__ boolean_t
232 _pending_call_enqueue(
233 thread_call_t call,
234 thread_call_group_t group)
235 {
236 queue_t old_queue;
237
238 old_queue = call_entry_enqueue_tail(call, &group->pending_queue);
239
240 group->pending_count++;
241
242 return (old_queue != NULL);
243 }
244
245 /*
246 * _delayed_call_enqueue:
247 *
248 * Place an entry on the delayed queue,
249 * after existing entries with an earlier
250 * (or identical) deadline.
251 *
252 * Returns TRUE if the entry was already
253 * on a queue.
254 *
255 * Called with thread_call_lock held.
256 */
257 static __inline__ boolean_t
258 _delayed_call_enqueue(
259 thread_call_t call,
260 thread_call_group_t group,
261 uint64_t deadline)
262 {
263 queue_t old_queue;
264
265 old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);
266
267 if (old_queue == &group->pending_queue)
268 group->pending_count--;
269
270 return (old_queue != NULL);
271 }
272
273 /*
274 * _call_dequeue:
275 *
276 * Remove an entry from a queue.
277 *
278 * Returns TRUE if the entry was on a queue.
279 *
280 * Called with thread_call_lock held.
281 */
282 static __inline__ boolean_t
283 _call_dequeue(
284 thread_call_t call,
285 thread_call_group_t group)
286 {
287 queue_t old_queue;
288
289 old_queue = call_entry_dequeue(call);
290
291 if (old_queue == &group->pending_queue)
292 group->pending_count--;
293
294 return (old_queue != NULL);
295 }
296
297 /*
298 * _set_delayed_call_timer:
299 *
300 * Reset the timer so that it
301 * next expires when the entry is due.
302 *
303 * Called with thread_call_lock held.
304 */
305 static __inline__ void
306 _set_delayed_call_timer(
307 thread_call_t call,
308 thread_call_group_t group)
309 {
310 timer_call_enter(&group->delayed_timer, call->deadline);
311 }
312
313 /*
314 * _remove_from_pending_queue:
315 *
316 * Remove the first (or all) matching
317 * entries from the pending queue.
318 *
319 * Returns TRUE if any matching entries
320 * were found.
321 *
322 * Called with thread_call_lock held.
323 */
324 static boolean_t
325 _remove_from_pending_queue(
326 thread_call_func_t func,
327 thread_call_param_t param0,
328 boolean_t remove_all)
329 {
330 boolean_t call_removed = FALSE;
331 thread_call_t call;
332 thread_call_group_t group = &thread_call_group0;
333
334 call = TC(queue_first(&group->pending_queue));
335
336 while (!queue_end(&group->pending_queue, qe(call))) {
337 if ( call->func == func &&
338 call->param0 == param0 ) {
339 thread_call_t next = TC(queue_next(qe(call)));
340
341 _call_dequeue(call, group);
342
343 _internal_call_release(call);
344
345 call_removed = TRUE;
346 if (!remove_all)
347 break;
348
349 call = next;
350 }
351 else
352 call = TC(queue_next(qe(call)));
353 }
354
355 return (call_removed);
356 }
357
358 /*
359 * _remove_from_delayed_queue:
360 *
361 * Remove the first (or all) matching
362 * entries from the delayed queue.
363 *
364 * Returns TRUE if any matching entries
365 * were found.
366 *
367 * Called with thread_call_lock held.
368 */
369 static boolean_t
370 _remove_from_delayed_queue(
371 thread_call_func_t func,
372 thread_call_param_t param0,
373 boolean_t remove_all)
374 {
375 boolean_t call_removed = FALSE;
376 thread_call_t call;
377 thread_call_group_t group = &thread_call_group0;
378
379 call = TC(queue_first(&group->delayed_queue));
380
381 while (!queue_end(&group->delayed_queue, qe(call))) {
382 if ( call->func == func &&
383 call->param0 == param0 ) {
384 thread_call_t next = TC(queue_next(qe(call)));
385
386 _call_dequeue(call, group);
387
388 _internal_call_release(call);
389
390 call_removed = TRUE;
391 if (!remove_all)
392 break;
393
394 call = next;
395 }
396 else
397 call = TC(queue_next(qe(call)));
398 }
399
400 return (call_removed);
401 }
402
403 /*
404 * thread_call_func:
405 *
406 * Enqueue a function callout.
407 *
408 * Guarantees { function, argument }
409 * uniqueness if unique_call is TRUE.
410 */
411 void
412 thread_call_func(
413 thread_call_func_t func,
414 thread_call_param_t param,
415 boolean_t unique_call)
416 {
417 thread_call_t call;
418 thread_call_group_t group = &thread_call_group0;
419 spl_t s;
420
421 s = splsched();
422 simple_lock(&thread_call_lock);
423
424 call = TC(queue_first(&group->pending_queue));
425
426 while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
427 if ( call->func == func &&
428 call->param0 == param ) {
429 break;
430 }
431
432 call = TC(queue_next(qe(call)));
433 }
434
435 if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
436 call = _internal_call_allocate();
437 call->func = func;
438 call->param0 = param;
439 call->param1 = NULL;
440
441 _pending_call_enqueue(call, group);
442
443 if (group->active_count == 0)
444 thread_call_wake(group);
445 }
446
447 simple_unlock(&thread_call_lock);
448 splx(s);
449 }
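/*
 * Illustrative usage (editorial sketch, not part of this file): queue a
 * one-shot callout with the function-based interface. The callback and
 * state names are hypothetical; a thread_call_func_t receives two
 * thread_call_param_t arguments and runs on a callout thread, not in
 * interrupt context. Passing TRUE as unique_call suppresses a duplicate
 * { func, param } entry on the pending queue.
 *
 *	static void
 *	example_callout(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		... do deferred work on p0 ...
 *	}
 *
 *	thread_call_func(example_callout, (thread_call_param_t)my_state, TRUE);
 */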
450
451 /*
452 * thread_call_func_delayed:
453 *
454 * Enqueue a function callout to
455 * occur at the stated time.
456 */
457 void
458 thread_call_func_delayed(
459 thread_call_func_t func,
460 thread_call_param_t param,
461 uint64_t deadline)
462 {
463 thread_call_t call;
464 thread_call_group_t group = &thread_call_group0;
465 spl_t s;
466
467 s = splsched();
468 simple_lock(&thread_call_lock);
469
470 call = _internal_call_allocate();
471 call->func = func;
472 call->param0 = param;
473 call->param1 = 0;
474
475 _delayed_call_enqueue(call, group, deadline);
476
477 if (queue_first(&group->delayed_queue) == qe(call))
478 _set_delayed_call_timer(call, group);
479
480 simple_unlock(&thread_call_lock);
481 splx(s);
482 }
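/*
 * Illustrative usage (editorial sketch): schedule the hypothetical callback
 * above to run roughly five seconds from now. clock_interval_to_deadline()
 * converts an interval into the absolute-time deadline this interface takes.
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(5, NSEC_PER_SEC, &deadline);
 *	thread_call_func_delayed(example_callout,
 *				 (thread_call_param_t)my_state, deadline);
 */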
483
484 /*
485 * thread_call_func_cancel:
486 *
487 * Dequeue a function callout.
488 *
489 * Removes one (or all) { function, argument }
490 * instance(s) from either (or both)
491 * the pending and the delayed queue,
492 * in that order.
493 *
494 * Returns TRUE if any calls were cancelled.
495 */
496 boolean_t
497 thread_call_func_cancel(
498 thread_call_func_t func,
499 thread_call_param_t param,
500 boolean_t cancel_all)
501 {
502 boolean_t result;
503 spl_t s;
504
505 s = splsched();
506 simple_lock(&thread_call_lock);
507
508 if (cancel_all)
509 result = _remove_from_pending_queue(func, param, cancel_all) |
510 _remove_from_delayed_queue(func, param, cancel_all);
511 else
512 result = _remove_from_pending_queue(func, param, cancel_all) ||
513 _remove_from_delayed_queue(func, param, cancel_all);
514
515 simple_unlock(&thread_call_lock);
516 splx(s);
517
518 return (result);
519 }
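/*
 * Illustrative usage (editorial sketch): remove the hypothetical callout
 * queued above. With cancel_all TRUE every matching { func, param } entry
 * is removed from both the pending and the delayed queue; with FALSE only
 * the first match found is removed.
 *
 *	boolean_t removed;
 *
 *	removed = thread_call_func_cancel(example_callout,
 *					  (thread_call_param_t)my_state, TRUE);
 */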
520
521 /*
522 * thread_call_allocate:
523 *
524 * Allocate a callout entry.
525 */
526 thread_call_t
527 thread_call_allocate(
528 thread_call_func_t func,
529 thread_call_param_t param0)
530 {
531 thread_call_t call = zalloc(thread_call_zone);
532
533 call_entry_setup(call, func, param0);
534
535 return (call);
536 }
537
538 /*
539 * thread_call_free:
540 *
541 * Free a callout entry.
542 */
543 boolean_t
544 thread_call_free(
545 thread_call_t call)
546 {
547 spl_t s;
548
549 s = splsched();
550 simple_lock(&thread_call_lock);
551
552 if (call->queue != NULL) {
553 simple_unlock(&thread_call_lock);
554 splx(s);
555
556 return (FALSE);
557 }
558
559 simple_unlock(&thread_call_lock);
560 splx(s);
561
562 zfree(thread_call_zone, call);
563
564 return (TRUE);
565 }
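/*
 * Illustrative lifecycle (editorial sketch, not part of this file): callers
 * that fire the same callout repeatedly usually allocate a private entry
 * once instead of drawing from the internal pool each time. The callback
 * and state names below are hypothetical. thread_call_free() refuses to
 * free an entry that is still on a queue and returns FALSE in that case,
 * so cancel before freeing.
 *
 *	thread_call_t tc = thread_call_allocate(example_callout,
 *						(thread_call_param_t)my_state);
 *	...
 *	(void) thread_call_cancel(tc);
 *	(void) thread_call_free(tc);
 */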
566
567 /*
568 * thread_call_enter:
569 *
570 * Enqueue a callout entry to occur "soon".
571 *
572 * Returns TRUE if the call was
573 * already on a queue.
574 */
575 boolean_t
576 thread_call_enter(
577 thread_call_t call)
578 {
579 boolean_t result = TRUE;
580 thread_call_group_t group = &thread_call_group0;
581 spl_t s;
582
583 s = splsched();
584 simple_lock(&thread_call_lock);
585
586 if (call->queue != &group->pending_queue) {
587 result = _pending_call_enqueue(call, group);
588
589 if (group->active_count == 0)
590 thread_call_wake(group);
591 }
592
593 call->param1 = 0;
594
595 simple_unlock(&thread_call_lock);
596 splx(s);
597
598 return (result);
599 }
600
601 boolean_t
602 thread_call_enter1(
603 thread_call_t call,
604 thread_call_param_t param1)
605 {
606 boolean_t result = TRUE;
607 thread_call_group_t group = &thread_call_group0;
608 spl_t s;
609
610 s = splsched();
611 simple_lock(&thread_call_lock);
612
613 if (call->queue != &group->pending_queue) {
614 result = _pending_call_enqueue(call, group);
615
616 if (group->active_count == 0)
617 thread_call_wake(group);
618 }
619
620 call->param1 = param1;
621
622 simple_unlock(&thread_call_lock);
623 splx(s);
624
625 return (result);
626 }
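/*
 * Illustrative usage (editorial sketch): fire a pre-allocated entry "soon"
 * on a callout thread. thread_call_enter1() additionally passes a second,
 * per-invocation parameter (param1); both variants return TRUE if the entry
 * was already on a queue. The tc and request names are hypothetical.
 *
 *	boolean_t was_queued;
 *
 *	was_queued = thread_call_enter1(tc, (thread_call_param_t)request);
 */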
627
628 /*
629 * thread_call_enter_delayed:
630 *
631 * Enqueue a callout entry to occur
632 * at the stated time.
633 *
634 * Returns TRUE if the call was
635 * already on a queue.
636 */
637 boolean_t
638 thread_call_enter_delayed(
639 thread_call_t call,
640 uint64_t deadline)
641 {
642 boolean_t result = TRUE;
643 thread_call_group_t group = &thread_call_group0;
644 spl_t s;
645
646 s = splsched();
647 simple_lock(&thread_call_lock);
648
649 result = _delayed_call_enqueue(call, group, deadline);
650
651 if (queue_first(&group->delayed_queue) == qe(call))
652 _set_delayed_call_timer(call, group);
653
654 call->param1 = 0;
655
656 simple_unlock(&thread_call_lock);
657 splx(s);
658
659 return (result);
660 }
661
662 boolean_t
663 thread_call_enter1_delayed(
664 thread_call_t call,
665 thread_call_param_t param1,
666 uint64_t deadline)
667 {
668 boolean_t result = TRUE;
669 thread_call_group_t group = &thread_call_group0;
670 spl_t s;
671
672 s = splsched();
673 simple_lock(&thread_call_lock);
674
675 result = _delayed_call_enqueue(call, group, deadline);
676
677 if (queue_first(&group->delayed_queue) == qe(call))
678 _set_delayed_call_timer(call, group);
679
680 call->param1 = param1;
681
682 simple_unlock(&thread_call_lock);
683 splx(s);
684
685 return (result);
686 }
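/*
 * Illustrative usage (editorial sketch): a self-rearming, roughly periodic
 * callout built on thread_call_enter_delayed(). The entry is set up once
 * with itself as param0 so the callback can re-enter it; the names below
 * are hypothetical and not part of this module.
 *
 *	static thread_call_data_t example_tcd;
 *
 *	static void
 *	example_periodic(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		thread_call_t	tc = (thread_call_t)p0;
 *		uint64_t	deadline;
 *
 *		clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *		thread_call_enter_delayed(tc, deadline);
 *	}
 *
 *	thread_call_setup(&example_tcd, example_periodic, &example_tcd);
 */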
687
688 /*
689 * thread_call_cancel:
690 *
691 * Dequeue a callout entry.
692 *
693 * Returns TRUE if the call was
694 * on a queue.
695 */
696 boolean_t
697 thread_call_cancel(
698 thread_call_t call)
699 {
700 boolean_t result;
701 thread_call_group_t group = &thread_call_group0;
702 spl_t s;
703
704 s = splsched();
705 simple_lock(&thread_call_lock);
706
707 result = _call_dequeue(call, group);
708
709 simple_unlock(&thread_call_lock);
710 splx(s);
711
712 return (result);
713 }
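/*
 * Illustrative usage (editorial sketch): cancellation only dequeues the
 * entry; in this version there is no way to wait for a callback that has
 * already been handed to a callout thread. A FALSE return means the entry
 * was not on a queue, possibly because it is executing right now.
 *
 *	if (!thread_call_cancel(tc)) {
 *		... the callback may still be running or already done ...
 *	}
 */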
714
715 /*
716 * thread_call_is_delayed:
717 *
718 * Returns TRUE if the call is
719 * currently on a delayed queue.
720 *
721 * Optionally returns the expiration time.
722 */
723 boolean_t
724 thread_call_is_delayed(
725 thread_call_t call,
726 uint64_t *deadline)
727 {
728 boolean_t result = FALSE;
729 thread_call_group_t group = &thread_call_group0;
730 spl_t s;
731
732 s = splsched();
733 simple_lock(&thread_call_lock);
734
735 if (call->queue == &group->delayed_queue) {
736 if (deadline != NULL)
737 *deadline = call->deadline;
738 result = TRUE;
739 }
740
741 simple_unlock(&thread_call_lock);
742 splx(s);
743
744 return (result);
745 }
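/*
 * Illustrative usage (editorial sketch): query whether an entry is still on
 * the delayed queue and, if so, how far away its deadline is (in absolute
 * time units, comparable with mach_absolute_time()).
 *
 *	uint64_t deadline;
 *
 *	if (thread_call_is_delayed(tc, &deadline)) {
 *		uint64_t now = mach_absolute_time();
 *		uint64_t remaining = (deadline > now) ? (deadline - now) : 0;
 *		...
 *	}
 */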
746
747 /*
748 * thread_call_wake:
749 *
750 * Wake a call thread to service
751 * pending call entries. May wake
752 * the daemon thread in order to
753 * create additional call threads.
754 *
755 * Called with thread_call_lock held.
756 */
757 static __inline__ void
758 thread_call_wake(
759 thread_call_group_t group)
760 {
761 if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
762 group->idle_count--; group->active_count++;
763 }
764 else
765 if (!thread_call_daemon_awake) {
766 thread_call_daemon_awake = TRUE;
767 thread_wakeup_one(&thread_call_daemon_awake);
768 }
769 }
770
771 /*
772 * sched_call_thread:
773 *
774 * Call out invoked by the scheduler.
775 */
776 static void
777 sched_call_thread(
778 int type,
779 __unused thread_t thread)
780 {
781 thread_call_group_t group = &thread_call_group0;
782
783 simple_lock(&thread_call_lock);
784
785 switch (type) {
786
787 case SCHED_CALL_BLOCK:
788 if (--group->active_count == 0 && group->pending_count > 0)
789 thread_call_wake(group);
790 break;
791
792 case SCHED_CALL_UNBLOCK:
793 group->active_count++;
794 break;
795 }
796
797 simple_unlock(&thread_call_lock);
798 }
799
800 /*
801 * thread_call_thread:
802 */
803 static void
804 thread_call_thread(
805 thread_call_group_t group)
806 {
807 thread_t self = current_thread();
808
809 (void) splsched();
810 simple_lock(&thread_call_lock);
811
812 thread_sched_call(self, sched_call_thread);
813
814 while (group->pending_count > 0) {
815 thread_call_t call;
816 thread_call_func_t func;
817 thread_call_param_t param0, param1;
818
819 call = TC(dequeue_head(&group->pending_queue));
820 group->pending_count--;
821
822 func = call->func;
823 param0 = call->param0;
824 param1 = call->param1;
825
826 call->queue = NULL;
827
828 _internal_call_release(call);
829
830 simple_unlock(&thread_call_lock);
831 (void) spllo();
832
833 KERNEL_DEBUG_CONSTANT(
834 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
835 (int)func, (int)param0, (int)param1, 0, 0);
836
837 (*func)(param0, param1);
838
839 (void)thread_funnel_set(self->funnel_lock, FALSE); /* XXX */
840
841 (void) splsched();
842 simple_lock(&thread_call_lock);
843 }
844
845 thread_sched_call(self, NULL);
846 group->active_count--;
847
848 if (group->idle_count < thread_call_thread_min) {
849 group->idle_count++;
850
851 wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);
852
853 simple_unlock(&thread_call_lock);
854 (void) spllo();
855
856 thread_block_parameter((thread_continue_t)thread_call_thread, group);
857 /* NOTREACHED */
858 }
859
860 simple_unlock(&thread_call_lock);
861 (void) spllo();
862
863 thread_terminate(self);
864 /* NOTREACHED */
865 }
866
867 /*
868 * thread_call_daemon:
869 */
870 static void
871 thread_call_daemon_continue(
872 thread_call_group_t group)
873 {
874 kern_return_t result;
875 thread_t thread;
876
877 (void) splsched();
878 simple_lock(&thread_call_lock);
879
880 while (group->active_count == 0 && group->pending_count > 0) {
881 group->active_count++;
882
883 simple_unlock(&thread_call_lock);
884 (void) spllo();
885
886 result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
887 if (result != KERN_SUCCESS)
888 panic("thread_call_daemon");
889
890 thread_deallocate(thread);
891
892 (void) splsched();
893 simple_lock(&thread_call_lock);
894 }
895
896 thread_call_daemon_awake = FALSE;
897 assert_wait(&thread_call_daemon_awake, THREAD_UNINT);
898
899 simple_unlock(&thread_call_lock);
900 (void) spllo();
901
902 thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
903 /* NOTREACHED */
904 }
905
906 static void
907 thread_call_daemon(
908 thread_call_group_t group)
909 {
910 thread_t self = current_thread();
911
912 self->options |= TH_OPT_VMPRIV;
913 vm_page_free_reserve(2); /* XXX */
914
915 thread_call_daemon_continue(group);
916 /* NOTREACHED */
917 }
918
919 static void
920 thread_call_delayed_timer(
921 timer_call_param_t p0,
922 __unused timer_call_param_t p1
923 )
924 {
925 thread_call_t call;
926 thread_call_group_t group = p0;
927 boolean_t new_pending = FALSE;
928 uint64_t timestamp;
929
930 simple_lock(&thread_call_lock);
931
932 timestamp = mach_absolute_time();
933
934 call = TC(queue_first(&group->delayed_queue));
935
936 while (!queue_end(&group->delayed_queue, qe(call))) {
937 if (call->deadline <= timestamp) {
938 _pending_call_enqueue(call, group);
939 new_pending = TRUE;
940 }
941 else
942 break;
943
944 call = TC(queue_first(&group->delayed_queue));
945 }
946
947 if (!queue_end(&group->delayed_queue, qe(call)))
948 _set_delayed_call_timer(call, group);
949
950 if (new_pending && group->active_count == 0)
951 thread_call_wake(group);
952
953 simple_unlock(&thread_call_lock);
954 }