/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>
#include <kern/ledger.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>
#include <kern/timer_queue.h>

#include <sys/kdebug.h>
#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif
#include <machine/machine_routines.h>

static zone_t			thread_call_zone;
static struct waitq		daemon_waitq;

struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;
	uint32_t		delayed_count;

	timer_call_data_t	delayed_timer;
	timer_call_data_t	dealloc_timer;

	struct waitq		idle_waitq;
	uint32_t		idle_count, active_count;

	integer_t		pri;
	uint32_t		target_thread_count;
	uint64_t		idle_timestamp;

	uint32_t		flags;
	sched_call_t		sched_call;
};

typedef struct thread_call_group	*thread_call_group_t;

#define TCG_PARALLEL		0x01
#define TCG_DEALLOC_ACTIVE	0x02

#define THREAD_CALL_GROUP_COUNT		4
#define THREAD_CALL_THREAD_MIN		4
#define INTERNAL_CALL_COUNT		768
#define THREAD_CALL_DEALLOC_INTERVAL_NS	(5 * 1000 * 1000)	/* 5 ms */
#define THREAD_CALL_ADD_RATIO		4
#define THREAD_CALL_MACH_FACTOR_CAP	3

static struct thread_call_group	thread_call_groups[THREAD_CALL_GROUP_COUNT];
static boolean_t		thread_call_daemon_awake;
static thread_call_data_t	internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t		thread_call_internal_queue;
int				thread_call_internal_queue_count = 0;
static uint64_t			thread_call_dealloc_interval_abs;

static __inline__ thread_call_t	_internal_call_allocate(thread_call_func_t func, thread_call_param_t param0);
static __inline__ void		_internal_call_release(thread_call_t call);
static __inline__ boolean_t	_pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static __inline__ boolean_t	_delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
static __inline__ boolean_t	_call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void		thread_call_wake(thread_call_group_t group);
static __inline__ void		_set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
static boolean_t		_remove_from_pending_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static boolean_t		_remove_from_delayed_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static void			thread_call_daemon(void *arg);
static void			thread_call_thread(thread_call_group_t group, wait_result_t wres);
extern void			thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
static void			thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void			thread_call_group_setup(thread_call_group_t group, thread_call_priority_t pri, uint32_t target_thread_count, boolean_t parallel);
static void			sched_call_thread(int type, thread_t thread);
static void			thread_call_start_deallocate_timer(thread_call_group_t group);
static void			thread_call_wait_locked(thread_call_t call);
static boolean_t		thread_call_enter_delayed_internal(thread_call_t call,
					thread_call_func_t alt_func, thread_call_param_t alt_param0,
					thread_call_param_t param1, uint64_t deadline,
					uint64_t leeway, unsigned int flags);

#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))


lck_grp_t		thread_call_queues_lck_grp;
lck_grp_t		thread_call_lck_grp;
lck_attr_t		thread_call_lck_attr;
lck_grp_attr_t		thread_call_lck_grp_attr;

#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t		thread_call_lock_data;
#else
lck_spin_t		thread_call_lock_data;
#endif


#define thread_call_lock_spin()			\
	lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock()			\
	lck_mtx_unlock_always(&thread_call_lock_data)

extern boolean_t	mach_timer_coalescing_enabled;

static inline spl_t
disable_ints_and_lock(void)
{
	spl_t s;

	s = splsched();
	thread_call_lock_spin();

	return s;
}

static inline void
enable_ints_and_unlock(spl_t s)
{
	thread_call_unlock();
	splx(s);
}


static inline boolean_t
group_isparallel(thread_call_group_t group)
{
	return ((group->flags & TCG_PARALLEL) != 0);
}

static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
	uint32_t thread_count;

	if (!group_isparallel(group)) {
		if (group->pending_count > 0 && group->active_count == 0) {
			return TRUE;
		}

		return FALSE;
	}

	if (group->pending_count > 0) {
		if (group->idle_count > 0) {
			panic("Pending work, but threads are idle?");
		}

		thread_count = group->active_count;

		/*
		 * Add a thread if either there are no threads,
		 * the group has fewer than its target number of
		 * threads, or the amount of work is large relative
		 * to the number of threads. In the last case, pay attention
		 * to the total load on the system, and back off if
		 * it's high.
		 */
		if ((thread_count == 0) ||
		    (thread_count < group->target_thread_count) ||
		    ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
		     (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
			return TRUE;
		}
	}

	return FALSE;
}

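/*
 * Worked example (editor's note, not in the original source): with
 * THREAD_CALL_ADD_RATIO == 4 and THREAD_CALL_MACH_FACTOR_CAP == 3, a
 * parallel group already running 3 threads at or above its target only
 * grows when more than 4 * 3 = 12 callouts are pending and the global
 * sched_mach_factor is still below 3; a non-parallel group adds a thread
 * only when work is pending and no thread is active at all.
 */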
static inline integer_t
thread_call_priority_to_sched_pri(thread_call_priority_t pri)
{
	switch (pri) {
	case THREAD_CALL_PRIORITY_HIGH:
		return BASEPRI_PREEMPT;
	case THREAD_CALL_PRIORITY_KERNEL:
		return BASEPRI_KERNEL;
	case THREAD_CALL_PRIORITY_USER:
		return BASEPRI_DEFAULT;
	case THREAD_CALL_PRIORITY_LOW:
		return MAXPRI_THROTTLE;
	default:
		panic("Invalid priority.");
	}

	return 0;
}

/* Lock held */
static inline thread_call_group_t
thread_call_get_group(
	thread_call_t		call)
{
	thread_call_priority_t pri = call->tc_pri;

	assert(pri == THREAD_CALL_PRIORITY_LOW ||
	    pri == THREAD_CALL_PRIORITY_USER ||
	    pri == THREAD_CALL_PRIORITY_KERNEL ||
	    pri == THREAD_CALL_PRIORITY_HIGH);

	return &thread_call_groups[pri];
}

static void
thread_call_group_setup(
	thread_call_group_t	group,
	thread_call_priority_t	pri,
	uint32_t		target_thread_count,
	boolean_t		parallel)
{
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
	timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

	waitq_init(&group->idle_waitq, SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);

	group->target_thread_count = target_thread_count;
	group->pri = thread_call_priority_to_sched_pri(pri);

	group->sched_call = sched_call_thread;
	if (parallel) {
		group->flags |= TCG_PARALLEL;
		group->sched_call = NULL;
	}
}

/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
	thread_call_group_t	group)
{
	thread_t	thread;
	kern_return_t	result;

	result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	if (group->pri < BASEPRI_PREEMPT) {
		/*
		 * New style doesn't get to run to completion in
		 * kernel if there are higher priority threads
		 * available.
		 */
		thread_set_eager_preempt(thread);
	}

	thread_deallocate(thread);
	return KERN_SUCCESS;
}

/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t	call;
	kern_return_t	result;
	thread_t	thread;
	int		i;
	spl_t		s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	waitq_init(&daemon_waitq, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	s = disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[INTERNAL_CALL_COUNT];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
		thread_call_internal_queue_count++;
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}

void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	bzero(call, sizeof(*call));
	call_entry_setup((call_entry_t)call, func, param0);
	call->tc_pri = THREAD_CALL_PRIORITY_HIGH; /* Default priority */
}

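/*
 * Usage sketch (editor's illustration, not part of the original file):
 * thread_call_setup() is for callers that embed a thread_call_data_t in
 * storage they manage themselves instead of allocating from the zone.
 * The names my_softc and my_event_fn below are hypothetical.
 *
 *	struct my_softc {
 *		thread_call_data_t	tc;
 *	};
 *
 *	static void my_event_fn(thread_call_param_t param0, thread_call_param_t param1);
 *
 *	thread_call_setup(&sc->tc, my_event_fn, (thread_call_param_t)sc);
 *	thread_call_enter(&sc->tc);
 *
 * Because THREAD_CALL_ALLOC is not set on such a call, it must not be
 * passed to thread_call_free() or thread_call_cancel_wait().
 */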
/*
 * _internal_call_allocate:
 *
 * Allocate an internal callout entry.
 *
 * Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(thread_call_func_t func, thread_call_param_t param0)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));
	thread_call_internal_queue_count--;

	thread_call_setup(call, func, param0);
	call->tc_refs = 0;
	call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */

	return (call);
}

/*
 * _internal_call_release:
 *
 * Release an internal callout entry which
 * is no longer pending (or delayed). This is
 * safe to call on a non-internal entry, in which
 * case nothing happens.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (call >= internal_call_storage &&
	    call < &internal_call_storage[INTERNAL_CALL_COUNT]) {
		assert((call->tc_flags & THREAD_CALL_ALLOC) == 0);
		enqueue_head(&thread_call_internal_queue, qe(call));
		thread_call_internal_queue_count++;
	}
}

/*
 * _pending_call_enqueue:
 *
 * Place an entry at the end of the
 * pending queue, to be executed soon.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

	if (old_queue == NULL) {
		call->tc_submit_count++;
	}

	group->pending_count++;

	thread_call_wake(group);

	return (old_queue != NULL);
}

/*
 * _delayed_call_enqueue:
 *
 * Place an entry on the delayed queue,
 * after existing entries with an earlier
 * (or identical) deadline.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;
	else if (old_queue == NULL)
		call->tc_submit_count++;

	return (old_queue != NULL);
}

/*
 * _call_dequeue:
 *
 * Remove an entry from a queue.
 *
 * Returns TRUE if the entry was on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_dequeue(CE(call));

	if (old_queue != NULL) {
		call->tc_finish_count++;
		if (old_queue == &group->pending_queue)
			group->pending_count--;
	}

	return (old_queue != NULL);
}

/*
 * _set_delayed_call_timer:
 *
 * Reset the timer so that it
 * next expires when the entry is due.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	uint64_t leeway;

	assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline)));

	leeway = call->tc_call.deadline - call->tc_soft_deadline;
	timer_call_enter_with_leeway(&group->delayed_timer, NULL,
	    call->tc_soft_deadline, leeway,
	    TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LEEWAY,
	    ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED));
}

/*
 * _remove_from_pending_queue:
 *
 * Remove the first (or all) matching
 * entries from the pending queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (call->tc_call.func == func &&
		    call->tc_call.param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 * _remove_from_delayed_queue:
 *
 * Remove the first (or all) matching
 * entries from the delayed queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_call.func == func &&
		    call->tc_call.param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 * thread_call_func_delayed:
 *
 * Enqueue a function callout to
 * occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline)
{
	(void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0);
}

/*
 * thread_call_func_delayed_with_leeway:
 *
 * Same as thread_call_func_delayed(), but with
 * leeway/flags threaded through.
 */

void
thread_call_func_delayed_with_leeway(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline,
	uint64_t		leeway,
	uint32_t		flags)
{
	(void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags);
}

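/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the func-based interface allocates its callout entry from internal
 * storage, so a caller only supplies a function, an argument, and an
 * absolute-time deadline. my_timeout_fn and the 100 ms interval are
 * hypothetical; clock_interval_to_deadline() is assumed from kern/clock.h.
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	thread_call_func_delayed(my_timeout_fn, (thread_call_param_t)arg, deadline);
 *
 *	// later, if the timeout is no longer needed:
 *	thread_call_func_cancel(my_timeout_fn, (thread_call_param_t)arg, FALSE);
 */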
/*
 * thread_call_func_cancel:
 *
 * Dequeue a function callout.
 *
 * Removes one (or all) { function, argument }
 * instance(s) from either (or both)
 * the pending and the delayed queue,
 * in that order.
 *
 * Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all)
{
	boolean_t	result;
	spl_t		s;

	s = splsched();
	thread_call_lock_spin();

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
			_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
			_remove_from_delayed_queue(func, param, cancel_all);

	thread_call_unlock();
	splx(s);

	return (result);
}

/*
 * Allocate a thread call with a given priority. Importances
 * other than THREAD_CALL_PRIORITY_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted
 * by higher-priority threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	thread_call_priority_t	pri)
{
	thread_call_t call;

	if (pri > THREAD_CALL_PRIORITY_LOW) {
		panic("Invalid pri: %d\n", pri);
	}

	call = thread_call_allocate(func, param0);
	call->tc_pri = pri;

	return call;
}

/*
 * thread_call_allocate:
 *
 * Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	thread_call_t	call = zalloc(thread_call_zone);

	thread_call_setup(call, func, param0);
	call->tc_refs = 1;
	call->tc_flags = THREAD_CALL_ALLOC;

	return (call);
}

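/*
 * Lifecycle sketch (editor's illustration, not part of the original file):
 * a typical client of the allocated interface; my_callout_fn and my_ctx
 * are hypothetical names.
 *
 *	static void
 *	my_callout_fn(thread_call_param_t param0, thread_call_param_t param1)
 *	{
 *		// param0 is fixed at allocation, param1 is supplied per submission
 *	}
 *
 *	thread_call_t tc = thread_call_allocate(my_callout_fn, (thread_call_param_t)my_ctx);
 *	thread_call_enter1(tc, (thread_call_param_t)0);	// runs "soon" on a callout thread
 *	...
 *	thread_call_cancel(tc);		// best effort; FALSE means it already ran or is running
 *	thread_call_free(tc);		// freed now, or once the last in-flight invocation finishes
 */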
/*
 * thread_call_free:
 *
 * Release a callout. If the callout is currently
 * executing, it will be freed when all invocations
 * finish.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t	s;
	int32_t	refs;

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != NULL) {
		thread_call_unlock();
		splx(s);

		return (FALSE);
	}

	refs = --call->tc_refs;
	if (refs < 0) {
		panic("Refcount negative: %d\n", refs);
	}

	thread_call_unlock();
	splx(s);

	if (refs == 0) {
		zfree(thread_call_zone, call);
	}

	return (TRUE);
}

/*
 * thread_call_enter:
 *
 * Enqueue a callout entry to occur "soon".
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);
	}

	call->tc_call.param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);
	}

	call->tc_call.param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}

/*
 * thread_call_enter_delayed:
 *
 * Enqueue a callout entry to occur
 * at the stated time.
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	assert(call);
	return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0);
}

boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	assert(call);
	return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0);
}

boolean_t
thread_call_enter_delayed_with_leeway(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline,
	uint64_t		leeway,
	unsigned int		flags)
{
	assert(call);
	return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags);
}

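/*
 * Usage sketch (editor's illustration, not part of the original file):
 * scheduling with an explicit leeway hint. The 50 ms deadline and 5 ms
 * leeway are hypothetical; nanoseconds_to_absolutetime() and
 * clock_interval_to_deadline() are assumed from kern/clock.h.
 *
 *	uint64_t deadline, leeway;
 *
 *	clock_interval_to_deadline(50, NSEC_PER_MSEC, &deadline);
 *	nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &leeway);
 *	thread_call_enter_delayed_with_leeway(call, NULL, deadline, leeway,
 *	    THREAD_CALL_DELAY_LEEWAY);
 */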

/*
 * thread_call_enter_delayed_internal:
 * enqueue a callout entry to occur at the stated time
 *
 * Returns TRUE if the call was already on a queue
 * params:
 * call     - structure encapsulating state of the callout
 * alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters
 * deadline - absolute time at which the callout should fire (Mach absolute time units)
 * leeway   - timer slack represented as a delta of the deadline
 * flags    - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing.
 *            THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing.
 */
boolean_t
thread_call_enter_delayed_internal(
	thread_call_t		call,
	thread_call_func_t	alt_func,
	thread_call_param_t	alt_param0,
	thread_call_param_t	param1,
	uint64_t		deadline,
	uint64_t		leeway,
	unsigned int		flags)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;
	uint64_t		abstime, sdeadline, slop;
	uint32_t		urgency;

	/* direct mapping between thread_call, timer_call, and timeout_urgency values */
	urgency = (flags & TIMEOUT_URGENCY_MASK);

	s = splsched();
	thread_call_lock_spin();

	if (call == NULL) {
		/* allocate a structure out of internal storage, as a convenience for BSD callers */
		call = _internal_call_allocate(alt_func, alt_param0);
	}

	group = thread_call_get_group(call);
	abstime = mach_absolute_time();

	call->tc_flags |= THREAD_CALL_DELAYED;

	call->tc_soft_deadline = sdeadline = deadline;

	boolean_t ratelimited = FALSE;
	slop = timer_call_slop(deadline, abstime, urgency, current_thread(), &ratelimited);

	if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop)
		slop = leeway;

	if (UINT64_MAX - deadline <= slop)
		deadline = UINT64_MAX;
	else
		deadline += slop;

	if (ratelimited) {
		call->tc_flags |= TIMER_CALL_RATELIMITED;
	} else {
		call->tc_flags &= ~TIMER_CALL_RATELIMITED;
	}


	call->tc_call.param1 = param1;
	call->ttd = (sdeadline > abstime) ? (sdeadline - abstime) : 0;

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

#if CONFIG_DTRACE
	DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, uint64_t, (deadline - sdeadline), uint64_t, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
	thread_call_unlock();
	splx(s);

	return (result);
}

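/*
 * Worked example (editor's note, not in the original source): if a caller
 * passes deadline D with THREAD_CALL_DELAY_LEEWAY and a 1 ms leeway while
 * timer_call_slop() computes only 0.5 ms of slop, the larger value wins:
 * tc_soft_deadline stays at D, the hard deadline stored in the entry becomes
 * D + 1 ms, and _set_delayed_call_timer() programs the group timer at the
 * soft deadline with that 1 ms of leeway.
 */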
/*
 * thread_call_cancel:
 *
 * Dequeue a callout entry.
 *
 * Returns TRUE if the call was
 * on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result, do_cancel_callout = FALSE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if ((call->tc_call.deadline != 0) &&
	    (queue_first(&group->delayed_queue) == qe(call))) {
		assert(call->tc_call.queue == &group->delayed_queue);
		do_cancel_callout = TRUE;
	}

	result = _call_dequeue(call, group);

	if (do_cancel_callout) {
		timer_call_cancel(&group->delayed_timer);
		if (!queue_empty(&group->delayed_queue)) {
			_set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
		}
	}

	thread_call_unlock();
	splx(s);
#if CONFIG_DTRACE
	DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	return (result);
}

/*
 * Cancel a thread call. If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish. Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group;

	if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
		panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
	}

	group = thread_call_get_group(call);

	(void) splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);
	if (result == FALSE) {
		thread_call_wait_locked(call);
	}

	thread_call_unlock();
	(void) spllo();

	return result;
}

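/*
 * Teardown sketch (editor's illustration, not part of the original file):
 * a typical shutdown path for a heap-allocated call stops future invocations,
 * waits out any in-flight one, then drops the reference.
 *
 *	thread_call_cancel_wait(call);	// requires THREAD_CALL_ALLOC storage
 *	thread_call_free(call);		// freed now, or when the last invocation drops its ref
 *
 * If other threads may still re-submit the call concurrently, the caller
 * must provide its own serialization before freeing.
 */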

/*
 * thread_call_wake:
 *
 * Wake a call thread to service
 * pending call entries. May wake
 * the daemon thread in order to
 * create additional call threads.
 *
 * Called with thread_call_lock held.
 *
 * For high-priority group, only does wakeup/creation if there are no threads
 * running.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	/*
	 * New behavior: use threads if you've got 'em.
	 * Traditional behavior: wake only if no threads running.
	 */
	if (group_isparallel(group) || group->active_count == 0) {
		if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
				THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) {
			group->idle_count--; group->active_count++;

			if (group->idle_count == 0) {
				timer_call_cancel(&group->dealloc_timer);
				group->flags &= ~TCG_DEALLOC_ACTIVE;
			}
		} else {
			if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
				thread_call_daemon_awake = TRUE;
				waitq_wakeup64_one(&daemon_waitq, NO_EVENT64,
					THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
			}
		}
	}
}

/*
 * sched_call_thread:
 *
 * Call out invoked by the scheduler. Used only for high-priority
 * thread call group.
 */
static void
sched_call_thread(
	int			type,
	__unused thread_t	thread)
{
	thread_call_group_t	group;

	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH]; /* XXX */

	thread_call_lock_spin();

	switch (type) {

	case SCHED_CALL_BLOCK:
		--group->active_count;
		if (group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	thread_call_unlock();
}

/*
 * Interrupts disabled, lock held; returns the same way.
 * Only called on thread calls whose storage we own. Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call, spl_t *s)
{
	boolean_t dowake = FALSE;

	call->tc_finish_count++;
	call->tc_refs--;

	if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
		dowake = TRUE;
		call->tc_flags &= ~THREAD_CALL_WAIT;

		/*
		 * Dropping lock here because the sched call for the
		 * high-pri group can take the big lock from under
		 * a thread lock.
		 */
		thread_call_unlock();
		thread_wakeup((event_t)call);
		thread_call_lock_spin();
	}

	if (call->tc_refs == 0) {
		if (dowake) {
			panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
		}

		enable_ints_and_unlock(*s);

		zfree(thread_call_zone, call);

		*s = disable_ints_and_lock();
	}

}

/*
 * thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t	group,
	wait_result_t		wres)
{
	thread_t	self = current_thread();
	boolean_t	canwait;
	spl_t		s;

	if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0)
		(void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

	s = disable_ints_and_lock();

	thread_sched_call(self, group->sched_call);

	while (group->pending_count > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_refs++;	/* Delay free until we're done */
		} else
			canwait = FALSE;

		enable_ints_and_unlock(s);

#if DEVELOPMENT || DEBUG
		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_DTRACE
		DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
#endif

		(*func)(param0, param1);

#if CONFIG_DTRACE
		DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
#endif

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		s = disable_ints_and_lock();

		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call, &s);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (self->callout_woken_from_icontext && !self->callout_woke_thread) {
		ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1);
		if (self->callout_woken_from_platform_idle)
			ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1);
	}

	self->callout_woken_from_icontext = FALSE;
	self->callout_woken_from_platform_idle = FALSE;
	self->callout_woke_thread = FALSE;

	if (group_isparallel(group)) {
		/*
		 * For new style of thread group, thread always blocks.
		 * If we have more than the target number of threads,
		 * and this is the first to block, and it isn't active
		 * already, set a timer for deallocating a thread if we
		 * continue to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
		    ((group->active_count + group->idle_count) > group->target_thread_count)) {
			group->flags |= TCG_DEALLOC_ACTIVE;
			thread_call_start_deallocate_timer(group);
		}

		/* Wait for more work (or termination) */
		wres = waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_INTERRUPTIBLE, 0);
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}

		enable_ints_and_unlock(s);

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock(s);

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock(s);

	thread_terminate(self);
	/* NOTREACHED */
}

/*
 * thread_call_daemon: walk list of groups, allocating
 * threads if appropriate (as determined by
 * thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
	int			i;
	kern_return_t		kr;
	thread_call_group_t	group;
	spl_t			s;

	s = disable_ints_and_lock();

	/* Starting at zero happens to be high-priority first. */
	for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
		group = &thread_call_groups[i];
		while (thread_call_group_should_add_thread(group)) {
			group->active_count++;

			enable_ints_and_unlock(s);

			kr = thread_call_thread_create(group);
			if (kr != KERN_SUCCESS) {
				/*
				 * On failure, just pause for a moment and give up.
				 * We can try again later.
				 */
				delay(10000); /* 10 ms */
				s = disable_ints_and_lock();
				goto out;
			}

			s = disable_ints_and_lock();
		}
	}

out:
	thread_call_daemon_awake = FALSE;
	waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0);

	enable_ints_and_unlock(s);

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
	/* NOTREACHED */
}

static void
thread_call_daemon(
	__unused void	*arg)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(NULL);
	/* NOTREACHED */
}

/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(
	thread_call_group_t	group)
{
	uint64_t	deadline;
	boolean_t	onqueue;

	assert(group->idle_count > 0);

	group->flags |= TCG_DEALLOC_ACTIVE;
	deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
	onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);

	if (onqueue) {
		panic("Deallocate timer already active?");
	}
}

void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1
)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	uint64_t		timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_soft_deadline <= timestamp) {
			if ((call->tc_flags & THREAD_CALL_RATELIMITED) &&
			    (CE(call)->deadline > timestamp) &&
			    (ml_timer_forced_evaluation() == FALSE)) {
				break;
			}
			_pending_call_enqueue(call, group);
		} /* TODO, identify differentially coalesced timers */
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
}

static void
thread_call_delayed_timer_rescan(timer_call_param_t p0, __unused timer_call_param_t p1)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	uint64_t		timestamp;
	boolean_t		istate;

	istate = ml_set_interrupts_enabled(FALSE);
	thread_call_lock_spin();

	assert(ml_timer_forced_evaluation() == TRUE);
	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_soft_deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			call = TC(queue_first(&group->delayed_queue));
		}
		else {
			uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
			assert(call->tc_call.deadline >= call->tc_soft_deadline);
			/*
			 * On a latency quality-of-service level change,
			 * re-sort potentially rate-limited callout. The platform
			 * layer determines which timers require this.
			 */
			if (timer_resort_threshold(skew)) {
				_call_dequeue(call, group);
				_delayed_call_enqueue(call, group, call->tc_soft_deadline);
			}
			call = TC(queue_next(qe(call)));
		}
	}

	if (!queue_empty(&group->delayed_queue))
		_set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
	thread_call_unlock();
	ml_set_interrupts_enabled(istate);
}

void
thread_call_delayed_timer_rescan_all(void) {
	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_LOW], NULL);
	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_USER], NULL);
	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], NULL);
	thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], NULL);
}

/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1)
{
	thread_call_group_t	group = (thread_call_group_t)p0;
	uint64_t		now;
	kern_return_t		res;
	boolean_t		terminated = FALSE;

	thread_call_lock_spin();

	now = mach_absolute_time();
	if (group->idle_count > 0) {
		if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
			terminated = TRUE;
			group->idle_count--;
			res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
					THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES);
			if (res != KERN_SUCCESS) {
				panic("Unable to wake up idle thread for termination?");
			}
		}
	}

	/*
	 * If we still have an excess of threads, schedule another
	 * invocation of this function.
	 */
	if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
		/*
		 * If we killed someone just now, push out the
		 * next deadline.
		 */
		if (terminated) {
			group->idle_timestamp = now;
		}

		thread_call_start_deallocate_timer(group);
	} else {
		group->flags &= ~TCG_DEALLOC_ACTIVE;
	}

	thread_call_unlock();
}

/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish. Can only be invoked on thread calls whose storage we manage.
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
	uint64_t	submit_count;
	wait_result_t	res;

	assert(call->tc_flags & THREAD_CALL_ALLOC);

	submit_count = call->tc_submit_count;

	while (call->tc_finish_count < submit_count) {
		call->tc_flags |= THREAD_CALL_WAIT;

		res = assert_wait(call, THREAD_UNINT);
		if (res != THREAD_WAITING) {
			panic("Unable to assert wait?");
		}

		thread_call_unlock();
		(void) spllo();

		res = thread_block(NULL);
		if (res != THREAD_AWAKENED) {
			panic("Awoken with %d?", res);
		}

		(void) splsched();
		thread_call_lock_spin();
	}
}

/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
	boolean_t	active;
	spl_t		s;

	s = disable_ints_and_lock();
	active = (call->tc_submit_count > call->tc_finish_count);
	enable_ints_and_unlock(s);

	return active;
}