/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>
#include <kern/timer_queue.h>

#include <sys/kdebug.h>

#include <machine/machine_routines.h>
static zone_t           thread_call_zone;
static struct waitq     daemon_waitq;

typedef enum {
    TCF_ABSOLUTE    = 0,
    TCF_CONTINUOUS  = 1,
    TCF_COUNT       = 2,
} thread_call_flavor_t;

typedef enum {
    TCG_NONE            = 0x0,
    TCG_PARALLEL        = 0x1,
    TCG_DEALLOC_ACTIVE  = 0x2,
} thread_call_group_flags_t;
static struct thread_call_group {
    const char *            tcg_name;

    queue_head_t            pending_queue;
    uint32_t                pending_count;

    queue_head_t            delayed_queues[TCF_COUNT];
    timer_call_data_t       delayed_timers[TCF_COUNT];

    timer_call_data_t       dealloc_timer;

    struct waitq            idle_waitq;
    uint32_t                idle_count, active_count, blocked_count;

    uint32_t                tcg_thread_pri;
    uint32_t                target_thread_count;
    uint64_t                idle_timestamp;

    thread_call_group_flags_t flags;
} thread_call_groups[THREAD_CALL_INDEX_MAX] = {
    [THREAD_CALL_INDEX_HIGH] = {
        .tcg_name               = "high",
        .tcg_thread_pri         = BASEPRI_PREEMPT_HIGH,
        .target_thread_count    = 4,
    },
    [THREAD_CALL_INDEX_KERNEL] = {
        .tcg_name               = "kernel",
        .tcg_thread_pri         = BASEPRI_KERNEL,
        .target_thread_count    = 1,
        .flags                  = TCG_PARALLEL,
    },
    [THREAD_CALL_INDEX_USER] = {
        .tcg_name               = "user",
        .tcg_thread_pri         = BASEPRI_DEFAULT,
        .target_thread_count    = 1,
        .flags                  = TCG_PARALLEL,
    },
    [THREAD_CALL_INDEX_LOW] = {
        .tcg_name               = "low",
        .tcg_thread_pri         = MAXPRI_THROTTLE,
        .target_thread_count    = 1,
        .flags                  = TCG_PARALLEL,
    },
    [THREAD_CALL_INDEX_KERNEL_HIGH] = {
        .tcg_name               = "kernel-high",
        .tcg_thread_pri         = BASEPRI_PREEMPT,
        .target_thread_count    = 2,
    },
    [THREAD_CALL_INDEX_QOS_UI] = {
        .tcg_name               = "qos-ui",
        .tcg_thread_pri         = BASEPRI_FOREGROUND,
        .target_thread_count    = 1,
    },
    [THREAD_CALL_INDEX_QOS_IN] = {
        .tcg_name               = "qos-in",
        .tcg_thread_pri         = BASEPRI_USER_INITIATED,
        .target_thread_count    = 1,
    },
    [THREAD_CALL_INDEX_QOS_UT] = {
        .tcg_name               = "qos-ut",
        .tcg_thread_pri         = BASEPRI_UTILITY,
        .target_thread_count    = 1,
    },
};
typedef struct thread_call_group        *thread_call_group_t;

#define INTERNAL_CALL_COUNT             768
#define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * NSEC_PER_MSEC) /* 5 ms */
#define THREAD_CALL_ADD_RATIO           4
#define THREAD_CALL_MACH_FACTOR_CAP     3
#define THREAD_CALL_GROUP_MAX_THREADS   500
static boolean_t                thread_call_daemon_awake;
static thread_call_data_t       internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t             thread_call_internal_queue;
int                             thread_call_internal_queue_count = 0;
static uint64_t                 thread_call_dealloc_interval_abs;
static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0);
static __inline__ void          _internal_call_release(thread_call_t call);
static __inline__ boolean_t     _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static boolean_t                _delayed_call_enqueue(thread_call_t call, thread_call_group_t group,
    uint64_t deadline, thread_call_flavor_t flavor);
static __inline__ boolean_t     _call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void          thread_call_wake(thread_call_group_t group);
static void                     thread_call_daemon(void *arg);
static void                     thread_call_thread(thread_call_group_t group, wait_result_t wres);
static void                     thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void                     thread_call_group_setup(thread_call_group_t group);
static void                     sched_call_thread(int type, thread_t thread);
static void                     thread_call_start_deallocate_timer(thread_call_group_t group);
static void                     thread_call_wait_locked(thread_call_t call, spl_t s);
static boolean_t                thread_call_wait_once_locked(thread_call_t call, spl_t s);

static boolean_t                thread_call_enter_delayed_internal(thread_call_t call,
    thread_call_func_t alt_func, thread_call_param_t alt_param0,
    thread_call_param_t param1, uint64_t deadline,
    uint64_t leeway, unsigned int flags);

/* non-static so dtrace can find it rdar://problem/31156135&31379348 */
extern void                     thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
lck_grp_t                       thread_call_lck_grp;
lck_mtx_t                       thread_call_lock_data;

#define thread_call_lock_spin() \
    lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock() \
    lck_mtx_unlock_always(&thread_call_lock_data)

#define tc_deadline tc_call.deadline

extern boolean_t        mach_timer_coalescing_enabled;
static inline spl_t
disable_ints_and_lock(void)
{
    spl_t s = splsched();
    thread_call_lock_spin();

    return s;
}

static inline void
enable_ints_and_unlock(spl_t s)
{
    thread_call_unlock();
    splx(s);
}
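
/*
 * Usage sketch (illustrative, not an additional interface): every entry
 * point below brackets its critical section with this pair, e.g.
 *
 *      spl_t s = disable_ints_and_lock();
 *      ... examine or modify thread call / group state ...
 *      enable_ints_and_unlock(s);
 *
 * The spl value returned by disable_ints_and_lock() must be handed back so
 * the previous interrupt level is restored.
 */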
static inline boolean_t
group_isparallel(thread_call_group_t group)
{
    return (group->flags & TCG_PARALLEL) != 0;
}
static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
    if ((group->active_count + group->blocked_count + group->idle_count) >= THREAD_CALL_GROUP_MAX_THREADS) {
        panic("thread_call group '%s' reached max thread cap (%d): active: %d, blocked: %d, idle: %d",
            group->tcg_name, THREAD_CALL_GROUP_MAX_THREADS,
            group->active_count, group->blocked_count, group->idle_count);
    }

    if (group_isparallel(group) == FALSE) {
        if (group->pending_count > 0 && group->active_count == 0) {
            return TRUE;
        }

        return FALSE;
    }

    if (group->pending_count > 0) {
        if (group->idle_count > 0) {
            return FALSE;
        }

        uint32_t thread_count = group->active_count;

        /*
         * Add a thread if either there are no threads,
         * the group has fewer than its target number of
         * threads, or the amount of work is large relative
         * to the number of threads. In the last case, pay attention
         * to the total load on the system, and back off if
         * it is too high.
         */
        if ((thread_count == 0) ||
            (thread_count < group->target_thread_count) ||
            ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
            (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
            return TRUE;
        }
    }

    return FALSE;
}
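
/*
 * Worked example (illustrative numbers, not taken from the original source):
 * for a parallel group with target_thread_count = 1 that already has two
 * active threads, another thread is requested only once pending_count
 * exceeds THREAD_CALL_ADD_RATIO * 2 = 8 entries, and then only while
 * sched_mach_factor stays below THREAD_CALL_MACH_FACTOR_CAP (3), i.e. while
 * the system as a whole is not already heavily loaded.
 */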
static inline thread_call_group_t
thread_call_get_group(thread_call_t call)
{
    thread_call_index_t index = call->tc_index;

    assert(index >= 0 && index < THREAD_CALL_INDEX_MAX);

    return &thread_call_groups[index];
}
static inline thread_call_flavor_t
thread_call_get_flavor(thread_call_t call)
{
    return (call->tc_flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE;
}
static void
thread_call_group_setup(thread_call_group_t group)
{
    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queues[TCF_ABSOLUTE]);
    queue_init(&group->delayed_queues[TCF_CONTINUOUS]);

    /* TODO: Consolidate to one hard timer for each group */
    timer_call_setup(&group->delayed_timers[TCF_ABSOLUTE],   thread_call_delayed_timer, group);
    timer_call_setup(&group->delayed_timers[TCF_CONTINUOUS], thread_call_delayed_timer, group);
    timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

    /* Reverse the wait order so we re-use the most recently parked thread from the pool */
    waitq_init(&group->idle_waitq, SYNC_POLICY_REVERSED | SYNC_POLICY_DISABLE_IRQ);
}
/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
    thread_call_group_t             group)
{
    thread_t        thread;
    kern_return_t   result;

    int thread_pri = group->tcg_thread_pri;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_thread,
        group, thread_pri, &thread);
    if (result != KERN_SUCCESS) {
        return result;
    }

    if (thread_pri <= BASEPRI_KERNEL) {
        /*
         * THREAD_CALL_PRIORITY_KERNEL and lower don't get to run to completion
         * in kernel if there are higher priority threads available.
         */
        thread_set_eager_preempt(thread);
    }

    char name[MAXTHREADNAMESIZE] = "";

    int group_thread_count = group->idle_count + group->active_count + group->blocked_count;

    snprintf(name, sizeof(name), "thread call %s #%d", group->tcg_name, group_thread_count);
    thread_set_thread_name(thread, name);

    thread_deallocate(thread);
    return KERN_SUCCESS;
}
/*
 *  thread_call_initialize:
 *
 *  Initialize this module, called
 *  early during system initialization.
 */
void
thread_call_initialize(void)
{
    int tc_size = sizeof(thread_call_data_t);
    thread_call_zone = zinit(tc_size, 4096 * tc_size, 16 * tc_size, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_grp_init(&thread_call_lck_grp, "thread_call", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, LCK_ATTR_NULL);

    nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
    waitq_init(&daemon_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_FIFO);

    for (uint32_t i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
        thread_call_group_setup(&thread_call_groups[i]);
    }

    spl_t s = disable_ints_and_lock();

    queue_init(&thread_call_internal_queue);

    for (thread_call_t call = internal_call_storage;
        call < &internal_call_storage[INTERNAL_CALL_COUNT];
        call++) {
        enqueue_tail(&thread_call_internal_queue, &call->tc_call.q_link);
        thread_call_internal_queue_count++;
    }

    thread_call_daemon_awake = TRUE;

    enable_ints_and_unlock(s);

    thread_t        thread;
    kern_return_t   result;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon,
        NULL, BASEPRI_PREEMPT_HIGH + 1, &thread);
    if (result != KERN_SUCCESS) {
        panic("thread_call_initialize");
    }

    thread_deallocate(thread);
}
void
thread_call_setup(
    thread_call_t                   call,
    thread_call_func_t              func,
    thread_call_param_t             param0)
{
    bzero(call, sizeof(*call));

    call_entry_setup((call_entry_t)call, func, param0);

    /* Thread calls default to the HIGH group unless otherwise specified */
    call->tc_index = THREAD_CALL_INDEX_HIGH;

    /* THREAD_CALL_ALLOC not set, memory owned by caller */
}
/*
 *  _internal_call_allocate:
 *
 *  Allocate an internal callout entry.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(thread_call_func_t func, thread_call_param_t param0)
{
    thread_call_t call;

    if (queue_empty(&thread_call_internal_queue)) {
        panic("_internal_call_allocate");
    }

    call = qe_dequeue_head(&thread_call_internal_queue, struct thread_call, tc_call.q_link);

    thread_call_internal_queue_count--;

    thread_call_setup(call, func, param0);
    call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */

    return call;
}
/*
 *  _internal_call_release:
 *
 *  Release an internal callout entry which
 *  is no longer pending (or delayed). This is
 *  safe to call on a non-internal entry, in which
 *  case nothing happens.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(thread_call_t call)
{
    if (call >= internal_call_storage &&
        call < &internal_call_storage[INTERNAL_CALL_COUNT]) {
        assert((call->tc_flags & THREAD_CALL_ALLOC) == 0);

        enqueue_head(&thread_call_internal_queue, &call->tc_call.q_link);
        thread_call_internal_queue_count++;
    }
}
/*
 *  _pending_call_enqueue:
 *
 *  Place an entry at the end of the
 *  pending queue, to be executed soon.
 *
 *  Returns TRUE if the entry was already
 *  on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(thread_call_t call,
    thread_call_group_t group)
{
    if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING)
        == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) {
        call->tc_deadline = 0;

        uint32_t flags = call->tc_flags;
        call->tc_flags |= THREAD_CALL_RESCHEDULE;

        if ((flags & THREAD_CALL_RESCHEDULE) != 0) {
            return TRUE;
        } else {
            return FALSE;
        }
    }

    queue_head_t *old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

    if (old_queue == NULL) {
        call->tc_submit_count++;
    } else if (old_queue != &group->pending_queue &&
        old_queue != &group->delayed_queues[TCF_ABSOLUTE] &&
        old_queue != &group->delayed_queues[TCF_CONTINUOUS]) {
        panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue);
    }

    group->pending_count++;

    thread_call_wake(group);

    return old_queue != NULL;
}
/*
 *  _delayed_call_enqueue:
 *
 *  Place an entry on the delayed queue,
 *  after existing entries with an earlier
 *  (or identical) deadline.
 *
 *  Returns TRUE if the entry was already
 *  on a queue.
 *
 *  Called with thread_call_lock held.
 */
static boolean_t
_delayed_call_enqueue(
    thread_call_t           call,
    thread_call_group_t     group,
    uint64_t                deadline,
    thread_call_flavor_t    flavor)
{
    if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING)
        == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) {
        call->tc_deadline = deadline;

        uint32_t flags = call->tc_flags;
        call->tc_flags |= THREAD_CALL_RESCHEDULE;

        if ((flags & THREAD_CALL_RESCHEDULE) != 0) {
            return TRUE;
        } else {
            return FALSE;
        }
    }

    queue_head_t *old_queue = call_entry_enqueue_deadline(CE(call),
        &group->delayed_queues[flavor],
        deadline);

    if (old_queue == &group->pending_queue) {
        group->pending_count--;
    } else if (old_queue == NULL) {
        call->tc_submit_count++;
    } else if (old_queue == &group->delayed_queues[TCF_ABSOLUTE] ||
        old_queue == &group->delayed_queues[TCF_CONTINUOUS]) {
        /* TODO: if it's in the other delayed queue, that might not be OK */

        /* we did nothing, and that's fine */
    } else {
        panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue);
    }

    return old_queue != NULL;
}
/*
 *  _call_dequeue:
 *
 *  Remove an entry from a queue.
 *
 *  Returns TRUE if the entry was on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t           call,
    thread_call_group_t     group)
{
    queue_head_t *old_queue;

    old_queue = call_entry_dequeue(CE(call));

    if (old_queue != NULL) {
        assert(old_queue == &group->pending_queue ||
            old_queue == &group->delayed_queues[TCF_ABSOLUTE] ||
            old_queue == &group->delayed_queues[TCF_CONTINUOUS]);

        call->tc_finish_count++;
        if (old_queue == &group->pending_queue) {
            group->pending_count--;
        }
    }

    return old_queue != NULL;
}
/*
 *  _arm_delayed_call_timer:
 *
 *  Check if the timer needs to be armed for this flavor,
 *  and if so, arm it.
 *
 *  If call is non-NULL, only re-arm the timer if the specified call
 *  is the first in the queue.
 *
 *  Returns true if the timer was armed/re-armed, false if it was left unset
 *  Caller should cancel the timer if need be.
 *
 *  Called with thread_call_lock held.
 */
static bool
_arm_delayed_call_timer(thread_call_t new_call,
    thread_call_group_t group,
    thread_call_flavor_t flavor)
{
    /* No calls implies no timer needed */
    if (queue_empty(&group->delayed_queues[flavor])) {
        return false;
    }

    thread_call_t call = qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link);

    /* We only need to change the hard timer if this new call is the first in the list */
    if (new_call != NULL && new_call != call) {
        return false;
    }

    assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline)));

    uint64_t fire_at = call->tc_soft_deadline;

    if (flavor == TCF_CONTINUOUS) {
        assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS);
        fire_at = continuoustime_to_absolutetime(fire_at);
    } else {
        assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0);
    }

    /*
     * Note: This picks the soonest-deadline call's leeway as the hard timer's leeway,
     * which does not take into account later-deadline timers with a larger leeway.
     * This is a valid coalescing behavior, but masks a possible window to
     * fire a timer instead of going idle.
     */
    uint64_t leeway = call->tc_call.deadline - call->tc_soft_deadline;

    timer_call_enter_with_leeway(&group->delayed_timers[flavor], (timer_call_param_t)flavor,
        fire_at, leeway,
        TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LEEWAY,
        ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED));

    return true;
}
/*
 *  _cancel_func_from_queue:
 *
 *  Remove the first (or all) matching
 *  entries from the specified queue.
 *
 *  Returns TRUE if any matching entries
 *  were found.
 *
 *  Called with thread_call_lock held.
 */
static boolean_t
_cancel_func_from_queue(thread_call_func_t      func,
    thread_call_param_t     param0,
    thread_call_group_t     group,
    boolean_t               remove_all,
    queue_head_t            *queue)
{
    boolean_t call_removed = FALSE;
    thread_call_t call;

    qe_foreach_element_safe(call, queue, tc_call.q_link) {
        if (call->tc_call.func != func ||
            call->tc_call.param0 != param0) {
            continue;
        }

        _call_dequeue(call, group);

        _internal_call_release(call);

        call_removed = TRUE;
        if (!remove_all) {
            break;
        }
    }

    return call_removed;
}
/*
 *  thread_call_func_delayed:
 *
 *  Enqueue a function callout to
 *  occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t              func,
    thread_call_param_t             param,
    uint64_t                        deadline)
{
    (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0);
}

/*
 * thread_call_func_delayed_with_leeway:
 *
 * Same as thread_call_func_delayed(), but with
 * leeway/flags threaded through.
 */
void
thread_call_func_delayed_with_leeway(
    thread_call_func_t              func,
    thread_call_param_t             param,
    uint64_t                        deadline,
    uint64_t                        leeway,
    uint32_t                        flags)
{
    (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags);
}
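
/*
 * Example (illustrative sketch; `example_callback` is a hypothetical
 * function of type thread_call_func_t):
 *
 *      uint64_t deadline;
 *      clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *      thread_call_func_delayed(example_callback, NULL, deadline);
 *
 * The callout fires on a worker thread of the default HIGH group roughly
 * 100 ms from now; its storage comes from internal_call_storage, so the
 * caller never allocates or frees a thread_call_t.
 */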
/*
 *  thread_call_func_cancel:
 *
 *  Dequeue a function callout.
 *
 *  Removes one (or all) { function, argument }
 *  instance(s) from either (or both)
 *  the pending and the delayed queue,
 *  in that order.
 *
 *  Returns TRUE if any calls were cancelled.
 *
 *  This iterates all of the pending or delayed thread calls in the group,
 *  which is really inefficient.  Switch to an allocated thread call instead.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t              func,
    thread_call_param_t             param,
    boolean_t                       cancel_all)
{
    boolean_t result;

    assert(func != NULL);

    spl_t s = disable_ints_and_lock();

    /* Function-only thread calls are only kept in the default HIGH group */
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_INDEX_HIGH];

    if (cancel_all) {
        /* exhaustively search every queue, and return true if any search found something */
        result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) |
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) |
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]);
    } else {
        /* early-exit as soon as we find something, don't search other queues */
        result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) ||
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) ||
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]);
    }

    enable_ints_and_unlock(s);

    return result;
}
/*
 * Allocate a thread call with a given priority.  Importances other than
 * THREAD_CALL_PRIORITY_HIGH or THREAD_CALL_PRIORITY_KERNEL_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted by higher-priority
 * threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
    thread_call_func_t              func,
    thread_call_param_t             param0,
    thread_call_priority_t          pri)
{
    return thread_call_allocate_with_options(func, param0, pri, 0);
}

thread_call_t
thread_call_allocate_with_options(
    thread_call_func_t              func,
    thread_call_param_t             param0,
    thread_call_priority_t          pri,
    thread_call_options_t           options)
{
    thread_call_t call = thread_call_allocate(func, param0);

    switch (pri) {
    case THREAD_CALL_PRIORITY_HIGH:
        call->tc_index = THREAD_CALL_INDEX_HIGH;
        break;
    case THREAD_CALL_PRIORITY_KERNEL:
        call->tc_index = THREAD_CALL_INDEX_KERNEL;
        break;
    case THREAD_CALL_PRIORITY_USER:
        call->tc_index = THREAD_CALL_INDEX_USER;
        break;
    case THREAD_CALL_PRIORITY_LOW:
        call->tc_index = THREAD_CALL_INDEX_LOW;
        break;
    case THREAD_CALL_PRIORITY_KERNEL_HIGH:
        call->tc_index = THREAD_CALL_INDEX_KERNEL_HIGH;
        break;
    default:
        panic("Invalid thread call pri value: %d", pri);
        break;
    }

    if (options & THREAD_CALL_OPTIONS_ONCE) {
        call->tc_flags |= THREAD_CALL_ONCE;
    }
    if (options & THREAD_CALL_OPTIONS_SIGNAL) {
        call->tc_flags |= THREAD_CALL_SIGNAL | THREAD_CALL_ONCE;
    }

    return call;
}
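
/*
 * Example (illustrative sketch; `example_fn` and `ctx` are hypothetical):
 *
 *      thread_call_t tc = thread_call_allocate_with_options(example_fn, ctx,
 *          THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
 *
 * The resulting call runs in the KERNEL group; because of the ONCE option,
 * at most one invocation is outstanding at a time and the call becomes
 * eligible for thread_call_wait_once() / cancel-wait semantics.
 */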
thread_call_t
thread_call_allocate_with_qos(thread_call_func_t        func,
    thread_call_param_t     param0,
    int                     qos_tier,
    thread_call_options_t   options)
{
    thread_call_t call = thread_call_allocate(func, param0);

    switch (qos_tier) {
    case THREAD_QOS_UNSPECIFIED:
        call->tc_index = THREAD_CALL_INDEX_HIGH;
        break;
    case THREAD_QOS_LEGACY:
        call->tc_index = THREAD_CALL_INDEX_USER;
        break;
    case THREAD_QOS_MAINTENANCE:
    case THREAD_QOS_BACKGROUND:
        call->tc_index = THREAD_CALL_INDEX_LOW;
        break;
    case THREAD_QOS_UTILITY:
        call->tc_index = THREAD_CALL_INDEX_QOS_UT;
        break;
    case THREAD_QOS_USER_INITIATED:
        call->tc_index = THREAD_CALL_INDEX_QOS_IN;
        break;
    case THREAD_QOS_USER_INTERACTIVE:
        call->tc_index = THREAD_CALL_INDEX_QOS_UI;
        break;
    default:
        panic("Invalid thread call qos value: %d", qos_tier);
        break;
    }

    if (options & THREAD_CALL_OPTIONS_ONCE) {
        call->tc_flags |= THREAD_CALL_ONCE;
    }

    /* does not support THREAD_CALL_OPTIONS_SIGNAL */

    return call;
}
/*
 *  thread_call_allocate:
 *
 *  Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t              func,
    thread_call_param_t             param0)
{
    thread_call_t call = zalloc(thread_call_zone);

    thread_call_setup(call, func, param0);
    call->tc_refs = 1;
    call->tc_flags = THREAD_CALL_ALLOC;

    return call;
}
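
/*
 * Typical lifecycle (illustrative sketch; `example_fn` and `ctx` are
 * hypothetical):
 *
 *      thread_call_t tc = thread_call_allocate(example_fn, ctx);
 *      thread_call_enter(tc);          // run "soon" on a HIGH-group thread
 *      ...
 *      thread_call_cancel(tc);         // dequeue if still pending
 *      thread_call_free(tc);           // returns FALSE if the call is still armed
 *
 * The caller owns the reference returned by thread_call_allocate() and must
 * cancel any pending invocation before freeing.
 */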
/*
 *  thread_call_free:
 *
 *  Release a callout.  If the callout is currently
 *  executing, it will be freed when all invocations
 *  finish.
 *
 *  If the callout is currently armed to fire again, then
 *  freeing is not allowed and returns FALSE.  The
 *  client must have canceled the pending invocation before freeing.
 */
boolean_t
thread_call_free(
    thread_call_t           call)
{
    spl_t s = disable_ints_and_lock();

    if (call->tc_call.queue != NULL ||
        ((call->tc_flags & THREAD_CALL_RESCHEDULE) != 0)) {
        thread_call_unlock();
        splx(s);

        return FALSE;
    }

    int32_t refs = --call->tc_refs;
    if (refs < 0) {
        panic("Refcount negative: %d\n", refs);
    }

    if ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING)
        == ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) & call->tc_flags)) {
        thread_call_wait_once_locked(call, s);
        /* thread call lock has been unlocked */
    } else {
        enable_ints_and_unlock(s);
    }

    if (refs == 0) {
        assert(call->tc_finish_count == call->tc_submit_count);
        zfree(thread_call_zone, call);
    }

    return TRUE;
}
/*
 *  thread_call_enter:
 *
 *  Enqueue a callout entry to occur "soon".
 *
 *  Returns TRUE if the call was
 *  already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t           call)
{
    return thread_call_enter1(call, 0);
}

boolean_t
thread_call_enter1(
    thread_call_t                   call,
    thread_call_param_t             param1)
{
    boolean_t               result = TRUE;
    thread_call_group_t     group;

    assert(call->tc_call.func != NULL);

    assert((call->tc_flags & THREAD_CALL_SIGNAL) == 0);

    group = thread_call_get_group(call);

    spl_t s = disable_ints_and_lock();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = param1;

    enable_ints_and_unlock(s);

    return result;
}
/*
 *  thread_call_enter_delayed:
 *
 *  Enqueue a callout entry to occur
 *  at the stated time.
 *
 *  Returns TRUE if the call was
 *  already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t           call,
    uint64_t                deadline)
{
    assert(call != NULL);
    return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0);
}

boolean_t
thread_call_enter1_delayed(
    thread_call_t                   call,
    thread_call_param_t             param1,
    uint64_t                        deadline)
{
    assert(call != NULL);
    return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0);
}

boolean_t
thread_call_enter_delayed_with_leeway(
    thread_call_t                   call,
    thread_call_param_t             param1,
    uint64_t                        deadline,
    uint64_t                        leeway,
    unsigned int                    flags)
{
    assert(call != NULL);
    return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags);
}
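
/*
 * Example (illustrative sketch): request a callout 50 ms out with 10 ms of
 * coalescing leeway.
 *
 *      uint64_t deadline, leeway;
 *      clock_interval_to_deadline(50, NSEC_PER_MSEC, &deadline);
 *      clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &leeway);
 *      thread_call_enter_delayed_with_leeway(tc, NULL, deadline, leeway,
 *          THREAD_CALL_DELAY_LEEWAY);
 *
 * Without THREAD_CALL_DELAY_LEEWAY the leeway argument is ignored and only
 * the slop computed by timer_call_slop() is applied.
 */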
/*
 * thread_call_enter_delayed_internal:
 * enqueue a callout entry to occur at the stated time
 *
 * Returns True if the call was already on a queue
 * params:
 * call     - structure encapsulating state of the callout
 * alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters
 * deadline - time deadline in nanoseconds
 * leeway   - timer slack represented as delta of deadline.
 * flags    - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing.
 *            THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing.
 *            THREAD_CALL_CONTINUOUS: thread call will be called according to mach_continuous_time rather
 *                                    than mach_absolute_time
 */
boolean_t
thread_call_enter_delayed_internal(
    thread_call_t           call,
    thread_call_func_t      alt_func,
    thread_call_param_t     alt_param0,
    thread_call_param_t     param1,
    uint64_t                deadline,
    uint64_t                leeway,
    unsigned int            flags)
{
    boolean_t               result = TRUE;
    thread_call_group_t     group;
    uint64_t                now, sdeadline, slop;
    uint32_t                urgency;

    thread_call_flavor_t flavor = (flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE;

    /* direct mapping between thread_call, timer_call, and timeout_urgency values */
    urgency = (flags & TIMEOUT_URGENCY_MASK);

    spl_t s = disable_ints_and_lock();

    if (call == NULL) {
        /* allocate a structure out of internal storage, as a convenience for BSD callers */
        call = _internal_call_allocate(alt_func, alt_param0);
    }

    assert(call->tc_call.func != NULL);
    group = thread_call_get_group(call);

    /* TODO: assert that call is not enqueued before flipping the flag */
    if (flavor == TCF_CONTINUOUS) {
        now = mach_continuous_time();
        call->tc_flags |= THREAD_CALL_CONTINUOUS;
    } else {
        now = mach_absolute_time();
        call->tc_flags &= ~THREAD_CALL_CONTINUOUS;
    }

    call->tc_flags |= THREAD_CALL_DELAYED;

    call->tc_soft_deadline = sdeadline = deadline;

    boolean_t ratelimited = FALSE;
    slop = timer_call_slop(deadline, now, urgency, current_thread(), &ratelimited);

    if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop) {
        slop = leeway;
    }

    if (UINT64_MAX - deadline <= slop) {
        deadline = UINT64_MAX;
    } else {
        deadline += slop;
    }

    if (ratelimited) {
        call->tc_flags |= TIMER_CALL_RATELIMITED;
    } else {
        call->tc_flags &= ~TIMER_CALL_RATELIMITED;
    }

    call->tc_call.param1 = param1;

    call->tc_ttd = (sdeadline > now) ? (sdeadline - now) : 0;

    result = _delayed_call_enqueue(call, group, deadline, flavor);

    _arm_delayed_call_timer(call, group, flavor);

    DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func,
        uint64_t, (deadline - sdeadline), uint64_t, (call->tc_ttd >> 32),
        (unsigned) (call->tc_ttd & 0xFFFFFFFF), call);

    enable_ints_and_unlock(s);

    return result;
}
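
/*
 * Example (illustrative sketch): schedule a callout against
 * mach_continuous_time(), so the deadline keeps advancing across sleep.
 *
 *      uint64_t interval;
 *      nanoseconds_to_absolutetime(30 * NSEC_PER_SEC, &interval);
 *      thread_call_enter_delayed_with_leeway(tc, NULL,
 *          mach_continuous_time() + interval, 0, THREAD_CALL_CONTINUOUS);
 *
 * The THREAD_CALL_CONTINUOUS flag routes the entry to the TCF_CONTINUOUS
 * delayed queue, whose hard timer deadline is converted with
 * continuoustime_to_absolutetime() when armed.
 */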
/*
 * Remove a callout entry from the queue
 * Called with thread_call_lock held
 */
static boolean_t
thread_call_cancel_locked(thread_call_t call)
{
    boolean_t canceled = (0 != (THREAD_CALL_RESCHEDULE & call->tc_flags));
    call->tc_flags &= ~THREAD_CALL_RESCHEDULE;

    if (canceled) {
        /* if reschedule was set, it must not have been queued */
        assert(call->tc_call.queue == NULL);
    } else {
        boolean_t do_cancel_callout = FALSE;

        thread_call_flavor_t flavor = thread_call_get_flavor(call);
        thread_call_group_t  group  = thread_call_get_group(call);

        if ((call->tc_call.deadline != 0) &&
            (call == qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link))) {
            assert(call->tc_call.queue == &group->delayed_queues[flavor]);
            do_cancel_callout = TRUE;
        }

        canceled = _call_dequeue(call, group);

        if (do_cancel_callout) {
            if (_arm_delayed_call_timer(NULL, group, flavor) == false) {
                timer_call_cancel(&group->delayed_timers[flavor]);
            }
        }
    }

    DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func,
        0, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF));

    return canceled;
}
/*
 *  thread_call_cancel:
 *
 *  Dequeue a callout entry.
 *
 *  Returns TRUE if the call was
 *  on a queue.
 */
boolean_t
thread_call_cancel(thread_call_t call)
{
    spl_t s = disable_ints_and_lock();

    boolean_t result = thread_call_cancel_locked(call);

    enable_ints_and_unlock(s);

    return result;
}
/*
 * Cancel a thread call.  If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish.  Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(thread_call_t call)
{
    if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
        panic("thread_call_cancel_wait: can't wait on thread call whose storage I don't own");
    }

    if (!ml_get_interrupts_enabled()) {
        panic("unsafe thread_call_cancel_wait");
    }

    if (current_thread()->thc_state.thc_call == call) {
        panic("thread_call_cancel_wait: deadlock waiting on self from inside call: %p to function %p",
            call, call->tc_call.func);
    }

    spl_t s = disable_ints_and_lock();

    boolean_t canceled = thread_call_cancel_locked(call);

    if ((call->tc_flags & THREAD_CALL_ONCE) == THREAD_CALL_ONCE) {
        /*
         * A cancel-wait on a 'once' call will both cancel
         * the pending call and wait for the in-flight call
         * to complete.
         */
        thread_call_wait_once_locked(call, s);
        /* thread call lock unlocked */
    } else {
        /*
         * A cancel-wait on a normal call will only wait for the in-flight calls
         * if it did not cancel the pending call.
         *
         * TODO: This seems less than useful - shouldn't it do the wait as well?
         */
        if (canceled == FALSE) {
            thread_call_wait_locked(call, s);
            /* thread call lock unlocked */
        } else {
            enable_ints_and_unlock(s);
        }
    }

    return canceled;
}
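
/*
 * Teardown pattern (illustrative sketch; `sc` is a hypothetical structure
 * that owns an allocated thread call):
 *
 *      thread_call_cancel_wait(sc->tc);
 *      if (!thread_call_free(sc->tc)) {
 *          // a racing re-submission left the call armed; handle or retry
 *      }
 *
 * cancel-wait requires THREAD_CALL_ALLOC storage, interrupts enabled, and
 * must not be invoked from within the callout itself (see the panics above).
 */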
/*
 *  thread_call_wake:
 *
 *  Wake a call thread to service
 *  pending call entries.  May wake
 *  the daemon thread in order to
 *  create additional call threads.
 *
 *  Called with thread_call_lock held.
 *
 *  For high-priority group, only does wakeup/creation if there are no threads
 *  running.
 */
static __inline__ void
thread_call_wake(
    thread_call_group_t             group)
{
    /*
     * New behavior: use threads if you've got 'em.
     * Traditional behavior: wake only if no threads running.
     */
    if (group_isparallel(group) || group->active_count == 0) {
        if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
            THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) {
            group->idle_count--; group->active_count++;

            if (group->idle_count == 0 && (group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE) {
                if (timer_call_cancel(&group->dealloc_timer) == TRUE) {
                    group->flags &= ~TCG_DEALLOC_ACTIVE;
                }
            }
        } else {
            if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
                thread_call_daemon_awake = TRUE;
                waitq_wakeup64_one(&daemon_waitq, NO_EVENT64,
                    THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
            }
        }
    }
}
/*
 *  sched_call_thread:
 *
 *  Call out invoked by the scheduler.
 */
static void
sched_call_thread(
    int             type,
    thread_t        thread)
{
    thread_call_group_t     group;

    group = thread->thc_state.thc_group;
    assert((group - &thread_call_groups[0]) < THREAD_CALL_INDEX_MAX);

    thread_call_lock_spin();

    switch (type) {
    case SCHED_CALL_BLOCK:
        assert(group->active_count);
        --group->active_count;
        group->blocked_count++;
        if (group->pending_count > 0) {
            thread_call_wake(group);
        }
        break;

    case SCHED_CALL_UNBLOCK:
        assert(group->blocked_count);
        --group->blocked_count;
        group->active_count++;
        break;
    }

    thread_call_unlock();
}
/*
 * Interrupts disabled, lock held; returns the same way.
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static boolean_t
thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s)
{
    uint64_t  time;
    uint32_t  flags;
    boolean_t signal;
    boolean_t repend = FALSE;

    call->tc_finish_count++;
    flags = call->tc_flags;
    signal = ((THREAD_CALL_SIGNAL & flags) != 0);

    if (!signal) {
        /* The thread call thread owns a ref until the call is finished */
        if (call->tc_refs <= 0) {
            panic("thread_call_finish: detected over-released thread call: %p", call);
        }
        call->tc_refs--;
    }

    call->tc_flags &= ~(THREAD_CALL_RESCHEDULE | THREAD_CALL_RUNNING | THREAD_CALL_WAIT);

    if ((call->tc_refs != 0) && ((flags & THREAD_CALL_RESCHEDULE) != 0)) {
        assert(flags & THREAD_CALL_ONCE);
        thread_call_flavor_t flavor = thread_call_get_flavor(call);

        if (THREAD_CALL_DELAYED & flags) {
            time = mach_absolute_time();
            if (flavor == TCF_CONTINUOUS) {
                time = absolutetime_to_continuoustime(time);
            }
            if (call->tc_soft_deadline <= time) {
                call->tc_flags &= ~(THREAD_CALL_DELAYED | TIMER_CALL_RATELIMITED);
                call->tc_deadline = 0;
            }
        }
        if (call->tc_deadline) {
            _delayed_call_enqueue(call, group, call->tc_deadline, flavor);
            if (!signal) {
                _arm_delayed_call_timer(call, group, flavor);
            }
        } else if (signal) {
            call->tc_submit_count++;
            repend = TRUE;
        } else {
            _pending_call_enqueue(call, group);
            repend = TRUE;
        }
    }

    if (!signal && (call->tc_refs == 0)) {
        if ((flags & THREAD_CALL_WAIT) != 0) {
            panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
        }

        assert(call->tc_finish_count == call->tc_submit_count);

        enable_ints_and_unlock(*s);

        zfree(thread_call_zone, call);

        *s = disable_ints_and_lock();
    }

    if ((flags & THREAD_CALL_WAIT) != 0) {
        /*
         * Dropping lock here because the sched call for the
         * high-pri group can take the big lock from under
         * a thread lock.
         */
        thread_call_unlock();
        thread_wakeup((event_t)call);
        thread_call_lock_spin();

        /* THREAD_CALL_SIGNAL call may have been freed */
    }

    return repend;
}
/*
 * thread_call_invoke
 *
 * Invoke the function provided for this thread call
 *
 * Note that the thread call object can be deallocated by the function if we do not control its storage.
 */
static void __attribute__((noinline))
thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_call_param_t param1, thread_call_t call)
{
    current_thread()->thc_state.thc_call = call;

#if DEVELOPMENT || DEBUG
    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_START,
        VM_KERNEL_UNSLIDE(func), VM_KERNEL_ADDRHIDE(param0), VM_KERNEL_ADDRHIDE(param1), 0, 0);
#endif /* DEVELOPMENT || DEBUG */

    uint64_t tc_ttd = call->tc_ttd;
    boolean_t is_delayed = call->tc_flags & THREAD_CALL_DELAYED;
    DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32),
        (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call);

    (*func)(param0, param1);

    DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32),
        (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call);

#if DEVELOPMENT || DEBUG
    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_END,
        VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0);
#endif /* DEVELOPMENT || DEBUG */

    current_thread()->thc_state.thc_call = NULL;
}
/*
 *  thread_call_thread:
 */
static void
thread_call_thread(
    thread_call_group_t             group,
    wait_result_t                   wres)
{
    thread_t    self = current_thread();
    boolean_t   canwait;

    if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) {
        (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);
    }

    /*
     * A wakeup with THREAD_INTERRUPTED indicates that
     * we should terminate.
     */
    if (wres == THREAD_INTERRUPTED) {
        thread_terminate(self);

        /* NOTREACHED */
        panic("thread_terminate() returned?");
    }

    spl_t s = disable_ints_and_lock();

    self->thc_state.thc_group = group;
    thread_sched_call(self, sched_call_thread);

    while (group->pending_count > 0) {
        thread_call_t                   call;
        thread_call_func_t              func;
        thread_call_param_t             param0, param1;

        call = qe_dequeue_head(&group->pending_queue, struct thread_call, tc_call.q_link);
        assert(call != NULL);
        group->pending_count--;

        func = call->tc_call.func;
        param0 = call->tc_call.param0;
        param1 = call->tc_call.param1;

        call->tc_call.queue = NULL;

        _internal_call_release(call);

        /*
         * Can only do wakeups for thread calls whose storage
         * we control.
         */
        if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
            canwait = TRUE;
            call->tc_flags |= THREAD_CALL_RUNNING;
            call->tc_refs++;    /* Delay free until we're done */
        } else {
            canwait = FALSE;
        }

        enable_ints_and_unlock(s);

        thread_call_invoke(func, param0, param1, call);

        if (get_preemption_level() != 0) {
            int pl = get_preemption_level();
            panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
                pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
        }

        s = disable_ints_and_lock();

        if (canwait) {
            /* Frees if so desired */
            thread_call_finish(call, group, &s);
        }
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    if (self->callout_woken_from_icontext && !self->callout_woke_thread) {
        ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1);
        if (self->callout_woken_from_platform_idle) {
            ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1);
        }
    }

    self->callout_woken_from_icontext = FALSE;
    self->callout_woken_from_platform_idle = FALSE;
    self->callout_woke_thread = FALSE;

    if (group_isparallel(group)) {
        /*
         * For new style of thread group, thread always blocks.
         * If we have more than the target number of threads,
         * and this is the first to block, and it isn't active
         * already, set a timer for deallocating a thread if we
         * continue to have a surplus.
         */
        group->idle_count++;

        if (group->idle_count == 1) {
            group->idle_timestamp = mach_absolute_time();
        }

        if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
            ((group->active_count + group->idle_count) > group->target_thread_count)) {
            thread_call_start_deallocate_timer(group);
        }

        /* Wait for more work (or termination) */
        wres = waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_INTERRUPTIBLE, 0);
        if (wres != THREAD_WAITING) {
            panic("kcall worker unable to assert wait?");
        }

        enable_ints_and_unlock(s);

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
        /* NOTREACHED */
    }

    if (group->idle_count < group->target_thread_count) {
        group->idle_count++;

        waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_UNINT, 0); /* Interrupted means to exit */

        enable_ints_and_unlock(s);

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
        /* NOTREACHED */
    }

    enable_ints_and_unlock(s);

    thread_terminate(self);
    /* NOTREACHED */
}
/*
 *  thread_call_daemon: walk list of groups, allocating
 *  threads if appropriate (as determined by
 *  thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
    spl_t s = disable_ints_and_lock();

    /* Starting at zero happens to be high-priority first. */
    for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
        thread_call_group_t group = &thread_call_groups[i];
        while (thread_call_group_should_add_thread(group)) {
            group->active_count++;

            enable_ints_and_unlock(s);

            kern_return_t kr = thread_call_thread_create(group);
            if (kr != KERN_SUCCESS) {
                /*
                 * On failure, just pause for a moment and give up.
                 * We can try again later.
                 */
                delay(10000); /* 10 ms */
                s = disable_ints_and_lock();
                goto out;
            }

            s = disable_ints_and_lock();
        }
    }

out:
    thread_call_daemon_awake = FALSE;
    waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0);

    enable_ints_and_unlock(s);

    thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
    /* NOTREACHED */
}

static void
thread_call_daemon(
    __unused void   *arg)
{
    thread_t self = current_thread();

    self->options |= TH_OPT_VMPRIV;
    vm_page_free_reserve(2);        /* XXX */

    thread_set_thread_name(self, "thread_call_daemon");

    thread_call_daemon_continue(NULL);
    /* NOTREACHED */
}
/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(thread_call_group_t group)
{
    __assert_only boolean_t already_enqueued;

    assert(group->idle_count > 0);
    assert((group->flags & TCG_DEALLOC_ACTIVE) == 0);

    group->flags |= TCG_DEALLOC_ACTIVE;

    uint64_t deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;

    already_enqueued = timer_call_enter(&group->dealloc_timer, deadline, 0);

    assert(already_enqueued == FALSE);
}
/* non-static so dtrace can find it rdar://problem/31156135&31379348 */
void
thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1)
{
    thread_call_group_t  group  = (thread_call_group_t)  p0;
    thread_call_flavor_t flavor = (thread_call_flavor_t) p1;

    thread_call_t   call;
    uint64_t        now;
    boolean_t       restart;
    boolean_t       repend;

    thread_call_lock_spin();

    if (flavor == TCF_CONTINUOUS) {
        now = mach_continuous_time();
    } else if (flavor == TCF_ABSOLUTE) {
        now = mach_absolute_time();
    } else {
        panic("invalid timer flavor: %d", flavor);
    }

    do {
        restart = FALSE;
        qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) {
            if (flavor == TCF_CONTINUOUS) {
                assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS);
            } else {
                assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0);
            }

            /*
             * if we hit a call that isn't yet ready to expire,
             * then we're done for now
             * TODO: The next timer in the list could have a larger leeway
             *       and therefore be ready to expire.
             *       Sort by deadline then by soft deadline to avoid this
             */
            if (call->tc_soft_deadline > now) {
                break;
            }

            /*
             * If we hit a rate-limited timer, don't eagerly wake it up.
             * Wait until it reaches the end of the leeway window.
             *
             * TODO: What if the next timer is not rate-limited?
             *       Have a separate rate-limited queue to avoid this
             */
            if ((call->tc_flags & THREAD_CALL_RATELIMITED) &&
                (call->tc_call.deadline > now) &&
                (ml_timer_forced_evaluation() == FALSE)) {
                break;
            }

            if (THREAD_CALL_SIGNAL & call->tc_flags) {
                __assert_only queue_head_t *old_queue;
                old_queue = call_entry_dequeue(&call->tc_call);
                assert(old_queue == &group->delayed_queues[flavor]);

                do {
                    thread_call_func_t  func   = call->tc_call.func;
                    thread_call_param_t param0 = call->tc_call.param0;
                    thread_call_param_t param1 = call->tc_call.param1;

                    call->tc_flags |= THREAD_CALL_RUNNING;
                    thread_call_unlock();
                    thread_call_invoke(func, param0, param1, call);
                    thread_call_lock_spin();

                    repend = thread_call_finish(call, group, NULL);
                } while (repend);

                /* call may have been freed */
                restart = TRUE;
                break;
            } else {
                _pending_call_enqueue(call, group);
            }
        }
    } while (restart);

    _arm_delayed_call_timer(call, group, flavor);

    thread_call_unlock();
}
static void
thread_call_delayed_timer_rescan(thread_call_group_t group,
    thread_call_flavor_t flavor)
{
    thread_call_t call;
    uint64_t now;

    spl_t s = disable_ints_and_lock();

    assert(ml_timer_forced_evaluation() == TRUE);

    if (flavor == TCF_CONTINUOUS) {
        now = mach_continuous_time();
    } else {
        now = mach_absolute_time();
    }

    qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) {
        if (call->tc_soft_deadline <= now) {
            _pending_call_enqueue(call, group);
        } else {
            uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
            assert(call->tc_call.deadline >= call->tc_soft_deadline);
            /*
             * On a latency quality-of-service level change,
             * re-sort potentially rate-limited callout. The platform
             * layer determines which timers require this.
             */
            if (timer_resort_threshold(skew)) {
                _call_dequeue(call, group);
                _delayed_call_enqueue(call, group, call->tc_soft_deadline, flavor);
            }
        }
    }

    _arm_delayed_call_timer(NULL, group, flavor);

    enable_ints_and_unlock(s);
}
void
thread_call_delayed_timer_rescan_all(void)
{
    for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
        thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_ABSOLUTE);
        thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_CONTINUOUS);
    }
}
/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
    timer_call_param_t              p0,
    __unused timer_call_param_t     p1)
{
    thread_call_group_t group = (thread_call_group_t)p0;
    uint64_t now;
    kern_return_t res;
    boolean_t terminated = FALSE;

    thread_call_lock_spin();

    assert((group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE);

    now = mach_absolute_time();

    if (group->idle_count > 0) {
        if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
            terminated = TRUE;
            group->idle_count--;
            res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
                THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES);
            if (res != KERN_SUCCESS) {
                panic("Unable to wake up idle thread for termination?");
            }
        }
    }

    group->flags &= ~TCG_DEALLOC_ACTIVE;

    /*
     * If we still have an excess of threads, schedule another
     * invocation of this function.
     */
    if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
        /*
         * If we killed someone just now, push out the
         * next deadline.
         */
        if (terminated) {
            group->idle_timestamp = now;
        }

        thread_call_start_deallocate_timer(group);
    }

    thread_call_unlock();
}
/*
 * Wait for the invocation of the thread call to complete
 * We know there's only one in flight because of the 'once' flag.
 *
 * If a subsequent invocation comes in before we wake up, that's OK
 *
 * TODO: Here is where we will add priority inheritance to the thread executing
 *       the thread call in case it's lower priority than the current thread
 *       <rdar://problem/30321792> Priority inheritance for thread_call_wait_once
 *
 * Takes the thread call lock locked, returns unlocked
 *       This lets us avoid a spurious take/drop after waking up from thread_block
 */
static boolean_t
thread_call_wait_once_locked(thread_call_t call, spl_t s)
{
    assert(call->tc_flags & THREAD_CALL_ALLOC);
    assert(call->tc_flags & THREAD_CALL_ONCE);

    if ((call->tc_flags & THREAD_CALL_RUNNING) == 0) {
        enable_ints_and_unlock(s);
        return FALSE;
    }

    /* call is running, so we have to wait for it */
    call->tc_flags |= THREAD_CALL_WAIT;

    wait_result_t res = assert_wait(call, THREAD_UNINT);
    if (res != THREAD_WAITING) {
        panic("Unable to assert wait: %d", res);
    }

    enable_ints_and_unlock(s);

    res = thread_block(THREAD_CONTINUE_NULL);
    if (res != THREAD_AWAKENED) {
        panic("Awoken with %d?", res);
    }

    /* returns unlocked */
    return TRUE;
}
/*
 * Wait for an in-flight invocation to complete
 * Does NOT try to cancel, so the client doesn't need to hold their
 * lock while calling this function.
 *
 * Returns whether or not it had to wait.
 *
 * Only works for THREAD_CALL_ONCE calls.
 */
boolean_t
thread_call_wait_once(thread_call_t call)
{
    if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
        panic("thread_call_wait_once: can't wait on thread call whose storage I don't own");
    }

    if ((call->tc_flags & THREAD_CALL_ONCE) == 0) {
        panic("thread_call_wait_once: can't wait_once on a non-once call");
    }

    if (!ml_get_interrupts_enabled()) {
        panic("unsafe thread_call_wait_once");
    }

    if (current_thread()->thc_state.thc_call == call) {
        panic("thread_call_wait_once: deadlock waiting on self from inside call: %p to function %p",
            call, call->tc_call.func);
    }

    spl_t s = disable_ints_and_lock();

    boolean_t waited = thread_call_wait_once_locked(call, s);
    /* thread call lock unlocked */

    return waited;
}
1907 * to finish. Can only be invoked on thread calls whose storage we manage.
1908 * Just waits for the finish count to catch up to the submit count we find
1909 * at the beginning of our wait.
1911 * Called with thread_call_lock held. Returns with lock released.
1914 thread_call_wait_locked(thread_call_t call
, spl_t s
)
1916 uint64_t submit_count
;
1919 assert(call
->tc_flags
& THREAD_CALL_ALLOC
);
1921 submit_count
= call
->tc_submit_count
;
1923 while (call
->tc_finish_count
< submit_count
) {
1924 call
->tc_flags
|= THREAD_CALL_WAIT
;
1926 res
= assert_wait(call
, THREAD_UNINT
);
1927 if (res
!= THREAD_WAITING
) {
1928 panic("Unable to assert wait: %d", res
);
1931 enable_ints_and_unlock(s
);
1933 res
= thread_block(THREAD_CONTINUE_NULL
);
1934 if (res
!= THREAD_AWAKENED
) {
1935 panic("Awoken with %d?", res
);
1938 s
= disable_ints_and_lock();
1941 enable_ints_and_unlock(s
);
/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
    boolean_t active;

    spl_t s = disable_ints_and_lock();
    active = (call->tc_submit_count > call->tc_finish_count);
    enable_ints_and_unlock(s);

    return active;
}
/*
 * adjust_cont_time_thread_calls
 * on wake, reenqueue delayed call timer for continuous time thread call groups
 */
void
adjust_cont_time_thread_calls(void)
{
    spl_t s = disable_ints_and_lock();

    for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
        thread_call_group_t group = &thread_call_groups[i];

        /* only the continuous timers need to be re-armed */

        _arm_delayed_call_timer(NULL, group, TCF_CONTINUOUS);
    }

    enable_ints_and_unlock(s);
}