/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>
#include <kern/ledger.h>
#include <kern/policy_internal.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>
#include <kern/timer_queue.h>

#include <sys/kdebug.h>
#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif
#include <machine/machine_routines.h>

static zone_t thread_call_zone;
static struct waitq daemon_waitq;

typedef enum {
    TCF_ABSOLUTE   = 0,
    TCF_CONTINUOUS = 1,
    TCF_COUNT      = 2,
} thread_call_flavor_t;

__options_decl(thread_call_group_flags_t, uint32_t, {
    TCG_NONE           = 0x0,
    TCG_PARALLEL       = 0x1,
    TCG_DEALLOC_ACTIVE = 0x2,
});

static struct thread_call_group {
    const char *tcg_name;

    queue_head_t pending_queue;
    uint32_t     pending_count;

    queue_head_t      delayed_queues[TCF_COUNT];
    timer_call_data_t delayed_timers[TCF_COUNT];

    timer_call_data_t dealloc_timer;

    struct waitq idle_waitq;
    uint32_t     idle_count, active_count, blocked_count;

    uint32_t tcg_thread_pri;
    uint32_t target_thread_count;
    uint64_t idle_timestamp;

    thread_call_group_flags_t flags;
} thread_call_groups[THREAD_CALL_INDEX_MAX] = {
    [THREAD_CALL_INDEX_HIGH] = {
        .tcg_name            = "high",
        .tcg_thread_pri      = BASEPRI_PREEMPT_HIGH,
        .target_thread_count = 4,
        .flags               = TCG_NONE,
    },
    [THREAD_CALL_INDEX_KERNEL] = {
        .tcg_name            = "kernel",
        .tcg_thread_pri      = BASEPRI_KERNEL,
        .target_thread_count = 1,
        .flags               = TCG_PARALLEL,
    },
    [THREAD_CALL_INDEX_USER] = {
        .tcg_name            = "user",
        .tcg_thread_pri      = BASEPRI_DEFAULT,
        .target_thread_count = 1,
        .flags               = TCG_PARALLEL,
    },
    [THREAD_CALL_INDEX_LOW] = {
        .tcg_name            = "low",
        .tcg_thread_pri      = MAXPRI_THROTTLE,
        .target_thread_count = 1,
        .flags               = TCG_PARALLEL,
    },
    [THREAD_CALL_INDEX_KERNEL_HIGH] = {
        .tcg_name            = "kernel-high",
        .tcg_thread_pri      = BASEPRI_PREEMPT,
        .target_thread_count = 2,
        .flags               = TCG_NONE,
    },
    [THREAD_CALL_INDEX_QOS_UI] = {
        .tcg_name            = "qos-ui",
        .tcg_thread_pri      = BASEPRI_FOREGROUND,
        .target_thread_count = 1,
        .flags               = TCG_NONE,
    },
    [THREAD_CALL_INDEX_QOS_IN] = {
        .tcg_name            = "qos-in",
        .tcg_thread_pri      = BASEPRI_USER_INITIATED,
        .target_thread_count = 1,
        .flags               = TCG_NONE,
    },
    [THREAD_CALL_INDEX_QOS_UT] = {
        .tcg_name            = "qos-ut",
        .tcg_thread_pri      = BASEPRI_UTILITY,
        .target_thread_count = 1,
        .flags               = TCG_NONE,
    },
};

typedef struct thread_call_group *thread_call_group_t;

#define INTERNAL_CALL_COUNT             768
#define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * NSEC_PER_MSEC) /* 5 ms */
#define THREAD_CALL_ADD_RATIO           4
#define THREAD_CALL_MACH_FACTOR_CAP     3
#define THREAD_CALL_GROUP_MAX_THREADS   500

static boolean_t          thread_call_daemon_awake;
static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t       thread_call_internal_queue;
int                       thread_call_internal_queue_count = 0;
static uint64_t           thread_call_dealloc_interval_abs;

static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0);
static __inline__ void _internal_call_release(thread_call_t call);
static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group,
    uint64_t deadline, thread_call_flavor_t flavor);
static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void thread_call_wake(thread_call_group_t group);
static void thread_call_daemon(void *arg);
static void thread_call_thread(thread_call_group_t group, wait_result_t wres);
static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void thread_call_group_setup(thread_call_group_t group);
static void sched_call_thread(int type, thread_t thread);
static void thread_call_start_deallocate_timer(thread_call_group_t group);
static void thread_call_wait_locked(thread_call_t call, spl_t s);
static boolean_t thread_call_wait_once_locked(thread_call_t call, spl_t s);

static boolean_t thread_call_enter_delayed_internal(thread_call_t call,
    thread_call_func_t alt_func, thread_call_param_t alt_param0,
    thread_call_param_t param1, uint64_t deadline,
    uint64_t leeway, unsigned int flags);

/* non-static so dtrace can find it rdar://problem/31156135&31379348 */
extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);

lck_grp_t thread_call_lck_grp;
lck_mtx_t thread_call_lock_data;

#define thread_call_lock_spin() \
    lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock() \
    lck_mtx_unlock_always(&thread_call_lock_data)

#define tc_deadline tc_call.deadline

extern boolean_t mach_timer_coalescing_enabled;

static inline spl_t
disable_ints_and_lock(void)
{
    spl_t s = splsched();
    thread_call_lock_spin();

    return s;
}

static inline void
enable_ints_and_unlock(spl_t s)
{
    thread_call_unlock();
    splx(s);
}

static inline boolean_t
group_isparallel(thread_call_group_t group)
{
    return (group->flags & TCG_PARALLEL) != 0;
}

static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
    if ((group->active_count + group->blocked_count + group->idle_count) >= THREAD_CALL_GROUP_MAX_THREADS) {
        panic("thread_call group '%s' reached max thread cap (%d): active: %d, blocked: %d, idle: %d",
            group->tcg_name, THREAD_CALL_GROUP_MAX_THREADS,
            group->active_count, group->blocked_count, group->idle_count);
    }

    if (group_isparallel(group) == FALSE) {
        if (group->pending_count > 0 && group->active_count == 0) {
            return TRUE;
        }

        return FALSE;
    }

    if (group->pending_count > 0) {
        if (group->idle_count > 0) {
            return FALSE;
        }

        uint32_t thread_count = group->active_count;

        /*
         * Add a thread if either there are no threads,
         * the group has fewer than its target number of
         * threads, or the amount of work is large relative
         * to the number of threads. In the last case, pay attention
         * to the total load on the system, and back off if
         * it's high.
         */
        if ((thread_count == 0) ||
            (thread_count < group->target_thread_count) ||
            ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
            (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
            return TRUE;
        }
    }

    return FALSE;
}

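/*
 * Worked example of the policy above (the numbers are illustrative): in a
 * parallel group with target_thread_count = 1 and three active threads, a
 * fourth thread is added only once pending_count exceeds
 * THREAD_CALL_ADD_RATIO * 3 = 12 items, and then only while
 * sched_mach_factor is still below THREAD_CALL_MACH_FACTOR_CAP (3).
 */
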
/* Lock held */
static inline thread_call_group_t
thread_call_get_group(thread_call_t call)
{
    thread_call_index_t index = call->tc_index;

    assert(index >= 0 && index < THREAD_CALL_INDEX_MAX);

    return &thread_call_groups[index];
}

/* Lock held */
static inline thread_call_flavor_t
thread_call_get_flavor(thread_call_t call)
{
    return (call->tc_flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE;
}

static void
thread_call_group_setup(thread_call_group_t group)
{
    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queues[TCF_ABSOLUTE]);
    queue_init(&group->delayed_queues[TCF_CONTINUOUS]);

    /* TODO: Consolidate to one hard timer for each group */
    timer_call_setup(&group->delayed_timers[TCF_ABSOLUTE], thread_call_delayed_timer, group);
    timer_call_setup(&group->delayed_timers[TCF_CONTINUOUS], thread_call_delayed_timer, group);
    timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

    /* Reverse the wait order so we re-use the most recently parked thread from the pool */
    waitq_init(&group->idle_waitq, SYNC_POLICY_REVERSED | SYNC_POLICY_DISABLE_IRQ);
}

/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
    thread_call_group_t group)
{
    thread_t thread;
    kern_return_t result;

    int thread_pri = group->tcg_thread_pri;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_thread,
        group, thread_pri, &thread);
    if (result != KERN_SUCCESS) {
        return result;
    }

    if (thread_pri <= BASEPRI_KERNEL) {
        /*
         * THREAD_CALL_PRIORITY_KERNEL and lower don't get to run to completion
         * in kernel if there are higher priority threads available.
         */
        thread_set_eager_preempt(thread);
    }

    char name[MAXTHREADNAMESIZE] = "";

    int group_thread_count = group->idle_count + group->active_count + group->blocked_count;

    snprintf(name, sizeof(name), "thread call %s #%d", group->tcg_name, group_thread_count);
    thread_set_thread_name(thread, name);

    thread_deallocate(thread);
    return KERN_SUCCESS;
}

/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
    int tc_size = sizeof(thread_call_data_t);
    thread_call_zone = zinit(tc_size, 4096 * tc_size, 16 * tc_size, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_grp_init(&thread_call_lck_grp, "thread_call", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, LCK_ATTR_NULL);

    nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
    waitq_init(&daemon_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_FIFO);

    for (uint32_t i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
        thread_call_group_setup(&thread_call_groups[i]);
    }

    spl_t s = disable_ints_and_lock();

    queue_init(&thread_call_internal_queue);
    for (
        thread_call_t call = internal_call_storage;
        call < &internal_call_storage[INTERNAL_CALL_COUNT];
        call++) {
        enqueue_tail(&thread_call_internal_queue, &call->tc_call.q_link);
        thread_call_internal_queue_count++;
    }

    thread_call_daemon_awake = TRUE;

    enable_ints_and_unlock(s);

    thread_t thread;
    kern_return_t result;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon,
        NULL, BASEPRI_PREEMPT_HIGH + 1, &thread);
    if (result != KERN_SUCCESS) {
        panic("thread_call_initialize");
    }

    thread_deallocate(thread);
}

void
thread_call_setup(
    thread_call_t call,
    thread_call_func_t func,
    thread_call_param_t param0)
{
    bzero(call, sizeof(*call));
    call_entry_setup((call_entry_t)call, func, param0);

    /* Thread calls default to the HIGH group unless otherwise specified */
    call->tc_index = THREAD_CALL_INDEX_HIGH;

    /* THREAD_CALL_ALLOC not set, memory owned by caller */
}

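/*
 * Usage sketch for caller-owned storage (illustrative; my_call_storage,
 * my_func, and my_param0 are hypothetical names). Because THREAD_CALL_ALLOC
 * stays clear, the memory remains owned by the caller and is never returned
 * to the thread_call zone:
 *
 *     static thread_call_data_t my_call_storage;
 *
 *     thread_call_setup(&my_call_storage, my_func, my_param0);
 *     thread_call_enter(&my_call_storage);
 */
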
/*
 * _internal_call_allocate:
 *
 * Allocate an internal callout entry.
 *
 * Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(thread_call_func_t func, thread_call_param_t param0)
{
    thread_call_t call;

    if (queue_empty(&thread_call_internal_queue)) {
        panic("_internal_call_allocate");
    }

    call = qe_dequeue_head(&thread_call_internal_queue, struct thread_call, tc_call.q_link);

    thread_call_internal_queue_count--;

    thread_call_setup(call, func, param0);
    call->tc_refs = 0;
    call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */

    return call;
}

/*
 * _internal_call_release:
 *
 * Release an internal callout entry which
 * is no longer pending (or delayed). This is
 * safe to call on a non-internal entry, in which
 * case nothing happens.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(thread_call_t call)
{
    if (call >= internal_call_storage &&
        call < &internal_call_storage[INTERNAL_CALL_COUNT]) {
        assert((call->tc_flags & THREAD_CALL_ALLOC) == 0);
        enqueue_head(&thread_call_internal_queue, &call->tc_call.q_link);
        thread_call_internal_queue_count++;
    }
}

/*
 * _pending_call_enqueue:
 *
 * Place an entry at the end of the
 * pending queue, to be executed soon.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(thread_call_t call,
    thread_call_group_t group)
{
    if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING)
        == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) {
        call->tc_deadline = 0;

        uint32_t flags = call->tc_flags;
        call->tc_flags |= THREAD_CALL_RESCHEDULE;

        if ((flags & THREAD_CALL_RESCHEDULE) != 0) {
            return TRUE;
        } else {
            return FALSE;
        }
    }

    queue_head_t *old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

    if (old_queue == NULL) {
        call->tc_submit_count++;
    } else if (old_queue != &group->pending_queue &&
        old_queue != &group->delayed_queues[TCF_ABSOLUTE] &&
        old_queue != &group->delayed_queues[TCF_CONTINUOUS]) {
        panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue);
    }

    group->pending_count++;

    thread_call_wake(group);

    return old_queue != NULL;
}

/*
 * _delayed_call_enqueue:
 *
 * Place an entry on the delayed queue,
 * after existing entries with an earlier
 * (or identical) deadline.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_delayed_call_enqueue(
    thread_call_t call,
    thread_call_group_t group,
    uint64_t deadline,
    thread_call_flavor_t flavor)
{
    if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING)
        == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) {
        call->tc_deadline = deadline;

        uint32_t flags = call->tc_flags;
        call->tc_flags |= THREAD_CALL_RESCHEDULE;

        if ((flags & THREAD_CALL_RESCHEDULE) != 0) {
            return TRUE;
        } else {
            return FALSE;
        }
    }

    queue_head_t *old_queue = call_entry_enqueue_deadline(CE(call),
        &group->delayed_queues[flavor],
        deadline);

    if (old_queue == &group->pending_queue) {
        group->pending_count--;
    } else if (old_queue == NULL) {
        call->tc_submit_count++;
    } else if (old_queue == &group->delayed_queues[TCF_ABSOLUTE] ||
        old_queue == &group->delayed_queues[TCF_CONTINUOUS]) {
        /* TODO: if it's in the other delayed queue, that might not be OK */
        // we did nothing, and that's fine
    } else {
        panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue);
    }

    return old_queue != NULL;
}

/*
 * _call_dequeue:
 *
 * Remove an entry from a queue.
 *
 * Returns TRUE if the entry was on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t call,
    thread_call_group_t group)
{
    queue_head_t *old_queue;

    old_queue = call_entry_dequeue(CE(call));

    if (old_queue != NULL) {
        assert(old_queue == &group->pending_queue ||
            old_queue == &group->delayed_queues[TCF_ABSOLUTE] ||
            old_queue == &group->delayed_queues[TCF_CONTINUOUS]);

        call->tc_finish_count++;
        if (old_queue == &group->pending_queue) {
            group->pending_count--;
        }
    }

    return old_queue != NULL;
}

/*
 * _arm_delayed_call_timer:
 *
 * Check if the timer needs to be armed for this flavor,
 * and if so, arm it.
 *
 * If call is non-NULL, only re-arm the timer if the specified call
 * is the first in the queue.
 *
 * Returns true if the timer was armed/re-armed, false if it was left unset
 * Caller should cancel the timer if need be.
 *
 * Called with thread_call_lock held.
 */
static bool
_arm_delayed_call_timer(thread_call_t new_call,
    thread_call_group_t group,
    thread_call_flavor_t flavor)
{
    /* No calls implies no timer needed */
    if (queue_empty(&group->delayed_queues[flavor])) {
        return false;
    }

    thread_call_t call = qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link);

    /* We only need to change the hard timer if this new call is the first in the list */
    if (new_call != NULL && new_call != call) {
        return false;
    }

    assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline)));

    uint64_t fire_at = call->tc_soft_deadline;

    if (flavor == TCF_CONTINUOUS) {
        assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS);
        fire_at = continuoustime_to_absolutetime(fire_at);
    } else {
        assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0);
    }

    /*
     * Note: This picks the soonest-deadline call's leeway as the hard timer's leeway,
     * which does not take into account later-deadline timers with a larger leeway.
     * This is a valid coalescing behavior, but masks a possible window to
     * fire a timer instead of going idle.
     */
    uint64_t leeway = call->tc_call.deadline - call->tc_soft_deadline;

    timer_call_enter_with_leeway(&group->delayed_timers[flavor], (timer_call_param_t)flavor,
        fire_at, leeway,
        TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LEEWAY,
        ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED));

    return true;
}

/*
 * _cancel_func_from_queue:
 *
 * Remove the first (or all) matching
 * entries from the specified queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_cancel_func_from_queue(thread_call_func_t func,
    thread_call_param_t param0,
    thread_call_group_t group,
    boolean_t remove_all,
    queue_head_t *queue)
{
    boolean_t call_removed = FALSE;
    thread_call_t call;

    qe_foreach_element_safe(call, queue, tc_call.q_link) {
        if (call->tc_call.func != func ||
            call->tc_call.param0 != param0) {
            continue;
        }

        _call_dequeue(call, group);

        _internal_call_release(call);

        call_removed = TRUE;
        if (!remove_all) {
            break;
        }
    }

    return call_removed;
}

/*
 * thread_call_func_delayed:
 *
 * Enqueue a function callout to
 * occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t func,
    thread_call_param_t param,
    uint64_t deadline)
{
    (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0);
}

/*
 * thread_call_func_delayed_with_leeway:
 *
 * Same as thread_call_func_delayed(), but with
 * leeway/flags threaded through.
 */

void
thread_call_func_delayed_with_leeway(
    thread_call_func_t func,
    thread_call_param_t param,
    uint64_t deadline,
    uint64_t leeway,
    uint32_t flags)
{
    (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags);
}

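/*
 * Usage sketch (illustrative; my_func and my_param are hypothetical names):
 * schedule a one-off function callout half a second out, backed by internal
 * storage so the caller allocates nothing.
 *
 *     uint64_t deadline;
 *
 *     clock_interval_to_deadline(500, NSEC_PER_MSEC, &deadline);
 *     thread_call_func_delayed(my_func, my_param, deadline);
 *
 * If the callout later needs to be removed, thread_call_func_cancel()
 * below must be given the same { function, argument } pair to match.
 */
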
/*
 * thread_call_func_cancel:
 *
 * Dequeue a function callout.
 *
 * Removes one (or all) { function, argument }
 * instance(s) from either (or both)
 * the pending and the delayed queue,
 * in that order.
 *
 * Returns TRUE if any calls were cancelled.
 *
 * This iterates all of the pending or delayed thread calls in the group,
 * which is really inefficient. Switch to an allocated thread call instead.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t func,
    thread_call_param_t param,
    boolean_t cancel_all)
{
    boolean_t result;

    assert(func != NULL);

    spl_t s = disable_ints_and_lock();

    /* Function-only thread calls are only kept in the default HIGH group */
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_INDEX_HIGH];

    if (cancel_all) {
        /* exhaustively search every queue, and return true if any search found something */
        result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) |
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) |
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]);
    } else {
        /* early-exit as soon as we find something, don't search other queues */
        result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) ||
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) ||
            _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]);
    }

    enable_ints_and_unlock(s);

    return result;
}

/*
 * Allocate a thread call with a given priority. Importances other than
 * THREAD_CALL_PRIORITY_HIGH or THREAD_CALL_PRIORITY_KERNEL_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted by higher-priority
 * threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
    thread_call_func_t func,
    thread_call_param_t param0,
    thread_call_priority_t pri)
{
    return thread_call_allocate_with_options(func, param0, pri, 0);
}

thread_call_t
thread_call_allocate_with_options(
    thread_call_func_t func,
    thread_call_param_t param0,
    thread_call_priority_t pri,
    thread_call_options_t options)
{
    thread_call_t call = thread_call_allocate(func, param0);

    switch (pri) {
    case THREAD_CALL_PRIORITY_HIGH:
        call->tc_index = THREAD_CALL_INDEX_HIGH;
        break;
    case THREAD_CALL_PRIORITY_KERNEL:
        call->tc_index = THREAD_CALL_INDEX_KERNEL;
        break;
    case THREAD_CALL_PRIORITY_USER:
        call->tc_index = THREAD_CALL_INDEX_USER;
        break;
    case THREAD_CALL_PRIORITY_LOW:
        call->tc_index = THREAD_CALL_INDEX_LOW;
        break;
    case THREAD_CALL_PRIORITY_KERNEL_HIGH:
        call->tc_index = THREAD_CALL_INDEX_KERNEL_HIGH;
        break;
    default:
        panic("Invalid thread call pri value: %d", pri);
        break;
    }

    if (options & THREAD_CALL_OPTIONS_ONCE) {
        call->tc_flags |= THREAD_CALL_ONCE;
    }
    if (options & THREAD_CALL_OPTIONS_SIGNAL) {
        call->tc_flags |= THREAD_CALL_SIGNAL | THREAD_CALL_ONCE;
    }

    return call;
}

thread_call_t
thread_call_allocate_with_qos(thread_call_func_t func,
    thread_call_param_t param0,
    int qos_tier,
    thread_call_options_t options)
{
    thread_call_t call = thread_call_allocate(func, param0);

    switch (qos_tier) {
    case THREAD_QOS_UNSPECIFIED:
        call->tc_index = THREAD_CALL_INDEX_HIGH;
        break;
    case THREAD_QOS_LEGACY:
        call->tc_index = THREAD_CALL_INDEX_USER;
        break;
    case THREAD_QOS_MAINTENANCE:
    case THREAD_QOS_BACKGROUND:
        call->tc_index = THREAD_CALL_INDEX_LOW;
        break;
    case THREAD_QOS_UTILITY:
        call->tc_index = THREAD_CALL_INDEX_QOS_UT;
        break;
    case THREAD_QOS_USER_INITIATED:
        call->tc_index = THREAD_CALL_INDEX_QOS_IN;
        break;
    case THREAD_QOS_USER_INTERACTIVE:
        call->tc_index = THREAD_CALL_INDEX_QOS_UI;
        break;
    default:
        panic("Invalid thread call qos value: %d", qos_tier);
        break;
    }

    if (options & THREAD_CALL_OPTIONS_ONCE) {
        call->tc_flags |= THREAD_CALL_ONCE;
    }

    /* does not support THREAD_CALL_OPTIONS_SIGNAL */

    return call;
}

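/*
 * Usage sketch (illustrative; my_func and my_param0 are hypothetical names):
 * allocate a one-shot callout that runs in the utility-QoS group.
 *
 *     thread_call_t call = thread_call_allocate_with_qos(my_func, my_param0,
 *         THREAD_QOS_UTILITY, THREAD_CALL_OPTIONS_ONCE);
 *
 * The result behaves like any THREAD_CALL_ALLOC call: submit it with the
 * thread_call_enter*() family and release it with thread_call_free().
 */
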
/*
 * thread_call_allocate:
 *
 * Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t func,
    thread_call_param_t param0)
{
    thread_call_t call = zalloc(thread_call_zone);

    thread_call_setup(call, func, param0);
    call->tc_refs = 1;
    call->tc_flags = THREAD_CALL_ALLOC;

    return call;
}

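/*
 * Lifecycle sketch for a zone-allocated callout (illustrative; my_func and
 * my_param0 are hypothetical names):
 *
 *     thread_call_t call = thread_call_allocate(my_func, my_param0);
 *
 *     thread_call_enter(call);             // run "soon"
 *     ...
 *     (void)thread_call_cancel(call);      // dequeue if still pending
 *     if (!thread_call_free(call)) {       // FALSE if still armed
 *         ...cancel again and retry...
 *     }
 */
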
/*
 * thread_call_free:
 *
 * Release a callout. If the callout is currently
 * executing, it will be freed when all invocations
 * finish.
 *
 * If the callout is currently armed to fire again, then
 * freeing is not allowed and returns FALSE. The
 * client must have canceled the pending invocation before freeing.
 */
boolean_t
thread_call_free(
    thread_call_t call)
{
    spl_t s = disable_ints_and_lock();

    if (call->tc_call.queue != NULL ||
        ((call->tc_flags & THREAD_CALL_RESCHEDULE) != 0)) {
        thread_call_unlock();
        splx(s);

        return FALSE;
    }

    int32_t refs = --call->tc_refs;
    if (refs < 0) {
        panic("Refcount negative: %d\n", refs);
    }

    if ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING)
        == ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) & call->tc_flags)) {
        thread_call_wait_once_locked(call, s);
        /* thread call lock has been unlocked */
    } else {
        enable_ints_and_unlock(s);
    }

    if (refs == 0) {
        assert(call->tc_finish_count == call->tc_submit_count);
        zfree(thread_call_zone, call);
    }

    return TRUE;
}

/*
 * thread_call_enter:
 *
 * Enqueue a callout entry to occur "soon".
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t call)
{
    return thread_call_enter1(call, 0);
}

boolean_t
thread_call_enter1(
    thread_call_t call,
    thread_call_param_t param1)
{
    boolean_t result = TRUE;
    thread_call_group_t group;

    assert(call->tc_call.func != NULL);

    assert((call->tc_flags & THREAD_CALL_SIGNAL) == 0);

    group = thread_call_get_group(call);

    spl_t s = disable_ints_and_lock();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = param1;

    enable_ints_and_unlock(s);

    return result;
}

/*
 * thread_call_enter_delayed:
 *
 * Enqueue a callout entry to occur
 * at the stated time.
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t call,
    uint64_t deadline)
{
    assert(call != NULL);
    return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0);
}

boolean_t
thread_call_enter1_delayed(
    thread_call_t call,
    thread_call_param_t param1,
    uint64_t deadline)
{
    assert(call != NULL);
    return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0);
}

boolean_t
thread_call_enter_delayed_with_leeway(
    thread_call_t call,
    thread_call_param_t param1,
    uint64_t deadline,
    uint64_t leeway,
    unsigned int flags)
{
    assert(call != NULL);
    return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags);
}

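/*
 * Usage sketch (illustrative; "call" is assumed to be an allocated thread
 * call, and the interval and leeway values are hypothetical choices):
 * request a callout one second out with 100 ms of coalescing leeway.
 *
 *     uint64_t deadline, leeway;
 *
 *     clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *     nanoseconds_to_absolutetime(100 * NSEC_PER_MSEC, &leeway);
 *     thread_call_enter_delayed_with_leeway(call, NULL, deadline, leeway,
 *         THREAD_CALL_DELAY_LEEWAY);
 *
 * Without THREAD_CALL_DELAY_LEEWAY the leeway argument is not considered;
 * even with it, the larger of the caller's leeway and the system-computed
 * slop is applied (see thread_call_enter_delayed_internal below).
 */
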
/*
 * thread_call_enter_delayed_internal:
 * enqueue a callout entry to occur at the stated time
 *
 * Returns True if the call was already on a queue
 * params:
 * call     - structure encapsulating state of the callout
 * alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters
 * deadline - time deadline in nanoseconds
 * leeway   - timer slack represented as delta of deadline.
 * flags    - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing.
 *            THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing.
 *            THREAD_CALL_CONTINUOUS: thread call will be called according to mach_continuous_time rather
 *                                    than mach_absolute_time
 */
boolean_t
thread_call_enter_delayed_internal(
    thread_call_t call,
    thread_call_func_t alt_func,
    thread_call_param_t alt_param0,
    thread_call_param_t param1,
    uint64_t deadline,
    uint64_t leeway,
    unsigned int flags)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    uint64_t now, sdeadline, slop;
    uint32_t urgency;

    thread_call_flavor_t flavor = (flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE;

    /* direct mapping between thread_call, timer_call, and timeout_urgency values */
    urgency = (flags & TIMEOUT_URGENCY_MASK);

    spl_t s = disable_ints_and_lock();

    if (call == NULL) {
        /* allocate a structure out of internal storage, as a convenience for BSD callers */
        call = _internal_call_allocate(alt_func, alt_param0);
    }

    assert(call->tc_call.func != NULL);
    group = thread_call_get_group(call);

    /* TODO: assert that call is not enqueued before flipping the flag */
    if (flavor == TCF_CONTINUOUS) {
        now = mach_continuous_time();
        call->tc_flags |= THREAD_CALL_CONTINUOUS;
    } else {
        now = mach_absolute_time();
        call->tc_flags &= ~THREAD_CALL_CONTINUOUS;
    }

    call->tc_flags |= THREAD_CALL_DELAYED;

    call->tc_soft_deadline = sdeadline = deadline;

    boolean_t ratelimited = FALSE;
    slop = timer_call_slop(deadline, now, urgency, current_thread(), &ratelimited);

    if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop) {
        slop = leeway;
    }

    if (UINT64_MAX - deadline <= slop) {
        deadline = UINT64_MAX;
    } else {
        deadline += slop;
    }

    if (ratelimited) {
        call->tc_flags |= TIMER_CALL_RATELIMITED;
    } else {
        call->tc_flags &= ~TIMER_CALL_RATELIMITED;
    }

    call->tc_call.param1 = param1;

    call->tc_ttd = (sdeadline > now) ? (sdeadline - now) : 0;

    result = _delayed_call_enqueue(call, group, deadline, flavor);

    _arm_delayed_call_timer(call, group, flavor);

#if CONFIG_DTRACE
    DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func,
        uint64_t, (deadline - sdeadline), uint64_t, (call->tc_ttd >> 32),
        (unsigned) (call->tc_ttd & 0xFFFFFFFF), call);
#endif

    enable_ints_and_unlock(s);

    return result;
}

/*
 * Remove a callout entry from the queue
 * Called with thread_call_lock held
 */
static boolean_t
thread_call_cancel_locked(thread_call_t call)
{
    boolean_t canceled = (0 != (THREAD_CALL_RESCHEDULE & call->tc_flags));
    call->tc_flags &= ~THREAD_CALL_RESCHEDULE;

    if (canceled) {
        /* if reschedule was set, it must not have been queued */
        assert(call->tc_call.queue == NULL);
    } else {
        boolean_t do_cancel_callout = FALSE;

        thread_call_flavor_t flavor = thread_call_get_flavor(call);
        thread_call_group_t group = thread_call_get_group(call);

        if ((call->tc_call.deadline != 0) &&
            (call == qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link))) {
            assert(call->tc_call.queue == &group->delayed_queues[flavor]);
            do_cancel_callout = TRUE;
        }

        canceled = _call_dequeue(call, group);

        if (do_cancel_callout) {
            if (_arm_delayed_call_timer(NULL, group, flavor) == false) {
                timer_call_cancel(&group->delayed_timers[flavor]);
            }
        }
    }

#if CONFIG_DTRACE
    DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func,
        0, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF));
#endif

    return canceled;
}

/*
 * thread_call_cancel:
 *
 * Dequeue a callout entry.
 *
 * Returns TRUE if the call was
 * on a queue.
 */
boolean_t
thread_call_cancel(thread_call_t call)
{
    spl_t s = disable_ints_and_lock();

    boolean_t result = thread_call_cancel_locked(call);

    enable_ints_and_unlock(s);

    return result;
}

/*
 * Cancel a thread call. If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish. Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(thread_call_t call)
{
    if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
        panic("thread_call_cancel_wait: can't wait on thread call whose storage I don't own");
    }

    if (!ml_get_interrupts_enabled()) {
        panic("unsafe thread_call_cancel_wait");
    }

    if (current_thread()->thc_state.thc_call == call) {
        panic("thread_call_cancel_wait: deadlock waiting on self from inside call: %p to function %p",
            call, call->tc_call.func);
    }

    spl_t s = disable_ints_and_lock();

    boolean_t canceled = thread_call_cancel_locked(call);

    if ((call->tc_flags & THREAD_CALL_ONCE) == THREAD_CALL_ONCE) {
        /*
         * A cancel-wait on a 'once' call will both cancel
         * the pending call and wait for the in-flight call
         */

        thread_call_wait_once_locked(call, s);
        /* thread call lock unlocked */
    } else {
        /*
         * A cancel-wait on a normal call will only wait for the in-flight calls
         * if it did not cancel the pending call.
         *
         * TODO: This seems less than useful - shouldn't it do the wait as well?
         */

        if (canceled == FALSE) {
            thread_call_wait_locked(call, s);
            /* thread call lock unlocked */
        } else {
            enable_ints_and_unlock(s);
        }
    }

    return canceled;
}

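/*
 * Teardown sketch (illustrative; "call" is an allocated thread call and the
 * panic stands in for whatever recovery a caller prefers): stop submitting
 * new work, cancel-wait so no invocation is left in flight, then free.
 *
 *     (void)thread_call_cancel_wait(call);
 *     if (!thread_call_free(call)) {
 *         panic("thread call was re-armed during teardown");
 *     }
 */
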
1c79356b | 1212 | /* |
c910b4d9 | 1213 | * thread_call_wake: |
1c79356b | 1214 | * |
c910b4d9 A |
1215 | * Wake a call thread to service |
1216 | * pending call entries. May wake | |
1217 | * the daemon thread in order to | |
1218 | * create additional call threads. | |
1c79356b | 1219 | * |
c910b4d9 | 1220 | * Called with thread_call_lock held. |
316670eb A |
1221 | * |
1222 | * For high-priority group, only does wakeup/creation if there are no threads | |
1223 | * running. | |
1c79356b | 1224 | */ |
c910b4d9 A |
1225 | static __inline__ void |
1226 | thread_call_wake( | |
0a7de745 | 1227 | thread_call_group_t group) |
1c79356b | 1228 | { |
0a7de745 | 1229 | /* |
316670eb A |
1230 | * New behavior: use threads if you've got 'em. |
1231 | * Traditional behavior: wake only if no threads running. | |
1232 | */ | |
1233 | if (group_isparallel(group) || group->active_count == 0) { | |
3e170ce0 | 1234 | if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, |
0a7de745 | 1235 | THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) { |
316670eb A |
1236 | group->idle_count--; group->active_count++; |
1237 | ||
cc8bc92a A |
1238 | if (group->idle_count == 0 && (group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE) { |
1239 | if (timer_call_cancel(&group->dealloc_timer) == TRUE) { | |
1240 | group->flags &= ~TCG_DEALLOC_ACTIVE; | |
1241 | } | |
316670eb A |
1242 | } |
1243 | } else { | |
1244 | if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) { | |
1245 | thread_call_daemon_awake = TRUE; | |
3e170ce0 | 1246 | waitq_wakeup64_one(&daemon_waitq, NO_EVENT64, |
0a7de745 | 1247 | THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); |
316670eb A |
1248 | } |
1249 | } | |
1c79356b A |
1250 | } |
1251 | } | |
1252 | ||
9bccf70c | 1253 | /* |
2d21ac55 | 1254 | * sched_call_thread: |
9bccf70c | 1255 | * |
5ba3f43e | 1256 | * Call out invoked by the scheduler. |
9bccf70c | 1257 | */ |
2d21ac55 A |
1258 | static void |
1259 | sched_call_thread( | |
0a7de745 A |
1260 | int type, |
1261 | thread_t thread) | |
9bccf70c | 1262 | { |
0a7de745 | 1263 | thread_call_group_t group; |
316670eb | 1264 | |
5ba3f43e A |
1265 | group = thread->thc_state.thc_group; |
1266 | assert((group - &thread_call_groups[0]) < THREAD_CALL_INDEX_MAX); | |
c910b4d9 | 1267 | |
6d2010ae | 1268 | thread_call_lock_spin(); |
9bccf70c | 1269 | |
2d21ac55 | 1270 | switch (type) { |
0a7de745 A |
1271 | case SCHED_CALL_BLOCK: |
1272 | assert(group->active_count); | |
1273 | --group->active_count; | |
1274 | group->blocked_count++; | |
1275 | if (group->pending_count > 0) { | |
1276 | thread_call_wake(group); | |
1277 | } | |
1278 | break; | |
9bccf70c | 1279 | |
0a7de745 A |
1280 | case SCHED_CALL_UNBLOCK: |
1281 | assert(group->blocked_count); | |
1282 | --group->blocked_count; | |
1283 | group->active_count++; | |
1284 | break; | |
2d21ac55 | 1285 | } |
9bccf70c | 1286 | |
6d2010ae | 1287 | thread_call_unlock(); |
9bccf70c | 1288 | } |
1c79356b | 1289 | |
0a7de745 A |
1290 | /* |
1291 | * Interrupts disabled, lock held; returns the same way. | |
316670eb A |
1292 | * Only called on thread calls whose storage we own. Wakes up |
1293 | * anyone who might be waiting on this work item and frees it | |
1294 | * if the client has so requested. | |
1295 | */ | |
5ba3f43e A |
1296 | static boolean_t |
1297 | thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) | |
316670eb | 1298 | { |
5ba3f43e A |
1299 | uint64_t time; |
1300 | uint32_t flags; | |
1301 | boolean_t signal; | |
5ba3f43e | 1302 | boolean_t repend = FALSE; |
316670eb A |
1303 | |
1304 | call->tc_finish_count++; | |
5ba3f43e A |
1305 | flags = call->tc_flags; |
1306 | signal = ((THREAD_CALL_SIGNAL & flags) != 0); | |
1307 | ||
0a7de745 | 1308 | if (!signal) { |
5ba3f43e | 1309 | /* The thread call thread owns a ref until the call is finished */ |
0a7de745 | 1310 | if (call->tc_refs <= 0) { |
5ba3f43e | 1311 | panic("thread_call_finish: detected over-released thread call: %p", call); |
0a7de745 | 1312 | } |
5ba3f43e | 1313 | call->tc_refs--; |
0a7de745 | 1314 | } |
5ba3f43e A |
1315 | |
1316 | call->tc_flags &= ~(THREAD_CALL_RESCHEDULE | THREAD_CALL_RUNNING | THREAD_CALL_WAIT); | |
1317 | ||
1318 | if ((call->tc_refs != 0) && ((flags & THREAD_CALL_RESCHEDULE) != 0)) { | |
1319 | assert(flags & THREAD_CALL_ONCE); | |
1320 | thread_call_flavor_t flavor = thread_call_get_flavor(call); | |
1321 | ||
1322 | if (THREAD_CALL_DELAYED & flags) { | |
1323 | time = mach_absolute_time(); | |
1324 | if (flavor == TCF_CONTINUOUS) { | |
1325 | time = absolutetime_to_continuoustime(time); | |
1326 | } | |
1327 | if (call->tc_soft_deadline <= time) { | |
1328 | call->tc_flags &= ~(THREAD_CALL_DELAYED | TIMER_CALL_RATELIMITED); | |
1329 | call->tc_deadline = 0; | |
1330 | } | |
1331 | } | |
1332 | if (call->tc_deadline) { | |
1333 | _delayed_call_enqueue(call, group, call->tc_deadline, flavor); | |
1334 | if (!signal) { | |
1335 | _arm_delayed_call_timer(call, group, flavor); | |
1336 | } | |
1337 | } else if (signal) { | |
1338 | call->tc_submit_count++; | |
1339 | repend = TRUE; | |
1340 | } else { | |
1341 | _pending_call_enqueue(call, group); | |
1342 | } | |
1343 | } | |
316670eb | 1344 | |
5ba3f43e | 1345 | if (!signal && (call->tc_refs == 0)) { |
d9a64523 | 1346 | if ((flags & THREAD_CALL_WAIT) != 0) { |
316670eb A |
1347 | panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func); |
1348 | } | |
1349 | ||
5ba3f43e A |
1350 | assert(call->tc_finish_count == call->tc_submit_count); |
1351 | ||
fe8ab488 | 1352 | enable_ints_and_unlock(*s); |
316670eb A |
1353 | |
1354 | zfree(thread_call_zone, call); | |
1355 | ||
fe8ab488 | 1356 | *s = disable_ints_and_lock(); |
316670eb A |
1357 | } |
1358 | ||
d9a64523 A |
1359 | if ((flags & THREAD_CALL_WAIT) != 0) { |
1360 | /* | |
1361 | * Dropping lock here because the sched call for the | |
1362 | * high-pri group can take the big lock from under | |
1363 | * a thread lock. | |
1364 | */ | |
1365 | thread_call_unlock(); | |
1366 | thread_wakeup((event_t)call); | |
1367 | thread_call_lock_spin(); | |
1368 | /* THREAD_CALL_SIGNAL call may have been freed */ | |
1369 | } | |
1370 | ||
0a7de745 | 1371 | return repend; |
5ba3f43e A |
1372 | } |

/*
 * thread_call_invoke
 *
 * Invoke the function provided for this thread call
 *
 * Note that the thread call object can be deallocated by the function if we do not control its storage.
 */
static void __attribute__((noinline))
thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_call_param_t param1, thread_call_t call)
{
	current_thread()->thc_state.thc_call = call;

#if DEVELOPMENT || DEBUG
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_START,
		VM_KERNEL_UNSLIDE(func), VM_KERNEL_ADDRHIDE(param0), VM_KERNEL_ADDRHIDE(param1), 0, 0);
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_DTRACE
	uint64_t tc_ttd = call->tc_ttd;
	boolean_t is_delayed = call->tc_flags & THREAD_CALL_DELAYED;
	DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32),
	    (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call);
#endif

	(*func)(param0, param1);

#if CONFIG_DTRACE
	DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32),
	    (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call);
#endif

#if DEVELOPMENT || DEBUG
	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_END,
		VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0);
#endif /* DEVELOPMENT || DEBUG */

	current_thread()->thc_state.thc_call = NULL;
}
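
/*
 * Illustrative sketch (not part of the original file): a callout run
 * through thread_call_invoke() has the standard two-parameter signature.
 * Because thc_call is recorded on the invoking thread for the duration
 * of the call, thread_call_wait_once() below can panic on a callout that
 * waits on itself.  The callout name here is hypothetical:
 *
 *	static void
 *	example_callout(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		// do work; must return at preemption level 0,
 *		// see the panic check in thread_call_thread()
 *	}
 */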

/*
 * thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t group,
	wait_result_t wres)
{
	thread_t self = current_thread();
	boolean_t canwait;

	if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) {
		(void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);
	}

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

	spl_t s = disable_ints_and_lock();

	self->thc_state.thc_group = group;
	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t call;
		thread_call_func_t func;
		thread_call_param_t param0, param1;

		call = qe_dequeue_head(&group->pending_queue, struct thread_call, tc_call.q_link);
		assert(call != NULL);
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_flags |= THREAD_CALL_RUNNING;
			call->tc_refs++;	/* Delay free until we're done */
		} else {
			canwait = FALSE;
		}

		enable_ints_and_unlock(s);

		thread_call_invoke(func, param0, param1, call);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
			    pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		s = disable_ints_and_lock();

		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call, group, &s);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (self->callout_woken_from_icontext && !self->callout_woke_thread) {
		ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1);
		if (self->callout_woken_from_platform_idle) {
			ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1);
		}
	}

	self->callout_woken_from_icontext = FALSE;
	self->callout_woken_from_platform_idle = FALSE;
	self->callout_woke_thread = FALSE;

	if (group_isparallel(group)) {
		/*
		 * For the new style of thread group, the thread always blocks.
		 * If we have more than the target number of threads,
		 * and this is the first to block, and it isn't active
		 * already, set a timer for deallocating a thread if we
		 * continue to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
		    ((group->active_count + group->idle_count) > group->target_thread_count)) {
			thread_call_start_deallocate_timer(group);
		}

		/* Wait for more work (or termination) */
		wres = waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_INTERRUPTIBLE, 0);
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}

		enable_ints_and_unlock(s);

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock(s);

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock(s);

	thread_terminate(self);
	/* NOTREACHED */
}
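
/*
 * Illustrative worked example (target value assumed, not from this file):
 * for a parallel group, the dealloc timer arms only when a worker goes
 * idle while the pool exceeds its target.  With target_thread_count = 4:
 *
 *	active_count = 4, idle_count = 1  ->  5 > 4, timer armed
 *	active_count = 3, idle_count = 1  ->  4 > 4 is false, no timer
 */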

/*
 * thread_call_daemon: walk list of groups, allocating
 * threads if appropriate (as determined by
 * thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
	spl_t s = disable_ints_and_lock();

	/* Starting at zero happens to be high-priority first. */
	for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
		thread_call_group_t group = &thread_call_groups[i];

		while (thread_call_group_should_add_thread(group)) {
			group->active_count++;

			enable_ints_and_unlock(s);

			kern_return_t kr = thread_call_thread_create(group);

			if (kr != KERN_SUCCESS) {
				/*
				 * On failure, just pause for a moment and give up.
				 * We can try again later.
				 */
				delay(10000); /* 10 ms */
				s = disable_ints_and_lock();
				goto out;
			}

			s = disable_ints_and_lock();
		}
	}

out:
	thread_call_daemon_awake = FALSE;
	waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0);

	enable_ints_and_unlock(s);

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
	/* NOTREACHED */
}

static void
thread_call_daemon(
	__unused void *arg)
{
	thread_t self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_set_thread_name(self, "thread_call_daemon");

	thread_call_daemon_continue(NULL);
	/* NOTREACHED */
}
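
/*
 * Illustrative note (not part of the original file): the daemon parks
 * itself with the assert-wait / continuation idiom used throughout this
 * file.  The wait is asserted while the lock is still held, so a wakeup
 * racing with the unlock is not lost, and the continuation restarts the
 * function from the top on a fresh stack:
 *
 *	waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0);
 *	enable_ints_and_unlock(s);
 *	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
 *	// NOTREACHED: the continuation runs again on the next wakeup
 */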

/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(thread_call_group_t group)
{
	__assert_only boolean_t already_enqueued;

	assert(group->idle_count > 0);
	assert((group->flags & TCG_DEALLOC_ACTIVE) == 0);

	group->flags |= TCG_DEALLOC_ACTIVE;

	uint64_t deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;

	already_enqueued = timer_call_enter(&group->dealloc_timer, deadline, 0);

	assert(already_enqueued == FALSE);
}
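
/*
 * Illustrative sketch (not part of the original file):
 * thread_call_dealloc_interval_abs is an interval in mach absolute-time
 * units, converted once from nanoseconds at initialization, e.g.
 * (the 5-second value is assumed purely for illustration):
 *
 *	uint64_t interval_abs;
 *	nanoseconds_to_absolutetime(5 * NSEC_PER_SEC, &interval_abs);
 *
 * The deadline above is therefore "first-idle timestamp + interval": the
 * timer fires only after a surplus thread has been idle the full window.
 */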

/* non-static so dtrace can find it rdar://problem/31156135&31379348 */
void
thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1)
{
	thread_call_group_t group = (thread_call_group_t) p0;
	thread_call_flavor_t flavor = (thread_call_flavor_t) p1;

	thread_call_t call;
	uint64_t now;
	boolean_t restart;
	boolean_t repend;

	thread_call_lock_spin();

	if (flavor == TCF_CONTINUOUS) {
		now = mach_continuous_time();
	} else if (flavor == TCF_ABSOLUTE) {
		now = mach_absolute_time();
	} else {
		panic("invalid timer flavor: %d", flavor);
	}

	do {
		restart = FALSE;
		qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) {
			if (flavor == TCF_CONTINUOUS) {
				assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS);
			} else {
				assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0);
			}

			/*
			 * If we hit a call that isn't yet ready to expire,
			 * then we're done for now.
			 * TODO: The next timer in the list could have a larger leeway
			 * and therefore be ready to expire.
			 * Sort by deadline then by soft deadline to avoid this.
			 */
			if (call->tc_soft_deadline > now) {
				break;
			}

			/*
			 * If we hit a rate-limited timer, don't eagerly wake it up.
			 * Wait until it reaches the end of the leeway window.
			 *
			 * TODO: What if the next timer is not rate-limited?
			 * Have a separate rate-limited queue to avoid this.
			 */
			if ((call->tc_flags & THREAD_CALL_RATELIMITED) &&
			    (call->tc_call.deadline > now) &&
			    (ml_timer_forced_evaluation() == FALSE)) {
				break;
			}

			if (THREAD_CALL_SIGNAL & call->tc_flags) {
				__assert_only queue_head_t *old_queue;
				old_queue = call_entry_dequeue(&call->tc_call);
				assert(old_queue == &group->delayed_queues[flavor]);

				do {
					thread_call_func_t func = call->tc_call.func;
					thread_call_param_t param0 = call->tc_call.param0;
					thread_call_param_t param1 = call->tc_call.param1;

					call->tc_flags |= THREAD_CALL_RUNNING;
					thread_call_unlock();
					thread_call_invoke(func, param0, param1, call);
					thread_call_lock_spin();

					repend = thread_call_finish(call, group, NULL);
				} while (repend);

				/* call may have been freed */
				restart = TRUE;
				break;
			} else {
				_pending_call_enqueue(call, group);
			}
		}
	} while (restart);

	_arm_delayed_call_timer(call, group, flavor);

	thread_call_unlock();
}
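
/*
 * Illustrative worked example (values assumed, not from this file): for a
 * rate-limited call, tc_soft_deadline is the requested fire time and
 * tc_call.deadline is that time plus leeway.  With soft = 1000 and
 * deadline = 1250:
 *
 *	now =  990: soft > now, the scan stops at this call
 *	now = 1100: soft has passed, but the call is rate-limited and
 *	            deadline (1250) > now, so it waits out the window
 *	now = 1300: deadline has passed, the call moves to the pending queue
 */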

static void
thread_call_delayed_timer_rescan(thread_call_group_t group,
    thread_call_flavor_t flavor)
{
	thread_call_t call;
	uint64_t now;

	spl_t s = disable_ints_and_lock();

	assert(ml_timer_forced_evaluation() == TRUE);

	if (flavor == TCF_CONTINUOUS) {
		now = mach_continuous_time();
	} else {
		now = mach_absolute_time();
	}

	qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) {
		if (call->tc_soft_deadline <= now) {
			_pending_call_enqueue(call, group);
		} else {
			uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
			assert(call->tc_call.deadline >= call->tc_soft_deadline);
			/*
			 * On a latency quality-of-service level change,
			 * re-sort potentially rate-limited callouts. The platform
			 * layer determines which timers require this.
			 */
			if (timer_resort_threshold(skew)) {
				_call_dequeue(call, group);
				_delayed_call_enqueue(call, group, call->tc_soft_deadline, flavor);
			}
		}
	}

	_arm_delayed_call_timer(NULL, group, flavor);

	enable_ints_and_unlock(s);
}
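
/*
 * Illustrative note (values assumed, not from this file): skew is the
 * leeway that was folded into the hard deadline, e.g. soft = 1000 and
 * deadline = 1250 give skew = 250.  If timer_resort_threshold(250)
 * reports that the new latency QoS level no longer tolerates that much
 * slop, the call is re-enqueued using its soft deadline.
 */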

void
thread_call_delayed_timer_rescan_all(void)
{
	for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
		thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_ABSOLUTE);
		thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_CONTINUOUS);
	}
}

/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
	timer_call_param_t p0,
	__unused timer_call_param_t p1)
{
	thread_call_group_t group = (thread_call_group_t)p0;
	uint64_t now;
	kern_return_t res;
	boolean_t terminated = FALSE;

	thread_call_lock_spin();

	assert((group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE);

	now = mach_absolute_time();

	if (group->idle_count > 0) {
		if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
			terminated = TRUE;
			group->idle_count--;
			res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
			    THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES);
			if (res != KERN_SUCCESS) {
				panic("Unable to wake up idle thread for termination?");
			}
		}
	}

	group->flags &= ~TCG_DEALLOC_ACTIVE;

	/*
	 * If we still have an excess of threads, schedule another
	 * invocation of this function.
	 */
	if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
		/*
		 * If we killed someone just now, push out the
		 * next deadline.
		 */
		if (terminated) {
			group->idle_timestamp = now;
		}

		thread_call_start_deallocate_timer(group);
	}

	thread_call_unlock();
}
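
/*
 * Illustrative note (not part of the original file): termination is a
 * handshake with thread_call_thread() above.  The dealloc timer wakes
 * exactly one idle worker with THREAD_INTERRUPTED; that worker's
 * continuation sees wres == THREAD_INTERRUPTED and calls
 * thread_terminate() on itself, so each timer expiry shrinks the pool by
 * at most one thread.
 */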

/*
 * Wait for the invocation of the thread call to complete.
 * We know there's only one invocation in flight because of the 'once' flag.
 *
 * If a subsequent invocation comes in before we wake up, that's OK.
 *
 * TODO: Here is where we will add priority inheritance to the thread executing
 * the thread call in case it's lower priority than the current thread
 * <rdar://problem/30321792> Priority inheritance for thread_call_wait_once
 *
 * Called with the thread call lock held; returns with it unlocked.
 * This lets us avoid a spurious take/drop after waking up from thread_block.
 */
static boolean_t
thread_call_wait_once_locked(thread_call_t call, spl_t s)
{
	assert(call->tc_flags & THREAD_CALL_ALLOC);
	assert(call->tc_flags & THREAD_CALL_ONCE);

	if ((call->tc_flags & THREAD_CALL_RUNNING) == 0) {
		enable_ints_and_unlock(s);
		return FALSE;
	}

	/* call is running, so we have to wait for it */
	call->tc_flags |= THREAD_CALL_WAIT;

	wait_result_t res = assert_wait(call, THREAD_UNINT);
	if (res != THREAD_WAITING) {
		panic("Unable to assert wait: %d", res);
	}

	enable_ints_and_unlock(s);

	res = thread_block(THREAD_CONTINUE_NULL);
	if (res != THREAD_AWAKENED) {
		panic("Awoken with %d?", res);
	}

	/* returns unlocked */
	return TRUE;
}

/*
 * Wait for an in-flight invocation to complete.
 * Does NOT try to cancel, so the client doesn't need to hold their
 * lock while calling this function.
 *
 * Returns whether or not it had to wait.
 *
 * Only works for THREAD_CALL_ONCE calls.
 */
boolean_t
thread_call_wait_once(thread_call_t call)
{
	if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
		panic("thread_call_wait_once: can't wait on thread call whose storage I don't own");
	}

	if ((call->tc_flags & THREAD_CALL_ONCE) == 0) {
		panic("thread_call_wait_once: can't wait_once on a non-once call");
	}

	if (!ml_get_interrupts_enabled()) {
		panic("unsafe thread_call_wait_once");
	}

	if (current_thread()->thc_state.thc_call == call) {
		panic("thread_call_wait_once: deadlock waiting on self from inside call: %p to function %p",
		    call, call->tc_call.func);
	}

	spl_t s = disable_ints_and_lock();

	boolean_t waited = thread_call_wait_once_locked(call, s);
	/* thread call lock unlocked */

	return waited;
}
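
/*
 * Illustrative usage sketch (not part of the original file); the callout
 * and variable names are hypothetical:
 *
 *	static void my_func(thread_call_param_t p0, thread_call_param_t p1);
 *
 *	thread_call_t tc = thread_call_allocate_with_options(my_func, NULL,
 *	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
 *
 *	thread_call_enter(tc);              // submit one invocation
 *	...
 *	(void)thread_call_wait_once(tc);    // drain any in-flight invocation
 *	thread_call_free(tc);
 */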


/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish. Can only be invoked on thread calls whose storage we manage.
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 *
 * Called with thread_call_lock held. Returns with lock released.
 */
static void
thread_call_wait_locked(thread_call_t call, spl_t s)
{
	uint64_t submit_count;
	wait_result_t res;

	assert(call->tc_flags & THREAD_CALL_ALLOC);

	submit_count = call->tc_submit_count;

	while (call->tc_finish_count < submit_count) {
		call->tc_flags |= THREAD_CALL_WAIT;

		res = assert_wait(call, THREAD_UNINT);
		if (res != THREAD_WAITING) {
			panic("Unable to assert wait: %d", res);
		}

		enable_ints_and_unlock(s);

		res = thread_block(THREAD_CONTINUE_NULL);
		if (res != THREAD_AWAKENED) {
			panic("Awoken with %d?", res);
		}

		s = disable_ints_and_lock();
	}

	enable_ints_and_unlock(s);
}
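
/*
 * Illustrative note (values assumed, not from this file): unlike
 * thread_call_wait_once(), which watches THREAD_CALL_RUNNING, this waits
 * on the counters.  With tc_submit_count = 5 and tc_finish_count = 3 at
 * entry, the caller sleeps until tc_finish_count reaches 5; submissions
 * made after that snapshot do not extend the wait.
 */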

/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
	boolean_t active;

	spl_t s = disable_ints_and_lock();
	active = (call->tc_submit_count > call->tc_finish_count);
	enable_ints_and_unlock(s);

	return active;
}

/*
 * adjust_cont_time_thread_calls
 * On wake from sleep, re-arm the delayed call timer for each group's
 * continuous-time queue.
 */
void
adjust_cont_time_thread_calls(void)
{
	spl_t s = disable_ints_and_lock();

	for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) {
		thread_call_group_t group = &thread_call_groups[i];

		/* only the continuous timers need to be re-armed */

		_arm_delayed_call_timer(NULL, group, TCF_CONTINUOUS);
	}

	enable_ints_and_unlock(s);
}
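
/*
 * Illustrative note (not part of the original file): mach_continuous_time()
 * advances across system sleep while mach_absolute_time() does not, so on
 * wake the TCF_CONTINUOUS queues may hold calls that are suddenly due, or
 * due much sooner than the hardware timer was last armed for.  Re-arming
 * here forces thread_call_delayed_timer() to re-evaluate them;
 * TCF_ABSOLUTE deadlines are unaffected by time spent asleep.
 */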