Commit | Line | Data |
---|---|---|
1c79356b | 1 | /*
c910b4d9 | 2 | * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
1c79356b | 3 | *
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b | 5 | *
2d21ac55 | 6 | * This file contains Original Code and/or Modifications of Original Code
2d21ac55 | 7 | * as defined in and that are subject to the Apple Public Source License
2d21ac55 | 8 | * Version 2.0 (the 'License'). You may not use this file except in
2d21ac55 | 9 | * compliance with the License. The rights granted to you under the License
2d21ac55 | 10 | * may not be used to create, or enable the creation or redistribution of,
2d21ac55 | 11 | * unlawful or unlicensed copies of an Apple operating system, or to
2d21ac55 | 12 | * circumvent, violate, or enable the circumvention or violation of, any
2d21ac55 | 13 | * terms of an Apple operating system software license agreement.
8f6c56a5 | 14 | *
2d21ac55 | 15 | * Please obtain a copy of the License at
2d21ac55 | 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file.
2d21ac55 | 17 | *
2d21ac55 | 18 | * The Original Code and all software distributed under the License are
2d21ac55 | 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5 | 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
8f6c56a5 | 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55 | 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
2d21ac55 | 23 | * Please see the License for the specific language governing rights and
2d21ac55 | 24 | * limitations under the License.
8f6c56a5 | 25 | *
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b | 27 | */
1c79356b | 28 |
1c79356b | 29 | #include <mach/mach_types.h>
91447636 | 30 | #include <mach/thread_act.h>
1c79356b | 31 |
91447636 | 32 | #include <kern/kern_types.h>
c910b4d9 | 33 | #include <kern/zalloc.h>
1c79356b | 34 | #include <kern/sched_prim.h>
1c79356b | 35 | #include <kern/clock.h>
1c79356b | 36 | #include <kern/task.h>
1c79356b | 37 | #include <kern/thread.h>
3e170ce0 | 38 | #include <kern/waitq.h>
39236c6e | 39 | #include <kern/ledger.h>
5ba3f43e | 40 | #include <kern/policy_internal.h>
91447636 | 41 |
91447636 | 42 | #include <vm/vm_pageout.h>
1c79356b | 43 |
1c79356b | 44 | #include <kern/thread_call.h>
1c79356b | 45 | #include <kern/call_entry.h>
1c79356b | 46 | #include <kern/timer_call.h>
1c79356b | 47 |
316670eb | 48 | #include <libkern/OSAtomic.h>
39236c6e | 49 | #include <kern/timer_queue.h>
316670eb | 50 |
55e303ae | 51 | #include <sys/kdebug.h>
4b17d6b6 | 52 | #if CONFIG_DTRACE
4b17d6b6 | 53 | #include <mach/sdt.h>
4b17d6b6 | 54 | #endif
39236c6e | 55 | #include <machine/machine_routines.h> |
1c79356b | 56 | |
316670eb | 57 | static zone_t thread_call_zone; |
3e170ce0 | 58 | static struct waitq daemon_waitq; |
1c79356b | 59 | |
5ba3f43e A |
60 | typedef enum { |
61 | TCF_ABSOLUTE = 0, | |
62 | TCF_CONTINUOUS = 1, | |
63 | TCF_COUNT = 2, | |
64 | } thread_call_flavor_t; | |
65 | ||
66 | typedef enum { | |
67 | TCG_NONE = 0x0, | |
68 | TCG_PARALLEL = 0x1, | |
69 | TCG_DEALLOC_ACTIVE = 0x2, | |
70 | } thread_call_group_flags_t; | |
71 | ||
72 | static struct thread_call_group { | |
73 | const char * tcg_name; | |
74 | ||
c910b4d9 | 75 | queue_head_t pending_queue; |
6d2010ae | 76 | uint32_t pending_count; |
1c79356b | 77 | |
5ba3f43e A |
78 | queue_head_t delayed_queues[TCF_COUNT]; |
79 | timer_call_data_t delayed_timers[TCF_COUNT]; | |
1c79356b | 80 | |
316670eb | 81 | timer_call_data_t dealloc_timer; |
1c79356b | 82 | |
3e170ce0 | 83 | struct waitq idle_waitq; |
5ba3f43e | 84 | uint32_t idle_count, active_count, blocked_count; |
1c79356b | 85 | |
5ba3f43e | 86 | uint32_t tcg_thread_pri; |
316670eb A |
87 | uint32_t target_thread_count; |
88 | uint64_t idle_timestamp; | |
c910b4d9 | 89 | |
5ba3f43e A |
90 | thread_call_group_flags_t flags; |
91 | ||
92 | } thread_call_groups[THREAD_CALL_INDEX_MAX] = { | |
93 | [THREAD_CALL_INDEX_HIGH] = { | |
94 | .tcg_name = "high", | |
95 | .tcg_thread_pri = BASEPRI_PREEMPT_HIGH, | |
96 | .target_thread_count = 4, | |
97 | .flags = TCG_NONE, | |
98 | }, | |
99 | [THREAD_CALL_INDEX_KERNEL] = { | |
100 | .tcg_name = "kernel", | |
101 | .tcg_thread_pri = BASEPRI_KERNEL, | |
102 | .target_thread_count = 1, | |
103 | .flags = TCG_PARALLEL, | |
104 | }, | |
105 | [THREAD_CALL_INDEX_USER] = { | |
106 | .tcg_name = "user", | |
107 | .tcg_thread_pri = BASEPRI_DEFAULT, | |
108 | .target_thread_count = 1, | |
109 | .flags = TCG_PARALLEL, | |
110 | }, | |
111 | [THREAD_CALL_INDEX_LOW] = { | |
112 | .tcg_name = "low", | |
113 | .tcg_thread_pri = MAXPRI_THROTTLE, | |
114 | .target_thread_count = 1, | |
115 | .flags = TCG_PARALLEL, | |
116 | }, | |
117 | [THREAD_CALL_INDEX_KERNEL_HIGH] = { | |
118 | .tcg_name = "kernel-high", | |
119 | .tcg_thread_pri = BASEPRI_PREEMPT, | |
120 | .target_thread_count = 2, | |
121 | .flags = TCG_NONE, | |
122 | }, | |
123 | [THREAD_CALL_INDEX_QOS_UI] = { | |
124 | .tcg_name = "qos-ui", | |
125 | .tcg_thread_pri = BASEPRI_FOREGROUND, | |
126 | .target_thread_count = 1, | |
127 | .flags = TCG_NONE, | |
128 | }, | |
129 | [THREAD_CALL_INDEX_QOS_IN] = { | |
130 | .tcg_name = "qos-in", | |
131 | .tcg_thread_pri = BASEPRI_USER_INITIATED, | |
132 | .target_thread_count = 1, | |
133 | .flags = TCG_NONE, | |
134 | }, | |
135 | [THREAD_CALL_INDEX_QOS_UT] = { | |
136 | .tcg_name = "qos-ut", | |
137 | .tcg_thread_pri = BASEPRI_UTILITY, | |
138 | .target_thread_count = 1, | |
139 | .flags = TCG_NONE, | |
140 | }, | |
316670eb | 141 | }; |
c910b4d9 | 142 | |
316670eb | 143 | typedef struct thread_call_group *thread_call_group_t; |
c910b4d9 | 144 | |
316670eb | 145 | #define INTERNAL_CALL_COUNT 768 |
5ba3f43e | 146 | #define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * NSEC_PER_MSEC) /* 5 ms */ |
316670eb A |
147 | #define THREAD_CALL_ADD_RATIO 4 |
148 | #define THREAD_CALL_MACH_FACTOR_CAP 3 | |
5ba3f43e | 149 | #define THREAD_CALL_GROUP_MAX_THREADS 500 |
39037602 | 150 | |
316670eb A |
151 | static boolean_t thread_call_daemon_awake; |
152 | static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT]; | |
153 | static queue_head_t thread_call_internal_queue; | |
39236c6e | 154 | int thread_call_internal_queue_count = 0; |
316670eb A |
155 | static uint64_t thread_call_dealloc_interval_abs; |
156 | ||
39236c6e | 157 | static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0); |
316670eb A |
158 | static __inline__ void _internal_call_release(thread_call_t call); |
159 | static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group); | |
5ba3f43e A |
160 | static boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, |
161 | uint64_t deadline, thread_call_flavor_t flavor); | |
316670eb A |
162 | static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group); |
163 | static __inline__ void thread_call_wake(thread_call_group_t group); | |
316670eb A |
164 | static void thread_call_daemon(void *arg); |
165 | static void thread_call_thread(thread_call_group_t group, wait_result_t wres); | |
316670eb | 166 | static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1); |
5ba3f43e | 167 | static void thread_call_group_setup(thread_call_group_t group); |
316670eb A |
168 | static void sched_call_thread(int type, thread_t thread); |
169 | static void thread_call_start_deallocate_timer(thread_call_group_t group); | |
5ba3f43e A |
170 | static void thread_call_wait_locked(thread_call_t call, spl_t s); |
171 | static boolean_t thread_call_wait_once_locked(thread_call_t call, spl_t s); | |
172 | ||
39236c6e A |
173 | static boolean_t thread_call_enter_delayed_internal(thread_call_t call, |
174 | thread_call_func_t alt_func, thread_call_param_t alt_param0, | |
175 | thread_call_param_t param1, uint64_t deadline, | |
176 | uint64_t leeway, unsigned int flags); | |
1c79356b | 177 | |
5ba3f43e A |
178 | /* non-static so dtrace can find it rdar://problem/31156135&31379348 */ |
179 | extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1); | |
6d2010ae | 180 | |
6d2010ae | 181 | lck_grp_t thread_call_lck_grp; |
5ba3f43e | 182 | lck_mtx_t thread_call_lock_data; |
316670eb | 183 | |
6d2010ae A |
184 | #define thread_call_lock_spin() \ |
185 | lck_mtx_lock_spin_always(&thread_call_lock_data) | |
186 | ||
187 | #define thread_call_unlock() \ | |
188 | lck_mtx_unlock_always(&thread_call_lock_data) | |
189 | ||
5ba3f43e A |
190 | #define tc_deadline tc_call.deadline |
191 | ||
39236c6e | 192 | extern boolean_t mach_timer_coalescing_enabled; |
6d2010ae | 193 | |
316670eb A |
194 | static inline spl_t |
195 | disable_ints_and_lock(void) | |
196 | { | |
5ba3f43e | 197 | spl_t s = splsched(); |
316670eb A |
198 | thread_call_lock_spin(); |
199 | ||
200 | return s; | |
201 | } | |
202 | ||
5ba3f43e | 203 | static inline void |
fe8ab488 | 204 | enable_ints_and_unlock(spl_t s) |
316670eb A |
205 | { |
206 | thread_call_unlock(); | |
fe8ab488 | 207 | splx(s); |
316670eb A |
208 | } |
209 | ||
316670eb A |
210 | static inline boolean_t |
211 | group_isparallel(thread_call_group_t group) | |
212 | { | |
213 | return ((group->flags & TCG_PARALLEL) != 0); | |
214 | } | |
215 | ||
216 | static boolean_t | |
5ba3f43e | 217 | thread_call_group_should_add_thread(thread_call_group_t group) |
316670eb | 218 | { |
5ba3f43e A |
219 | if ((group->active_count + group->blocked_count + group->idle_count) >= THREAD_CALL_GROUP_MAX_THREADS) { |
220 | panic("thread_call group '%s' reached max thread cap (%d): active: %d, blocked: %d, idle: %d", | |
221 | group->tcg_name, THREAD_CALL_GROUP_MAX_THREADS, | |
222 | group->active_count, group->blocked_count, group->idle_count); | |
223 | } | |
316670eb | 224 | |
5ba3f43e | 225 | if (group_isparallel(group) == FALSE) { |
316670eb A |
226 | if (group->pending_count > 0 && group->active_count == 0) { |
227 | return TRUE; | |
228 | } | |
229 | ||
230 | return FALSE; | |
231 | } | |
232 | ||
233 | if (group->pending_count > 0) { | |
234 | if (group->idle_count > 0) { | |
813fb2f6 | 235 | return FALSE; |
316670eb A |
236 | } |
237 | ||
5ba3f43e | 238 | uint32_t thread_count = group->active_count; |
316670eb A |
239 | |
240 | /* | |
241 | * Add a thread if either there are no threads, | |
242 | * the group has fewer than its target number of | |
243 | * threads, or the amount of work is large relative | |
244 | * to the number of threads. In the last case, pay attention | |
245 | * to the total load on the system, and back off if | |
5ba3f43e | 246 | * it's high. |
316670eb A |
247 | */ |
248 | if ((thread_count == 0) || | |
249 | (thread_count < group->target_thread_count) || | |
250 | ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) && | |
251 | (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) { | |
252 | return TRUE; | |
253 | } | |
254 | } | |
316670eb | 255 | |
5ba3f43e | 256 | return FALSE; |
316670eb A |
257 | } |
258 | ||
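/*
 * For example (using the constants defined above): a parallel group with
 * target_thread_count = 1, no idle threads, and 3 active threads only grows
 * the pool once more than THREAD_CALL_ADD_RATIO * 3 = 12 calls are pending
 * and sched_mach_factor is below THREAD_CALL_MACH_FACTOR_CAP (3), i.e. the
 * system is not already heavily loaded.
 */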
259 | /* Lock held */ | |
260 | static inline thread_call_group_t | |
5ba3f43e | 261 | thread_call_get_group(thread_call_t call) |
316670eb | 262 | { |
5ba3f43e | 263 | thread_call_index_t index = call->tc_index; |
316670eb | 264 | |
5ba3f43e | 265 | assert(index >= 0 && index < THREAD_CALL_INDEX_MAX); |
39037602 | 266 | |
5ba3f43e A |
267 | return &thread_call_groups[index]; |
268 | } | |
39037602 | 269 | |
5ba3f43e A |
270 | /* Lock held */ |
271 | static inline thread_call_flavor_t | |
272 | thread_call_get_flavor(thread_call_t call) | |
273 | { | |
274 | return (call->tc_flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE; | |
316670eb A |
275 | } |
276 | ||
277 | static void | |
5ba3f43e | 278 | thread_call_group_setup(thread_call_group_t group) |
316670eb A |
279 | { |
280 | queue_init(&group->pending_queue); | |
5ba3f43e A |
281 | queue_init(&group->delayed_queues[TCF_ABSOLUTE]); |
282 | queue_init(&group->delayed_queues[TCF_CONTINUOUS]); | |
316670eb | 283 | |
5ba3f43e A |
284 | /* TODO: Consolidate to one hard timer for each group */ |
285 | timer_call_setup(&group->delayed_timers[TCF_ABSOLUTE], thread_call_delayed_timer, group); | |
286 | timer_call_setup(&group->delayed_timers[TCF_CONTINUOUS], thread_call_delayed_timer, group); | |
316670eb A |
287 | timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group); |
288 | ||
5ba3f43e A |
289 | /* Reverse the wait order so we re-use the most recently parked thread from the pool */ |
290 | waitq_init(&group->idle_waitq, SYNC_POLICY_REVERSED|SYNC_POLICY_DISABLE_IRQ); | |
316670eb A |
291 | } |
292 | ||
293 | /* | |
294 | * Simple wrapper for creating threads bound to | |
295 | * thread call groups. | |
296 | */ | |
297 | static kern_return_t | |
298 | thread_call_thread_create( | |
299 | thread_call_group_t group) | |
300 | { | |
301 | thread_t thread; | |
302 | kern_return_t result; | |
303 | ||
5ba3f43e A |
304 | int thread_pri = group->tcg_thread_pri; |
305 | ||
306 | result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, | |
307 | group, thread_pri, &thread); | |
316670eb A |
308 | if (result != KERN_SUCCESS) { |
309 | return result; | |
310 | } | |
311 | ||
5ba3f43e | 312 | if (thread_pri <= BASEPRI_KERNEL) { |
316670eb | 313 | /* |
5ba3f43e A |
314 | * THREAD_CALL_PRIORITY_KERNEL and lower don't get to run to completion |
315 | * in kernel if there are higher priority threads available. | |
316670eb A |
316 | */ |
317 | thread_set_eager_preempt(thread); | |
318 | } | |
319 | ||
5ba3f43e A |
320 | char name[MAXTHREADNAMESIZE] = ""; |
321 | ||
322 | int group_thread_count = group->idle_count + group->active_count + group->blocked_count; | |
323 | ||
324 | snprintf(name, sizeof(name), "thread call %s #%d", group->tcg_name, group_thread_count); | |
325 | thread_set_thread_name(thread, name); | |
326 | ||
316670eb A |
327 | thread_deallocate(thread); |
328 | return KERN_SUCCESS; | |
329 | } | |
330 | ||
1c79356b | 331 | /* |
c910b4d9 | 332 | * thread_call_initialize: |
1c79356b | 333 | * |
c910b4d9 A |
334 | * Initialize this module, called |
335 | * early during system initialization. | |
1c79356b | 336 | */ |
1c79356b A |
337 | void |
338 | thread_call_initialize(void) | |
339 | { | |
5ba3f43e A |
340 | int tc_size = sizeof (thread_call_data_t); |
341 | thread_call_zone = zinit(tc_size, 4096 * tc_size, 16 * tc_size, "thread_call"); | |
6d2010ae | 342 | zone_change(thread_call_zone, Z_CALLERACCT, FALSE); |
0b4c1975 | 343 | zone_change(thread_call_zone, Z_NOENCRYPT, TRUE); |
1c79356b | 344 | |
5ba3f43e A |
345 | lck_grp_init(&thread_call_lck_grp, "thread_call", LCK_GRP_ATTR_NULL); |
346 | lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, LCK_ATTR_NULL); | |
39037602 | 347 | |
316670eb | 348 | nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs); |
39037602 | 349 | waitq_init(&daemon_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_FIFO); |
c910b4d9 | 350 | |
5ba3f43e A |
351 | for (uint32_t i = 0; i < THREAD_CALL_INDEX_MAX; i++) |
352 | thread_call_group_setup(&thread_call_groups[i]); | |
1c79356b | 353 | |
5ba3f43e | 354 | spl_t s = disable_ints_and_lock(); |
c910b4d9 | 355 | |
6d2010ae A |
356 | queue_init(&thread_call_internal_queue); |
357 | for ( | |
5ba3f43e | 358 | thread_call_t call = internal_call_storage; |
316670eb | 359 | call < &internal_call_storage[INTERNAL_CALL_COUNT]; |
1c79356b A |
360 | call++) { |
361 | ||
5ba3f43e | 362 | enqueue_tail(&thread_call_internal_queue, &call->tc_call.q_link); |
39236c6e | 363 | thread_call_internal_queue_count++; |
6d2010ae | 364 | } |
1c79356b | 365 | |
c910b4d9 | 366 | thread_call_daemon_awake = TRUE; |
1c79356b | 367 | |
fe8ab488 | 368 | enable_ints_and_unlock(s); |
1c79356b | 369 | |
5ba3f43e A |
370 | thread_t thread; |
371 | kern_return_t result; | |
372 | ||
373 | result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, | |
374 | NULL, BASEPRI_PREEMPT_HIGH + 1, &thread); | |
91447636 A |
375 | if (result != KERN_SUCCESS) |
376 | panic("thread_call_initialize"); | |
377 | ||
378 | thread_deallocate(thread); | |
1c79356b A |
379 | } |
380 | ||
381 | void | |
382 | thread_call_setup( | |
383 | thread_call_t call, | |
384 | thread_call_func_t func, | |
c910b4d9 | 385 | thread_call_param_t param0) |
1c79356b | 386 | { |
316670eb A |
387 | bzero(call, sizeof(*call)); |
388 | call_entry_setup((call_entry_t)call, func, param0); | |
5ba3f43e A |
389 | |
390 | /* Thread calls default to the HIGH group unless otherwise specified */ | |
391 | call->tc_index = THREAD_CALL_INDEX_HIGH; | |
392 | ||
393 | /* THREAD_CALL_ALLOC not set, memory owned by caller */ | |
1c79356b A |
394 | } |
395 | ||
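/*
 * Example of caller-owned storage (a sketch; 'my_softc', 'my_handler' and
 * 'sc' are hypothetical names): embed a thread_call_data_t rather than
 * allocating one.
 *
 *	struct my_softc {
 *		thread_call_data_t	msc_call;
 *	};
 *
 *	thread_call_setup(&sc->msc_call, my_handler, sc);
 *	thread_call_enter1(&sc->msc_call, (thread_call_param_t)arg);
 *
 * Because THREAD_CALL_ALLOC is not set, the caller owns the memory and must
 * not reuse or release it while the call is still pending or running.
 */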
396 | /* | |
c910b4d9 | 397 | * _internal_call_allocate: |
1c79356b | 398 | * |
c910b4d9 | 399 | * Allocate an internal callout entry. |
1c79356b | 400 | * |
c910b4d9 | 401 | * Called with thread_call_lock held. |
1c79356b | 402 | */ |
1c79356b | 403 | static __inline__ thread_call_t |
39236c6e | 404 | _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0) |
1c79356b A |
405 | { |
406 | thread_call_t call; | |
407 | ||
c910b4d9 | 408 | if (queue_empty(&thread_call_internal_queue)) |
1c79356b | 409 | panic("_internal_call_allocate"); |
5ba3f43e A |
410 | |
411 | call = qe_dequeue_head(&thread_call_internal_queue, struct thread_call, tc_call.q_link); | |
412 | ||
39236c6e A |
413 | thread_call_internal_queue_count--; |
414 | ||
415 | thread_call_setup(call, func, param0); | |
416 | call->tc_refs = 0; | |
417 | call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */ | |
418 | ||
1c79356b A |
419 | return (call); |
420 | } | |
421 | ||
422 | /* | |
c910b4d9 | 423 | * _internal_call_release: |
1c79356b | 424 | * |
c910b4d9 | 425 | * Release an internal callout entry which |
39236c6e A |
426 | * is no longer pending (or delayed). This is |
427 | * safe to call on a non-internal entry, in which | |
428 | * case nothing happens. | |
1c79356b | 429 | * |
c910b4d9 | 430 | * Called with thread_call_lock held. |
1c79356b | 431 | */ |
c910b4d9 | 432 | static __inline__ void |
5ba3f43e | 433 | _internal_call_release(thread_call_t call) |
1c79356b | 434 | { |
5ba3f43e A |
435 | if (call >= internal_call_storage && |
436 | call < &internal_call_storage[INTERNAL_CALL_COUNT]) { | |
39236c6e | 437 | assert((call->tc_flags & THREAD_CALL_ALLOC) == 0); |
5ba3f43e | 438 | enqueue_head(&thread_call_internal_queue, &call->tc_call.q_link); |
39236c6e A |
439 | thread_call_internal_queue_count++; |
440 | } | |
1c79356b A |
441 | } |
442 | ||
443 | /* | |
c910b4d9 | 444 | * _pending_call_enqueue: |
1c79356b | 445 | * |
c910b4d9 A |
446 | * Place an entry at the end of the |
447 | * pending queue, to be executed soon. | |
1c79356b | 448 | * |
c910b4d9 A |
449 | * Returns TRUE if the entry was already |
450 | * on a queue. | |
1c79356b | 451 | * |
c910b4d9 | 452 | * Called with thread_call_lock held. |
1c79356b | 453 | */ |
c910b4d9 | 454 | static __inline__ boolean_t |
5ba3f43e A |
455 | _pending_call_enqueue(thread_call_t call, |
456 | thread_call_group_t group) | |
1c79356b | 457 | { |
5ba3f43e A |
458 | if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING) |
459 | == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { | |
460 | call->tc_deadline = 0; | |
461 | ||
462 | uint32_t flags = call->tc_flags; | |
463 | call->tc_flags |= THREAD_CALL_RESCHEDULE; | |
464 | ||
465 | if ((flags & THREAD_CALL_RESCHEDULE) != 0) | |
466 | return (TRUE); | |
467 | else | |
468 | return (FALSE); | |
469 | } | |
1c79356b | 470 | |
5ba3f43e | 471 | queue_head_t *old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue); |
316670eb A |
472 | |
473 | if (old_queue == NULL) { | |
474 | call->tc_submit_count++; | |
39037602 | 475 | } else if (old_queue != &group->pending_queue && |
5ba3f43e A |
476 | old_queue != &group->delayed_queues[TCF_ABSOLUTE] && |
477 | old_queue != &group->delayed_queues[TCF_CONTINUOUS]) { | |
39037602 | 478 | panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue); |
316670eb | 479 | } |
1c79356b | 480 | |
c910b4d9 | 481 | group->pending_count++; |
1c79356b | 482 | |
316670eb A |
483 | thread_call_wake(group); |
484 | ||
c910b4d9 | 485 | return (old_queue != NULL); |
1c79356b A |
486 | } |
487 | ||
488 | /* | |
c910b4d9 | 489 | * _delayed_call_enqueue: |
1c79356b | 490 | * |
c910b4d9 A |
491 | * Place an entry on the delayed queue, |
492 | * after existing entries with an earlier | |
493 | * (or identical) deadline. | |
1c79356b | 494 | * |
c910b4d9 A |
495 | * Returns TRUE if the entry was already |
496 | * on a queue. | |
1c79356b | 497 | * |
c910b4d9 | 498 | * Called with thread_call_lock held. |
1c79356b | 499 | */ |
5ba3f43e | 500 | static boolean_t |
1c79356b | 501 | _delayed_call_enqueue( |
5ba3f43e A |
502 | thread_call_t call, |
503 | thread_call_group_t group, | |
504 | uint64_t deadline, | |
505 | thread_call_flavor_t flavor) | |
1c79356b | 506 | { |
5ba3f43e A |
507 | if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING) |
508 | == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { | |
509 | call->tc_deadline = deadline; | |
1c79356b | 510 | |
5ba3f43e A |
511 | uint32_t flags = call->tc_flags; |
512 | call->tc_flags |= THREAD_CALL_RESCHEDULE; | |
513 | ||
514 | if ((flags & THREAD_CALL_RESCHEDULE) != 0) | |
515 | return (TRUE); | |
516 | else | |
517 | return (FALSE); | |
518 | } | |
519 | ||
520 | queue_head_t *old_queue = call_entry_enqueue_deadline(CE(call), | |
521 | &group->delayed_queues[flavor], | |
522 | deadline); | |
c910b4d9 | 523 | |
39037602 | 524 | if (old_queue == &group->pending_queue) { |
c910b4d9 | 525 | group->pending_count--; |
39037602 | 526 | } else if (old_queue == NULL) { |
316670eb | 527 | call->tc_submit_count++; |
5ba3f43e A |
528 | } else if (old_queue == &group->delayed_queues[TCF_ABSOLUTE] || |
529 | old_queue == &group->delayed_queues[TCF_CONTINUOUS]) { | |
530 | /* TODO: if it's in the other delayed queue, that might not be OK */ | |
39037602 A |
531 | // we did nothing, and that's fine |
532 | } else { | |
533 | panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue); | |
534 | } | |
c910b4d9 A |
535 | |
536 | return (old_queue != NULL); | |
1c79356b A |
537 | } |
538 | ||
539 | /* | |
c910b4d9 | 540 | * _call_dequeue: |
1c79356b | 541 | * |
c910b4d9 | 542 | * Remove an entry from a queue. |
1c79356b | 543 | * |
c910b4d9 | 544 | * Returns TRUE if the entry was on a queue. |
1c79356b | 545 | * |
c910b4d9 | 546 | * Called with thread_call_lock held. |
1c79356b | 547 | */ |
c910b4d9 A |
548 | static __inline__ boolean_t |
549 | _call_dequeue( | |
550 | thread_call_t call, | |
551 | thread_call_group_t group) | |
1c79356b | 552 | { |
6d2010ae | 553 | queue_head_t *old_queue; |
c910b4d9 | 554 | |
316670eb | 555 | old_queue = call_entry_dequeue(CE(call)); |
c910b4d9 | 556 | |
316670eb | 557 | if (old_queue != NULL) { |
5ba3f43e A |
558 | assert(old_queue == &group->pending_queue || |
559 | old_queue == &group->delayed_queues[TCF_ABSOLUTE] || | |
560 | old_queue == &group->delayed_queues[TCF_CONTINUOUS]); | |
561 | ||
316670eb A |
562 | call->tc_finish_count++; |
563 | if (old_queue == &group->pending_queue) | |
564 | group->pending_count--; | |
565 | } | |
c910b4d9 A |
566 | |
567 | return (old_queue != NULL); | |
1c79356b A |
568 | } |
569 | ||
570 | /* | |
5ba3f43e | 571 | * _arm_delayed_call_timer: |
1c79356b | 572 | * |
5ba3f43e A |
573 | * Check if the timer needs to be armed for this flavor, |
574 | * and if so, arm it. | |
1c79356b | 575 | * |
5ba3f43e A |
576 | * If call is non-NULL, only re-arm the timer if the specified call |
577 | * is the first in the queue. | |
578 | * | |
579 | * Returns true if the timer was armed/re-armed, false if it was left unset | |
580 | * Caller should cancel the timer if need be. | |
581 | * | |
582 | * Called with thread_call_lock held. | |
1c79356b | 583 | */ |
5ba3f43e A |
584 | static bool |
585 | _arm_delayed_call_timer(thread_call_t new_call, | |
586 | thread_call_group_t group, | |
587 | thread_call_flavor_t flavor) | |
1c79356b | 588 | { |
5ba3f43e A |
589 | /* No calls implies no timer needed */ |
590 | if (queue_empty(&group->delayed_queues[flavor])) | |
591 | return false; | |
592 | ||
593 | thread_call_t call = qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link); | |
594 | ||
595 | /* We only need to change the hard timer if this new call is the first in the list */ | |
596 | if (new_call != NULL && new_call != call) | |
597 | return false; | |
39236c6e A |
598 | |
599 | assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline))); | |
39037602 | 600 | |
5ba3f43e | 601 | uint64_t fire_at = call->tc_soft_deadline; |
39037602 | 602 | |
5ba3f43e A |
603 | if (flavor == TCF_CONTINUOUS) { |
604 | assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS); | |
39037602 | 605 | fire_at = continuoustime_to_absolutetime(fire_at); |
5ba3f43e A |
606 | } else { |
607 | assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0); | |
39037602 | 608 | } |
39236c6e | 609 | |
5ba3f43e A |
610 | /* |
611 | * Note: This picks the soonest-deadline call's leeway as the hard timer's leeway, | |
612 | * which does not take into account later-deadline timers with a larger leeway. | |
613 | * This is a valid coalescing behavior, but masks a possible window to | |
614 | * fire a timer instead of going idle. | |
615 | */ | |
616 | uint64_t leeway = call->tc_call.deadline - call->tc_soft_deadline; | |
617 | ||
618 | timer_call_enter_with_leeway(&group->delayed_timers[flavor], (timer_call_param_t)flavor, | |
39037602 | 619 | fire_at, leeway, |
39236c6e | 620 | TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LEEWAY, |
fe8ab488 | 621 | ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED)); |
316670eb | 622 | |
5ba3f43e | 623 | return true; |
1c79356b A |
624 | } |
625 | ||
626 | /* | |
5ba3f43e | 627 | * _cancel_func_from_queue: |
1c79356b | 628 | * |
c910b4d9 | 629 | * Remove the first (or all) matching |
5ba3f43e | 630 | * entries from the specified queue. |
1c79356b | 631 | * |
5ba3f43e | 632 | * Returns TRUE if any matching entries |
c910b4d9 | 633 | * were found. |
1c79356b | 634 | * |
c910b4d9 | 635 | * Called with thread_call_lock held. |
1c79356b | 636 | */ |
c910b4d9 | 637 | static boolean_t |
5ba3f43e A |
638 | _cancel_func_from_queue(thread_call_func_t func, |
639 | thread_call_param_t param0, | |
640 | thread_call_group_t group, | |
641 | boolean_t remove_all, | |
642 | queue_head_t *queue) | |
1c79356b | 643 | { |
5ba3f43e A |
644 | boolean_t call_removed = FALSE; |
645 | thread_call_t call; | |
316670eb | 646 | |
5ba3f43e A |
647 | qe_foreach_element_safe(call, queue, tc_call.q_link) { |
648 | if (call->tc_call.func != func || | |
649 | call->tc_call.param0 != param0) { | |
650 | continue; | |
651 | } | |
316670eb | 652 | |
5ba3f43e | 653 | _call_dequeue(call, group); |
316670eb | 654 | |
5ba3f43e | 655 | _internal_call_release(call); |
316670eb | 656 | |
5ba3f43e A |
657 | call_removed = TRUE; |
658 | if (!remove_all) | |
659 | break; | |
316670eb A |
660 | } |
661 | ||
662 | return (call_removed); | |
1c79356b A |
663 | } |
664 | ||
1c79356b | 665 | /* |
c910b4d9 | 666 | * thread_call_func_delayed: |
1c79356b | 667 | * |
c910b4d9 A |
668 | * Enqueue a function callout to |
669 | * occur at the stated time. | |
1c79356b | 670 | */ |
1c79356b A |
671 | void |
672 | thread_call_func_delayed( | |
316670eb A |
673 | thread_call_func_t func, |
674 | thread_call_param_t param, | |
675 | uint64_t deadline) | |
1c79356b | 676 | { |
39236c6e A |
677 | (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0); |
678 | } | |
316670eb | 679 | |
39236c6e A |
680 | /* |
681 | * thread_call_func_delayed_with_leeway: | |
682 | * | |
683 | * Same as thread_call_func_delayed(), but with | |
684 | * leeway/flags threaded through. | |
685 | */ | |
316670eb | 686 | |
39236c6e A |
687 | void |
688 | thread_call_func_delayed_with_leeway( | |
689 | thread_call_func_t func, | |
690 | thread_call_param_t param, | |
691 | uint64_t deadline, | |
692 | uint64_t leeway, | |
693 | uint32_t flags) | |
694 | { | |
695 | (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags); | |
1c79356b A |
696 | } |
697 | ||
698 | /* | |
c910b4d9 | 699 | * thread_call_func_cancel: |
1c79356b | 700 | * |
c910b4d9 | 701 | * Dequeue a function callout. |
1c79356b | 702 | * |
c910b4d9 A |
703 | * Removes one (or all) { function, argument } |
704 | * instance(s) from either (or both) | |
705 | * the pending and the delayed queue, | |
706 | * in that order. | |
1c79356b | 707 | * |
c910b4d9 | 708 | * Returns TRUE if any calls were cancelled. |
5ba3f43e A |
709 | * |
710 | * This iterates all of the pending or delayed thread calls in the group, | |
711 | * which is really inefficient. Switch to an allocated thread call instead. | |
1c79356b | 712 | */ |
1c79356b A |
713 | boolean_t |
714 | thread_call_func_cancel( | |
316670eb A |
715 | thread_call_func_t func, |
716 | thread_call_param_t param, | |
717 | boolean_t cancel_all) | |
1c79356b | 718 | { |
316670eb | 719 | boolean_t result; |
1c79356b | 720 | |
39037602 A |
721 | assert(func != NULL); |
722 | ||
5ba3f43e | 723 | spl_t s = disable_ints_and_lock(); |
316670eb | 724 | |
5ba3f43e A |
725 | /* Function-only thread calls are only kept in the default HIGH group */ |
726 | thread_call_group_t group = &thread_call_groups[THREAD_CALL_INDEX_HIGH]; | |
316670eb | 727 | |
5ba3f43e A |
728 | if (cancel_all) { |
729 | /* exhaustively search every queue, and return true if any search found something */ | |
730 | result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) | | |
731 | _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) | | |
732 | _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); | |
733 | } else { | |
734 | /* early-exit as soon as we find something, don't search other queues */ | |
735 | result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) || | |
736 | _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) || | |
737 | _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); | |
738 | } | |
739 | ||
740 | enable_ints_and_unlock(s); | |
1c79356b A |
741 | |
742 | return (result); | |
743 | } | |
744 | ||
316670eb | 745 | /* |
5ba3f43e A |
746 | * Allocate a thread call with a given priority. Importances other than |
747 | * THREAD_CALL_PRIORITY_HIGH or THREAD_CALL_PRIORITY_KERNEL_HIGH will be run in threads | |
748 | * with eager preemption enabled (i.e. may be aggressively preempted by higher-priority | |
749 | * threads which are not in the normal "urgent" bands). | |
316670eb A |
750 | */ |
751 | thread_call_t | |
752 | thread_call_allocate_with_priority( | |
753 | thread_call_func_t func, | |
754 | thread_call_param_t param0, | |
755 | thread_call_priority_t pri) | |
756 | { | |
5ba3f43e A |
757 | return thread_call_allocate_with_options(func, param0, pri, 0); |
758 | } | |
316670eb | 759 | |
5ba3f43e A |
760 | thread_call_t |
761 | thread_call_allocate_with_options( | |
762 | thread_call_func_t func, | |
763 | thread_call_param_t param0, | |
764 | thread_call_priority_t pri, | |
765 | thread_call_options_t options) | |
766 | { | |
767 | thread_call_t call = thread_call_allocate(func, param0); | |
768 | ||
769 | switch (pri) { | |
770 | case THREAD_CALL_PRIORITY_HIGH: | |
771 | call->tc_index = THREAD_CALL_INDEX_HIGH; | |
772 | break; | |
773 | case THREAD_CALL_PRIORITY_KERNEL: | |
774 | call->tc_index = THREAD_CALL_INDEX_KERNEL; | |
775 | break; | |
776 | case THREAD_CALL_PRIORITY_USER: | |
777 | call->tc_index = THREAD_CALL_INDEX_USER; | |
778 | break; | |
779 | case THREAD_CALL_PRIORITY_LOW: | |
780 | call->tc_index = THREAD_CALL_INDEX_LOW; | |
781 | break; | |
782 | case THREAD_CALL_PRIORITY_KERNEL_HIGH: | |
783 | call->tc_index = THREAD_CALL_INDEX_KERNEL_HIGH; | |
784 | break; | |
785 | default: | |
786 | panic("Invalid thread call pri value: %d", pri); | |
787 | break; | |
316670eb A |
788 | } |
789 | ||
5ba3f43e A |
790 | if (options & THREAD_CALL_OPTIONS_ONCE) { |
791 | call->tc_flags |= THREAD_CALL_ONCE; | |
792 | } | |
793 | if (options & THREAD_CALL_OPTIONS_SIGNAL) { | |
794 | call->tc_flags |= THREAD_CALL_SIGNAL | THREAD_CALL_ONCE; | |
795 | } | |
316670eb A |
796 | |
797 | return call; | |
798 | } | |
799 | ||
5ba3f43e A |
800 | thread_call_t |
801 | thread_call_allocate_with_qos(thread_call_func_t func, | |
802 | thread_call_param_t param0, | |
803 | int qos_tier, | |
804 | thread_call_options_t options) | |
805 | { | |
806 | thread_call_t call = thread_call_allocate(func, param0); | |
807 | ||
808 | switch (qos_tier) { | |
809 | case THREAD_QOS_UNSPECIFIED: | |
810 | call->tc_index = THREAD_CALL_INDEX_HIGH; | |
811 | break; | |
812 | case THREAD_QOS_LEGACY: | |
813 | call->tc_index = THREAD_CALL_INDEX_USER; | |
814 | break; | |
815 | case THREAD_QOS_MAINTENANCE: | |
816 | case THREAD_QOS_BACKGROUND: | |
817 | call->tc_index = THREAD_CALL_INDEX_LOW; | |
818 | break; | |
819 | case THREAD_QOS_UTILITY: | |
820 | call->tc_index = THREAD_CALL_INDEX_QOS_UT; | |
821 | break; | |
822 | case THREAD_QOS_USER_INITIATED: | |
823 | call->tc_index = THREAD_CALL_INDEX_QOS_IN; | |
824 | break; | |
825 | case THREAD_QOS_USER_INTERACTIVE: | |
826 | call->tc_index = THREAD_CALL_INDEX_QOS_UI; | |
827 | break; | |
828 | default: | |
829 | panic("Invalid thread call qos value: %d", qos_tier); | |
830 | break; | |
831 | } | |
832 | ||
833 | if (options & THREAD_CALL_OPTIONS_ONCE) | |
834 | call->tc_flags |= THREAD_CALL_ONCE; | |
835 | ||
836 | /* does not support THREAD_CALL_OPTIONS_SIGNAL */ | |
837 | ||
838 | return call; | |
839 | } | |
840 | ||
841 | ||
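/*
 * Example usage (a sketch; 'example_func' and 'example_ctx' are hypothetical
 * names): allocate a one-shot callout at kernel priority and arm it 10 ms out.
 *
 *	thread_call_t tc = thread_call_allocate_with_options(example_func,
 *	    (thread_call_param_t)example_ctx,
 *	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
 *
 *	uint64_t interval, deadline;
 *	nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &interval);
 *	clock_absolutetime_interval_to_deadline(interval, &deadline);
 *	thread_call_enter_delayed(tc, deadline);
 *
 *	... later, during teardown:
 *	thread_call_cancel_wait(tc);
 *	thread_call_free(tc);
 */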
1c79356b | 842 | /* |
c910b4d9 | 843 | * thread_call_allocate: |
1c79356b | 844 | * |
c910b4d9 | 845 | * Allocate a callout entry. |
1c79356b | 846 | */ |
1c79356b A |
847 | thread_call_t |
848 | thread_call_allocate( | |
316670eb A |
849 | thread_call_func_t func, |
850 | thread_call_param_t param0) | |
1c79356b | 851 | { |
316670eb | 852 | thread_call_t call = zalloc(thread_call_zone); |
c910b4d9 | 853 | |
316670eb A |
854 | thread_call_setup(call, func, param0); |
855 | call->tc_refs = 1; | |
856 | call->tc_flags = THREAD_CALL_ALLOC; | |
c910b4d9 | 857 | |
316670eb | 858 | return (call); |
1c79356b A |
859 | } |
860 | ||
861 | /* | |
c910b4d9 | 862 | * thread_call_free: |
1c79356b | 863 | * |
316670eb A |
864 | * Release a callout. If the callout is currently |
865 | * executing, it will be freed when all invocations | |
866 | * finish. | |
5ba3f43e A |
867 | * |
868 | * If the callout is currently armed to fire again, then | |
869 | * freeing is not allowed and returns FALSE. The | |
870 | * client must have canceled the pending invocation before freeing. | |
1c79356b | 871 | */ |
1c79356b A |
872 | boolean_t |
873 | thread_call_free( | |
316670eb | 874 | thread_call_t call) |
1c79356b | 875 | { |
5ba3f43e | 876 | spl_t s = disable_ints_and_lock(); |
1c79356b | 877 | |
5ba3f43e A |
878 | if (call->tc_call.queue != NULL || |
879 | ((call->tc_flags & THREAD_CALL_RESCHEDULE) != 0)) { | |
316670eb A |
880 | thread_call_unlock(); |
881 | splx(s); | |
882 | ||
883 | return (FALSE); | |
884 | } | |
885 | ||
5ba3f43e | 886 | int32_t refs = --call->tc_refs; |
316670eb A |
887 | if (refs < 0) { |
888 | panic("Refcount negative: %d\n", refs); | |
5ba3f43e | 889 | } |
316670eb | 890 | |
5ba3f43e A |
891 | if ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) |
892 | == ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) & call->tc_flags)) { | |
893 | thread_call_wait_once_locked(call, s); | |
894 | /* thread call lock has been unlocked */ | |
895 | } else { | |
896 | enable_ints_and_unlock(s); | |
897 | } | |
316670eb A |
898 | |
899 | if (refs == 0) { | |
5ba3f43e | 900 | assert(call->tc_finish_count == call->tc_submit_count); |
316670eb A |
901 | zfree(thread_call_zone, call); |
902 | } | |
1c79356b A |
903 | |
904 | return (TRUE); | |
905 | } | |
906 | ||
907 | /* | |
c910b4d9 | 908 | * thread_call_enter: |
1c79356b | 909 | * |
c910b4d9 | 910 | * Enqueue a callout entry to occur "soon". |
1c79356b | 911 | * |
c910b4d9 A |
912 | * Returns TRUE if the call was |
913 | * already on a queue. | |
1c79356b | 914 | */ |
1c79356b A |
915 | boolean_t |
916 | thread_call_enter( | |
316670eb | 917 | thread_call_t call) |
1c79356b | 918 | { |
39037602 | 919 | return thread_call_enter1(call, 0); |
1c79356b A |
920 | } |
921 | ||
922 | boolean_t | |
923 | thread_call_enter1( | |
316670eb A |
924 | thread_call_t call, |
925 | thread_call_param_t param1) | |
1c79356b | 926 | { |
316670eb A |
927 | boolean_t result = TRUE; |
928 | thread_call_group_t group; | |
316670eb | 929 | |
39037602 A |
930 | assert(call->tc_call.func != NULL); |
931 | ||
5ba3f43e A |
932 | assert((call->tc_flags & THREAD_CALL_SIGNAL) == 0); |
933 | ||
316670eb A |
934 | group = thread_call_get_group(call); |
935 | ||
5ba3f43e | 936 | spl_t s = disable_ints_and_lock(); |
316670eb A |
937 | |
938 | if (call->tc_call.queue != &group->pending_queue) { | |
939 | result = _pending_call_enqueue(call, group); | |
c910b4d9 | 940 | } |
1c79356b | 941 | |
316670eb | 942 | call->tc_call.param1 = param1; |
1c79356b | 943 | |
5ba3f43e | 944 | enable_ints_and_unlock(s); |
1c79356b A |
945 | |
946 | return (result); | |
947 | } | |
948 | ||
949 | /* | |
c910b4d9 | 950 | * thread_call_enter_delayed: |
1c79356b | 951 | * |
c910b4d9 A |
952 | * Enqueue a callout entry to occur |
953 | * at the stated time. | |
1c79356b | 954 | * |
c910b4d9 A |
955 | * Returns TRUE if the call was |
956 | * already on a queue. | |
1c79356b | 957 | */ |
1c79356b A |
958 | boolean_t |
959 | thread_call_enter_delayed( | |
316670eb | 960 | thread_call_t call, |
39236c6e | 961 | uint64_t deadline) |
1c79356b | 962 | { |
39037602 | 963 | assert(call != NULL); |
39236c6e | 964 | return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0); |
1c79356b A |
965 | } |
966 | ||
967 | boolean_t | |
968 | thread_call_enter1_delayed( | |
316670eb A |
969 | thread_call_t call, |
970 | thread_call_param_t param1, | |
971 | uint64_t deadline) | |
39236c6e | 972 | { |
39037602 | 973 | assert(call != NULL); |
39236c6e A |
974 | return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0); |
975 | } | |
976 | ||
977 | boolean_t | |
978 | thread_call_enter_delayed_with_leeway( | |
979 | thread_call_t call, | |
980 | thread_call_param_t param1, | |
981 | uint64_t deadline, | |
982 | uint64_t leeway, | |
983 | unsigned int flags) | |
984 | { | |
39037602 | 985 | assert(call != NULL); |
39236c6e A |
986 | return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags); |
987 | } | |
988 | ||
989 | ||
990 | /* | |
991 | * thread_call_enter_delayed_internal: | |
992 | * enqueue a callout entry to occur at the stated time | |
993 | * | |
994 | * Returns True if the call was already on a queue | |
995 | * params: | |
996 | * call - structure encapsulating state of the callout | |
997 | * alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters | |
998 | * deadline - time deadline in nanoseconds | |
999 | * leeway - timer slack represented as delta of deadline. | |
1000 | * flags - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing. | |
1001 | * THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing. | |
39037602 A |
1002 | * THREAD_CALL_CONTINUOUS: thread call will be called according to mach_continuous_time rather |
1003 | * than mach_absolute_time | |
39236c6e A |
1004 | */ |
1005 | boolean_t | |
1006 | thread_call_enter_delayed_internal( | |
1007 | thread_call_t call, | |
1008 | thread_call_func_t alt_func, | |
1009 | thread_call_param_t alt_param0, | |
1010 | thread_call_param_t param1, | |
1011 | uint64_t deadline, | |
1012 | uint64_t leeway, | |
1013 | unsigned int flags) | |
1c79356b | 1014 | { |
316670eb A |
1015 | boolean_t result = TRUE; |
1016 | thread_call_group_t group; | |
5ba3f43e | 1017 | uint64_t now, sdeadline, slop; |
39236c6e | 1018 | uint32_t urgency; |
5ba3f43e A |
1019 | |
1020 | thread_call_flavor_t flavor = (flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE; | |
316670eb | 1021 | |
39236c6e A |
1022 | /* direct mapping between thread_call, timer_call, and timeout_urgency values */ |
1023 | urgency = (flags & TIMEOUT_URGENCY_MASK); | |
1c79356b | 1024 | |
5ba3f43e | 1025 | spl_t s = disable_ints_and_lock(); |
39236c6e A |
1026 | |
1027 | if (call == NULL) { | |
1028 | /* allocate a structure out of internal storage, as a convenience for BSD callers */ | |
1029 | call = _internal_call_allocate(alt_func, alt_param0); | |
1030 | } | |
1031 | ||
5ba3f43e A |
1032 | assert(call->tc_call.func != NULL); |
1033 | group = thread_call_get_group(call); | |
1034 | ||
1035 | /* TODO: assert that call is not enqueued before flipping the flag */ | |
1036 | if (flavor == TCF_CONTINUOUS) { | |
1037 | now = mach_continuous_time(); | |
39037602 | 1038 | call->tc_flags |= THREAD_CALL_CONTINUOUS; |
5ba3f43e A |
1039 | } else { |
1040 | now = mach_absolute_time(); | |
1041 | call->tc_flags &= ~THREAD_CALL_CONTINUOUS; | |
39037602 A |
1042 | } |
1043 | ||
39236c6e A |
1044 | call->tc_flags |= THREAD_CALL_DELAYED; |
1045 | ||
1046 | call->tc_soft_deadline = sdeadline = deadline; | |
1047 | ||
1048 | boolean_t ratelimited = FALSE; | |
5ba3f43e A |
1049 | slop = timer_call_slop(deadline, now, urgency, current_thread(), &ratelimited); |
1050 | ||
39236c6e A |
1051 | if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop) |
1052 | slop = leeway; | |
1053 | ||
1054 | if (UINT64_MAX - deadline <= slop) | |
1055 | deadline = UINT64_MAX; | |
1056 | else | |
1057 | deadline += slop; | |
1058 | ||
39236c6e | 1059 | if (ratelimited) { |
fe8ab488 | 1060 | call->tc_flags |= TIMER_CALL_RATELIMITED; |
39236c6e | 1061 | } else { |
fe8ab488 | 1062 | call->tc_flags &= ~TIMER_CALL_RATELIMITED; |
39236c6e A |
1063 | } |
1064 | ||
1065 | call->tc_call.param1 = param1; | |
39037602 | 1066 | |
5ba3f43e | 1067 | call->tc_ttd = (sdeadline > now) ? (sdeadline - now) : 0; |
1c79356b | 1068 | |
5ba3f43e | 1069 | result = _delayed_call_enqueue(call, group, deadline, flavor); |
1c79356b | 1070 | |
5ba3f43e | 1071 | _arm_delayed_call_timer(call, group, flavor); |
1c79356b | 1072 | |
4b17d6b6 | 1073 | #if CONFIG_DTRACE |
5ba3f43e A |
1074 | DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, |
1075 | uint64_t, (deadline - sdeadline), uint64_t, (call->tc_ttd >> 32), | |
1076 | (unsigned) (call->tc_ttd & 0xFFFFFFFF), call); | |
4b17d6b6 | 1077 | #endif |
39037602 | 1078 | |
5ba3f43e | 1079 | enable_ints_and_unlock(s); |
1c79356b A |
1080 | |
1081 | return (result); | |
1082 | } | |
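/*
 * Example (a sketch): a coalescing-friendly timer with 5 ms of leeway whose
 * deadline is expressed in mach_continuous_time units, so it keeps counting
 * across sleep.
 *
 *	uint64_t leeway;
 *	nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &leeway);
 *	thread_call_enter_delayed_with_leeway(call, NULL, deadline, leeway,
 *	    THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_CONTINUOUS);
 */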
1083 | ||
1084 | /* | |
5ba3f43e A |
1085 | * Remove a callout entry from the queue |
1086 | * Called with thread_call_lock held | |
1c79356b | 1087 | */ |
5ba3f43e A |
1088 | static boolean_t |
1089 | thread_call_cancel_locked(thread_call_t call) | |
1c79356b | 1090 | { |
5ba3f43e A |
1091 | boolean_t canceled = (0 != (THREAD_CALL_RESCHEDULE & call->tc_flags)); |
1092 | call->tc_flags &= ~THREAD_CALL_RESCHEDULE; | |
316670eb | 1093 | |
5ba3f43e A |
1094 | if (canceled) { |
1095 | /* if reschedule was set, it must not have been queued */ | |
1096 | assert(call->tc_call.queue == NULL); | |
1097 | } else { | |
1098 | boolean_t do_cancel_callout = FALSE; | |
316670eb | 1099 | |
5ba3f43e A |
1100 | thread_call_flavor_t flavor = thread_call_get_flavor(call); |
1101 | thread_call_group_t group = thread_call_get_group(call); | |
c910b4d9 | 1102 | |
5ba3f43e A |
1103 | if ((call->tc_call.deadline != 0) && |
1104 | (call == qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link))) { | |
1105 | assert(call->tc_call.queue == &group->delayed_queues[flavor]); | |
1106 | do_cancel_callout = TRUE; | |
1107 | } | |
39236c6e | 1108 | |
5ba3f43e | 1109 | canceled = _call_dequeue(call, group); |
316670eb | 1110 | |
5ba3f43e A |
1111 | if (do_cancel_callout) { |
1112 | if (_arm_delayed_call_timer(NULL, group, flavor) == false) | |
1113 | timer_call_cancel(&group->delayed_timers[flavor]); | |
39236c6e A |
1114 | } |
1115 | } | |
1116 | ||
4b17d6b6 | 1117 | #if CONFIG_DTRACE |
5ba3f43e A |
1118 | DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, |
1119 | 0, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF)); | |
4b17d6b6 | 1120 | #endif |
1c79356b | 1121 | |
5ba3f43e A |
1122 | return canceled; |
1123 | } | |
1124 | ||
1125 | /* | |
1126 | * thread_call_cancel: | |
1127 | * | |
1128 | * Dequeue a callout entry. | |
1129 | * | |
1130 | * Returns TRUE if the call was | |
1131 | * on a queue. | |
1132 | */ | |
1133 | boolean_t | |
1134 | thread_call_cancel(thread_call_t call) | |
1135 | { | |
1136 | spl_t s = disable_ints_and_lock(); | |
1137 | ||
1138 | boolean_t result = thread_call_cancel_locked(call); | |
1139 | ||
1140 | enable_ints_and_unlock(s); | |
1141 | ||
1142 | return result; | |
1c79356b A |
1143 | } |
1144 | ||
316670eb A |
1145 | /* |
1146 | * Cancel a thread call. If it cannot be cancelled (i.e. | |
1147 | * is already in flight), waits for the most recent invocation | |
1148 | * to finish. Note that if clients re-submit this thread call, | |
1149 | * it may still be pending or in flight when thread_call_cancel_wait | |
1150 | * returns, but all requests to execute this work item prior | |
1151 | * to the call to thread_call_cancel_wait will have finished. | |
1152 | */ | |
1153 | boolean_t | |
5ba3f43e | 1154 | thread_call_cancel_wait(thread_call_t call) |
316670eb | 1155 | { |
5ba3f43e A |
1156 | if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) |
1157 | panic("thread_call_cancel_wait: can't wait on thread call whose storage I don't own"); | |
316670eb | 1158 | |
5ba3f43e A |
1159 | if (!ml_get_interrupts_enabled()) |
1160 | panic("unsafe thread_call_cancel_wait"); | |
316670eb | 1161 | |
5ba3f43e A |
1162 | if (current_thread()->thc_state.thc_call == call) |
1163 | panic("thread_call_cancel_wait: deadlock waiting on self from inside call: %p to function %p", | |
1164 | call, call->tc_call.func); | |
316670eb | 1165 | |
5ba3f43e | 1166 | spl_t s = disable_ints_and_lock(); |
316670eb | 1167 | |
5ba3f43e | 1168 | boolean_t canceled = thread_call_cancel_locked(call); |
316670eb | 1169 | |
5ba3f43e A |
1170 | if ((call->tc_flags & THREAD_CALL_ONCE) == THREAD_CALL_ONCE) { |
1171 | /* | |
1172 | * A cancel-wait on a 'once' call will both cancel | |
1173 | * the pending call and wait for the in-flight call | |
1174 | */ | |
316670eb | 1175 | |
5ba3f43e A |
1176 | thread_call_wait_once_locked(call, s); |
1177 | /* thread call lock unlocked */ | |
1178 | } else { | |
1179 | /* | |
1180 | * A cancel-wait on a normal call will only wait for the in-flight calls | |
1181 | * if it did not cancel the pending call. | |
1182 | * | |
1183 | * TODO: This seems less than useful - shouldn't it do the wait as well? | |
1184 | */ | |
1185 | ||
1186 | if (canceled == FALSE) { | |
1187 | thread_call_wait_locked(call, s); | |
1188 | /* thread call lock unlocked */ | |
1189 | } else { | |
1190 | enable_ints_and_unlock(s); | |
1191 | } | |
1192 | } | |
1193 | ||
1194 | return canceled; | |
316670eb A |
1195 | } |
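/*
 * Typical teardown pattern (a sketch): stop any new submissions first, then
 * call thread_call_cancel_wait() followed by thread_call_free(). Never invoke
 * this from within the callout itself; the self-deadlock check above panics
 * in that case.
 */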
1196 | ||
1197 | ||
1c79356b | 1198 | /* |
c910b4d9 | 1199 | * thread_call_wake: |
1c79356b | 1200 | * |
c910b4d9 A |
1201 | * Wake a call thread to service |
1202 | * pending call entries. May wake | |
1203 | * the daemon thread in order to | |
1204 | * create additional call threads. | |
1c79356b | 1205 | * |
c910b4d9 | 1206 | * Called with thread_call_lock held. |
316670eb A |
1207 | * |
1208 | * For high-priority group, only does wakeup/creation if there are no threads | |
1209 | * running. | |
1c79356b | 1210 | */ |
c910b4d9 A |
1211 | static __inline__ void |
1212 | thread_call_wake( | |
1213 | thread_call_group_t group) | |
1c79356b | 1214 | { |
316670eb A |
1215 | /* |
1216 | * New behavior: use threads if you've got 'em. | |
1217 | * Traditional behavior: wake only if no threads running. | |
1218 | */ | |
1219 | if (group_isparallel(group) || group->active_count == 0) { | |
3e170ce0 A |
1220 | if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, |
1221 | THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) { | |
316670eb A |
1222 | group->idle_count--; group->active_count++; |
1223 | ||
cc8bc92a A |
1224 | if (group->idle_count == 0 && (group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE) { |
1225 | if (timer_call_cancel(&group->dealloc_timer) == TRUE) { | |
1226 | group->flags &= ~TCG_DEALLOC_ACTIVE; | |
1227 | } | |
316670eb A |
1228 | } |
1229 | } else { | |
1230 | if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) { | |
1231 | thread_call_daemon_awake = TRUE; | |
3e170ce0 A |
1232 | waitq_wakeup64_one(&daemon_waitq, NO_EVENT64, |
1233 | THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); | |
316670eb A |
1234 | } |
1235 | } | |
1c79356b A |
1236 | } |
1237 | } | |
1238 | ||
9bccf70c | 1239 | /* |
2d21ac55 | 1240 | * sched_call_thread: |
9bccf70c | 1241 | * |
5ba3f43e | 1242 | * Call out invoked by the scheduler. |
9bccf70c | 1243 | */ |
2d21ac55 A |
1244 | static void |
1245 | sched_call_thread( | |
316670eb | 1246 | int type, |
5ba3f43e | 1247 | thread_t thread) |
9bccf70c | 1248 | { |
316670eb A |
1249 | thread_call_group_t group; |
1250 | ||
5ba3f43e A |
1251 | group = thread->thc_state.thc_group; |
1252 | assert((group - &thread_call_groups[0]) < THREAD_CALL_INDEX_MAX); | |
c910b4d9 | 1253 | |
6d2010ae | 1254 | thread_call_lock_spin(); |
9bccf70c | 1255 | |
2d21ac55 | 1256 | switch (type) { |
9bccf70c | 1257 | |
316670eb | 1258 | case SCHED_CALL_BLOCK: |
5ba3f43e | 1259 | assert(group->active_count); |
316670eb | 1260 | --group->active_count; |
5ba3f43e | 1261 | group->blocked_count++; |
316670eb A |
1262 | if (group->pending_count > 0) |
1263 | thread_call_wake(group); | |
1264 | break; | |
9bccf70c | 1265 | |
316670eb | 1266 | case SCHED_CALL_UNBLOCK: |
5ba3f43e A |
1267 | assert(group->blocked_count); |
1268 | --group->blocked_count; | |
316670eb A |
1269 | group->active_count++; |
1270 | break; | |
2d21ac55 | 1271 | } |
9bccf70c | 1272 | |
6d2010ae | 1273 | thread_call_unlock(); |
9bccf70c | 1274 | } |
1c79356b | 1275 | |
316670eb A |
1276 | /* |
1277 | * Interrupts disabled, lock held; returns the same way. | |
1278 | * Only called on thread calls whose storage we own. Wakes up | |
1279 | * anyone who might be waiting on this work item and frees it | |
1280 | * if the client has so requested. | |
1281 | */ | |
5ba3f43e A |
1282 | static boolean_t |
1283 | thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) | |
316670eb | 1284 | { |
5ba3f43e A |
1285 | uint64_t time; |
1286 | uint32_t flags; | |
1287 | boolean_t signal; | |
5ba3f43e | 1288 | boolean_t repend = FALSE; |
316670eb A |
1289 | |
1290 | call->tc_finish_count++; | |
5ba3f43e A |
1291 | flags = call->tc_flags; |
1292 | signal = ((THREAD_CALL_SIGNAL & flags) != 0); | |
1293 | ||
1294 | if (!signal) { | |
1295 | /* The thread call thread owns a ref until the call is finished */ | |
1296 | if (call->tc_refs <= 0) | |
1297 | panic("thread_call_finish: detected over-released thread call: %p", call); | |
1298 | call->tc_refs--; | |
1299 | } | |
1300 | ||
1301 | call->tc_flags &= ~(THREAD_CALL_RESCHEDULE | THREAD_CALL_RUNNING | THREAD_CALL_WAIT); | |
1302 | ||
1303 | if ((call->tc_refs != 0) && ((flags & THREAD_CALL_RESCHEDULE) != 0)) { | |
1304 | assert(flags & THREAD_CALL_ONCE); | |
1305 | thread_call_flavor_t flavor = thread_call_get_flavor(call); | |
1306 | ||
1307 | if (THREAD_CALL_DELAYED & flags) { | |
1308 | time = mach_absolute_time(); | |
1309 | if (flavor == TCF_CONTINUOUS) { | |
1310 | time = absolutetime_to_continuoustime(time); | |
1311 | } | |
1312 | if (call->tc_soft_deadline <= time) { | |
1313 | call->tc_flags &= ~(THREAD_CALL_DELAYED | TIMER_CALL_RATELIMITED); | |
1314 | call->tc_deadline = 0; | |
1315 | } | |
1316 | } | |
1317 | if (call->tc_deadline) { | |
1318 | _delayed_call_enqueue(call, group, call->tc_deadline, flavor); | |
1319 | if (!signal) { | |
1320 | _arm_delayed_call_timer(call, group, flavor); | |
1321 | } | |
1322 | } else if (signal) { | |
1323 | call->tc_submit_count++; | |
1324 | repend = TRUE; | |
1325 | } else { | |
1326 | _pending_call_enqueue(call, group); | |
1327 | } | |
1328 | } | |
316670eb | 1329 | |
5ba3f43e | 1330 | if (!signal && (call->tc_refs == 0)) { |
d9a64523 | 1331 | if ((flags & THREAD_CALL_WAIT) != 0) { |
316670eb A |
1332 | panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func); |
1333 | } | |
1334 | ||
5ba3f43e A |
1335 | assert(call->tc_finish_count == call->tc_submit_count); |
1336 | ||
fe8ab488 | 1337 | enable_ints_and_unlock(*s); |
316670eb A |
1338 | |
1339 | zfree(thread_call_zone, call); | |
1340 | ||
fe8ab488 | 1341 | *s = disable_ints_and_lock(); |
316670eb A |
1342 | } |
1343 | ||
d9a64523 A |
1344 | if ((flags & THREAD_CALL_WAIT) != 0) { |
1345 | /* | |
1346 | * Dropping lock here because the sched call for the | |
1347 | * high-pri group can take the big lock from under | |
1348 | * a thread lock. | |
1349 | */ | |
1350 | thread_call_unlock(); | |
1351 | thread_wakeup((event_t)call); | |
1352 | thread_call_lock_spin(); | |
1353 | /* THREAD_CALL_SIGNAL call may have been freed */ | |
1354 | } | |
1355 | ||
5ba3f43e A |
1356 | return (repend); |
1357 | } | |
1358 | ||
1359 | /* | |
1360 | * thread_call_invoke | |
1361 | * | |
1362 | * Invoke the function provided for this thread call | |
1363 | * | |
1364 | * Note that the thread call object can be deallocated by the function if we do not control its storage. | |
1365 | */ | |
1366 | static void __attribute__((noinline)) | |
1367 | thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_call_param_t param1, thread_call_t call) | |
1368 | { | |
1369 | current_thread()->thc_state.thc_call = call; | |
1370 | ||
1371 | #if DEVELOPMENT || DEBUG | |
1372 | KERNEL_DEBUG_CONSTANT( | |
1373 | MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_START, | |
1374 | VM_KERNEL_UNSLIDE(func), VM_KERNEL_ADDRHIDE(param0), VM_KERNEL_ADDRHIDE(param1), 0, 0); | |
1375 | #endif /* DEVELOPMENT || DEBUG */ | |
1376 | ||
1377 | #if CONFIG_DTRACE | |
1378 | uint64_t tc_ttd = call->tc_ttd; | |
1379 | boolean_t is_delayed = call->tc_flags & THREAD_CALL_DELAYED; | |
1380 | DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32), | |
1381 | (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call); | |
1382 | #endif | |
1383 | ||
1384 | (*func)(param0, param1); | |
1385 | ||
1386 | #if CONFIG_DTRACE | |
1387 | DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32), | |
1388 | (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call); | |
1389 | #endif | |
1390 | ||
1391 | #if DEVELOPMENT || DEBUG | |
1392 | KERNEL_DEBUG_CONSTANT( | |
1393 | MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_END, | |
1394 | VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0); | |
1395 | #endif /* DEVELOPMENT || DEBUG */ | |
1396 | ||
1397 | current_thread()->thc_state.thc_call = NULL; | |
316670eb A |
1398 | } |
1399 | ||
1c79356b | 1400 | /* |
c910b4d9 | 1401 | * thread_call_thread: |
1c79356b | 1402 | */ |
c910b4d9 A |
1403 | static void |
1404 | thread_call_thread( | |
316670eb A |
1405 | thread_call_group_t group, |
1406 | wait_result_t wres) | |
1c79356b | 1407 | { |
316670eb A |
1408 | thread_t self = current_thread(); |
1409 | boolean_t canwait; | |
1c79356b | 1410 | |
4b17d6b6 A |
1411 | if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) |
1412 | (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT); | |
1413 | ||
316670eb A |
1414 | /* |
1415 | * A wakeup with THREAD_INTERRUPTED indicates that | |
1416 | * we should terminate. | |
1417 | */ | |
1418 | if (wres == THREAD_INTERRUPTED) { | |
1419 | thread_terminate(self); | |
1420 | ||
1421 | /* NOTREACHED */ | |
1422 | panic("thread_terminate() returned?"); | |
1423 | } | |
1424 | ||
5ba3f43e | 1425 | spl_t s = disable_ints_and_lock(); |
1c79356b | 1426 | |
5ba3f43e A |
1427 | self->thc_state.thc_group = group; |
1428 | thread_sched_call(self, sched_call_thread); | |
9bccf70c | 1429 | |
316670eb | 1430 | while (group->pending_count > 0) { |
1c79356b A |
1431 | thread_call_t call; |
1432 | thread_call_func_t func; | |
1433 | thread_call_param_t param0, param1; | |
1434 | ||
5ba3f43e | 1435 | call = qe_dequeue_head(&group->pending_queue, struct thread_call, tc_call.q_link); |
39037602 | 1436 | assert(call != NULL); |
c910b4d9 | 1437 | group->pending_count--; |
1c79356b | 1438 | |
316670eb A |
1439 | func = call->tc_call.func; |
1440 | param0 = call->tc_call.param0; | |
1441 | param1 = call->tc_call.param1; | |
1442 | ||
1443 | call->tc_call.queue = NULL; | |
1c79356b A |
1444 | |
1445 | _internal_call_release(call); | |
1446 | ||
316670eb A |
1447 | /* |
1448 | * Can only do wakeups for thread calls whose storage | |
1449 | * we control. | |
1450 | */ | |
1451 | if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) { | |
1452 | canwait = TRUE; | |
5ba3f43e | 1453 | call->tc_flags |= THREAD_CALL_RUNNING; |
316670eb A |
1454 | call->tc_refs++; /* Delay free until we're done */ |
1455 | } else | |
1456 | canwait = FALSE; | |
1457 | ||
fe8ab488 | 1458 | enable_ints_and_unlock(s); |
1c79356b | 1459 | |
5ba3f43e | 1460 | thread_call_invoke(func, param0, param1, call); |
39236c6e | 1461 | |
6d2010ae A |
1462 | if (get_preemption_level() != 0) { |
1463 | int pl = get_preemption_level(); | |
1464 | panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)", | |
316670eb | 1465 | pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1); |
6d2010ae | 1466 | } |
316670eb | 1467 | |
fe8ab488 | 1468 | s = disable_ints_and_lock(); |
5ba3f43e | 1469 | |
316670eb A |
1470 | if (canwait) { |
1471 | /* Releases our reference; may free the call */ | |
5ba3f43e | 1472 | thread_call_finish(call, group, &s); |
316670eb A |
1473 | } |
1474 | } | |
9bccf70c | 1475 | |
2d21ac55 | 1476 | thread_sched_call(self, NULL); |
c910b4d9 | 1477 | group->active_count--; |
39236c6e A |
1478 | |
1479 | if (self->callout_woken_from_icontext && !self->callout_woke_thread) { | |
1480 | ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1); | |
1481 | if (self->callout_woken_from_platform_idle) | |
1482 | ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1); | |
1483 | } | |
1484 | ||
1485 | self->callout_woken_from_icontext = FALSE; | |
1486 | self->callout_woken_from_platform_idle = FALSE; | |
1487 | self->callout_woke_thread = FALSE; | |
9bccf70c | 1488 | |
316670eb A |
1489 | if (group_isparallel(group)) { |
1490 | /* | |
1491 | * For the new-style (parallel) thread call group, the thread always blocks. | |
1492 | * If we have more than the target number of threads, | |
1493 | * and this is the first to block, and it isn't active | |
1494 | * already, set a timer for deallocating a thread if we | |
1495 | * continue to have a surplus. | |
1496 | */ | |
c910b4d9 | 1497 | group->idle_count++; |
1c79356b | 1498 | |
316670eb A |
1499 | if (group->idle_count == 1) { |
1500 | group->idle_timestamp = mach_absolute_time(); | |
cc8bc92a | 1501 | } |
316670eb A |
1502 | |
1503 | if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) && | |
cc8bc92a | 1504 | ((group->active_count + group->idle_count) > group->target_thread_count)) { |
316670eb | 1505 | thread_call_start_deallocate_timer(group); |
cc8bc92a | 1506 | } |
316670eb A |
1507 | |
1508 | /* Wait for more work (or termination) */ | |
3e170ce0 | 1509 | wres = waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_INTERRUPTIBLE, 0); |
316670eb A |
1510 | if (wres != THREAD_WAITING) { |
1511 | panic("kcall worker unable to assert wait?"); | |
cc8bc92a | 1512 | } |
316670eb | 1513 | |
fe8ab488 | 1514 | enable_ints_and_unlock(s); |
1c79356b | 1515 | |
c910b4d9 | 1516 | thread_block_parameter((thread_continue_t)thread_call_thread, group); |
316670eb A |
1517 | } else { |
1518 | if (group->idle_count < group->target_thread_count) { | |
1519 | group->idle_count++; | |
c910b4d9 | 1520 | |
3e170ce0 | 1521 | waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_UNINT, 0); /* Interrupted means to exit */ |
316670eb | 1522 | |
fe8ab488 | 1523 | enable_ints_and_unlock(s); |
316670eb A |
1524 | |
1525 | thread_block_parameter((thread_continue_t)thread_call_thread, group); | |
1526 | /* NOTREACHED */ | |
1527 | } | |
1528 | } | |
1529 | ||
fe8ab488 | 1530 | enable_ints_and_unlock(s); |
316670eb A |
1531 | |
1532 | thread_terminate(self); | |
1c79356b A |
1533 | /* NOTREACHED */ |
1534 | } | |
1535 | ||
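/*
 * Illustrative sketch, not part of this file: the two storage models the
 * worker loop above distinguishes via THREAD_CALL_ALLOC.  Calls created with
 * thread_call_allocate() are heap-managed, so they can be reference-counted
 * across an invocation, waited on, and freed.  Calls initialized with
 * thread_call_setup() live in caller-owned memory (in-kernel users that can
 * see struct thread_call) and must simply outlive any pending or running
 * invocation.  example_callout is a hypothetical thread_call_func_t.
 */
static void example_callout(thread_call_param_t p0, thread_call_param_t p1);

static struct thread_call example_static_call;          /* caller-owned storage */

static void
example_init_thread_calls(void)
{
	/* Heap-managed: safe to thread_call_cancel_wait() and thread_call_free() */
	thread_call_t heap_call = thread_call_allocate(example_callout, NULL);
	thread_call_enter(heap_call);

	/* Caller-owned: no THREAD_CALL_ALLOC, so the caller guarantees lifetime */
	thread_call_setup(&example_static_call, example_callout, NULL);
	thread_call_enter(&example_static_call);
}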
1c79356b | 1536 | /* |
316670eb A |
1537 | * thread_call_daemon: walk the list of groups, allocating | |
1538 | * threads where appropriate (as determined by | |
1539 | * thread_call_group_should_add_thread()). | |
1c79356b | 1540 | */ |
c910b4d9 | 1541 | static void |
316670eb | 1542 | thread_call_daemon_continue(__unused void *arg) |
1c79356b | 1543 | { |
5ba3f43e | 1544 | spl_t s = disable_ints_and_lock(); |
316670eb A |
1545 | |
1546 | /* Starting at index zero happens to visit the highest-priority group first. */ | |
5ba3f43e A |
1547 | for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { |
1548 | thread_call_group_t group = &thread_call_groups[i]; | |
316670eb A |
1549 | while (thread_call_group_should_add_thread(group)) { |
1550 | group->active_count++; | |
1551 | ||
fe8ab488 | 1552 | enable_ints_and_unlock(s); |
316670eb | 1553 | |
5ba3f43e | 1554 | kern_return_t kr = thread_call_thread_create(group); |
316670eb A |
1555 | if (kr != KERN_SUCCESS) { |
1556 | /* | |
1557 | * On failure, just pause for a moment and give up. | |
1558 | * We can try again later. | |
1559 | */ | |
1560 | delay(10000); /* 10 ms */ | |
fe8ab488 | 1561 | s = disable_ints_and_lock(); |
316670eb A |
1562 | goto out; |
1563 | } | |
1564 | ||
fe8ab488 | 1565 | s = disable_ints_and_lock(); |
316670eb A |
1566 | } |
1567 | } | |
91447636 | 1568 | |
316670eb A |
1569 | out: |
1570 | thread_call_daemon_awake = FALSE; | |
3e170ce0 | 1571 | waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0); |
55e303ae | 1572 | |
fe8ab488 | 1573 | enable_ints_and_unlock(s); |
c910b4d9 | 1574 | |
316670eb | 1575 | thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL); |
1c79356b A |
1576 | /* NOTREACHED */ |
1577 | } | |
1578 | ||
c910b4d9 A |
1579 | static void |
1580 | thread_call_daemon( | |
316670eb | 1581 | __unused void *arg) |
1c79356b | 1582 | { |
55e303ae | 1583 | thread_t self = current_thread(); |
1c79356b | 1584 | |
91447636 | 1585 | self->options |= TH_OPT_VMPRIV; |
1c79356b | 1586 | vm_page_free_reserve(2); /* XXX */ |
316670eb | 1587 | |
5ba3f43e A |
1588 | thread_set_thread_name(self, "thread_call_daemon"); |
1589 | ||
316670eb A |
1590 | thread_call_daemon_continue(NULL); |
1591 | /* NOTREACHED */ | |
1592 | } | |
1593 | ||
1594 | /* | |
1595 | * Schedule a timer to deallocate a worker thread if we have a surplus | |
1596 | * of threads (in excess of the group's target) and at least one thread | |
1597 | * has remained idle for the entire interval. | |
1598 | */ | |
1599 | static void | |
cc8bc92a | 1600 | thread_call_start_deallocate_timer(thread_call_group_t group) |
316670eb | 1601 | { |
cc8bc92a | 1602 | __assert_only boolean_t already_enqueued; |
316670eb A |
1603 | |
1604 | assert(group->idle_count > 0); | |
cc8bc92a | 1605 | assert((group->flags & TCG_DEALLOC_ACTIVE) == 0); |
316670eb | 1606 | |
cc8bc92a | 1607 | group->flags |= TCG_DEALLOC_ACTIVE; |
316670eb | 1608 | |
cc8bc92a A |
1609 | uint64_t deadline = group->idle_timestamp + thread_call_dealloc_interval_abs; |
1610 | ||
1611 | already_enqueued = timer_call_enter(&group->dealloc_timer, deadline, 0); | |
1612 | ||
1613 | assert(already_enqueued == FALSE); | |
1c79356b A |
1614 | } |
1615 | ||
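/*
 * Illustrative sketch, not part of this file: how an interval such as
 * thread_call_dealloc_interval_abs gets its value.  Timer deadlines are in
 * mach absolute time units, so a nanosecond interval must be converted with
 * nanoseconds_to_absolutetime() before it can be added to a timestamp.  The
 * 5-second figure is an arbitrary example, not the actual tunable's value.
 */
static uint64_t example_interval_abs;

static void
example_compute_interval(void)
{
	/* Convert 5 seconds worth of nanoseconds into absolute time units */
	nanoseconds_to_absolutetime(5 * NSEC_PER_SEC, &example_interval_abs);

	/* A deadline is then "now" plus the converted interval */
	uint64_t deadline = mach_absolute_time() + example_interval_abs;
	(void)deadline;
}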
5ba3f43e | 1616 | /* non-static so dtrace can find it rdar://problem/31156135&31379348 */ |
6d2010ae | 1617 | void |
5ba3f43e | 1618 | thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) |
1c79356b | 1619 | { |
5ba3f43e A |
1620 | thread_call_group_t group = (thread_call_group_t) p0; |
1621 | thread_call_flavor_t flavor = (thread_call_flavor_t) p1; | |
1c79356b | 1622 | |
5ba3f43e A |
1623 | thread_call_t call; |
1624 | uint64_t now; | |
1625 | boolean_t restart; | |
1626 | boolean_t repend; | |
316670eb | 1627 | |
5ba3f43e | 1628 | thread_call_lock_spin(); |
316670eb | 1629 | |
5ba3f43e A |
1630 | if (flavor == TCF_CONTINUOUS) |
1631 | now = mach_continuous_time(); | |
1632 | else if (flavor == TCF_ABSOLUTE) | |
1633 | now = mach_absolute_time(); | |
1634 | else | |
1635 | panic("invalid timer flavor: %d", flavor); | |
1636 | ||
1637 | do { | |
1638 | restart = FALSE; | |
1639 | qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) { | |
1640 | if (flavor == TCF_CONTINUOUS) | |
1641 | assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS); | |
1642 | else | |
1643 | assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0); | |
1644 | ||
1645 | /* | |
1646 | * If we hit a call that isn't yet ready to expire, | |
1647 | * then we're done for now. | |
1648 | * TODO: The next timer in the list could have a larger leeway | |
1649 | * and therefore be ready to expire. | |
1650 | * Sort by deadline, then by soft deadline, to avoid this. | |
1651 | */ | |
1652 | if (call->tc_soft_deadline > now) | |
1653 | break; | |
39037602 | 1654 | |
5ba3f43e A |
1655 | /* |
1656 | * If we hit a rate-limited timer, don't eagerly wake it up. | |
1657 | * Wait until it reaches the end of the leeway window. | |
1658 | * | |
1659 | * TODO: What if the next timer is not rate-limited? | |
1660 | * Have a separate rate-limited queue to avoid this | |
1661 | */ | |
fe8ab488 | 1662 | if ((call->tc_flags & THREAD_CALL_RATELIMITED) && |
5ba3f43e | 1663 | (call->tc_call.deadline > now) && |
39236c6e A |
1664 | (ml_timer_forced_evaluation() == FALSE)) { |
1665 | break; | |
1666 | } | |
316670eb | 1667 | |
5ba3f43e A |
1668 | if (THREAD_CALL_SIGNAL & call->tc_flags) { |
1669 | __assert_only queue_head_t *old_queue; | |
1670 | old_queue = call_entry_dequeue(&call->tc_call); | |
1671 | assert(old_queue == &group->delayed_queues[flavor]); | |
1c79356b | 1672 | |
5ba3f43e A |
1673 | do { |
1674 | thread_call_func_t func = call->tc_call.func; | |
1675 | thread_call_param_t param0 = call->tc_call.param0; | |
1676 | thread_call_param_t param1 = call->tc_call.param1; | |
1677 | ||
1678 | call->tc_flags |= THREAD_CALL_RUNNING; | |
1679 | thread_call_unlock(); | |
1680 | thread_call_invoke(func, param0, param1, call); | |
1681 | thread_call_lock_spin(); | |
1682 | ||
1683 | repend = thread_call_finish(call, group, NULL); | |
1684 | } while (repend); | |
1685 | ||
1686 | /* call may have been freed */ | |
1687 | restart = TRUE; | |
1688 | break; | |
1689 | } else { | |
1690 | _pending_call_enqueue(call, group); | |
1691 | } | |
1692 | } | |
1693 | } while (restart); | |
1694 | ||
1695 | _arm_delayed_call_timer(call, group, flavor); | |
1c79356b | 1696 | |
316670eb A |
1697 | thread_call_unlock(); |
1698 | } | |
1699 | ||
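/*
 * Illustrative sketch, not part of this file: how a client arms the delayed
 * queue that thread_call_delayed_timer() drains.  With THREAD_CALL_DELAY_LEEWAY
 * the requested deadline acts as the soft deadline (earliest firing time) and
 * the leeway extends the hard deadline, which is what makes the call eligible
 * for the rate-limited handling above.  The call argument is hypothetical.
 */
static void
example_schedule_delayed(thread_call_t call)
{
	uint64_t deadline, leeway;

	/* Fire roughly 100 ms from now, tolerating up to 50 ms of extra slack */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	nanoseconds_to_absolutetime(50 * NSEC_PER_MSEC, &leeway);

	thread_call_enter_delayed_with_leeway(call, NULL, deadline, leeway,
	    THREAD_CALL_DELAY_LEEWAY);
}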
39236c6e | 1700 | static void |
5ba3f43e A |
1701 | thread_call_delayed_timer_rescan(thread_call_group_t group, |
1702 | thread_call_flavor_t flavor) | |
39236c6e | 1703 | { |
5ba3f43e A |
1704 | thread_call_t call; |
1705 | uint64_t now; | |
39236c6e | 1706 | |
5ba3f43e | 1707 | spl_t s = disable_ints_and_lock(); |
39236c6e A |
1708 | |
1709 | assert(ml_timer_forced_evaluation() == TRUE); | |
39037602 | 1710 | |
5ba3f43e A |
1711 | if (flavor == TCF_CONTINUOUS) { |
1712 | now = mach_continuous_time(); | |
39037602 | 1713 | } else { |
5ba3f43e | 1714 | now = mach_absolute_time(); |
39037602 | 1715 | } |
39236c6e | 1716 | |
5ba3f43e A |
1717 | qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) { |
1718 | if (call->tc_soft_deadline <= now) { | |
39236c6e | 1719 | _pending_call_enqueue(call, group); |
5ba3f43e | 1720 | } else { |
39236c6e A |
1721 | uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline; |
1722 | assert (call->tc_call.deadline >= call->tc_soft_deadline); | |
5ba3f43e A |
1723 | /* |
1724 | * On a latency quality-of-service level change, | |
39236c6e A |
1725 | * re-sort potentially rate-limited callouts. The platform | |
1726 | * layer determines which timers require this. | |
1727 | */ | |
1728 | if (timer_resort_threshold(skew)) { | |
1729 | _call_dequeue(call, group); | |
5ba3f43e | 1730 | _delayed_call_enqueue(call, group, call->tc_soft_deadline, flavor); |
39236c6e | 1731 | } |
39236c6e A |
1732 | } |
1733 | } | |
1734 | ||
5ba3f43e A |
1735 | _arm_delayed_call_timer(NULL, group, flavor); |
1736 | ||
1737 | enable_ints_and_unlock(s); | |
39236c6e A |
1738 | } |
1739 | ||
1740 | void | |
1741 | thread_call_delayed_timer_rescan_all(void) { | |
5ba3f43e A |
1742 | for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { |
1743 | thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_ABSOLUTE); | |
1744 | thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_CONTINUOUS); | |
39037602 | 1745 | } |
39236c6e A |
1746 | } |
1747 | ||
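/*
 * Illustrative sketch, not part of this file: arming a continuous-time
 * delayed call, i.e. one that lands on the TCF_CONTINUOUS queue rescanned
 * above.  mach_continuous_time() keeps advancing across sleep, so the call
 * fires "N seconds from now, including time spent asleep".  This assumes the
 * THREAD_CALL_CONTINUOUS flag is accepted by the leeway variant, as the
 * flavor handling in this file suggests; the call argument is hypothetical.
 */
static void
example_schedule_continuous(thread_call_t call)
{
	uint64_t interval, deadline;

	nanoseconds_to_absolutetime(30 * NSEC_PER_SEC, &interval);
	deadline = mach_continuous_time() + interval;

	thread_call_enter_delayed_with_leeway(call, NULL, deadline, 0,
	    THREAD_CALL_CONTINUOUS);
}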
316670eb A |
1748 | /* |
1749 | * Timer callback to tell a thread to terminate if | |
1750 | * we have an excess of threads and at least one has been | |
1751 | * idle for a long time. | |
1752 | */ | |
1753 | static void | |
1754 | thread_call_dealloc_timer( | |
1755 | timer_call_param_t p0, | |
1756 | __unused timer_call_param_t p1) | |
1757 | { | |
1758 | thread_call_group_t group = (thread_call_group_t)p0; | |
1759 | uint64_t now; | |
1760 | kern_return_t res; | |
1761 | boolean_t terminated = FALSE; | |
cc8bc92a | 1762 | |
316670eb A |
1763 | thread_call_lock_spin(); |
1764 | ||
cc8bc92a A |
1765 | assert((group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE); |
1766 | ||
316670eb | 1767 | now = mach_absolute_time(); |
cc8bc92a | 1768 | |
316670eb A |
1769 | if (group->idle_count > 0) { |
1770 | if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) { | |
1771 | terminated = TRUE; | |
1772 | group->idle_count--; | |
3e170ce0 A |
1773 | res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, |
1774 | THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES); | |
316670eb A |
1775 | if (res != KERN_SUCCESS) { |
1776 | panic("Unable to wake up idle thread for termination?"); | |
1777 | } | |
1778 | } | |
316670eb A |
1779 | } |
1780 | ||
cc8bc92a A |
1781 | group->flags &= ~TCG_DEALLOC_ACTIVE; |
1782 | ||
316670eb A |
1783 | /* |
1784 | * If we still have an excess of threads, schedule another | |
1785 | * invocation of this function. | |
1786 | */ | |
1787 | if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) { | |
1788 | /* | |
1789 | * If we just terminated a thread, push out the | |
1790 | * next deadline. | |
1791 | */ | |
1792 | if (terminated) { | |
1793 | group->idle_timestamp = now; | |
1794 | } | |
1c79356b | 1795 | |
316670eb | 1796 | thread_call_start_deallocate_timer(group); |
316670eb A |
1797 | } |
1798 | ||
1799 | thread_call_unlock(); | |
1c79356b | 1800 | } |
316670eb | 1801 | |
5ba3f43e A |
1802 | /* |
1803 | * Wait for the in-flight invocation of the thread call to complete. | |
1804 | * We know there's only one in flight because of the 'once' flag. | |
1805 | * | |
1806 | * If a subsequent invocation comes in before we wake up, that's OK. | |
1807 | * | |
1808 | * TODO: Here is where we will add priority inheritance to the thread executing | |
1809 | * the thread call, in case it's lower priority than the current thread. | |
1810 | * <rdar://problem/30321792> Priority inheritance for thread_call_wait_once | |
1811 | * | |
1812 | * Called with the thread call lock held; returns with it unlocked. | |
1813 | * This lets us avoid a spurious lock take/drop after waking up from thread_block. | |
1814 | */ | |
1815 | static boolean_t | |
1816 | thread_call_wait_once_locked(thread_call_t call, spl_t s) | |
1817 | { | |
1818 | assert(call->tc_flags & THREAD_CALL_ALLOC); | |
1819 | assert(call->tc_flags & THREAD_CALL_ONCE); | |
1820 | ||
1821 | if ((call->tc_flags & THREAD_CALL_RUNNING) == 0) { | |
1822 | enable_ints_and_unlock(s); | |
1823 | return FALSE; | |
1824 | } | |
1825 | ||
1826 | /* call is running, so we have to wait for it */ | |
1827 | call->tc_flags |= THREAD_CALL_WAIT; | |
1828 | ||
1829 | wait_result_t res = assert_wait(call, THREAD_UNINT); | |
1830 | if (res != THREAD_WAITING) | |
1831 | panic("Unable to assert wait: %d", res); | |
1832 | ||
1833 | enable_ints_and_unlock(s); | |
1834 | ||
1835 | res = thread_block(THREAD_CONTINUE_NULL); | |
1836 | if (res != THREAD_AWAKENED) | |
1837 | panic("Awoken with %d?", res); | |
1838 | ||
1839 | /* returns unlocked */ | |
1840 | return TRUE; | |
1841 | } | |
1842 | ||
1843 | /* | |
1844 | * Wait for an in-flight invocation to complete | |
1845 | * Does NOT try to cancel, so the client doesn't need to hold their | |
1846 | * lock while calling this function. | |
1847 | * | |
1848 | * Returns whether or not it had to wait. | |
1849 | * | |
1850 | * Only works for THREAD_CALL_ONCE calls. | |
1851 | */ | |
1852 | boolean_t | |
1853 | thread_call_wait_once(thread_call_t call) | |
1854 | { | |
1855 | if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) | |
1856 | panic("thread_call_wait_once: can't wait on thread call whose storage I don't own"); | |
1857 | ||
1858 | if ((call->tc_flags & THREAD_CALL_ONCE) == 0) | |
1859 | panic("thread_call_wait_once: can't wait_once on a non-once call"); | |
1860 | ||
1861 | if (!ml_get_interrupts_enabled()) | |
1862 | panic("unsafe thread_call_wait_once"); | |
1863 | ||
1864 | if (current_thread()->thc_state.thc_call == call) | |
1865 | panic("thread_call_wait_once: deadlock waiting on self from inside call: %p to function %p", | |
1866 | call, call->tc_call.func); | |
1867 | ||
1868 | spl_t s = disable_ints_and_lock(); | |
1869 | ||
1870 | boolean_t waited = thread_call_wait_once_locked(call, s); | |
1871 | /* thread call lock unlocked */ | |
1872 | ||
1873 | return waited; | |
1874 | } | |
1875 | ||
1876 | ||
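/*
 * Illustrative sketch, not part of this file: the client pattern that
 * thread_call_wait_once() supports.  A call created with
 * THREAD_CALL_OPTIONS_ONCE has at most one invocation in flight, so teardown
 * can cancel any pending invocation and then wait out an in-flight one before
 * the backing state goes away.  example_callout is hypothetical.
 */
static void example_callout(thread_call_param_t p0, thread_call_param_t p1);

static void
example_once_call_lifecycle(void)
{
	thread_call_t once_call = thread_call_allocate_with_options(
	    example_callout, NULL, THREAD_CALL_PRIORITY_KERNEL,
	    THREAD_CALL_OPTIONS_ONCE);

	thread_call_enter(once_call);

	/* ... later, during teardown ... */
	thread_call_cancel(once_call);          /* drop a pending invocation   */
	thread_call_wait_once(once_call);       /* wait out a running one      */
	thread_call_free(once_call);            /* now safe to free            */
}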
316670eb A |
1877 | /* |
1878 | * Wait for all requested invocations of a thread call prior to now | |
5ba3f43e | 1879 | * to finish. Can only be invoked on thread calls whose storage we manage. |
316670eb A |
1880 | * Just waits for the finish count to catch up to the submit count we find |
1881 | * at the beginning of our wait. | |
5ba3f43e A |
1882 | * |
1883 | * Called with thread_call_lock held. Returns with lock released. | |
316670eb A |
1884 | */ |
1885 | static void | |
5ba3f43e | 1886 | thread_call_wait_locked(thread_call_t call, spl_t s) |
316670eb A |
1887 | { |
1888 | uint64_t submit_count; | |
1889 | wait_result_t res; | |
1890 | ||
1891 | assert(call->tc_flags & THREAD_CALL_ALLOC); | |
1892 | ||
1893 | submit_count = call->tc_submit_count; | |
1894 | ||
1895 | while (call->tc_finish_count < submit_count) { | |
1896 | call->tc_flags |= THREAD_CALL_WAIT; | |
1897 | ||
1898 | res = assert_wait(call, THREAD_UNINT); | |
5ba3f43e A |
1899 | if (res != THREAD_WAITING) |
1900 | panic("Unable to assert wait: %d", res); | |
316670eb | 1901 | |
5ba3f43e | 1902 | enable_ints_and_unlock(s); |
316670eb | 1903 | |
5ba3f43e A |
1904 | res = thread_block(THREAD_CONTINUE_NULL); |
1905 | if (res != THREAD_AWAKENED) | |
316670eb | 1906 | panic("Awoken with %d?", res); |
5ba3f43e A |
1907 | |
1908 | s = disable_ints_and_lock(); | |
316670eb | 1909 | } |
5ba3f43e A |
1910 | |
1911 | enable_ints_and_unlock(s); | |
316670eb A |
1912 | } |
1913 | ||
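/*
 * Illustrative sketch, not part of this file: thread_call_cancel_wait() is
 * the client-facing entry point that reaches the wait path above for calls
 * created by thread_call_allocate().  The usual pattern is to cancel-and-wait
 * before releasing whatever state the callout dereferences.  The device
 * structure and release helper are hypothetical.
 */
struct example_device;
extern void example_device_release(struct example_device *dev);

static void
example_stop_polling(struct example_device *dev, thread_call_t poll_call)
{
	/*
	 * Cancels any pending invocation and, if one is already running,
	 * blocks until it has finished.
	 */
	thread_call_cancel_wait(poll_call);

	example_device_release(dev);    /* safe once no one re-enters the call */
}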
1914 | /* | |
1915 | * Determine whether a thread call is either on a queue or | |
1916 | * currently being executed. | |
1917 | */ | |
1918 | boolean_t | |
1919 | thread_call_isactive(thread_call_t call) | |
1920 | { | |
1921 | boolean_t active; | |
1922 | ||
5ba3f43e | 1923 | spl_t s = disable_ints_and_lock(); |
316670eb | 1924 | active = (call->tc_submit_count > call->tc_finish_count); |
fe8ab488 | 1925 | enable_ints_and_unlock(s); |
316670eb A |
1926 | |
1927 | return active; | |
1928 | } | |
39037602 A |
1929 | |
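/*
 * Illustrative sketch, not part of this file: thread_call_isactive() answers
 * "queued or currently executing", which makes it a handy teardown sanity
 * check after a cancel-and-wait, assuming no other thread can re-enter the
 * call at that point.  The call argument is hypothetical.
 */
static void
example_assert_quiesced(thread_call_t call)
{
	thread_call_cancel_wait(call);

	/* Nothing should be queued or running any more */
	assert(thread_call_isactive(call) == FALSE);
}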
1930 | /* | |
1931 | * adjust_cont_time_thread_calls | |
1932 | * On wake from sleep, re-arm the delayed call timer for each continuous-time thread call queue. | |
1933 | */ | |
1934 | void | |
1935 | adjust_cont_time_thread_calls(void) | |
1936 | { | |
5ba3f43e | 1937 | spl_t s = disable_ints_and_lock(); |
39037602 | 1938 | |
5ba3f43e A |
1939 | for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { |
1940 | thread_call_group_t group = &thread_call_groups[i]; | |
39037602 | 1941 | |
5ba3f43e A |
1942 | /* only the continuous timers need to be re-armed */ |
1943 | ||
1944 | _arm_delayed_call_timer(NULL, group, TCF_CONTINUOUS); | |
1945 | } | |
39037602 A |
1946 | |
1947 | enable_ints_and_unlock(s); | |
1948 | } | |
5ba3f43e | 1949 |