1c79356b 1/*
c910b4d9 2 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
28
29#include <mach/mach_types.h>
91447636 30#include <mach/thread_act.h>
1c79356b 31
91447636 32#include <kern/kern_types.h>
c910b4d9 33#include <kern/zalloc.h>
34#include <kern/sched_prim.h>
35#include <kern/clock.h>
36#include <kern/task.h>
37#include <kern/thread.h>
3e170ce0 38#include <kern/waitq.h>
39236c6e 39#include <kern/ledger.h>
40
41#include <vm/vm_pageout.h>
42
43#include <kern/thread_call.h>
44#include <kern/call_entry.h>
45#include <kern/timer_call.h>
46
316670eb 47#include <libkern/OSAtomic.h>
39236c6e 48#include <kern/timer_queue.h>
316670eb 49
55e303ae 50#include <sys/kdebug.h>
51#if CONFIG_DTRACE
52#include <mach/sdt.h>
53#endif
39236c6e 54#include <machine/machine_routines.h>
1c79356b 55
316670eb 56static zone_t thread_call_zone;
3e170ce0 57static struct waitq daemon_waitq;
1c79356b 58
59struct thread_call_group {
60 queue_head_t pending_queue;
6d2010ae 61 uint32_t pending_count;
1c79356b 62
c910b4d9 63 queue_head_t delayed_queue;
316670eb 64 uint32_t delayed_count;
1c79356b 65
c910b4d9 66 timer_call_data_t delayed_timer;
316670eb 67 timer_call_data_t dealloc_timer;
1c79356b 68
3e170ce0 69 struct waitq idle_waitq;
6d2010ae 70 uint32_t idle_count, active_count;
1c79356b 71
72 integer_t pri;
73 uint32_t target_thread_count;
74 uint64_t idle_timestamp;
c910b4d9 75
76 uint32_t flags;
77 sched_call_t sched_call;
78};
c910b4d9 79
316670eb 80typedef struct thread_call_group *thread_call_group_t;
c910b4d9 81
82#define TCG_PARALLEL 0x01
83#define TCG_DEALLOC_ACTIVE 0x02
39037602 84#define TCG_CONTINUOUS 0x04
316670eb 85
86#define THREAD_CALL_PRIO_COUNT 4
87#define THREAD_CALL_ABSTIME_COUNT 4
88#define THREAD_CALL_CONTTIME_COUNT 4
89#define THREAD_CALL_GROUP_COUNT (THREAD_CALL_CONTTIME_COUNT + THREAD_CALL_ABSTIME_COUNT)
90#define THREAD_CALL_THREAD_MIN 4
91#define INTERNAL_CALL_COUNT 768
92#define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * 1000 * 1000) /* 5 ms */
93#define THREAD_CALL_ADD_RATIO 4
94#define THREAD_CALL_MACH_FACTOR_CAP 3
95
96#define IS_CONT_GROUP(group) \
97 (((group)->flags & TCG_CONTINUOUS) ? TRUE : FALSE)
98
99// groups [0..4]: thread calls in mach_absolute_time
100// groups [4..8]: thread calls in mach_continuous_time
101static struct thread_call_group thread_call_groups[THREAD_CALL_GROUP_COUNT];
102
103static struct thread_call_group *abstime_thread_call_groups;
104static struct thread_call_group *conttime_thread_call_groups;
105
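/*
 * Added illustration: how a (priority, continuous-time) pair selects a
 * slot in thread_call_groups[].  The abstime groups occupy indices
 * [0, THREAD_CALL_ABSTIME_COUNT) and the continuous-time groups the
 * following THREAD_CALL_CONTTIME_COUNT slots, which is the layout
 * thread_call_get_group() relies on below.  The helper name
 * example_group_index is hypothetical and exists only for this sketch.
 */
static inline unsigned int
example_group_index(thread_call_priority_t pri, boolean_t continuous)
{
	/* pri is THREAD_CALL_PRIORITY_{HIGH,KERNEL,USER,LOW}, i.e. 0..3 */
	return (continuous ? THREAD_CALL_ABSTIME_COUNT : 0) + (unsigned int)pri;
}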
106static boolean_t thread_call_daemon_awake;
107static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT];
108static queue_head_t thread_call_internal_queue;
39236c6e 109int thread_call_internal_queue_count = 0;
110static uint64_t thread_call_dealloc_interval_abs;
111
39236c6e 112static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0);
113static __inline__ void _internal_call_release(thread_call_t call);
114static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
115static __inline__ boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
116static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group);
117static __inline__ void thread_call_wake(thread_call_group_t group);
118static __inline__ void _set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
119static boolean_t _remove_from_pending_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
120static boolean_t _remove_from_delayed_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
121static void thread_call_daemon(void *arg);
122static void thread_call_thread(thread_call_group_t group, wait_result_t wres);
123extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
124static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
39037602 125static void thread_call_group_setup(thread_call_group_t group, thread_call_priority_t pri, uint32_t target_thread_count, boolean_t parallel, boolean_t continuous);
126static void sched_call_thread(int type, thread_t thread);
127static void thread_call_start_deallocate_timer(thread_call_group_t group);
128static void thread_call_wait_locked(thread_call_t call);
129static boolean_t thread_call_enter_delayed_internal(thread_call_t call,
130 thread_call_func_t alt_func, thread_call_param_t alt_param0,
131 thread_call_param_t param1, uint64_t deadline,
132 uint64_t leeway, unsigned int flags);
133
134#define qe(x) ((queue_entry_t)(x))
135#define TC(x) ((thread_call_t)(x))
136
137
138lck_grp_t thread_call_queues_lck_grp;
139lck_grp_t thread_call_lck_grp;
140lck_attr_t thread_call_lck_attr;
141lck_grp_attr_t thread_call_lck_grp_attr;
142
6d2010ae 143lck_mtx_t thread_call_lock_data;
6d2010ae 144
316670eb 145
146#define thread_call_lock_spin() \
147 lck_mtx_lock_spin_always(&thread_call_lock_data)
148
149#define thread_call_unlock() \
150 lck_mtx_unlock_always(&thread_call_lock_data)
151
39236c6e 152extern boolean_t mach_timer_coalescing_enabled;
6d2010ae 153
154static inline spl_t
155disable_ints_and_lock(void)
156{
157 spl_t s;
158
159 s = splsched();
160 thread_call_lock_spin();
161
162 return s;
163}
164
165static inline void
fe8ab488 166enable_ints_and_unlock(spl_t s)
167{
168 thread_call_unlock();
fe8ab488 169 splx(s);
170}
171
172
173static inline boolean_t
174group_isparallel(thread_call_group_t group)
175{
176 return ((group->flags & TCG_PARALLEL) != 0);
177}
178
179static boolean_t
180thread_call_group_should_add_thread(thread_call_group_t group)
181{
182 uint32_t thread_count;
183
184 if (!group_isparallel(group)) {
185 if (group->pending_count > 0 && group->active_count == 0) {
186 return TRUE;
187 }
188
189 return FALSE;
190 }
191
192 if (group->pending_count > 0) {
193 if (group->idle_count > 0) {
194 panic("Pending work, but threads are idle?");
195 }
196
197 thread_count = group->active_count;
198
199 /*
200 * Add a thread if either there are no threads,
201 * the group has fewer than its target number of
202 * threads, or the amount of work is large relative
203 * to the number of threads. In the last case, pay attention
204 * to the total load on the system, and back off if
205 * it's high.
206 */
207 if ((thread_count == 0) ||
208 (thread_count < group->target_thread_count) ||
209 ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
210 (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
211 return TRUE;
212 }
213 }
214
215 return FALSE;
216}
217
218static inline integer_t
219thread_call_priority_to_sched_pri(thread_call_priority_t pri)
220{
221 switch (pri) {
222 case THREAD_CALL_PRIORITY_HIGH:
223 return BASEPRI_PREEMPT;
224 case THREAD_CALL_PRIORITY_KERNEL:
225 return BASEPRI_KERNEL;
226 case THREAD_CALL_PRIORITY_USER:
227 return BASEPRI_DEFAULT;
228 case THREAD_CALL_PRIORITY_LOW:
39236c6e 229 return MAXPRI_THROTTLE;
230 default:
231 panic("Invalid priority.");
232 }
233
234 return 0;
235}
236
237/* Lock held */
238static inline thread_call_group_t
239thread_call_get_group(
240 thread_call_t call)
241{
242 thread_call_priority_t pri = call->tc_pri;
243
244 assert(pri == THREAD_CALL_PRIORITY_LOW ||
245 pri == THREAD_CALL_PRIORITY_USER ||
246 pri == THREAD_CALL_PRIORITY_KERNEL ||
247 pri == THREAD_CALL_PRIORITY_HIGH);
248
249 thread_call_group_t group;
250
251 if(call->tc_flags & THREAD_CALL_CONTINUOUS) {
252 group = &conttime_thread_call_groups[pri];
253 } else {
254 group = &abstime_thread_call_groups[pri];
255 }
256
257 assert(IS_CONT_GROUP(group) == ((call->tc_flags & THREAD_CALL_CONTINUOUS) ? TRUE : FALSE));
258 return group;
259}
260
261static void
262thread_call_group_setup(
263 thread_call_group_t group,
264 thread_call_priority_t pri,
265 uint32_t target_thread_count,
266 boolean_t parallel,
267 boolean_t continuous)
268{
269 queue_init(&group->pending_queue);
270 queue_init(&group->delayed_queue);
271
272 timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
273 timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);
274
3e170ce0 275 waitq_init(&group->idle_waitq, SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
276
277 group->target_thread_count = target_thread_count;
278 group->pri = thread_call_priority_to_sched_pri(pri);
279
280 group->sched_call = sched_call_thread;
281 if (parallel) {
282 group->flags |= TCG_PARALLEL;
283 group->sched_call = NULL;
284 }
285
286 if(continuous) {
287 group->flags |= TCG_CONTINUOUS;
288 }
289}
290
291/*
292 * Simple wrapper for creating threads bound to
293 * thread call groups.
294 */
295static kern_return_t
296thread_call_thread_create(
297 thread_call_group_t group)
298{
299 thread_t thread;
300 kern_return_t result;
301
302 result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
303 if (result != KERN_SUCCESS) {
304 return result;
305 }
306
307 if (group->pri < BASEPRI_PREEMPT) {
308 /*
309 * New style doesn't get to run to completion in
310 * kernel if there are higher priority threads
311 * available.
312 */
313 thread_set_eager_preempt(thread);
314 }
315
316 thread_deallocate(thread);
317 return KERN_SUCCESS;
318}
319
1c79356b 320/*
c910b4d9 321 * thread_call_initialize:
1c79356b 322 *
323 * Initialize this module, called
324 * early during system initialization.
1c79356b 325 */
326void
327thread_call_initialize(void)
328{
6d2010ae 329 thread_call_t call;
c910b4d9 330 kern_return_t result;
331 thread_t thread;
332 int i;
fe8ab488 333 spl_t s;
334
335 i = sizeof (thread_call_data_t);
336 thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
6d2010ae 337 zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
0b4c1975 338 zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);
1c79356b 339
340 abstime_thread_call_groups = &thread_call_groups[0];
341 conttime_thread_call_groups = &thread_call_groups[THREAD_CALL_ABSTIME_COUNT];
342
343 lck_attr_setdefault(&thread_call_lck_attr);
344 lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
345 lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
346 lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);
39037602 347 lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
316670eb 348 nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
39037602 349 waitq_init(&daemon_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_FIFO);
c910b4d9 350
351 thread_call_group_setup(&abstime_thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE, FALSE);
352 thread_call_group_setup(&abstime_thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE, FALSE);
353 thread_call_group_setup(&abstime_thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE, FALSE);
354 thread_call_group_setup(&abstime_thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE, FALSE);
355 thread_call_group_setup(&conttime_thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE, TRUE);
356 thread_call_group_setup(&conttime_thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE, TRUE);
357 thread_call_group_setup(&conttime_thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 0, TRUE, TRUE);
358 thread_call_group_setup(&conttime_thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, 1, FALSE, TRUE);
1c79356b 359
fe8ab488 360 s = disable_ints_and_lock();
c910b4d9 361
362 queue_init(&thread_call_internal_queue);
363 for (
364 call = internal_call_storage;
365 call < &internal_call_storage[INTERNAL_CALL_COUNT];
366 call++) {
367
c910b4d9 368 enqueue_tail(&thread_call_internal_queue, qe(call));
39236c6e 369 thread_call_internal_queue_count++;
6d2010ae 370 }
1c79356b 371
c910b4d9 372 thread_call_daemon_awake = TRUE;
1c79356b 373
fe8ab488 374 enable_ints_and_unlock(s);
1c79356b 375
316670eb 376 result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
377 if (result != KERN_SUCCESS)
378 panic("thread_call_initialize");
379
380 thread_deallocate(thread);
381}
382
383void
384thread_call_setup(
385 thread_call_t call,
386 thread_call_func_t func,
c910b4d9 387 thread_call_param_t param0)
1c79356b 388{
389 bzero(call, sizeof(*call));
390 call_entry_setup((call_entry_t)call, func, param0);
391 call->tc_pri = THREAD_CALL_PRIORITY_HIGH; /* Default priority */
392}
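/*
 * Usage sketch (added; not part of the original file): a client that
 * owns its own thread_call_data_t initializes it once with
 * thread_call_setup() and then requests invocations with
 * thread_call_enter().  The example_* names are hypothetical; storage
 * set up this way must never be handed to thread_call_free().  The
 * declarations come from <kern/thread_call.h>, included above.
 */
static thread_call_data_t example_call_storage;

static void
example_callback(thread_call_param_t param0, thread_call_param_t param1)
{
	/* runs on a thread-call worker thread at the default (HIGH) priority */
	(void)param0;
	(void)param1;
}

static void
example_setup_and_fire(void)
{
	thread_call_setup(&example_call_storage, example_callback, NULL);

	/* enqueue for execution "soon"; TRUE would mean it was already queued */
	(void) thread_call_enter(&example_call_storage);
}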
393
394/*
c910b4d9 395 * _internal_call_allocate:
1c79356b 396 *
c910b4d9 397 * Allocate an internal callout entry.
1c79356b 398 *
c910b4d9 399 * Called with thread_call_lock held.
1c79356b 400 */
1c79356b 401static __inline__ thread_call_t
39236c6e 402_internal_call_allocate(thread_call_func_t func, thread_call_param_t param0)
1c79356b
A
403{
404 thread_call_t call;
405
c910b4d9 406 if (queue_empty(&thread_call_internal_queue))
1c79356b
A
407 panic("_internal_call_allocate");
408
c910b4d9 409 call = TC(dequeue_head(&thread_call_internal_queue));
39236c6e
A
410 thread_call_internal_queue_count--;
411
412 thread_call_setup(call, func, param0);
413 call->tc_refs = 0;
414 call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */
415
1c79356b
A
416 return (call);
417}
418
419/*
c910b4d9 420 * _internal_call_release:
1c79356b 421 *
c910b4d9 422 * Release an internal callout entry which
39236c6e
A
423 * is no longer pending (or delayed). This is
424 * safe to call on a non-internal entry, in which
425 * case nothing happens.
1c79356b 426 *
c910b4d9 427 * Called with thread_call_lock held.
1c79356b 428 */
c910b4d9 429static __inline__ void
1c79356b 430_internal_call_release(
c910b4d9 431 thread_call_t call)
1c79356b
A
432{
433 if ( call >= internal_call_storage &&
39236c6e
A
434 call < &internal_call_storage[INTERNAL_CALL_COUNT] ) {
435 assert((call->tc_flags & THREAD_CALL_ALLOC) == 0);
c910b4d9 436 enqueue_head(&thread_call_internal_queue, qe(call));
39236c6e
A
437 thread_call_internal_queue_count++;
438 }
1c79356b
A
439}
440
441/*
c910b4d9 442 * _pending_call_enqueue:
1c79356b 443 *
c910b4d9
A
444 * Place an entry at the end of the
445 * pending queue, to be executed soon.
1c79356b 446 *
c910b4d9
A
447 * Returns TRUE if the entry was already
448 * on a queue.
1c79356b 449 *
c910b4d9 450 * Called with thread_call_lock held.
1c79356b 451 */
c910b4d9 452static __inline__ boolean_t
1c79356b 453_pending_call_enqueue(
c910b4d9
A
454 thread_call_t call,
455 thread_call_group_t group)
1c79356b 456{
6d2010ae 457 queue_head_t *old_queue;
1c79356b 458
316670eb
A
459 old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);
460
461 if (old_queue == NULL) {
462 call->tc_submit_count++;
39037602
A
463 } else if (old_queue != &group->pending_queue &&
464 old_queue != &group->delayed_queue){
465 panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue);
316670eb 466 }
1c79356b 467
c910b4d9 468 group->pending_count++;
1c79356b 469
316670eb
A
470 thread_call_wake(group);
471
c910b4d9 472 return (old_queue != NULL);
1c79356b
A
473}
474
475/*
c910b4d9 476 * _delayed_call_enqueue:
1c79356b 477 *
c910b4d9
A
478 * Place an entry on the delayed queue,
479 * after existing entries with an earlier
480 * (or identical) deadline.
1c79356b 481 *
c910b4d9
A
482 * Returns TRUE if the entry was already
483 * on a queue.
1c79356b 484 *
c910b4d9 485 * Called with thread_call_lock held.
1c79356b 486 */
c910b4d9 487static __inline__ boolean_t
1c79356b 488_delayed_call_enqueue(
316670eb 489 thread_call_t call,
c910b4d9 490 thread_call_group_t group,
6d2010ae 491 uint64_t deadline)
1c79356b 492{
6d2010ae 493 queue_head_t *old_queue;
1c79356b 494
316670eb 495 old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);
c910b4d9 496
39037602 497 if (old_queue == &group->pending_queue) {
c910b4d9 498 group->pending_count--;
39037602 499 } else if (old_queue == NULL) {
316670eb 500 call->tc_submit_count++;
39037602
A
501 } else if (old_queue == &group->delayed_queue) {
502 // we did nothing, and that's fine
503 } else {
504 panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue);
505 }
c910b4d9
A
506
507 return (old_queue != NULL);
1c79356b
A
508}
509
510/*
c910b4d9 511 * _call_dequeue:
1c79356b 512 *
c910b4d9 513 * Remove an entry from a queue.
1c79356b 514 *
c910b4d9 515 * Returns TRUE if the entry was on a queue.
1c79356b 516 *
c910b4d9 517 * Called with thread_call_lock held.
1c79356b 518 */
c910b4d9
A
519static __inline__ boolean_t
520_call_dequeue(
521 thread_call_t call,
522 thread_call_group_t group)
1c79356b 523{
6d2010ae 524 queue_head_t *old_queue;
c910b4d9 525
316670eb 526 old_queue = call_entry_dequeue(CE(call));
c910b4d9 527
316670eb
A
528 if (old_queue != NULL) {
529 call->tc_finish_count++;
530 if (old_queue == &group->pending_queue)
531 group->pending_count--;
532 }
c910b4d9
A
533
534 return (old_queue != NULL);
1c79356b
A
535}
536
537/*
c910b4d9 538 * _set_delayed_call_timer:
1c79356b 539 *
c910b4d9
A
540 * Reset the timer so that it
541 * next expires when the entry is due.
1c79356b 542 *
c910b4d9 543 * Called with thread_call_lock held.
1c79356b 544 */
1c79356b
A
545static __inline__ void
546_set_delayed_call_timer(
c910b4d9
A
547 thread_call_t call,
548 thread_call_group_t group)
1c79356b 549{
39037602 550 uint64_t leeway, fire_at;
39236c6e
A
551
552 assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline)));
39037602
A
553 assert(IS_CONT_GROUP(group) == ((call->tc_flags & THREAD_CALL_CONTINUOUS) ? TRUE : FALSE));
554
555 fire_at = call->tc_soft_deadline;
556
557 if (IS_CONT_GROUP(group)) {
558 fire_at = continuoustime_to_absolutetime(fire_at);
559 }
39236c6e
A
560
561 leeway = call->tc_call.deadline - call->tc_soft_deadline;
562 timer_call_enter_with_leeway(&group->delayed_timer, NULL,
39037602 563 fire_at, leeway,
39236c6e 564 TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LEEWAY,
fe8ab488 565 ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED));
1c79356b
A
566}
567
568/*
c910b4d9 569 * _remove_from_pending_queue:
1c79356b 570 *
c910b4d9
A
571 * Remove the first (or all) matching
572 * entries from the pending queue.
1c79356b 573 *
c910b4d9
A
574 * Returns TRUE if any matching entries
575 * were found.
1c79356b 576 *
c910b4d9 577 * Called with thread_call_lock held.
1c79356b 578 */
c910b4d9 579static boolean_t
1c79356b
A
580_remove_from_pending_queue(
581 thread_call_func_t func,
582 thread_call_param_t param0,
c910b4d9 583 boolean_t remove_all)
1c79356b 584{
316670eb 585 boolean_t call_removed = FALSE;
c910b4d9 586 thread_call_t call;
39037602 587 thread_call_group_t group = &abstime_thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
316670eb
A
588
589 call = TC(queue_first(&group->pending_queue));
590
591 while (!queue_end(&group->pending_queue, qe(call))) {
592 if (call->tc_call.func == func &&
593 call->tc_call.param0 == param0) {
1c79356b 594 thread_call_t next = TC(queue_next(qe(call)));
316670eb 595
c910b4d9 596 _call_dequeue(call, group);
1c79356b
A
597
598 _internal_call_release(call);
316670eb 599
1c79356b
A
600 call_removed = TRUE;
601 if (!remove_all)
602 break;
316670eb 603
1c79356b
A
604 call = next;
605 }
606 else
607 call = TC(queue_next(qe(call)));
316670eb
A
608 }
609
610 return (call_removed);
1c79356b
A
611}
612
613/*
c910b4d9 614 * _remove_from_delayed_queue:
1c79356b 615 *
c910b4d9
A
616 * Remove the first (or all) matching
617 * entries from the delayed queue.
1c79356b 618 *
c910b4d9
A
619 * Returns TRUE if any matching entries
620 * were found.
1c79356b 621 *
c910b4d9 622 * Called with thread_call_lock held.
1c79356b 623 */
c910b4d9 624static boolean_t
1c79356b
A
625_remove_from_delayed_queue(
626 thread_call_func_t func,
627 thread_call_param_t param0,
c910b4d9 628 boolean_t remove_all)
1c79356b 629{
316670eb
A
630 boolean_t call_removed = FALSE;
631 thread_call_t call;
39037602 632 thread_call_group_t group = &abstime_thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
316670eb
A
633
634 call = TC(queue_first(&group->delayed_queue));
635
636 while (!queue_end(&group->delayed_queue, qe(call))) {
637 if (call->tc_call.func == func &&
638 call->tc_call.param0 == param0) {
1c79356b 639 thread_call_t next = TC(queue_next(qe(call)));
316670eb 640
c910b4d9 641 _call_dequeue(call, group);
316670eb 642
1c79356b 643 _internal_call_release(call);
316670eb 644
1c79356b
A
645 call_removed = TRUE;
646 if (!remove_all)
647 break;
316670eb 648
1c79356b
A
649 call = next;
650 }
651 else
652 call = TC(queue_next(qe(call)));
316670eb
A
653 }
654
655 return (call_removed);
1c79356b
A
656}
657
1c79356b 658/*
c910b4d9 659 * thread_call_func_delayed:
1c79356b 660 *
c910b4d9
A
661 * Enqueue a function callout to
662 * occur at the stated time.
1c79356b 663 */
1c79356b
A
664void
665thread_call_func_delayed(
316670eb
A
666 thread_call_func_t func,
667 thread_call_param_t param,
668 uint64_t deadline)
1c79356b 669{
39236c6e
A
670 (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0);
671}
316670eb 672
39236c6e
A
673/*
674 * thread_call_func_delayed_with_leeway:
675 *
676 * Same as thread_call_func_delayed(), but with
677 * leeway/flags threaded through.
678 */
316670eb 679
39236c6e
A
680void
681thread_call_func_delayed_with_leeway(
682 thread_call_func_t func,
683 thread_call_param_t param,
684 uint64_t deadline,
685 uint64_t leeway,
686 uint32_t flags)
687{
688 (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags);
689}
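/*
 * Usage sketch (added): arming a bare-function callout 100 ms from now
 * with the func-based API above.  Deadlines are absolute-time values,
 * normally derived via clock_interval_to_deadline() from <kern/clock.h>
 * (included above); NSEC_PER_MSEC comes from <mach/clock_types.h>.
 * example_timeout_handler is a hypothetical callback.
 */
static void
example_timeout_handler(thread_call_param_t param, __unused thread_call_param_t param1)
{
	/* param is the value passed to thread_call_func_delayed() */
	(void)param;
}

static void
example_schedule_timeout(void *cookie)
{
	uint64_t deadline;

	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	thread_call_func_delayed(example_timeout_handler, cookie, deadline);
}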
690
691/*
c910b4d9 692 * thread_call_func_cancel:
1c79356b 693 *
c910b4d9 694 * Dequeue a function callout.
1c79356b 695 *
c910b4d9
A
696 * Removes one (or all) { function, argument }
697 * instance(s) from either (or both)
698 * the pending and the delayed queue,
699 * in that order.
1c79356b 700 *
c910b4d9 701 * Returns TRUE if any calls were cancelled.
1c79356b 702 */
1c79356b
A
703boolean_t
704thread_call_func_cancel(
316670eb
A
705 thread_call_func_t func,
706 thread_call_param_t param,
707 boolean_t cancel_all)
1c79356b 708{
316670eb
A
709 boolean_t result;
710 spl_t s;
1c79356b 711
39037602
A
712 assert(func != NULL);
713
316670eb
A
714 s = splsched();
715 thread_call_lock_spin();
716
717 if (cancel_all)
1c79356b 718 result = _remove_from_pending_queue(func, param, cancel_all) |
316670eb 719 _remove_from_delayed_queue(func, param, cancel_all);
1c79356b
A
720 else
721 result = _remove_from_pending_queue(func, param, cancel_all) ||
316670eb
A
722 _remove_from_delayed_queue(func, param, cancel_all);
723
724 thread_call_unlock();
725 splx(s);
726
727 return (result);
728}
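/*
 * Usage sketch (added): tearing down the callout armed in the previous
 * sketch.  Cancellation is keyed on the { function, param } pair;
 * passing TRUE removes every matching pending and delayed instance.
 * This does not wait for an invocation that has already started.
 */
static void
example_cancel_timeout(void *cookie)
{
	boolean_t removed;

	removed = thread_call_func_cancel(example_timeout_handler, cookie, TRUE);
	if (!removed) {
		/* nothing was queued: it already ran or was never armed */
	}
}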
729
730/*
731 * Allocate a thread call with a given priority. Importances
732 * other than THREAD_CALL_PRIORITY_HIGH will be run in threads
733 * with eager preemption enabled (i.e. may be aggressively preempted
734 * by higher-priority threads which are not in the normal "urgent" bands).
735 */
736thread_call_t
737thread_call_allocate_with_priority(
738 thread_call_func_t func,
739 thread_call_param_t param0,
740 thread_call_priority_t pri)
741{
742 thread_call_t call;
743
744 if (pri > THREAD_CALL_PRIORITY_LOW) {
745 panic("Invalid pri: %d\n", pri);
746 }
747
748 call = thread_call_allocate(func, param0);
749 call->tc_pri = pri;
750
751 return call;
752}
753
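/*
 * Usage sketch (added): allocating a low-importance callout.  Anything
 * other than THREAD_CALL_PRIORITY_HIGH runs on an eagerly-preemptible
 * worker thread, so this suits background work.  The example_* names
 * are hypothetical.
 */
static void
example_bg_work(thread_call_param_t p0, thread_call_param_t p1)
{
	(void)p0;
	(void)p1;
}

static thread_call_t
example_allocate_background_call(void)
{
	thread_call_t call;

	call = thread_call_allocate_with_priority(example_bg_work, NULL,
	    THREAD_CALL_PRIORITY_LOW);
	assert(call != NULL);

	return call;
}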
1c79356b 754/*
c910b4d9 755 * thread_call_allocate:
1c79356b 756 *
c910b4d9 757 * Allocate a callout entry.
1c79356b 758 */
1c79356b
A
759thread_call_t
760thread_call_allocate(
316670eb
A
761 thread_call_func_t func,
762 thread_call_param_t param0)
1c79356b 763{
316670eb 764 thread_call_t call = zalloc(thread_call_zone);
c910b4d9 765
316670eb
A
766 thread_call_setup(call, func, param0);
767 call->tc_refs = 1;
768 call->tc_flags = THREAD_CALL_ALLOC;
c910b4d9 769
316670eb 770 return (call);
1c79356b
A
771}
772
773/*
c910b4d9 774 * thread_call_free:
1c79356b 775 *
316670eb
A
776 * Release a callout. If the callout is currently
777 * executing, it will be freed when all invocations
778 * finish.
1c79356b 779 */
1c79356b
A
780boolean_t
781thread_call_free(
316670eb 782 thread_call_t call)
1c79356b 783{
316670eb
A
784 spl_t s;
785 int32_t refs;
1c79356b 786
316670eb
A
787 s = splsched();
788 thread_call_lock_spin();
789
790 if (call->tc_call.queue != NULL) {
791 thread_call_unlock();
792 splx(s);
793
794 return (FALSE);
795 }
796
797 refs = --call->tc_refs;
798 if (refs < 0) {
799 panic("Refcount negative: %d\n", refs);
800 }
801
802 thread_call_unlock();
803 splx(s);
804
805 if (refs == 0) {
806 zfree(thread_call_zone, call);
807 }
808
809 return (TRUE);
810}
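/*
 * Usage sketch (added, relying on the deferred-free behavior described
 * above): a one-shot callout that releases its own storage.  By the
 * time the callback runs, the call has been dequeued, so
 * thread_call_free() only drops the reference here and the zone memory
 * is reclaimed once this invocation finishes.  The example_* names are
 * hypothetical.
 */
static void
example_one_shot_callback(__unused thread_call_param_t param0, thread_call_param_t param1)
{
	thread_call_t self_call = (thread_call_t)param1;

	/* ... perform the one-shot work ... */

	(void) thread_call_free(self_call);	/* reclaimed after this invocation returns */
}

static void
example_fire_one_shot(void)
{
	thread_call_t call = thread_call_allocate(example_one_shot_callback, NULL);

	/* hand the call to itself as the per-enqueue parameter */
	(void) thread_call_enter1(call, call);
}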
811
812/*
c910b4d9 813 * thread_call_enter:
1c79356b 814 *
c910b4d9 815 * Enqueue a callout entry to occur "soon".
1c79356b 816 *
c910b4d9
A
817 * Returns TRUE if the call was
818 * already on a queue.
1c79356b 819 */
1c79356b
A
820boolean_t
821thread_call_enter(
316670eb 822 thread_call_t call)
1c79356b 823{
39037602 824 return thread_call_enter1(call, 0);
1c79356b
A
825}
826
827boolean_t
828thread_call_enter1(
316670eb
A
829 thread_call_t call,
830 thread_call_param_t param1)
1c79356b 831{
316670eb
A
832 boolean_t result = TRUE;
833 thread_call_group_t group;
834 spl_t s;
835
39037602
A
836 assert(call->tc_call.func != NULL);
837
316670eb
A
838 group = thread_call_get_group(call);
839
6d2010ae
A
840 s = splsched();
841 thread_call_lock_spin();
316670eb
A
842
843 if (call->tc_call.queue != &group->pending_queue) {
844 result = _pending_call_enqueue(call, group);
c910b4d9 845 }
1c79356b 846
316670eb 847 call->tc_call.param1 = param1;
1c79356b 848
849 thread_call_unlock();
850 splx(s);
851
852 return (result);
853}
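/*
 * Usage sketch (added): param0 is fixed when the call is allocated or
 * set up, while param1 travels with each enqueue, so
 * thread_call_enter1() is the usual way to pass a per-request argument
 * to the callback.  example_device_call and example_io_request are
 * hypothetical.
 */
static void
example_enqueue_request(thread_call_t example_device_call, void *example_io_request)
{
	boolean_t already_queued;

	already_queued = thread_call_enter1(example_device_call, example_io_request);
	if (already_queued) {
		/* only one pending invocation exists; its param1 now points at this request */
	}
}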
854
855/*
c910b4d9 856 * thread_call_enter_delayed:
1c79356b 857 *
c910b4d9
A
858 * Enqueue a callout entry to occur
859 * at the stated time.
1c79356b 860 *
c910b4d9
A
861 * Returns TRUE if the call was
862 * already on a queue.
1c79356b 863 */
1c79356b
A
864boolean_t
865thread_call_enter_delayed(
316670eb 866 thread_call_t call,
39236c6e 867 uint64_t deadline)
1c79356b 868{
39037602 869 assert(call != NULL);
39236c6e 870 return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0);
1c79356b
A
871}
872
873boolean_t
874thread_call_enter1_delayed(
316670eb
A
875 thread_call_t call,
876 thread_call_param_t param1,
877 uint64_t deadline)
39236c6e 878{
39037602 879 assert(call != NULL);
39236c6e
A
880 return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0);
881}
882
883boolean_t
884thread_call_enter_delayed_with_leeway(
885 thread_call_t call,
886 thread_call_param_t param1,
887 uint64_t deadline,
888 uint64_t leeway,
889 unsigned int flags)
890{
39037602 891 assert(call != NULL);
892 return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags);
893}
894
895
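/*
 * Usage sketch (added): a delayed enqueue that allows the timer
 * subsystem to coalesce this callout by up to 50 ms past the nominal
 * one-second deadline.  THREAD_CALL_DELAY_LEEWAY asks for the explicit
 * leeway value to be honored; the full flag set is described in the
 * comment block below.
 */
static void
example_enter_with_leeway(thread_call_t call)
{
	uint64_t deadline, leeway;

	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	nanoseconds_to_absolutetime(50 * NSEC_PER_MSEC, &leeway);

	(void) thread_call_enter_delayed_with_leeway(call, NULL, deadline,
	    leeway, THREAD_CALL_DELAY_LEEWAY);
}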
896/*
897 * thread_call_enter_delayed_internal:
898 * enqueue a callout entry to occur at the stated time
899 *
900 * Returns True if the call was already on a queue
901 * params:
902 * call - structure encapsulating state of the callout
903 * alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters
904 * deadline - time deadline in nanoseconds
905 * leeway - timer slack represented as delta of deadline.
906 * flags - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing.
907 * THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing.
39037602
A
908 * THREAD_CALL_CONTINUOUS: thread call will be called according to mach_continuous_time rather
909 * than mach_absolute_time
39236c6e
A
910 */
911boolean_t
912thread_call_enter_delayed_internal(
913 thread_call_t call,
914 thread_call_func_t alt_func,
915 thread_call_param_t alt_param0,
916 thread_call_param_t param1,
917 uint64_t deadline,
918 uint64_t leeway,
919 unsigned int flags)
1c79356b 920{
316670eb
A
921 boolean_t result = TRUE;
922 thread_call_group_t group;
923 spl_t s;
39037602 924 uint64_t abstime, conttime, sdeadline, slop;
39236c6e 925 uint32_t urgency;
39037602 926 const boolean_t is_cont_time = (flags & THREAD_CALL_CONTINUOUS) ? TRUE : FALSE;
316670eb 927
39236c6e
A
928 /* direct mapping between thread_call, timer_call, and timeout_urgency values */
929 urgency = (flags & TIMEOUT_URGENCY_MASK);
1c79356b 930
6d2010ae
A
931 s = splsched();
932 thread_call_lock_spin();
39236c6e
A
933
934 if (call == NULL) {
935 /* allocate a structure out of internal storage, as a convenience for BSD callers */
936 call = _internal_call_allocate(alt_func, alt_param0);
937 }
938
39037602
A
939 if (is_cont_time) {
940 call->tc_flags |= THREAD_CALL_CONTINUOUS;
941 }
942
943 assert(call->tc_call.func != NULL);
39236c6e 944 group = thread_call_get_group(call);
4b17d6b6 945 abstime = mach_absolute_time();
39037602 946 conttime = absolutetime_to_continuoustime(abstime);
39236c6e
A
947
948 call->tc_flags |= THREAD_CALL_DELAYED;
949
950 call->tc_soft_deadline = sdeadline = deadline;
951
952 boolean_t ratelimited = FALSE;
39037602 953 slop = timer_call_slop(deadline, is_cont_time ? conttime : abstime, urgency, current_thread(), &ratelimited);
39236c6e
A
954
955 if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop)
956 slop = leeway;
957
958 if (UINT64_MAX - deadline <= slop)
959 deadline = UINT64_MAX;
960 else
961 deadline += slop;
962
39236c6e 963 if (ratelimited) {
fe8ab488 964 call->tc_flags |= TIMER_CALL_RATELIMITED;
39236c6e 965 } else {
fe8ab488 966 call->tc_flags &= ~TIMER_CALL_RATELIMITED;
39236c6e
A
967 }
968
fe8ab488 969
39236c6e 970 call->tc_call.param1 = param1;
39037602
A
971
972 if(is_cont_time) {
973 call->ttd = (sdeadline > conttime) ? (sdeadline - conttime) : 0;
974 }
975 else {
976 call->ttd = (sdeadline > abstime) ? (sdeadline - abstime) : 0;
977 }
1c79356b 978
c910b4d9 979 result = _delayed_call_enqueue(call, group, deadline);
1c79356b 980
39037602 981 if (queue_first(&group->delayed_queue) == qe(call)) {
c910b4d9 982 _set_delayed_call_timer(call, group);
39037602 983 }
1c79356b 984
4b17d6b6 985#if CONFIG_DTRACE
39236c6e 986 DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, uint64_t, (deadline - sdeadline), uint64_t, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);
4b17d6b6 987#endif
39037602 988
989 thread_call_unlock();
990 splx(s);
991
992 return (result);
993}
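/*
 * Usage sketch (added): arming a callout against mach_continuous_time
 * so that time spent asleep counts toward the deadline.  The
 * THREAD_CALL_CONTINUOUS flag reaches this function via
 * thread_call_enter_delayed_with_leeway(); the deadline must then be
 * expressed in continuous time rather than mach_absolute_time.
 */
static void
example_enter_continuous(thread_call_t call)
{
	uint64_t interval, deadline;

	nanoseconds_to_absolutetime(30 * NSEC_PER_SEC, &interval);
	deadline = mach_continuous_time() + interval;

	(void) thread_call_enter_delayed_with_leeway(call, NULL, deadline,
	    0, THREAD_CALL_CONTINUOUS);
}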
994
995/*
c910b4d9 996 * thread_call_cancel:
1c79356b 997 *
c910b4d9 998 * Dequeue a callout entry.
1c79356b 999 *
c910b4d9
A
1000 * Returns TRUE if the call was
1001 * on a queue.
1c79356b 1002 */
1c79356b
A
1003boolean_t
1004thread_call_cancel(
316670eb 1005 thread_call_t call)
1c79356b 1006{
39236c6e 1007 boolean_t result, do_cancel_callout = FALSE;
316670eb
A
1008 thread_call_group_t group;
1009 spl_t s;
1010
1011 group = thread_call_get_group(call);
1012
6d2010ae
A
1013 s = splsched();
1014 thread_call_lock_spin();
c910b4d9 1015
39236c6e
A
1016 if ((call->tc_call.deadline != 0) &&
1017 (queue_first(&group->delayed_queue) == qe(call))) {
1018 assert (call->tc_call.queue == &group->delayed_queue);
1019 do_cancel_callout = TRUE;
1020 }
1021
c910b4d9 1022 result = _call_dequeue(call, group);
316670eb 1023
39236c6e
A
1024 if (do_cancel_callout) {
1025 timer_call_cancel(&group->delayed_timer);
1026 if (!queue_empty(&group->delayed_queue)) {
1027 _set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
1028 }
1029 }
1030
6d2010ae
A
1031 thread_call_unlock();
1032 splx(s);
4b17d6b6
A
1033#if CONFIG_DTRACE
1034 DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
1035#endif
1c79356b
A
1036
1037 return (result);
1038}
1039
316670eb
A
1040/*
1041 * Cancel a thread call. If it cannot be cancelled (i.e.
1042 * is already in flight), waits for the most recent invocation
1043 * to finish. Note that if clients re-submit this thread call,
1044 * it may still be pending or in flight when thread_call_cancel_wait
1045 * returns, but all requests to execute this work item prior
1046 * to the call to thread_call_cancel_wait will have finished.
1047 */
1048boolean_t
1049thread_call_cancel_wait(
1050 thread_call_t call)
1051{
1052 boolean_t result;
1053 thread_call_group_t group;
1054
1055 if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
1056 panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
1057 }
1058
1059 group = thread_call_get_group(call);
1060
1061 (void) splsched();
1062 thread_call_lock_spin();
1063
1064 result = _call_dequeue(call, group);
1065 if (result == FALSE) {
1066 thread_call_wait_locked(call);
1067 }
1068
1069 thread_call_unlock();
1070 (void) spllo();
1071
1072 return result;
1073}
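/*
 * Usage sketch (added): the usual teardown sequence for a heap-allocated
 * callout.  thread_call_cancel_wait() guarantees that no invocation
 * requested before this point is still running, after which the storage
 * can be released with thread_call_free().  This is only legal for
 * calls created with thread_call_allocate*(), as the panic above
 * enforces.
 */
static void
example_teardown(thread_call_t call)
{
	(void) thread_call_cancel_wait(call);		/* dequeue and drain */

	if (!thread_call_free(call)) {
		/* somebody re-armed the call between the cancel and the free */
		panic("example_teardown: thread call still queued");
	}
}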
1074
1075
1c79356b 1076/*
c910b4d9 1077 * thread_call_wake:
1c79356b 1078 *
c910b4d9
A
1079 * Wake a call thread to service
1080 * pending call entries. May wake
1081 * the daemon thread in order to
1082 * create additional call threads.
1c79356b 1083 *
c910b4d9 1084 * Called with thread_call_lock held.
316670eb
A
1085 *
1086 * For high-priority group, only does wakeup/creation if there are no threads
1087 * running.
1c79356b 1088 */
c910b4d9
A
1089static __inline__ void
1090thread_call_wake(
1091 thread_call_group_t group)
1c79356b 1092{
316670eb
A
1093 /*
1094 * New behavior: use threads if you've got 'em.
1095 * Traditional behavior: wake only if no threads running.
1096 */
1097 if (group_isparallel(group) || group->active_count == 0) {
3e170ce0
A
1098 if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
1099 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) {
316670eb
A
1100 group->idle_count--; group->active_count++;
1101
1102 if (group->idle_count == 0) {
1103 timer_call_cancel(&group->dealloc_timer);
39037602 1104 group->flags &= ~TCG_DEALLOC_ACTIVE;
316670eb
A
1105 }
1106 } else {
1107 if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
1108 thread_call_daemon_awake = TRUE;
3e170ce0
A
1109 waitq_wakeup64_one(&daemon_waitq, NO_EVENT64,
1110 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
316670eb
A
1111 }
1112 }
1c79356b
A
1113 }
1114}
1115
9bccf70c 1116/*
2d21ac55 1117 * sched_call_thread:
9bccf70c 1118 *
316670eb
A
1119 * Call out invoked by the scheduler. Used only for high-priority
1120 * thread call group.
9bccf70c 1121 */
2d21ac55
A
1122static void
1123sched_call_thread(
316670eb
A
1124 int type,
1125 __unused thread_t thread)
9bccf70c 1126{
316670eb
A
1127 thread_call_group_t group;
1128
1129 group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH]; /* XXX */
c910b4d9 1130
6d2010ae 1131 thread_call_lock_spin();
9bccf70c 1132
2d21ac55 1133 switch (type) {
9bccf70c 1134
316670eb
A
1135 case SCHED_CALL_BLOCK:
1136 --group->active_count;
1137 if (group->pending_count > 0)
1138 thread_call_wake(group);
1139 break;
9bccf70c 1140
316670eb
A
1141 case SCHED_CALL_UNBLOCK:
1142 group->active_count++;
1143 break;
2d21ac55 1144 }
9bccf70c 1145
6d2010ae 1146 thread_call_unlock();
9bccf70c 1147}
1c79356b 1148
316670eb
A
1149/*
1150 * Interrupts disabled, lock held; returns the same way.
1151 * Only called on thread calls whose storage we own. Wakes up
1152 * anyone who might be waiting on this work item and frees it
1153 * if the client has so requested.
1154 */
1155static void
fe8ab488 1156thread_call_finish(thread_call_t call, spl_t *s)
316670eb
A
1157{
1158 boolean_t dowake = FALSE;
1159
1160 call->tc_finish_count++;
1161 call->tc_refs--;
1162
1163 if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
1164 dowake = TRUE;
1165 call->tc_flags &= ~THREAD_CALL_WAIT;
1166
1167 /*
1168 * Dropping lock here because the sched call for the
1169 * high-pri group can take the big lock from under
1170 * a thread lock.
1171 */
1172 thread_call_unlock();
1173 thread_wakeup((event_t)call);
1174 thread_call_lock_spin();
1175 }
1176
1177 if (call->tc_refs == 0) {
1178 if (dowake) {
1179 panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
1180 }
1181
fe8ab488 1182 enable_ints_and_unlock(*s);
316670eb
A
1183
1184 zfree(thread_call_zone, call);
1185
fe8ab488 1186 *s = disable_ints_and_lock();
316670eb
A
1187 }
1188
1189}
1190
1c79356b 1191/*
c910b4d9 1192 * thread_call_thread:
1c79356b 1193 */
c910b4d9
A
1194static void
1195thread_call_thread(
316670eb
A
1196 thread_call_group_t group,
1197 wait_result_t wres)
1c79356b 1198{
316670eb
A
1199 thread_t self = current_thread();
1200 boolean_t canwait;
fe8ab488 1201 spl_t s;
1c79356b 1202
4b17d6b6
A
1203 if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0)
1204 (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);
1205
316670eb
A
1206 /*
1207 * A wakeup with THREAD_INTERRUPTED indicates that
1208 * we should terminate.
1209 */
1210 if (wres == THREAD_INTERRUPTED) {
1211 thread_terminate(self);
1212
1213 /* NOTREACHED */
1214 panic("thread_terminate() returned?");
1215 }
1216
fe8ab488 1217 s = disable_ints_and_lock();
1c79356b 1218
316670eb 1219 thread_sched_call(self, group->sched_call);
9bccf70c 1220
316670eb 1221 while (group->pending_count > 0) {
1c79356b
A
1222 thread_call_t call;
1223 thread_call_func_t func;
1224 thread_call_param_t param0, param1;
1225
c910b4d9 1226 call = TC(dequeue_head(&group->pending_queue));
39037602 1227 assert(call != NULL);
c910b4d9 1228 group->pending_count--;
1c79356b 1229
316670eb
A
1230 func = call->tc_call.func;
1231 param0 = call->tc_call.param0;
1232 param1 = call->tc_call.param1;
1233
1234 call->tc_call.queue = NULL;
1c79356b
A
1235
1236 _internal_call_release(call);
1237
316670eb
A
1238 /*
1239 * Can only do wakeups for thread calls whose storage
1240 * we control.
1241 */
1242 if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
1243 canwait = TRUE;
1244 call->tc_refs++; /* Delay free until we're done */
1245 } else
1246 canwait = FALSE;
1247
fe8ab488 1248 enable_ints_and_unlock(s);
1c79356b 1249
3e170ce0 1250#if DEVELOPMENT || DEBUG
55e303ae 1251 KERNEL_DEBUG_CONSTANT(
316670eb 1252 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
4bd07ac2 1253 VM_KERNEL_UNSLIDE(func), VM_KERNEL_UNSLIDE_OR_PERM(param0), VM_KERNEL_UNSLIDE_OR_PERM(param1), 0, 0);
3e170ce0 1254#endif /* DEVELOPMENT || DEBUG */
55e303ae 1255
39236c6e
A
1256#if CONFIG_DTRACE
1257 DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
1258#endif
1259
1c79356b
A
1260 (*func)(param0, param1);
1261
39236c6e
A
1262#if CONFIG_DTRACE
1263 DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
1264#endif
1265
6d2010ae
A
1266 if (get_preemption_level() != 0) {
1267 int pl = get_preemption_level();
1268 panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
316670eb 1269 pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
6d2010ae 1270 }
316670eb 1271
fe8ab488 1272 s = disable_ints_and_lock();
316670eb
A
1273
1274 if (canwait) {
1275 /* Frees if so desired */
fe8ab488 1276 thread_call_finish(call, &s);
316670eb
A
1277 }
1278 }
9bccf70c 1279
2d21ac55 1280 thread_sched_call(self, NULL);
c910b4d9 1281 group->active_count--;
39236c6e
A
1282
1283 if (self->callout_woken_from_icontext && !self->callout_woke_thread) {
1284 ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1);
1285 if (self->callout_woken_from_platform_idle)
1286 ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1);
1287 }
1288
1289 self->callout_woken_from_icontext = FALSE;
1290 self->callout_woken_from_platform_idle = FALSE;
1291 self->callout_woke_thread = FALSE;
9bccf70c 1292
316670eb
A
1293 if (group_isparallel(group)) {
1294 /*
1295 * For new style of thread group, thread always blocks.
1296 * If we have more than the target number of threads,
1297 * and this is the first to block, and it isn't active
1298 * already, set a timer for deallocating a thread if we
1299 * continue to have a surplus.
1300 */
c910b4d9 1301 group->idle_count++;
1c79356b 1302
316670eb
A
1303 if (group->idle_count == 1) {
1304 group->idle_timestamp = mach_absolute_time();
1305 }
1306
1307 if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
1308 ((group->active_count + group->idle_count) > group->target_thread_count)) {
1309 group->flags |= TCG_DEALLOC_ACTIVE;
1310 thread_call_start_deallocate_timer(group);
1311 }
1312
1313 /* Wait for more work (or termination) */
3e170ce0 1314 wres = waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_INTERRUPTIBLE, 0);
316670eb
A
1315 if (wres != THREAD_WAITING) {
1316 panic("kcall worker unable to assert wait?");
1317 }
1318
fe8ab488 1319 enable_ints_and_unlock(s);
1c79356b 1320
c910b4d9 1321 thread_block_parameter((thread_continue_t)thread_call_thread, group);
316670eb
A
1322 } else {
1323 if (group->idle_count < group->target_thread_count) {
1324 group->idle_count++;
c910b4d9 1325
3e170ce0 1326 waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_UNINT, 0); /* Interrupted means to exit */
316670eb 1327
fe8ab488 1328 enable_ints_and_unlock(s);
316670eb
A
1329
1330 thread_block_parameter((thread_continue_t)thread_call_thread, group);
1331 /* NOTREACHED */
1332 }
1333 }
1334
fe8ab488 1335 enable_ints_and_unlock(s);
316670eb
A
1336
1337 thread_terminate(self);
1c79356b
A
1338 /* NOTREACHED */
1339}
1340
1c79356b 1341/*
316670eb
A
1342 * thread_call_daemon: walk list of groups, allocating
1343 * threads if appropriate (as determined by
1344 * thread_call_group_should_add_thread()).
1c79356b 1345 */
c910b4d9 1346static void
316670eb 1347thread_call_daemon_continue(__unused void *arg)
1c79356b 1348{
316670eb
A
1349 int i;
1350 kern_return_t kr;
1351 thread_call_group_t group;
fe8ab488 1352 spl_t s;
316670eb 1353
fe8ab488 1354 s = disable_ints_and_lock();
316670eb
A
1355
1356 /* Starting at zero happens to be high-priority first. */
1357 for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
1358 group = &thread_call_groups[i];
1359 while (thread_call_group_should_add_thread(group)) {
1360 group->active_count++;
1361
fe8ab488 1362 enable_ints_and_unlock(s);
316670eb
A
1363
1364 kr = thread_call_thread_create(group);
1365 if (kr != KERN_SUCCESS) {
1366 /*
1367 * On failure, just pause for a moment and give up.
1368 * We can try again later.
1369 */
1370 delay(10000); /* 10 ms */
fe8ab488 1371 s = disable_ints_and_lock();
316670eb
A
1372 goto out;
1373 }
1374
fe8ab488 1375 s = disable_ints_and_lock();
316670eb
A
1376 }
1377 }
91447636 1378
316670eb
A
1379out:
1380 thread_call_daemon_awake = FALSE;
3e170ce0 1381 waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0);
55e303ae 1382
fe8ab488 1383 enable_ints_and_unlock(s);
c910b4d9 1384
316670eb 1385 thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
1c79356b
A
1386 /* NOTREACHED */
1387}
1388
c910b4d9
A
1389static void
1390thread_call_daemon(
316670eb 1391 __unused void *arg)
1c79356b 1392{
55e303ae 1393 thread_t self = current_thread();
1c79356b 1394
91447636 1395 self->options |= TH_OPT_VMPRIV;
1c79356b 1396 vm_page_free_reserve(2); /* XXX */
316670eb
A
1397
1398 thread_call_daemon_continue(NULL);
1399 /* NOTREACHED */
1400}
1401
1402/*
1403 * Schedule timer to deallocate a worker thread if we have a surplus
1404 * of threads (in excess of the group's target) and at least one thread
1405 * is idle the whole time.
1406 */
1407static void
1408thread_call_start_deallocate_timer(
1409 thread_call_group_t group)
1410{
1411 uint64_t deadline;
1412 boolean_t onqueue;
1413
1414 assert(group->idle_count > 0);
1415
1416 group->flags |= TCG_DEALLOC_ACTIVE;
1417 deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
1418 onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);
1419
1420 if (onqueue) {
1421 panic("Deallocate timer already active?");
1422 }
1c79356b
A
1423}
1424
6d2010ae 1425void
c910b4d9 1426thread_call_delayed_timer(
316670eb
A
1427 timer_call_param_t p0,
1428 __unused timer_call_param_t p1
1c79356b
A
1429)
1430{
316670eb 1431 thread_call_t call;
c910b4d9 1432 thread_call_group_t group = p0;
39236c6e 1433 uint64_t timestamp;
1c79356b 1434
6d2010ae 1435 thread_call_lock_spin();
1c79356b 1436
39037602
A
1437 const boolean_t is_cont_time = IS_CONT_GROUP(group) ? TRUE : FALSE;
1438
1439 if (is_cont_time) {
1440 timestamp = mach_continuous_time();
1441 }
1442 else {
1443 timestamp = mach_absolute_time();
1444 }
316670eb
A
1445
1446 call = TC(queue_first(&group->delayed_queue));
1447
1448 while (!queue_end(&group->delayed_queue, qe(call))) {
39037602
A
1449 assert((!is_cont_time) || (call->tc_flags & THREAD_CALL_CONTINUOUS));
1450
39236c6e 1451 if (call->tc_soft_deadline <= timestamp) {
fe8ab488 1452 if ((call->tc_flags & THREAD_CALL_RATELIMITED) &&
39236c6e
A
1453 (CE(call)->deadline > timestamp) &&
1454 (ml_timer_forced_evaluation() == FALSE)) {
1455 break;
1456 }
c910b4d9 1457 _pending_call_enqueue(call, group);
39236c6e 1458 } /* TODO, identify differentially coalesced timers */
1c79356b
A
1459 else
1460 break;
316670eb 1461
c910b4d9 1462 call = TC(queue_first(&group->delayed_queue));
316670eb 1463 }
1c79356b 1464
39037602 1465 if (!queue_end(&group->delayed_queue, qe(call))) {
c910b4d9 1466 _set_delayed_call_timer(call, group);
39037602 1467 }
1c79356b 1468
316670eb
A
1469 thread_call_unlock();
1470}
1471
39236c6e 1472static void
39037602 1473thread_call_delayed_timer_rescan(thread_call_group_t group)
39236c6e
A
1474{
1475 thread_call_t call;
39236c6e
A
1476 uint64_t timestamp;
1477 boolean_t istate;
1478
1479 istate = ml_set_interrupts_enabled(FALSE);
1480 thread_call_lock_spin();
1481
1482 assert(ml_timer_forced_evaluation() == TRUE);
39037602
A
1483
1484 if (IS_CONT_GROUP(group)) {
1485 timestamp = mach_continuous_time();
1486 } else {
1487 timestamp = mach_absolute_time();
1488 }
39236c6e
A
1489
1490 call = TC(queue_first(&group->delayed_queue));
1491
1492 while (!queue_end(&group->delayed_queue, qe(call))) {
1493 if (call->tc_soft_deadline <= timestamp) {
1494 _pending_call_enqueue(call, group);
1495 call = TC(queue_first(&group->delayed_queue));
1496 }
1497 else {
1498 uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
1499 assert (call->tc_call.deadline >= call->tc_soft_deadline);
1500 /* On a latency quality-of-service level change,
1501 * re-sort potentially rate-limited callout. The platform
1502 * layer determines which timers require this.
1503 */
1504 if (timer_resort_threshold(skew)) {
1505 _call_dequeue(call, group);
1506 _delayed_call_enqueue(call, group, call->tc_soft_deadline);
1507 }
1508 call = TC(queue_next(qe(call)));
1509 }
1510 }
1511
1512 if (!queue_empty(&group->delayed_queue))
1513 _set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
1514 thread_call_unlock();
1515 ml_set_interrupts_enabled(istate);
1516}
1517
1518void
1519thread_call_delayed_timer_rescan_all(void) {
39037602
A
1520 int i;
1521 for(i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
1522 thread_call_delayed_timer_rescan(&thread_call_groups[i]);
1523 }
39236c6e
A
1524}
1525
316670eb
A
1526/*
1527 * Timer callback to tell a thread to terminate if
1528 * we have an excess of threads and at least one has been
1529 * idle for a long time.
1530 */
1531static void
1532thread_call_dealloc_timer(
1533 timer_call_param_t p0,
1534 __unused timer_call_param_t p1)
1535{
1536 thread_call_group_t group = (thread_call_group_t)p0;
1537 uint64_t now;
1538 kern_return_t res;
1539 boolean_t terminated = FALSE;
1540
1541 thread_call_lock_spin();
1542
1543 now = mach_absolute_time();
1544 if (group->idle_count > 0) {
1545 if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
1546 terminated = TRUE;
1547 group->idle_count--;
3e170ce0
A
1548 res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64,
1549 THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES);
316670eb
A
1550 if (res != KERN_SUCCESS) {
1551 panic("Unable to wake up idle thread for termination?");
1552 }
1553 }
1554
1555 }
1556
1557 /*
1558 * If we still have an excess of threads, schedule another
1559 * invocation of this function.
1560 */
1561 if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
1562 /*
1563 * If we killed someone just now, push out the
1564 * next deadline.
1565 */
1566 if (terminated) {
1567 group->idle_timestamp = now;
1568 }
1c79356b 1569
316670eb
A
1570 thread_call_start_deallocate_timer(group);
1571 } else {
1572 group->flags &= ~TCG_DEALLOC_ACTIVE;
1573 }
1574
1575 thread_call_unlock();
1c79356b 1576}
316670eb
A
1577
1578/*
1579 * Wait for all requested invocations of a thread call prior to now
1580 * to finish. Can only be invoked on thread calls whose storage we manage.
1581 * Just waits for the finish count to catch up to the submit count we find
1582 * at the beginning of our wait.
1583 */
1584static void
1585thread_call_wait_locked(thread_call_t call)
1586{
1587 uint64_t submit_count;
1588 wait_result_t res;
1589
1590 assert(call->tc_flags & THREAD_CALL_ALLOC);
1591
1592 submit_count = call->tc_submit_count;
1593
1594 while (call->tc_finish_count < submit_count) {
1595 call->tc_flags |= THREAD_CALL_WAIT;
1596
1597 res = assert_wait(call, THREAD_UNINT);
1598 if (res != THREAD_WAITING) {
1599 panic("Unable to assert wait?");
1600 }
1601
1602 thread_call_unlock();
1603 (void) spllo();
1604
1605 res = thread_block(NULL);
1606 if (res != THREAD_AWAKENED) {
1607 panic("Awoken with %d?", res);
1608 }
1609
1610 (void) splsched();
1611 thread_call_lock_spin();
1612 }
1613}
1614
1615/*
1616 * Determine whether a thread call is either on a queue or
1617 * currently being executed.
1618 */
1619boolean_t
1620thread_call_isactive(thread_call_t call)
1621{
1622 boolean_t active;
fe8ab488 1623 spl_t s;
316670eb 1624
fe8ab488 1625 s = disable_ints_and_lock();
316670eb 1626 active = (call->tc_submit_count > call->tc_finish_count);
fe8ab488 1627 enable_ints_and_unlock(s);
1628
1629 return active;
1630}
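/*
 * Usage sketch (added): thread_call_isactive() reports whether any
 * requested invocation has yet to finish, which makes it a convenient
 * assertion in teardown paths once a call has been cancelled and
 * drained.  example_assert_quiesced is a hypothetical helper.
 */
static void
example_assert_quiesced(thread_call_t call)
{
	assert(!thread_call_isactive(call));
}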
1631
1632/*
1633 * adjust_cont_time_thread_calls
1634 * on wake, reenqueue delayed call timer for continuous time thread call groups
1635 */
1636void
1637adjust_cont_time_thread_calls(void)
1638{
1639 thread_call_group_t group;
1640
1641 spl_t s;
1642 int i;
1643 s = disable_ints_and_lock();
1644
1645 for (i = 0; i < THREAD_CALL_CONTTIME_COUNT; i++) {
1646 // only the continuous thread call groups
1647 group = &conttime_thread_call_groups[i];
1648 assert(IS_CONT_GROUP(group));
1649
1650 if (!queue_empty(&group->delayed_queue)) {
1651 _set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
1652 }
1653 }
1654
1655 enable_ints_and_unlock(s);
1656}