]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_call.c
xnu-1504.9.17.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
CommitLineData
1c79356b 1/*
c910b4d9 2 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
1c79356b
A
28
29#include <mach/mach_types.h>
91447636 30#include <mach/thread_act.h>
1c79356b 31
91447636 32#include <kern/kern_types.h>
c910b4d9 33#include <kern/zalloc.h>
1c79356b
A
34#include <kern/sched_prim.h>
35#include <kern/clock.h>
36#include <kern/task.h>
37#include <kern/thread.h>
91447636
A
38#include <kern/wait_queue.h>
39
40#include <vm/vm_pageout.h>
1c79356b
A
41
42#include <kern/thread_call.h>
43#include <kern/call_entry.h>
44
45#include <kern/timer_call.h>
46
55e303ae
A
47#include <sys/kdebug.h>
48
/* Single lock protecting all thread-call state: both queues, counts, and flags. */
decl_simple_lock_data(static,thread_call_lock)

/* Zone backing callout entries handed out by thread_call_allocate(). */
static zone_t			thread_call_zone;

/*
 * A callout group: a FIFO pending queue serviced "soon", a
 * deadline-ordered delayed queue drained by delayed_timer, and the
 * wait queues that idle worker threads and the daemon block on.
 */
struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	struct wait_queue	daemon_wqueue;

	/* idle_count: threads parked on idle_wqueue; active_count: threads running callouts. */
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

/* The one (default) group in this version of the module. */
static struct thread_call_group		thread_call_group0;

/* TRUE while the daemon is awake (or a wakeup for it is pending). */
static boolean_t			thread_call_daemon_awake;

/* Keep up to this many worker threads parked idle rather than terminating. */
#define thread_call_thread_min		4

/* Size of the static pool used for thread_call_func*() style callouts. */
#define internal_call_count		768

static thread_call_data_t		internal_call_storage[internal_call_count];
static queue_head_t			thread_call_internal_queue;

static __inline__ thread_call_t		_internal_call_allocate(void);

static __inline__ void	_internal_call_release(
				thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group),
				_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline),
				_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t	_remove_from_pending_queue(
				thread_call_func_t	func,
				thread_call_param_t	param0,
				boolean_t		remove_all),
			_remove_from_delayed_queue(
				thread_call_func_t	func,
				thread_call_param_t	param0,
				boolean_t		remove_all);

static void		thread_call_daemon(
				thread_call_group_t	group),
			thread_call_thread(
				thread_call_group_t	group);

static void		thread_call_delayed_timer(
				timer_call_param_t	p0,
				timer_call_param_t	p1);

/* Casting helpers between callout entries and queue elements. */
#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))
122
123/*
c910b4d9 124 * thread_call_initialize:
1c79356b 125 *
c910b4d9
A
126 * Initialize this module, called
127 * early during system initialization.
1c79356b 128 */
1c79356b
A
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t			thread;
	int				i;
	spl_t				s;

	/* Zone for dynamically allocated callout entries. */
	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	simple_lock_init(&thread_call_lock, 0);

	/* Interrupts blocked while group state is set up. */
	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	/* Thread the static internal-call pool onto its free queue. */
	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	/* Daemon starts out "awake"; it will clear this before parking. */
	thread_call_daemon_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
176
/*
 * thread_call_setup:
 *
 * Initialize a caller-provided callout entry with its
 * function and first argument; does not enqueue it.
 */
void
thread_call_setup(
	thread_call_t			call,
	thread_call_func_t		func,
	thread_call_param_t		param0)
{
	call_entry_setup(call, func, param0);
}
185
186/*
c910b4d9 187 * _internal_call_allocate:
1c79356b 188 *
c910b4d9 189 * Allocate an internal callout entry.
1c79356b 190 *
c910b4d9 191 * Called with thread_call_lock held.
1c79356b 192 */
1c79356b
A
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	/* The internal pool is fixed size; exhaustion is fatal. */
	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}
205
206/*
c910b4d9 207 * _internal_call_release:
1c79356b 208 *
c910b4d9
A
209 * Release an internal callout entry which
210 * is no longer pending (or delayed).
1c79356b 211 *
c910b4d9 212 * Called with thread_call_lock held.
1c79356b 213 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	/*
	 * Only entries inside the static pool are returned to the
	 * free queue; externally allocated entries are left alone.
	 */
	if (    call >= internal_call_storage                   &&
		   	call < &internal_call_storage[internal_call_count] )
		enqueue_head(&thread_call_internal_queue, qe(call));
}
222
223/*
c910b4d9 224 * _pending_call_enqueue:
1c79356b 225 *
c910b4d9
A
226 * Place an entry at the end of the
227 * pending queue, to be executed soon.
1c79356b 228 *
c910b4d9
A
229 * Returns TRUE if the entry was already
230 * on a queue.
1c79356b 231 *
c910b4d9 232 * Called with thread_call_lock held.
1c79356b 233 */
c910b4d9 234static __inline__ boolean_t
1c79356b 235_pending_call_enqueue(
c910b4d9
A
236 thread_call_t call,
237 thread_call_group_t group)
1c79356b 238{
c910b4d9 239 queue_t old_queue;
1c79356b 240
c910b4d9 241 old_queue = call_entry_enqueue_tail(call, &group->pending_queue);
1c79356b 242
c910b4d9 243 group->pending_count++;
1c79356b 244
c910b4d9 245 return (old_queue != NULL);
1c79356b
A
246}
247
248/*
c910b4d9 249 * _delayed_call_enqueue:
1c79356b 250 *
c910b4d9
A
251 * Place an entry on the delayed queue,
252 * after existing entries with an earlier
253 * (or identical) deadline.
1c79356b 254 *
c910b4d9
A
255 * Returns TRUE if the entry was already
256 * on a queue.
1c79356b 257 *
c910b4d9 258 * Called with thread_call_lock held.
1c79356b 259 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_t			old_queue;

	/* Insert in deadline order after entries with earlier/equal deadlines. */
	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	/* If the entry migrated off the pending queue, fix the count. */
	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
275
276/*
c910b4d9 277 * _call_dequeue:
1c79356b 278 *
c910b4d9 279 * Remove an entry from a queue.
1c79356b 280 *
c910b4d9 281 * Returns TRUE if the entry was on a queue.
1c79356b 282 *
c910b4d9 283 * Called with thread_call_lock held.
1c79356b 284 */
c910b4d9
A
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t			old_queue;

	old_queue = call_entry_dequeue(call);

	/* Keep pending_count in sync when removing a pending entry. */
	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
299
300/*
c910b4d9 301 * _set_delayed_call_timer:
1c79356b 302 *
c910b4d9
A
303 * Reset the timer so that it
304 * next expires when the entry is due.
1c79356b 305 *
c910b4d9 306 * Called with thread_call_lock held.
1c79356b 307 */
1c79356b
A
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	/* Re-arm the group's timer for this (earliest) entry's deadline. */
	timer_call_enter(&group->delayed_timer, call->deadline);
}
315
316/*
c910b4d9 317 * _remove_from_pending_queue:
1c79356b 318 *
c910b4d9
A
319 * Remove the first (or all) matching
320 * entries from the pending queue.
1c79356b 321 *
c910b4d9
A
322 * Returns TRUE if any matching entries
323 * were found.
1c79356b 324 *
c910b4d9 325 * Called with thread_call_lock held.
1c79356b 326 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func			&&
				call->param0 == param0			) {
			/* Capture the successor before unlinking this entry. */
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
360
361/*
c910b4d9 362 * _remove_from_delayed_queue:
1c79356b 363 *
c910b4d9
A
364 * Remove the first (or all) matching
365 * entries from the delayed queue.
1c79356b 366 *
c910b4d9
A
367 * Returns TRUE if any matching entries
368 * were found.
1c79356b 369 *
c910b4d9 370 * Called with thread_call_lock held.
1c79356b 371 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (	call->func == func			&&
				call->param0 == param0			) {
			/* Capture the successor before unlinking this entry. */
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
405
b0d623f7
A
406#ifndef __LP64__
407
1c79356b 408/*
c910b4d9 409 * thread_call_func:
1c79356b 410 *
c910b4d9 411 * Enqueue a function callout.
1c79356b 412 *
c910b4d9
A
413 * Guarantees { function, argument }
414 * uniqueness if unique_call is TRUE.
1c79356b 415 */
1c79356b
A
void
thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t			unique_call)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	spl_t				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* For unique_call, scan the pending queue for an existing match. */
	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func			&&
				call->param0 == param			) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	/* Enqueue a fresh internal entry unless a duplicate was found. */
	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		/* No thread currently running callouts: wake one (or the daemon). */
		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}
455
b0d623f7
A
456#endif /* __LP64__ */
457
1c79356b 458/*
c910b4d9 459 * thread_call_func_delayed:
1c79356b 460 *
c910b4d9
A
461 * Enqueue a function callout to
462 * occur at the stated time.
1c79356b 463 */
1c79356b
A
void
thread_call_func_delayed(
	thread_call_func_t		func,
	thread_call_param_t		param,
	uint64_t			deadline)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	spl_t				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func = func;
	call->param0 = param;
	call->param1 = 0;

	_delayed_call_enqueue(call, group, deadline);

	/* Re-arm the timer only if this entry became the earliest deadline. */
	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);
}
490
491/*
c910b4d9 492 * thread_call_func_cancel:
1c79356b 493 *
c910b4d9 494 * Dequeue a function callout.
1c79356b 495 *
c910b4d9
A
496 * Removes one (or all) { function, argument }
497 * instance(s) from either (or both)
498 * the pending and the delayed queue,
499 * in that order.
1c79356b 500 *
c910b4d9 501 * Returns TRUE if any calls were cancelled.
1c79356b 502 */
1c79356b
A
boolean_t
thread_call_func_cancel(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t			cancel_all)
{
	boolean_t			result;
	spl_t				s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/*
	 * cancel_all uses non-short-circuit `|` so BOTH queues are
	 * always scanned; otherwise `||` stops at the first removal.
	 */
	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
						_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
						_remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
527
528/*
c910b4d9 529 * thread_call_allocate:
1c79356b 530 *
c910b4d9 531 * Allocate a callout entry.
1c79356b 532 */
1c79356b
A
533thread_call_t
534thread_call_allocate(
535 thread_call_func_t func,
c910b4d9 536 thread_call_param_t param0)
1c79356b 537{
c910b4d9
A
538 thread_call_t call = zalloc(thread_call_zone);
539
540 call_entry_setup(call, func, param0);
541
1c79356b
A
542 return (call);
543}
544
545/*
c910b4d9 546 * thread_call_free:
1c79356b 547 *
c910b4d9 548 * Free a callout entry.
1c79356b 549 */
1c79356b
A
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Refuse to free an entry that is still on a queue. */
	if (call->queue != NULL) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}
573
574/*
c910b4d9 575 * thread_call_enter:
1c79356b 576 *
c910b4d9 577 * Enqueue a callout entry to occur "soon".
1c79356b 578 *
c910b4d9
A
579 * Returns TRUE if the call was
580 * already on a queue.
1c79356b 581 */
1c79356b
A
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* Already pending: nothing to do (result stays TRUE). */
	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
607
/* As thread_call_enter, but also supplies the second argument. */
boolean_t
thread_call_enter1(
	thread_call_t			call,
	thread_call_param_t		param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
634
635/*
c910b4d9 636 * thread_call_enter_delayed:
1c79356b 637 *
c910b4d9
A
638 * Enqueue a callout entry to occur
639 * at the stated time.
1c79356b 640 *
c910b4d9
A
641 * Returns TRUE if the call was
642 * already on a queue.
1c79356b 643 */
1c79356b
A
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	/* Re-arm the timer only if this entry became the earliest deadline. */
	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
668
/* As thread_call_enter_delayed, but also supplies the second argument. */
boolean_t
thread_call_enter1_delayed(
	thread_call_t			call,
	thread_call_param_t		param1,
	uint64_t			deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	/* Re-arm the timer only if this entry became the earliest deadline. */
	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
694
695/*
c910b4d9 696 * thread_call_cancel:
1c79356b 697 *
c910b4d9 698 * Dequeue a callout entry.
1c79356b 699 *
c910b4d9
A
700 * Returns TRUE if the call was
701 * on a queue.
1c79356b 702 */
1c79356b
A
703boolean_t
704thread_call_cancel(
c910b4d9 705 thread_call_t call)
1c79356b 706{
c910b4d9
A
707 boolean_t result;
708 thread_call_group_t group = &thread_call_group0;
709 spl_t s;
1c79356b
A
710
711 s = splsched();
712 simple_lock(&thread_call_lock);
c910b4d9
A
713
714 result = _call_dequeue(call, group);
1c79356b
A
715
716 simple_unlock(&thread_call_lock);
717 splx(s);
718
719 return (result);
720}
721
b0d623f7
A
722#ifndef __LP64__
723
1c79356b 724/*
c910b4d9 725 * thread_call_is_delayed:
1c79356b 726 *
c910b4d9
A
727 * Returns TRUE if the call is
728 * currently on a delayed queue.
1c79356b 729 *
c910b4d9 730 * Optionally returns the expiration time.
1c79356b 731 */
1c79356b
A
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		/* Report the expiration time only if the caller asked for it. */
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
755
b0d623f7
A
756#endif /* __LP64__ */
757
1c79356b 758/*
c910b4d9 759 * thread_call_wake:
1c79356b 760 *
c910b4d9
A
761 * Wake a call thread to service
762 * pending call entries. May wake
763 * the daemon thread in order to
764 * create additional call threads.
1c79356b 765 *
c910b4d9 766 * Called with thread_call_lock held.
1c79356b 767 */
c910b4d9
A
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	/*
	 * Prefer an idle worker; the idle->active accounting is done
	 * here (not in the worker) only when the wakeup succeeds.
	 */
	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
		group->idle_count--; group->active_count++;
	}
	else
	/* No idle worker: wake the daemon (once) to spawn more threads. */
	if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		wait_queue_wakeup_one(&group->daemon_wqueue, NULL, THREAD_AWAKENED);
	}
}
781
9bccf70c 782/*
2d21ac55 783 * sched_call_thread:
9bccf70c 784 *
2d21ac55 785 * Call out invoked by the scheduler.
9bccf70c 786 */
2d21ac55
A
static void
sched_call_thread(
	int				type,
__unused	thread_t			thread)
{
	thread_call_group_t	group = &thread_call_group0;

	simple_lock(&thread_call_lock);

	switch (type) {

	case SCHED_CALL_BLOCK:
		/*
		 * A worker blocked inside a callout; if it was the last
		 * active one and work remains, wake another worker.
		 */
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	simple_unlock(&thread_call_lock);
}
1c79356b
A
810
811/*
c910b4d9 812 * thread_call_thread:
1c79356b 813 */
c910b4d9
A
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	/* Track blocking inside callouts via the scheduler call-out hook. */
	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		/* Snapshot the entry before releasing it; it may be reused. */
		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		/* Drop the lock (and spl) around the callout invocation. */
		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	/* Park on the idle queue if below the minimum; else terminate. */
	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		/* Restarts at the top of this function when woken. */
		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
877
1c79356b 878/*
c910b4d9 879 * thread_call_daemon:
1c79356b 880 */
c910b4d9
A
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	simple_lock(&thread_call_lock);

	/*
	 * Spawn worker threads while work is pending and nothing is
	 * actively servicing it; active_count is bumped here so at
	 * most one thread is created per iteration.
	 */
	while (group->active_count == 0	&& group->pending_count > 0) {
		group->active_count++;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	/* Park until thread_call_wake() needs more workers. */
	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&group->daemon_wqueue, NULL, THREAD_UNINT, 0);

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}
916
c910b4d9
A
/* Daemon entry point: one-time self setup, then the continue loop. */
static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	/* Allow use of reserved memory so callouts can run under pressure. */
	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}
929
c910b4d9
A
/*
 * thread_call_delayed_timer:
 *
 * Timer expiration: move all due entries from the delayed
 * queue to the pending queue, re-arm for the next deadline,
 * and wake a worker if new work arrived.
 *
 * NOTE(review): runs from timer context — takes thread_call_lock
 * without an explicit spl transition, presumably already at an
 * interrupt-safe level; confirm against timer_call semantics.
 */
static void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1
)
{
	thread_call_t			call;
	thread_call_group_t		group = p0;
	boolean_t			new_pending = FALSE;
	uint64_t			timestamp;

	simple_lock(&thread_call_lock);

	timestamp = mach_absolute_time();

	/* The delayed queue is deadline-ordered; stop at the first future entry. */
	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	/* Re-arm for the earliest remaining deadline, if any. */
	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	simple_unlock(&thread_call_lock);
}