]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/thread_call.c
xnu-1456.1.26.tar.gz
[apple/xnu.git] / osfmk / kern / thread_call.c
CommitLineData
/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29#include <mach/mach_types.h>
91447636 30#include <mach/thread_act.h>
1c79356b 31
91447636 32#include <kern/kern_types.h>
c910b4d9 33#include <kern/zalloc.h>
1c79356b
A
34#include <kern/sched_prim.h>
35#include <kern/clock.h>
36#include <kern/task.h>
37#include <kern/thread.h>
91447636
A
38#include <kern/wait_queue.h>
39
40#include <vm/vm_pageout.h>
1c79356b
A
41
42#include <kern/thread_call.h>
43#include <kern/call_entry.h>
44
45#include <kern/timer_call.h>
46
55e303ae
A
47#include <sys/kdebug.h>
48
c910b4d9 49decl_simple_lock_data(static,thread_call_lock)
1c79356b 50
c910b4d9 51static zone_t thread_call_zone;
1c79356b 52
c910b4d9
A
53struct thread_call_group {
54 queue_head_t pending_queue;
55 uint32_t pending_count;
1c79356b 56
c910b4d9 57 queue_head_t delayed_queue;
1c79356b 58
c910b4d9 59 timer_call_data_t delayed_timer;
1c79356b 60
c910b4d9
A
61 struct wait_queue idle_wqueue;
62 uint32_t idle_count, active_count;
63};
1c79356b 64
c910b4d9 65typedef struct thread_call_group *thread_call_group_t;
1c79356b 66
c910b4d9 67static struct thread_call_group thread_call_group0;
1c79356b 68
c910b4d9 69static boolean_t thread_call_daemon_awake;
1c79356b 70
c910b4d9 71#define thread_call_thread_min 4
1c79356b 72
c910b4d9 73#define internal_call_count 768
1c79356b 74
c910b4d9
A
75static thread_call_data_t internal_call_storage[internal_call_count];
76static queue_head_t thread_call_internal_queue;
1c79356b 77
c910b4d9 78static __inline__ thread_call_t _internal_call_allocate(void);
1c79356b 79
c910b4d9
A
80static __inline__ void _internal_call_release(
81 thread_call_t call);
1c79356b 82
c910b4d9
A
83static __inline__ boolean_t _pending_call_enqueue(
84 thread_call_t call,
85 thread_call_group_t group),
86 _delayed_call_enqueue(
87 thread_call_t call,
88 thread_call_group_t group,
89 uint64_t deadline),
90 _call_dequeue(
91 thread_call_t call,
92 thread_call_group_t group);
93
94static __inline__ void thread_call_wake(
95 thread_call_group_t group);
96
97static __inline__ void _set_delayed_call_timer(
98 thread_call_t call,
99 thread_call_group_t group);
100
101static boolean_t _remove_from_pending_queue(
102 thread_call_func_t func,
103 thread_call_param_t param0,
104 boolean_t remove_all),
105 _remove_from_delayed_queue(
106 thread_call_func_t func,
107 thread_call_param_t param0,
108 boolean_t remove_all);
109
110static void thread_call_daemon(
111 thread_call_group_t group),
112 thread_call_thread(
113 thread_call_group_t group);
114
115static void thread_call_delayed_timer(
116 timer_call_param_t p0,
117 timer_call_param_t p1);
1c79356b
A
118
119#define qe(x) ((queue_entry_t)(x))
120#define TC(x) ((thread_call_t)(x))
121
122/*
c910b4d9 123 * thread_call_initialize:
1c79356b 124 *
c910b4d9
A
125 * Initialize this module, called
126 * early during system initialization.
1c79356b 127 */
1c79356b
A
128void
129thread_call_initialize(void)
130{
c910b4d9
A
131 thread_call_t call;
132 thread_call_group_t group = &thread_call_group0;
133 kern_return_t result;
134 thread_t thread;
135 int i;
136 spl_t s;
137
138 i = sizeof (thread_call_data_t);
139 thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
1c79356b 140
91447636 141 simple_lock_init(&thread_call_lock, 0);
1c79356b
A
142
143 s = splsched();
144 simple_lock(&thread_call_lock);
145
c910b4d9
A
146 queue_init(&group->pending_queue);
147 queue_init(&group->delayed_queue);
148
149 timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
1c79356b 150
c910b4d9
A
151 wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
152
153 queue_init(&thread_call_internal_queue);
1c79356b
A
154 for (
155 call = internal_call_storage;
c910b4d9 156 call < &internal_call_storage[internal_call_count];
1c79356b
A
157 call++) {
158
c910b4d9 159 enqueue_tail(&thread_call_internal_queue, qe(call));
1c79356b
A
160 }
161
c910b4d9 162 thread_call_daemon_awake = TRUE;
1c79356b
A
163
164 simple_unlock(&thread_call_lock);
165 splx(s);
166
c910b4d9 167 result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
91447636
A
168 if (result != KERN_SUCCESS)
169 panic("thread_call_initialize");
170
171 thread_deallocate(thread);
1c79356b
A
172}
173
174void
175thread_call_setup(
176 thread_call_t call,
177 thread_call_func_t func,
c910b4d9 178 thread_call_param_t param0)
1c79356b
A
179{
180 call_entry_setup(call, func, param0);
181}
182
183/*
c910b4d9 184 * _internal_call_allocate:
1c79356b 185 *
c910b4d9 186 * Allocate an internal callout entry.
1c79356b 187 *
c910b4d9 188 * Called with thread_call_lock held.
1c79356b 189 */
1c79356b
A
190static __inline__ thread_call_t
191_internal_call_allocate(void)
192{
193 thread_call_t call;
194
c910b4d9 195 if (queue_empty(&thread_call_internal_queue))
1c79356b
A
196 panic("_internal_call_allocate");
197
c910b4d9 198 call = TC(dequeue_head(&thread_call_internal_queue));
1c79356b
A
199
200 return (call);
201}
202
203/*
c910b4d9 204 * _internal_call_release:
1c79356b 205 *
c910b4d9
A
206 * Release an internal callout entry which
207 * is no longer pending (or delayed).
1c79356b 208 *
c910b4d9 209 * Called with thread_call_lock held.
1c79356b 210 */
c910b4d9 211static __inline__ void
1c79356b 212_internal_call_release(
c910b4d9 213 thread_call_t call)
1c79356b
A
214{
215 if ( call >= internal_call_storage &&
c910b4d9
A
216 call < &internal_call_storage[internal_call_count] )
217 enqueue_head(&thread_call_internal_queue, qe(call));
1c79356b
A
218}
219
220/*
c910b4d9 221 * _pending_call_enqueue:
1c79356b 222 *
c910b4d9
A
223 * Place an entry at the end of the
224 * pending queue, to be executed soon.
1c79356b 225 *
c910b4d9
A
226 * Returns TRUE if the entry was already
227 * on a queue.
1c79356b 228 *
c910b4d9 229 * Called with thread_call_lock held.
1c79356b 230 */
c910b4d9 231static __inline__ boolean_t
1c79356b 232_pending_call_enqueue(
c910b4d9
A
233 thread_call_t call,
234 thread_call_group_t group)
1c79356b 235{
c910b4d9 236 queue_t old_queue;
1c79356b 237
c910b4d9 238 old_queue = call_entry_enqueue_tail(call, &group->pending_queue);
1c79356b 239
c910b4d9 240 group->pending_count++;
1c79356b 241
c910b4d9 242 return (old_queue != NULL);
1c79356b
A
243}
244
245/*
c910b4d9 246 * _delayed_call_enqueue:
1c79356b 247 *
c910b4d9
A
248 * Place an entry on the delayed queue,
249 * after existing entries with an earlier
250 * (or identical) deadline.
1c79356b 251 *
c910b4d9
A
252 * Returns TRUE if the entry was already
253 * on a queue.
1c79356b 254 *
c910b4d9 255 * Called with thread_call_lock held.
1c79356b 256 */
c910b4d9 257static __inline__ boolean_t
1c79356b 258_delayed_call_enqueue(
c910b4d9
A
259 thread_call_t call,
260 thread_call_group_t group,
261 uint64_t deadline)
1c79356b 262{
c910b4d9 263 queue_t old_queue;
1c79356b 264
c910b4d9
A
265 old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);
266
267 if (old_queue == &group->pending_queue)
268 group->pending_count--;
269
270 return (old_queue != NULL);
1c79356b
A
271}
272
273/*
c910b4d9 274 * _call_dequeue:
1c79356b 275 *
c910b4d9 276 * Remove an entry from a queue.
1c79356b 277 *
c910b4d9 278 * Returns TRUE if the entry was on a queue.
1c79356b 279 *
c910b4d9 280 * Called with thread_call_lock held.
1c79356b 281 */
c910b4d9
A
282static __inline__ boolean_t
283_call_dequeue(
284 thread_call_t call,
285 thread_call_group_t group)
1c79356b 286{
c910b4d9
A
287 queue_t old_queue;
288
289 old_queue = call_entry_dequeue(call);
290
291 if (old_queue == &group->pending_queue)
292 group->pending_count--;
293
294 return (old_queue != NULL);
1c79356b
A
295}
296
297/*
c910b4d9 298 * _set_delayed_call_timer:
1c79356b 299 *
c910b4d9
A
300 * Reset the timer so that it
301 * next expires when the entry is due.
1c79356b 302 *
c910b4d9 303 * Called with thread_call_lock held.
1c79356b 304 */
1c79356b
A
305static __inline__ void
306_set_delayed_call_timer(
c910b4d9
A
307 thread_call_t call,
308 thread_call_group_t group)
1c79356b 309{
c910b4d9 310 timer_call_enter(&group->delayed_timer, call->deadline);
1c79356b
A
311}
312
313/*
c910b4d9 314 * _remove_from_pending_queue:
1c79356b 315 *
c910b4d9
A
316 * Remove the first (or all) matching
317 * entries from the pending queue.
1c79356b 318 *
c910b4d9
A
319 * Returns TRUE if any matching entries
320 * were found.
1c79356b 321 *
c910b4d9 322 * Called with thread_call_lock held.
1c79356b 323 */
c910b4d9 324static boolean_t
1c79356b
A
325_remove_from_pending_queue(
326 thread_call_func_t func,
327 thread_call_param_t param0,
c910b4d9 328 boolean_t remove_all)
1c79356b 329{
c910b4d9
A
330 boolean_t call_removed = FALSE;
331 thread_call_t call;
332 thread_call_group_t group = &thread_call_group0;
1c79356b 333
c910b4d9 334 call = TC(queue_first(&group->pending_queue));
1c79356b 335
c910b4d9 336 while (!queue_end(&group->pending_queue, qe(call))) {
1c79356b
A
337 if ( call->func == func &&
338 call->param0 == param0 ) {
339 thread_call_t next = TC(queue_next(qe(call)));
340
c910b4d9 341 _call_dequeue(call, group);
1c79356b
A
342
343 _internal_call_release(call);
344
345 call_removed = TRUE;
346 if (!remove_all)
347 break;
348
349 call = next;
350 }
351 else
352 call = TC(queue_next(qe(call)));
353 }
354
355 return (call_removed);
356}
357
358/*
c910b4d9 359 * _remove_from_delayed_queue:
1c79356b 360 *
c910b4d9
A
361 * Remove the first (or all) matching
362 * entries from the delayed queue.
1c79356b 363 *
c910b4d9
A
364 * Returns TRUE if any matching entries
365 * were found.
1c79356b 366 *
c910b4d9 367 * Called with thread_call_lock held.
1c79356b 368 */
c910b4d9 369static boolean_t
1c79356b
A
370_remove_from_delayed_queue(
371 thread_call_func_t func,
372 thread_call_param_t param0,
c910b4d9 373 boolean_t remove_all)
1c79356b 374{
c910b4d9
A
375 boolean_t call_removed = FALSE;
376 thread_call_t call;
377 thread_call_group_t group = &thread_call_group0;
1c79356b 378
c910b4d9 379 call = TC(queue_first(&group->delayed_queue));
1c79356b 380
c910b4d9 381 while (!queue_end(&group->delayed_queue, qe(call))) {
1c79356b
A
382 if ( call->func == func &&
383 call->param0 == param0 ) {
384 thread_call_t next = TC(queue_next(qe(call)));
385
c910b4d9 386 _call_dequeue(call, group);
1c79356b
A
387
388 _internal_call_release(call);
389
390 call_removed = TRUE;
391 if (!remove_all)
392 break;
393
394 call = next;
395 }
396 else
397 call = TC(queue_next(qe(call)));
398 }
399
400 return (call_removed);
401}
402
#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t			unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	/* With unique_call, scan for an already-pending duplicate. */
	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func	&&
				call->param0 == param	) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		/* Only wake a service thread when none is currently running. */
		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}

#endif	/* __LP64__ */
454
1c79356b 455/*
c910b4d9 456 * thread_call_func_delayed:
1c79356b 457 *
c910b4d9
A
458 * Enqueue a function callout to
459 * occur at the stated time.
1c79356b 460 */
1c79356b
A
461void
462thread_call_func_delayed(
463 thread_call_func_t func,
464 thread_call_param_t param,
c910b4d9 465 uint64_t deadline)
1c79356b 466{
c910b4d9
A
467 thread_call_t call;
468 thread_call_group_t group = &thread_call_group0;
469 spl_t s;
1c79356b 470
1c79356b
A
471 s = splsched();
472 simple_lock(&thread_call_lock);
473
474 call = _internal_call_allocate();
475 call->func = func;
476 call->param0 = param;
477 call->param1 = 0;
1c79356b 478
c910b4d9 479 _delayed_call_enqueue(call, group, deadline);
1c79356b 480
c910b4d9
A
481 if (queue_first(&group->delayed_queue) == qe(call))
482 _set_delayed_call_timer(call, group);
1c79356b
A
483
484 simple_unlock(&thread_call_lock);
485 splx(s);
486}
487
488/*
c910b4d9 489 * thread_call_func_cancel:
1c79356b 490 *
c910b4d9 491 * Dequeue a function callout.
1c79356b 492 *
c910b4d9
A
493 * Removes one (or all) { function, argument }
494 * instance(s) from either (or both)
495 * the pending and the delayed queue,
496 * in that order.
1c79356b 497 *
c910b4d9 498 * Returns TRUE if any calls were cancelled.
1c79356b 499 */
1c79356b
A
500boolean_t
501thread_call_func_cancel(
502 thread_call_func_t func,
503 thread_call_param_t param,
c910b4d9 504 boolean_t cancel_all)
1c79356b
A
505{
506 boolean_t result;
55e303ae 507 spl_t s;
1c79356b
A
508
509 s = splsched();
510 simple_lock(&thread_call_lock);
511
512 if (cancel_all)
513 result = _remove_from_pending_queue(func, param, cancel_all) |
514 _remove_from_delayed_queue(func, param, cancel_all);
515 else
516 result = _remove_from_pending_queue(func, param, cancel_all) ||
517 _remove_from_delayed_queue(func, param, cancel_all);
518
519 simple_unlock(&thread_call_lock);
520 splx(s);
521
522 return (result);
523}
524
525/*
c910b4d9 526 * thread_call_allocate:
1c79356b 527 *
c910b4d9 528 * Allocate a callout entry.
1c79356b 529 */
1c79356b
A
530thread_call_t
531thread_call_allocate(
532 thread_call_func_t func,
c910b4d9 533 thread_call_param_t param0)
1c79356b 534{
c910b4d9
A
535 thread_call_t call = zalloc(thread_call_zone);
536
537 call_entry_setup(call, func, param0);
538
1c79356b
A
539 return (call);
540}
541
542/*
c910b4d9 543 * thread_call_free:
1c79356b 544 *
c910b4d9 545 * Free a callout entry.
1c79356b 546 */
1c79356b
A
547boolean_t
548thread_call_free(
c910b4d9 549 thread_call_t call)
1c79356b 550{
55e303ae 551 spl_t s;
1c79356b
A
552
553 s = splsched();
554 simple_lock(&thread_call_lock);
555
c910b4d9 556 if (call->queue != NULL) {
1c79356b
A
557 simple_unlock(&thread_call_lock);
558 splx(s);
559
560 return (FALSE);
561 }
562
563 simple_unlock(&thread_call_lock);
564 splx(s);
565
c910b4d9 566 zfree(thread_call_zone, call);
1c79356b
A
567
568 return (TRUE);
569}
570
571/*
c910b4d9 572 * thread_call_enter:
1c79356b 573 *
c910b4d9 574 * Enqueue a callout entry to occur "soon".
1c79356b 575 *
c910b4d9
A
576 * Returns TRUE if the call was
577 * already on a queue.
1c79356b 578 */
1c79356b
A
579boolean_t
580thread_call_enter(
c910b4d9 581 thread_call_t call)
1c79356b 582{
c910b4d9
A
583 boolean_t result = TRUE;
584 thread_call_group_t group = &thread_call_group0;
585 spl_t s;
1c79356b
A
586
587 s = splsched();
588 simple_lock(&thread_call_lock);
589
c910b4d9
A
590 if (call->queue != &group->pending_queue) {
591 result = _pending_call_enqueue(call, group);
9bccf70c 592
c910b4d9
A
593 if (group->active_count == 0)
594 thread_call_wake(group);
1c79356b
A
595 }
596
597 call->param1 = 0;
598
599 simple_unlock(&thread_call_lock);
600 splx(s);
601
602 return (result);
603}
604
605boolean_t
606thread_call_enter1(
607 thread_call_t call,
c910b4d9 608 thread_call_param_t param1)
1c79356b 609{
c910b4d9
A
610 boolean_t result = TRUE;
611 thread_call_group_t group = &thread_call_group0;
612 spl_t s;
1c79356b
A
613
614 s = splsched();
615 simple_lock(&thread_call_lock);
616
c910b4d9
A
617 if (call->queue != &group->pending_queue) {
618 result = _pending_call_enqueue(call, group);
619
620 if (group->active_count == 0)
621 thread_call_wake(group);
622 }
1c79356b
A
623
624 call->param1 = param1;
625
626 simple_unlock(&thread_call_lock);
627 splx(s);
628
629 return (result);
630}
631
632/*
c910b4d9 633 * thread_call_enter_delayed:
1c79356b 634 *
c910b4d9
A
635 * Enqueue a callout entry to occur
636 * at the stated time.
1c79356b 637 *
c910b4d9
A
638 * Returns TRUE if the call was
639 * already on a queue.
1c79356b 640 */
1c79356b
A
641boolean_t
642thread_call_enter_delayed(
643 thread_call_t call,
c910b4d9 644 uint64_t deadline)
1c79356b 645{
c910b4d9
A
646 boolean_t result = TRUE;
647 thread_call_group_t group = &thread_call_group0;
648 spl_t s;
1c79356b
A
649
650 s = splsched();
651 simple_lock(&thread_call_lock);
652
c910b4d9 653 result = _delayed_call_enqueue(call, group, deadline);
1c79356b 654
c910b4d9
A
655 if (queue_first(&group->delayed_queue) == qe(call))
656 _set_delayed_call_timer(call, group);
1c79356b 657
c910b4d9 658 call->param1 = 0;
1c79356b
A
659
660 simple_unlock(&thread_call_lock);
661 splx(s);
662
663 return (result);
664}
665
666boolean_t
667thread_call_enter1_delayed(
668 thread_call_t call,
669 thread_call_param_t param1,
c910b4d9 670 uint64_t deadline)
1c79356b 671{
c910b4d9
A
672 boolean_t result = TRUE;
673 thread_call_group_t group = &thread_call_group0;
674 spl_t s;
1c79356b
A
675
676 s = splsched();
677 simple_lock(&thread_call_lock);
678
c910b4d9 679 result = _delayed_call_enqueue(call, group, deadline);
1c79356b 680
c910b4d9
A
681 if (queue_first(&group->delayed_queue) == qe(call))
682 _set_delayed_call_timer(call, group);
1c79356b 683
c910b4d9 684 call->param1 = param1;
1c79356b
A
685
686 simple_unlock(&thread_call_lock);
687 splx(s);
688
689 return (result);
690}
691
692/*
c910b4d9 693 * thread_call_cancel:
1c79356b 694 *
c910b4d9 695 * Dequeue a callout entry.
1c79356b 696 *
c910b4d9
A
697 * Returns TRUE if the call was
698 * on a queue.
1c79356b 699 */
1c79356b
A
700boolean_t
701thread_call_cancel(
c910b4d9 702 thread_call_t call)
1c79356b 703{
c910b4d9
A
704 boolean_t result;
705 thread_call_group_t group = &thread_call_group0;
706 spl_t s;
1c79356b
A
707
708 s = splsched();
709 simple_lock(&thread_call_lock);
c910b4d9
A
710
711 result = _call_dequeue(call, group);
1c79356b
A
712
713 simple_unlock(&thread_call_lock);
714 splx(s);
715
716 return (result);
717}
718
#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

#endif	/* __LP64__ */
754
1c79356b 755/*
c910b4d9 756 * thread_call_wake:
1c79356b 757 *
c910b4d9
A
758 * Wake a call thread to service
759 * pending call entries. May wake
760 * the daemon thread in order to
761 * create additional call threads.
1c79356b 762 *
c910b4d9 763 * Called with thread_call_lock held.
1c79356b 764 */
c910b4d9
A
765static __inline__ void
766thread_call_wake(
767 thread_call_group_t group)
1c79356b 768{
c910b4d9
A
769 if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
770 group->idle_count--; group->active_count++;
1c79356b
A
771 }
772 else
c910b4d9
A
773 if (!thread_call_daemon_awake) {
774 thread_call_daemon_awake = TRUE;
775 thread_wakeup_one(&thread_call_daemon_awake);
1c79356b
A
776 }
777}
778
9bccf70c 779/*
2d21ac55 780 * sched_call_thread:
9bccf70c 781 *
2d21ac55 782 * Call out invoked by the scheduler.
9bccf70c 783 */
2d21ac55
A
784static void
785sched_call_thread(
c910b4d9
A
786 int type,
787__unused thread_t thread)
9bccf70c 788{
c910b4d9
A
789 thread_call_group_t group = &thread_call_group0;
790
9bccf70c
A
791 simple_lock(&thread_call_lock);
792
2d21ac55 793 switch (type) {
9bccf70c 794
2d21ac55 795 case SCHED_CALL_BLOCK:
c910b4d9
A
796 if (--group->active_count == 0 && group->pending_count > 0)
797 thread_call_wake(group);
2d21ac55 798 break;
9bccf70c 799
2d21ac55 800 case SCHED_CALL_UNBLOCK:
c910b4d9 801 group->active_count++;
2d21ac55
A
802 break;
803 }
9bccf70c
A
804
805 simple_unlock(&thread_call_lock);
806}
1c79356b
A
807
808/*
c910b4d9 809 * thread_call_thread:
1c79356b 810 */
c910b4d9
A
811static void
812thread_call_thread(
813 thread_call_group_t group)
1c79356b
A
814{
815 thread_t self = current_thread();
816
1c79356b
A
817 (void) splsched();
818 simple_lock(&thread_call_lock);
819
2d21ac55 820 thread_sched_call(self, sched_call_thread);
9bccf70c 821
c910b4d9 822 while (group->pending_count > 0) {
1c79356b
A
823 thread_call_t call;
824 thread_call_func_t func;
825 thread_call_param_t param0, param1;
826
c910b4d9
A
827 call = TC(dequeue_head(&group->pending_queue));
828 group->pending_count--;
1c79356b
A
829
830 func = call->func;
831 param0 = call->param0;
832 param1 = call->param1;
833
c910b4d9 834 call->queue = NULL;
1c79356b
A
835
836 _internal_call_release(call);
837
1c79356b
A
838 simple_unlock(&thread_call_lock);
839 (void) spllo();
840
55e303ae
A
841 KERNEL_DEBUG_CONSTANT(
842 MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
b0d623f7 843 func, param0, param1, 0, 0);
55e303ae 844
1c79356b
A
845 (*func)(param0, param1);
846
c910b4d9 847 (void)thread_funnel_set(self->funnel_lock, FALSE); /* XXX */
1c79356b
A
848
849 (void) splsched();
850 simple_lock(&thread_call_lock);
1c79356b 851 }
9bccf70c 852
2d21ac55 853 thread_sched_call(self, NULL);
c910b4d9 854 group->active_count--;
9bccf70c 855
c910b4d9
A
856 if (group->idle_count < thread_call_thread_min) {
857 group->idle_count++;
1c79356b 858
c910b4d9 859 wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);
1c79356b
A
860
861 simple_unlock(&thread_call_lock);
862 (void) spllo();
863
c910b4d9 864 thread_block_parameter((thread_continue_t)thread_call_thread, group);
1c79356b
A
865 /* NOTREACHED */
866 }
c910b4d9 867
1c79356b
A
868 simple_unlock(&thread_call_lock);
869 (void) spllo();
870
91447636 871 thread_terminate(self);
1c79356b
A
872 /* NOTREACHED */
873}
874
1c79356b 875/*
c910b4d9 876 * thread_call_daemon:
1c79356b 877 */
c910b4d9
A
878static void
879thread_call_daemon_continue(
880 thread_call_group_t group)
1c79356b 881{
91447636
A
882 kern_return_t result;
883 thread_t thread;
884
1c79356b
A
885 (void) splsched();
886 simple_lock(&thread_call_lock);
887
c910b4d9
A
888 while (group->active_count == 0 && group->pending_count > 0) {
889 group->active_count++;
1c79356b
A
890
891 simple_unlock(&thread_call_lock);
892 (void) spllo();
893
c910b4d9 894 result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
91447636 895 if (result != KERN_SUCCESS)
c910b4d9 896 panic("thread_call_daemon");
91447636
A
897
898 thread_deallocate(thread);
55e303ae 899
9bccf70c
A
900 (void) splsched();
901 simple_lock(&thread_call_lock);
1c79356b 902 }
c910b4d9
A
903
904 thread_call_daemon_awake = FALSE;
905 assert_wait(&thread_call_daemon_awake, THREAD_UNINT);
1c79356b
A
906
907 simple_unlock(&thread_call_lock);
908 (void) spllo();
909
c910b4d9 910 thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
1c79356b
A
911 /* NOTREACHED */
912}
913
c910b4d9
A
914static void
915thread_call_daemon(
916 thread_call_group_t group)
1c79356b 917{
55e303ae 918 thread_t self = current_thread();
1c79356b 919
91447636 920 self->options |= TH_OPT_VMPRIV;
1c79356b 921 vm_page_free_reserve(2); /* XXX */
1c79356b 922
c910b4d9 923 thread_call_daemon_continue(group);
1c79356b
A
924 /* NOTREACHED */
925}
926
c910b4d9
A
927static void
928thread_call_delayed_timer(
929 timer_call_param_t p0,
91447636 930 __unused timer_call_param_t p1
1c79356b
A
931)
932{
c910b4d9
A
933 thread_call_t call;
934 thread_call_group_t group = p0;
935 boolean_t new_pending = FALSE;
936 uint64_t timestamp;
1c79356b 937
1c79356b
A
938 simple_lock(&thread_call_lock);
939
c910b4d9 940 timestamp = mach_absolute_time();
1c79356b 941
c910b4d9 942 call = TC(queue_first(&group->delayed_queue));
1c79356b 943
c910b4d9 944 while (!queue_end(&group->delayed_queue, qe(call))) {
0b4e3aa0 945 if (call->deadline <= timestamp) {
c910b4d9 946 _pending_call_enqueue(call, group);
1c79356b
A
947 new_pending = TRUE;
948 }
949 else
950 break;
951
c910b4d9 952 call = TC(queue_first(&group->delayed_queue));
1c79356b
A
953 }
954
c910b4d9
A
955 if (!queue_end(&group->delayed_queue, qe(call)))
956 _set_delayed_call_timer(call, group);
1c79356b 957
c910b4d9
A
958 if (new_pending && group->active_count == 0)
959 thread_call_wake(group);
1c79356b
A
960
961 simple_unlock(&thread_call_lock);
1c79356b 962}