/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>

static zone_t			thread_call_zone;

struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	struct wait_queue	daemon_wqueue;
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

static struct thread_call_group		thread_call_group0;

static boolean_t		thread_call_daemon_awake;

#define thread_call_thread_min		4

#define internal_call_count		768

static thread_call_data_t	internal_call_storage[internal_call_count];
static queue_head_t		thread_call_internal_queue;

static __inline__ thread_call_t	_internal_call_allocate(void);

static __inline__ void		_internal_call_release(
					thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group),
				_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline),
				_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t		_remove_from_pending_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all),
				_remove_from_delayed_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static void			thread_call_daemon(
					thread_call_group_t	group),
				thread_call_thread(
					thread_call_group_t	group);

extern void			thread_call_delayed_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))

lck_grp_t		thread_call_queues_lck_grp;
lck_grp_t		thread_call_lck_grp;
lck_attr_t		thread_call_lck_attr;
lck_grp_attr_t		thread_call_lck_grp_attr;

#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t		thread_call_lock_data;
#else
lck_spin_t		thread_call_lock_data;
#endif

#define thread_call_lock_spin()		\
	lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock()		\
	lck_mtx_unlock_always(&thread_call_lock_data)

/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	kern_return_t		result;
	thread_t		thread;
	int			i;
	spl_t			s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	     call < &internal_call_storage[internal_call_count];
	     call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}

void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	call_entry_setup(call, func, param0);
}
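
/*
 * Usage sketch (illustrative; example_call, example_func and example_softc
 * are hypothetical names): callers that want to avoid a zone allocation
 * can embed the storage themselves, initialize it once with
 * thread_call_setup(), then enqueue it with the thread_call_enter*()
 * family:
 *
 *	static thread_call_data_t	example_call;
 *
 *	thread_call_setup(&example_call, example_func, example_softc);
 *	...
 *	thread_call_enter(&example_call);
 */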

/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}

/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (call >= internal_call_storage &&
	    call < &internal_call_storage[internal_call_count])
		enqueue_head(&thread_call_internal_queue, qe(call));
}

/*
 *	_pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

	group->pending_count++;

	return (old_queue != NULL);
}

/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}

/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->deadline, 0);
}

/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func &&
		    call->param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->func == func &&
		    call->param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}

#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->func == func &&
		    call->param0 == param)
			break;

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	thread_call_unlock();
	splx(s);
}
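
/*
 * Usage sketch (example_flush and device are hypothetical names): with
 * unique_call TRUE, re-enqueueing a { function, argument } pair that is
 * still pending is a no-op, so a notification path may fire repeatedly
 * without stacking duplicate callouts:
 *
 *	thread_call_func(example_flush, (thread_call_param_t)device, TRUE);
 *	thread_call_func(example_flush, (thread_call_param_t)device, TRUE);
 *
 * Only one pending instance of { example_flush, device } results.
 */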

#endif	/* __LP64__ */

/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = _internal_call_allocate();
	call->func = func;
	call->param0 = param;
	call->param1 = 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
	splx(s);
}
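
/*
 * Deadlines are absolute-time values, not intervals. A sketch of
 * scheduling a callout 100 ms out (example_func and example_arg are
 * hypothetical names):
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	thread_call_func_delayed(example_func, example_arg, deadline);
 */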

/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all)
{
	boolean_t		result;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
			 _remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
			 _remove_from_delayed_queue(func, param, cancel_all);

	thread_call_unlock();
	splx(s);

	return (result);
}
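
/*
 * Note the operator choice above: in the cancel_all case the
 * non-short-circuiting `|' forces both queues to be swept even when the
 * pending sweep already matched, while the short-circuiting `||' in the
 * single-shot case stops at the first queue that yields a match.
 */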

/*
 *	thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	thread_call_t		call = zalloc(thread_call_zone);

	call_entry_setup(call, func, param0);

	return (call);
}
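
/*
 * Typical lifecycle sketch for a zone-allocated callout (example_func,
 * example_softc and example_arg are hypothetical names):
 *
 *	thread_call_t	call;
 *
 *	call = thread_call_allocate(example_func, example_softc);
 *	thread_call_enter1(call, example_arg);	/. run "soon" with param1
 *	...
 *	thread_call_cancel(call);		/. dequeue if still queued
 *	thread_call_free(call);			/. returns FALSE if still queued
 */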

/*
 *	thread_call_free:
 *
 *	Free a callout entry.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != NULL) {
		thread_call_unlock();
		splx(s);

		return (FALSE);
	}

	thread_call_unlock();
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}

/*
 *	thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}

/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}

boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}
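
/*
 * Re-arming sketch: entering a call that is already queued simply moves
 * it, so a periodic callout can re-arm itself from its own handler
 * (example_func is a hypothetical handler whose param0 was set to the
 * call itself when it was allocated):
 *
 *	static void
 *	example_func(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		uint64_t	deadline;
 *
 *		clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *		thread_call_enter1_delayed((thread_call_t)p0, p1, deadline);
 *	}
 */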

/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);

	thread_call_unlock();
	splx(s);

	return (result);
}
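
/*
 * Note: cancellation only removes a call that is still on a queue. Once
 * thread_call_thread() has dequeued the entry for invocation, the
 * callout may already be running (or about to run) when this returns
 * FALSE, so a FALSE result does not guarantee the function will not
 * execute.
 */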

#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	thread_call_unlock();
	splx(s);

	return (result);
}
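
/*
 * Usage sketch (example_call is a hypothetical callout): the optional
 * out-parameter reports the pending deadline in absolute time, so the
 * remaining wait is its distance from mach_absolute_time():
 *
 *	uint64_t	deadline, remaining;
 *
 *	if (thread_call_is_delayed(example_call, &deadline))
 *		remaining = deadline - mach_absolute_time();
 */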

#endif	/* __LP64__ */

/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
		group->idle_count--; group->active_count++;
	}
	else if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		wait_queue_wakeup_one(&group->daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
	}
}

/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.
 */
static void
sched_call_thread(
	int			type,
__unused	thread_t	thread)
{
	thread_call_group_t	group = &thread_call_group0;

	thread_call_lock_spin();

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	thread_call_unlock();
}
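
/*
 * The scheduler callback above keeps active_count honest while a callout
 * blocks: a worker that blocks inside its callout stops counting as
 * active, so thread_call_wake() can bring another thread on line, and
 * the count is restored when the worker unblocks.
 */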

/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
			func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, func, param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);	/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);

		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
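
/*
 * Worker parking above uses the continuation pattern: an idle worker
 * asserts a wait on idle_wqueue and blocks with thread_call_thread as
 * its continuation, so it restarts at the top of the function (with no
 * saved stack) when thread_call_wake() posts the wait queue.
 */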

/*
 *	thread_call_daemon:
 */
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	thread_call_lock_spin();

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		thread_call_unlock();
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&group->daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

	thread_call_unlock();
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}

static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}

void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	boolean_t		new_pending = FALSE;
	uint64_t		timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	thread_call_unlock();
}
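
/*
 * The delayed queue is kept sorted by deadline, so the timer handler
 * above only drains expired entries from the head, then re-arms the
 * group timer for whatever entry has become the new head.
 */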