/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <mach/mach_types.h>
30 #include <mach/thread_act.h>
32 #include <kern/kern_types.h>
33 #include <kern/zalloc.h>
34 #include <kern/sched_prim.h>
35 #include <kern/clock.h>
36 #include <kern/task.h>
37 #include <kern/thread.h>
38 #include <kern/wait_queue.h>
40 #include <vm/vm_pageout.h>
42 #include <kern/thread_call.h>
43 #include <kern/call_entry.h>
45 #include <kern/timer_call.h>
47 #include <sys/kdebug.h>
49 decl_simple_lock_data(static,thread_call_lock
)
51 static zone_t thread_call_zone
;
53 struct thread_call_group
{
54 queue_head_t pending_queue
;
55 uint32_t pending_count
;
57 queue_head_t delayed_queue
;
59 timer_call_data_t delayed_timer
;
61 struct wait_queue idle_wqueue
;
62 struct wait_queue daemon_wqueue
;
63 uint32_t idle_count
, active_count
;
66 typedef struct thread_call_group
*thread_call_group_t
;
68 static struct thread_call_group thread_call_group0
;
70 static boolean_t thread_call_daemon_awake
;
72 #define thread_call_thread_min 4
74 #define internal_call_count 768
76 static thread_call_data_t internal_call_storage
[internal_call_count
];
77 static queue_head_t thread_call_internal_queue
;
79 static __inline__ thread_call_t
_internal_call_allocate(void);
81 static __inline__
void _internal_call_release(
84 static __inline__ boolean_t
_pending_call_enqueue(
86 thread_call_group_t group
),
87 _delayed_call_enqueue(
89 thread_call_group_t group
,
93 thread_call_group_t group
);
95 static __inline__
void thread_call_wake(
96 thread_call_group_t group
);
98 static __inline__
void _set_delayed_call_timer(
100 thread_call_group_t group
);
102 static boolean_t
_remove_from_pending_queue(
103 thread_call_func_t func
,
104 thread_call_param_t param0
,
105 boolean_t remove_all
),
106 _remove_from_delayed_queue(
107 thread_call_func_t func
,
108 thread_call_param_t param0
,
109 boolean_t remove_all
);
111 static void thread_call_daemon(
112 thread_call_group_t group
),
114 thread_call_group_t group
);
116 static void thread_call_delayed_timer(
117 timer_call_param_t p0
,
118 timer_call_param_t p1
);
120 #define qe(x) ((queue_entry_t)(x))
121 #define TC(x) ((thread_call_t)(x))
124 * thread_call_initialize:
126 * Initialize this module, called
127 * early during system initialization.
130 thread_call_initialize(void)
133 thread_call_group_t group
= &thread_call_group0
;
134 kern_return_t result
;
139 i
= sizeof (thread_call_data_t
);
140 thread_call_zone
= zinit(i
, 4096 * i
, 16 * i
, "thread_call");
142 simple_lock_init(&thread_call_lock
, 0);
145 simple_lock(&thread_call_lock
);
147 queue_init(&group
->pending_queue
);
148 queue_init(&group
->delayed_queue
);
150 timer_call_setup(&group
->delayed_timer
, thread_call_delayed_timer
, group
);
152 wait_queue_init(&group
->idle_wqueue
, SYNC_POLICY_FIFO
);
153 wait_queue_init(&group
->daemon_wqueue
, SYNC_POLICY_FIFO
);
155 queue_init(&thread_call_internal_queue
);
157 call
= internal_call_storage
;
158 call
< &internal_call_storage
[internal_call_count
];
161 enqueue_tail(&thread_call_internal_queue
, qe(call
));
164 thread_call_daemon_awake
= TRUE
;
166 simple_unlock(&thread_call_lock
);
169 result
= kernel_thread_start_priority((thread_continue_t
)thread_call_daemon
, group
, BASEPRI_PREEMPT
+ 1, &thread
);
170 if (result
!= KERN_SUCCESS
)
171 panic("thread_call_initialize");
173 thread_deallocate(thread
);
179 thread_call_func_t func
,
180 thread_call_param_t param0
)
182 call_entry_setup(call
, func
, param0
);
186 * _internal_call_allocate:
188 * Allocate an internal callout entry.
190 * Called with thread_call_lock held.
192 static __inline__ thread_call_t
193 _internal_call_allocate(void)
197 if (queue_empty(&thread_call_internal_queue
))
198 panic("_internal_call_allocate");
200 call
= TC(dequeue_head(&thread_call_internal_queue
));
206 * _internal_call_release:
208 * Release an internal callout entry which
209 * is no longer pending (or delayed).
211 * Called with thread_call_lock held.
213 static __inline__
void
214 _internal_call_release(
217 if ( call
>= internal_call_storage
&&
218 call
< &internal_call_storage
[internal_call_count
] )
219 enqueue_head(&thread_call_internal_queue
, qe(call
));
223 * _pending_call_enqueue:
225 * Place an entry at the end of the
226 * pending queue, to be executed soon.
228 * Returns TRUE if the entry was already
231 * Called with thread_call_lock held.
233 static __inline__ boolean_t
234 _pending_call_enqueue(
236 thread_call_group_t group
)
240 old_queue
= call_entry_enqueue_tail(call
, &group
->pending_queue
);
242 group
->pending_count
++;
244 return (old_queue
!= NULL
);
248 * _delayed_call_enqueue:
250 * Place an entry on the delayed queue,
251 * after existing entries with an earlier
252 * (or identical) deadline.
254 * Returns TRUE if the entry was already
257 * Called with thread_call_lock held.
259 static __inline__ boolean_t
260 _delayed_call_enqueue(
262 thread_call_group_t group
,
267 old_queue
= call_entry_enqueue_deadline(call
, &group
->delayed_queue
, deadline
);
269 if (old_queue
== &group
->pending_queue
)
270 group
->pending_count
--;
272 return (old_queue
!= NULL
);
278 * Remove an entry from a queue.
280 * Returns TRUE if the entry was on a queue.
282 * Called with thread_call_lock held.
284 static __inline__ boolean_t
287 thread_call_group_t group
)
291 old_queue
= call_entry_dequeue(call
);
293 if (old_queue
== &group
->pending_queue
)
294 group
->pending_count
--;
296 return (old_queue
!= NULL
);
300 * _set_delayed_call_timer:
302 * Reset the timer so that it
303 * next expires when the entry is due.
305 * Called with thread_call_lock held.
307 static __inline__
void
308 _set_delayed_call_timer(
310 thread_call_group_t group
)
312 timer_call_enter(&group
->delayed_timer
, call
->deadline
);
316 * _remove_from_pending_queue:
318 * Remove the first (or all) matching
319 * entries from the pending queue.
321 * Returns TRUE if any matching entries
324 * Called with thread_call_lock held.
327 _remove_from_pending_queue(
328 thread_call_func_t func
,
329 thread_call_param_t param0
,
330 boolean_t remove_all
)
332 boolean_t call_removed
= FALSE
;
334 thread_call_group_t group
= &thread_call_group0
;
336 call
= TC(queue_first(&group
->pending_queue
));
338 while (!queue_end(&group
->pending_queue
, qe(call
))) {
339 if ( call
->func
== func
&&
340 call
->param0
== param0
) {
341 thread_call_t next
= TC(queue_next(qe(call
)));
343 _call_dequeue(call
, group
);
345 _internal_call_release(call
);
354 call
= TC(queue_next(qe(call
)));
357 return (call_removed
);
361 * _remove_from_delayed_queue:
363 * Remove the first (or all) matching
364 * entries from the delayed queue.
366 * Returns TRUE if any matching entries
369 * Called with thread_call_lock held.
372 _remove_from_delayed_queue(
373 thread_call_func_t func
,
374 thread_call_param_t param0
,
375 boolean_t remove_all
)
377 boolean_t call_removed
= FALSE
;
379 thread_call_group_t group
= &thread_call_group0
;
381 call
= TC(queue_first(&group
->delayed_queue
));
383 while (!queue_end(&group
->delayed_queue
, qe(call
))) {
384 if ( call
->func
== func
&&
385 call
->param0
== param0
) {
386 thread_call_t next
= TC(queue_next(qe(call
)));
388 _call_dequeue(call
, group
);
390 _internal_call_release(call
);
399 call
= TC(queue_next(qe(call
)));
402 return (call_removed
);
#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = TC(queue_first(&group->pending_queue));

	/* For unique_call, scan for an existing { func, param } entry. */
	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param		) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func		= func;
		call->param0		= param;
		call->param1		= 0;

		_pending_call_enqueue(call, group);

		/* No active worker: wake one (or the daemon) to service it. */
		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}

#endif	/* __LP64__ */
458 * thread_call_func_delayed:
460 * Enqueue a function callout to
461 * occur at the stated time.
464 thread_call_func_delayed(
465 thread_call_func_t func
,
466 thread_call_param_t param
,
470 thread_call_group_t group
= &thread_call_group0
;
474 simple_lock(&thread_call_lock
);
476 call
= _internal_call_allocate();
478 call
->param0
= param
;
481 _delayed_call_enqueue(call
, group
, deadline
);
483 if (queue_first(&group
->delayed_queue
) == qe(call
))
484 _set_delayed_call_timer(call
, group
);
486 simple_unlock(&thread_call_lock
);
491 * thread_call_func_cancel:
493 * Dequeue a function callout.
495 * Removes one (or all) { function, argument }
496 * instance(s) from either (or both)
497 * the pending and the delayed queue,
500 * Returns TRUE if any calls were cancelled.
503 thread_call_func_cancel(
504 thread_call_func_t func
,
505 thread_call_param_t param
,
506 boolean_t cancel_all
)
512 simple_lock(&thread_call_lock
);
515 result
= _remove_from_pending_queue(func
, param
, cancel_all
) |
516 _remove_from_delayed_queue(func
, param
, cancel_all
);
518 result
= _remove_from_pending_queue(func
, param
, cancel_all
) ||
519 _remove_from_delayed_queue(func
, param
, cancel_all
);
521 simple_unlock(&thread_call_lock
);
528 * thread_call_allocate:
530 * Allocate a callout entry.
533 thread_call_allocate(
534 thread_call_func_t func
,
535 thread_call_param_t param0
)
537 thread_call_t call
= zalloc(thread_call_zone
);
539 call_entry_setup(call
, func
, param0
);
547 * Free a callout entry.
556 simple_lock(&thread_call_lock
);
558 if (call
->queue
!= NULL
) {
559 simple_unlock(&thread_call_lock
);
565 simple_unlock(&thread_call_lock
);
568 zfree(thread_call_zone
, call
);
576 * Enqueue a callout entry to occur "soon".
578 * Returns TRUE if the call was
579 * already on a queue.
585 boolean_t result
= TRUE
;
586 thread_call_group_t group
= &thread_call_group0
;
590 simple_lock(&thread_call_lock
);
592 if (call
->queue
!= &group
->pending_queue
) {
593 result
= _pending_call_enqueue(call
, group
);
595 if (group
->active_count
== 0)
596 thread_call_wake(group
);
601 simple_unlock(&thread_call_lock
);
610 thread_call_param_t param1
)
612 boolean_t result
= TRUE
;
613 thread_call_group_t group
= &thread_call_group0
;
617 simple_lock(&thread_call_lock
);
619 if (call
->queue
!= &group
->pending_queue
) {
620 result
= _pending_call_enqueue(call
, group
);
622 if (group
->active_count
== 0)
623 thread_call_wake(group
);
626 call
->param1
= param1
;
628 simple_unlock(&thread_call_lock
);
635 * thread_call_enter_delayed:
637 * Enqueue a callout entry to occur
638 * at the stated time.
640 * Returns TRUE if the call was
641 * already on a queue.
644 thread_call_enter_delayed(
648 boolean_t result
= TRUE
;
649 thread_call_group_t group
= &thread_call_group0
;
653 simple_lock(&thread_call_lock
);
655 result
= _delayed_call_enqueue(call
, group
, deadline
);
657 if (queue_first(&group
->delayed_queue
) == qe(call
))
658 _set_delayed_call_timer(call
, group
);
662 simple_unlock(&thread_call_lock
);
669 thread_call_enter1_delayed(
671 thread_call_param_t param1
,
674 boolean_t result
= TRUE
;
675 thread_call_group_t group
= &thread_call_group0
;
679 simple_lock(&thread_call_lock
);
681 result
= _delayed_call_enqueue(call
, group
, deadline
);
683 if (queue_first(&group
->delayed_queue
) == qe(call
))
684 _set_delayed_call_timer(call
, group
);
686 call
->param1
= param1
;
688 simple_unlock(&thread_call_lock
);
695 * thread_call_cancel:
697 * Dequeue a callout entry.
699 * Returns TRUE if the call was
707 thread_call_group_t group
= &thread_call_group0
;
711 simple_lock(&thread_call_lock
);
713 result
= _call_dequeue(call
, group
);
715 simple_unlock(&thread_call_lock
);
#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

#endif	/* __LP64__ */
760 * Wake a call thread to service
761 * pending call entries. May wake
762 * the daemon thread in order to
763 * create additional call threads.
765 * Called with thread_call_lock held.
767 static __inline__
void
769 thread_call_group_t group
)
771 if (group
->idle_count
> 0 && wait_queue_wakeup_one(&group
->idle_wqueue
, NULL
, THREAD_AWAKENED
) == KERN_SUCCESS
) {
772 group
->idle_count
--; group
->active_count
++;
775 if (!thread_call_daemon_awake
) {
776 thread_call_daemon_awake
= TRUE
;
777 wait_queue_wakeup_one(&group
->daemon_wqueue
, NULL
, THREAD_AWAKENED
);
784 * Call out invoked by the scheduler.
789 __unused thread_t thread
)
791 thread_call_group_t group
= &thread_call_group0
;
793 simple_lock(&thread_call_lock
);
797 case SCHED_CALL_BLOCK
:
798 if (--group
->active_count
== 0 && group
->pending_count
> 0)
799 thread_call_wake(group
);
802 case SCHED_CALL_UNBLOCK
:
803 group
->active_count
++;
807 simple_unlock(&thread_call_lock
);
811 * thread_call_thread:
815 thread_call_group_t group
)
817 thread_t self
= current_thread();
820 simple_lock(&thread_call_lock
);
822 thread_sched_call(self
, sched_call_thread
);
824 while (group
->pending_count
> 0) {
826 thread_call_func_t func
;
827 thread_call_param_t param0
, param1
;
829 call
= TC(dequeue_head(&group
->pending_queue
));
830 group
->pending_count
--;
833 param0
= call
->param0
;
834 param1
= call
->param1
;
838 _internal_call_release(call
);
840 simple_unlock(&thread_call_lock
);
843 KERNEL_DEBUG_CONSTANT(
844 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_CALLOUT
) | DBG_FUNC_NONE
,
845 func
, param0
, param1
, 0, 0);
847 (*func
)(param0
, param1
);
849 (void)thread_funnel_set(self
->funnel_lock
, FALSE
); /* XXX */
852 simple_lock(&thread_call_lock
);
855 thread_sched_call(self
, NULL
);
856 group
->active_count
--;
858 if (group
->idle_count
< thread_call_thread_min
) {
861 wait_queue_assert_wait(&group
->idle_wqueue
, NULL
, THREAD_UNINT
, 0);
863 simple_unlock(&thread_call_lock
);
866 thread_block_parameter((thread_continue_t
)thread_call_thread
, group
);
870 simple_unlock(&thread_call_lock
);
873 thread_terminate(self
);
878 * thread_call_daemon:
881 thread_call_daemon_continue(
882 thread_call_group_t group
)
884 kern_return_t result
;
888 simple_lock(&thread_call_lock
);
890 while (group
->active_count
== 0 && group
->pending_count
> 0) {
891 group
->active_count
++;
893 simple_unlock(&thread_call_lock
);
896 result
= kernel_thread_start_priority((thread_continue_t
)thread_call_thread
, group
, BASEPRI_PREEMPT
, &thread
);
897 if (result
!= KERN_SUCCESS
)
898 panic("thread_call_daemon");
900 thread_deallocate(thread
);
903 simple_lock(&thread_call_lock
);
906 thread_call_daemon_awake
= FALSE
;
907 wait_queue_assert_wait(&group
->daemon_wqueue
, NULL
, THREAD_UNINT
, 0);
909 simple_unlock(&thread_call_lock
);
912 thread_block_parameter((thread_continue_t
)thread_call_daemon_continue
, group
);
918 thread_call_group_t group
)
920 thread_t self
= current_thread();
922 self
->options
|= TH_OPT_VMPRIV
;
923 vm_page_free_reserve(2); /* XXX */
925 thread_call_daemon_continue(group
);
930 thread_call_delayed_timer(
931 timer_call_param_t p0
,
932 __unused timer_call_param_t p1
936 thread_call_group_t group
= p0
;
937 boolean_t new_pending
= FALSE
;
940 simple_lock(&thread_call_lock
);
942 timestamp
= mach_absolute_time();
944 call
= TC(queue_first(&group
->delayed_queue
));
946 while (!queue_end(&group
->delayed_queue
, qe(call
))) {
947 if (call
->deadline
<= timestamp
) {
948 _pending_call_enqueue(call
, group
);
954 call
= TC(queue_first(&group
->delayed_queue
));
957 if (!queue_end(&group
->delayed_queue
, qe(call
)))
958 _set_delayed_call_timer(call
, group
);
960 if (new_pending
&& group
->active_count
== 0)
961 thread_call_wake(group
);
963 simple_unlock(&thread_call_lock
);