/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>
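
/*
 *	Thread callouts.
 *
 *	A thread call is a deferred { function, parameter } work item.
 *	Entries are placed either on a pending queue, to be executed "soon"
 *	by a pool of callout threads, or on a deadline-ordered delayed
 *	queue serviced by a timer call.  A daemon thread creates additional
 *	callout threads when all existing ones are blocked while work is
 *	still pending.  Illustrative usage sketches appear as comments
 *	after the public entry points below.
 */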
decl_simple_lock_data(static,thread_call_lock)

static zone_t			thread_call_zone;
struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

static struct thread_call_group		thread_call_group0;
static boolean_t		thread_call_daemon_awake;

#define thread_call_thread_min	4

#define internal_call_count	768

static thread_call_data_t	internal_call_storage[internal_call_count];
static queue_head_t		thread_call_internal_queue;
static __inline__ thread_call_t	_internal_call_allocate(void);

static __inline__ void		_internal_call_release(
					thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group),
				_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline),
				_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t		_remove_from_pending_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all),
				_remove_from_delayed_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static void			thread_call_daemon(
					thread_call_group_t	group),
				thread_call_thread(
					thread_call_group_t	group);

static void			thread_call_delayed_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);
#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	kern_return_t		result;
	thread_t		thread;
	spl_t			s;
	int			i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");

	simple_lock_init(&thread_call_lock, 0);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
void
thread_call_setup(
	thread_call_t		call,
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	call_entry_setup(call, func, param0);
}
/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}
/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (	call >= internal_call_storage				&&
		call < &internal_call_storage[internal_call_count]	)
		enqueue_head(&thread_call_internal_queue, qe(call));
}
/*
 *	_pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t		old_queue;

	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

	group->pending_count++;

	return (old_queue != NULL);
}
/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_t		old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t		old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->deadline);
}
/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param		) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func = func;
		call->param0 = param;
		call->param1 = NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}
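
/*
 *	Usage sketch (illustrative only; example_flush is a hypothetical
 *	callout function, not part of this file):
 *
 *		static void
 *		example_flush(thread_call_param_t p0, thread_call_param_t p1);
 *
 *		thread_call_func(example_flush, NULL, TRUE);
 *
 *	With unique_call TRUE, a second identical request made while the
 *	first is still pending does not enqueue a duplicate entry.
 */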
/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t	func,
	thread_call_param_t	param,
	uint64_t		deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func = func;
	call->param0 = param;
	call->param1 = 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);
}
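
/*
 *	Usage sketch (illustrative only; example_flush is hypothetical).
 *	Deadlines are absolute mach_absolute_time() values, so a relative
 *	interval is converted first, e.g. with clock_interval_to_deadline():
 *
 *		uint64_t	deadline;
 *
 *		clock_interval_to_deadline(100, 1000 * 1000, &deadline);
 *		thread_call_func_delayed(example_flush, NULL, deadline);
 *
 *	This requests example_flush() roughly 100 ms from now.
 */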
/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t	func,
	thread_call_param_t	param,
	boolean_t		cancel_all)
{
	boolean_t	result;
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
				_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
				_remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
/*
 *	thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t	func,
	thread_call_param_t	param0)
{
	thread_call_t	call = zalloc(thread_call_zone);

	call_entry_setup(call, func, param0);

	return (call);
}
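
/*
 *	Lifecycle sketch (illustrative only; example_func and ctx are
 *	hypothetical):
 *
 *		thread_call_t	call;
 *
 *		call = thread_call_allocate(example_func, (thread_call_param_t)ctx);
 *		thread_call_enter(call);
 *		...
 *		thread_call_cancel(call);
 *		thread_call_free(call);
 *
 *	thread_call_free() returns FALSE and does not free the entry if it
 *	is still on a queue, so cancel before freeing.
 */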
/*
 *	thread_call_free:
 *
 *	Free a callout entry.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != NULL) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}
/*
 *	thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
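
/*
 *	Usage sketch (illustrative only; request is hypothetical):
 *	thread_call_enter1() additionally latches a second, per-invocation
 *	parameter into the entry:
 *
 *		thread_call_enter1(call, (thread_call_param_t)request);
 *
 *	The callout later runs as (*func)(param0, request).
 */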
/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
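
/*
 *	Usage sketch (illustrative only; call was previously obtained from
 *	thread_call_allocate()):
 *
 *		uint64_t	deadline;
 *
 *		clock_interval_to_deadline(5, NSEC_PER_SEC, &deadline);
 *		thread_call_enter_delayed(call, deadline);
 *
 *	A TRUE return indicates the entry was already queued and has been
 *	rescheduled to the new deadline.
 */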
boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _call_dequeue(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t	call,
	uint64_t	*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
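
/*
 *	Usage sketch (illustrative only):
 *
 *		uint64_t	deadline;
 *
 *		if (thread_call_is_delayed(call, &deadline))
 *			...
 *
 *	On a TRUE return, deadline holds the absolute expiration time of
 *	the queued entry.
 */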
/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
		group->idle_count--; group->active_count++;
	}
	else
	if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		thread_wakeup_one(&thread_call_daemon_awake);
	}
}
/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.
 */
static void
sched_call_thread(
		int		type,
	__unused thread_t	thread)
{
	thread_call_group_t	group = &thread_call_group0;

	simple_lock(&thread_call_lock);

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	simple_unlock(&thread_call_lock);
}
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t		call;
		thread_call_func_t	func;
		thread_call_param_t	param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				(int)func, (int)param0, (int)param1, 0, 0);

		(*func)(param0, param1);

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
/*
 *	thread_call_daemon:
 */
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	simple_lock(&thread_call_lock);

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_call_daemon_awake = FALSE;
	assert_wait(&thread_call_daemon_awake, THREAD_UNINT);

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}
static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}
static void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	boolean_t		new_pending = FALSE;
	uint64_t		timestamp;

	simple_lock(&thread_call_lock);

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	simple_unlock(&thread_call_lock);
}