/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>
decl_simple_lock_data(static,thread_call_lock)

static zone_t			thread_call_zone;
struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;

	timer_call_data_t	delayed_timer;

	struct wait_queue	idle_wqueue;
	uint32_t		idle_count, active_count;
};

typedef struct thread_call_group	*thread_call_group_t;

static struct thread_call_group	thread_call_group0;
static boolean_t		thread_call_daemon_awake;

#define thread_call_thread_min	4

#define internal_call_count	768

static thread_call_data_t	internal_call_storage[internal_call_count];
static queue_head_t		thread_call_internal_queue;
static __inline__ thread_call_t	_internal_call_allocate(void);

static __inline__ void		_internal_call_release(
					thread_call_t		call);

static __inline__ boolean_t	_pending_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group),
				_delayed_call_enqueue(
					thread_call_t		call,
					thread_call_group_t	group,
					uint64_t		deadline),
				_call_dequeue(
					thread_call_t		call,
					thread_call_group_t	group);

static __inline__ void		thread_call_wake(
					thread_call_group_t	group);

static __inline__ void		_set_delayed_call_timer(
					thread_call_t		call,
					thread_call_group_t	group);

static boolean_t		_remove_from_pending_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all),
				_remove_from_delayed_queue(
					thread_call_func_t	func,
					thread_call_param_t	param0,
					boolean_t		remove_all);

static void			thread_call_daemon(
					thread_call_group_t	group),
				thread_call_thread(
					thread_call_group_t	group);

static void			thread_call_delayed_timer(
					timer_call_param_t	p0,
					timer_call_param_t	p1);
#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t			thread;
	int				i;
	spl_t				s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");

	simple_lock_init(&thread_call_lock, 0);

	s = splsched();
	simple_lock(&thread_call_lock);

	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	simple_unlock(&thread_call_lock);
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
void
thread_call_setup(
	thread_call_t			call,
	thread_call_func_t		func,
	thread_call_param_t		param0)
{
	call_entry_setup(call, func, param0);
}
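/*
 * Usage sketch (illustrative only, not part of this file): a client that
 * owns its own callout storage initializes it once with thread_call_setup()
 * and may then arm it repeatedly.  "example_fn" and "example_call" are
 * invented names.
 *
 *	static thread_call_data_t	example_call;
 *
 *	static void
 *	example_fn(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		// runs on a thread_call thread, outside interrupt context
 *	}
 *
 *	thread_call_setup(&example_call, example_fn, NULL);
 *	thread_call_enter(&example_call);
 */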
/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}
/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (	call >= internal_call_storage				&&
		call < &internal_call_storage[internal_call_count]	)
		enqueue_head(&thread_call_internal_queue, qe(call));
}
/*
 *	_pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t			old_queue;

	old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

	group->pending_count++;

	return (old_queue != NULL);
}
/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_t			old_queue;

	old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
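/*
 * Ordering sketch (illustrative): call_entry_enqueue_deadline() keeps the
 * delayed queue sorted by deadline, inserting after entries with an earlier
 * or identical deadline.  Enqueueing entries with deadlines 100, 200, 150
 * (in that order) therefore leaves the queue as 100, 150, 200, so
 * queue_first() is always the next entry to expire -- the invariant that
 * _set_delayed_call_timer() relies on.
 */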
/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_t			old_queue;

	old_queue = call_entry_dequeue(call);

	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}
/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->deadline);
}
/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t	func,
	thread_call_param_t	param0,
	boolean_t		remove_all)
{
	boolean_t		call_removed = FALSE;
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param0		) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t			unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (	call->func == func		&&
			call->param0 == param		) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->func	= func;
		call->param0	= param;
		call->param1	= NULL;

		_pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	simple_unlock(&thread_call_lock);
	splx(s);
}

#endif	/* __LP64__ */
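/*
 * Usage sketch (illustrative): a caller that wants at most one outstanding
 * instance of its handler passes unique_call == TRUE, so a second request
 * made before the first runs is coalesced with it.  "example_intr_work"
 * and "device" are invented names.
 *
 *	thread_call_func(example_intr_work, (thread_call_param_t)device, TRUE);
 */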
/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
	thread_call_func_t		func,
	thread_call_param_t		param,
	uint64_t			deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	call = _internal_call_allocate();
	call->func	= func;
	call->param0	= param;
	call->param1	= 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);
}
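/*
 * Usage sketch (illustrative): deadlines are absolute mach time, so callers
 * typically derive them from an interval; clock_interval_to_deadline() from
 * <kern/clock.h> does the conversion.  "example_timeout" is an invented name.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);	// 1 second
 *	thread_call_func_delayed(example_timeout, NULL, deadline);
 */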
/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t			cancel_all)
{
	boolean_t		result;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
				_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
				_remove_from_delayed_queue(func, param, cancel_all);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
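/*
 * Usage sketch (illustrative): cancellation matches on the { function,
 * argument } pair that was queued.  Note the operator asymmetry above:
 * with cancel_all, both queues are always scanned ('|' does not
 * short-circuit), while a single-shot cancel stops at the first match
 * ('||').  "example_timeout" is an invented name.
 *
 *	(void) thread_call_func_cancel(example_timeout, NULL, FALSE);
 */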
/*
 *	thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
	thread_call_func_t		func,
	thread_call_param_t		param0)
{
	thread_call_t	call = zalloc(thread_call_zone);

	call_entry_setup(call, func, param0);

	return (call);
}
/*
 *	thread_call_free:
 *
 *	Free a callout entry.
 */
boolean_t
thread_call_free(
	thread_call_t		call)
{
	spl_t		s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != NULL) {
		simple_unlock(&thread_call_lock);
		splx(s);

		return (FALSE);
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	zfree(thread_call_zone, call);

	return (TRUE);
}
/*
 *	thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
	thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
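/*
 * Lifecycle sketch (illustrative): a heap-allocated callout pairs
 * thread_call_allocate() with thread_call_free(); the entry may be
 * re-entered any number of times in between.  thread_call_free() returns
 * FALSE while the entry is still queued, so cancel it first.  "example_fn"
 * is an invented name.
 *
 *	thread_call_t	tc = thread_call_allocate(example_fn, NULL);
 *
 *	thread_call_enter(tc);			// queue it "soon"
 *	...
 *	thread_call_cancel(tc);			// best-effort dequeue
 *	thread_call_free(tc);
 */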
boolean_t
thread_call_enter1(
	thread_call_t		call,
	thread_call_param_t	param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);

		if (group->active_count == 0)
			thread_call_wake(group);
	}

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
	thread_call_t		call,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = 0;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
boolean_t
thread_call_enter1_delayed(
	thread_call_t		call,
	thread_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->param1 = param1;

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
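/*
 * Usage sketch (illustrative): the enter1 variants carry a second,
 * per-enter parameter in param1; re-entering an already-queued call
 * updates its deadline and param1 rather than queueing a second
 * instance.  "tc" and "request" are invented names.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
 *	thread_call_enter1_delayed(tc, (thread_call_param_t)request, deadline);
 */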
/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
	thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	result = _call_dequeue(call, group);

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}
#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group = &thread_call_group0;
	spl_t			s;

	s = splsched();
	simple_lock(&thread_call_lock);

	if (call->queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->deadline;
		result = TRUE;
	}

	simple_unlock(&thread_call_lock);
	splx(s);

	return (result);
}

#endif	/* __LP64__ */
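/*
 * Usage sketch (illustrative): querying whether a callout is still armed
 * and, if so, when it fires.  "tc" is an invented name.
 *
 *	uint64_t	deadline;
 *
 *	if (thread_call_is_delayed(tc, &deadline)) {
 *		// tc has not expired; deadline is its absolute fire time
 *	}
 */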
/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t	group)
{
	if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
		group->idle_count--; group->active_count++;
	}
	else
	if (!thread_call_daemon_awake) {
		thread_call_daemon_awake = TRUE;
		thread_wakeup_one(&thread_call_daemon_awake);
	}
}
/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.
 */
static void
sched_call_thread(
	int			type,
__unused	thread_t	thread)
{
	thread_call_group_t	group = &thread_call_group0;

	simple_lock(&thread_call_lock);

	switch (type) {

	case SCHED_CALL_BLOCK:
		if (--group->active_count == 0 && group->pending_count > 0)
			thread_call_wake(group);
		break;

	case SCHED_CALL_UNBLOCK:
		group->active_count++;
		break;
	}

	simple_unlock(&thread_call_lock);
}
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	(void) splsched();
	simple_lock(&thread_call_lock);

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;

		call->queue = NULL;

		_internal_call_release(call);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
			func, param0, param1, 0, 0);

		(*func)(param0, param1);

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

		simple_unlock(&thread_call_lock);
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}
/*
 *	thread_call_daemon:
 */
static void
thread_call_daemon_continue(
	thread_call_group_t	group)
{
	kern_return_t	result;
	thread_t	thread;

	(void) splsched();
	simple_lock(&thread_call_lock);

	while (group->active_count == 0 && group->pending_count > 0) {
		group->active_count++;

		simple_unlock(&thread_call_lock);
		(void) spllo();

		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		simple_lock(&thread_call_lock);
	}

	thread_call_daemon_awake = FALSE;
	assert_wait(&thread_call_daemon_awake, THREAD_UNINT);

	simple_unlock(&thread_call_lock);
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}
static void
thread_call_daemon(
	thread_call_group_t	group)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(group);
	/* NOTREACHED */
}
static void
thread_call_delayed_timer(
	timer_call_param_t		p0,
	__unused timer_call_param_t	p1)
{
	thread_call_t		call;
	thread_call_group_t	group = p0;
	boolean_t		new_pending = FALSE;
	uint64_t		timestamp;

	simple_lock(&thread_call_lock);

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->deadline <= timestamp) {
			_pending_call_enqueue(call, group);
			new_pending = TRUE;
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	if (new_pending && group->active_count == 0)
		thread_call_wake(group);

	simple_unlock(&thread_call_lock);
}
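/*
 * Flow sketch (illustrative): when the group timer fires, every entry whose
 * deadline has passed migrates from the delayed queue to the pending queue,
 * the timer is re-armed for the earliest remaining deadline, and an idle
 * call thread (or the daemon) is woken to drain the pending queue.  E.g.
 * with queued deadlines 100 and 200, a timer pop at t=150 moves the t=100
 * entry to pending and re-arms the timer for t=200.
 */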