/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>

#include <kern/timer_call.h>

#include <sys/kdebug.h>
decl_simple_lock_data(static,thread_call_lock)

static zone_t           thread_call_zone;

struct thread_call_group {
    queue_head_t        pending_queue;
    uint32_t            pending_count;

    queue_head_t        delayed_queue;

    timer_call_data_t   delayed_timer;

    struct wait_queue   idle_wqueue;
    struct wait_queue   daemon_wqueue;
    uint32_t            idle_count, active_count;
};

typedef struct thread_call_group    *thread_call_group_t;

static struct thread_call_group     thread_call_group0;

static boolean_t        thread_call_daemon_awake;

#define thread_call_thread_min  4

#define internal_call_count     768

static thread_call_data_t   internal_call_storage[internal_call_count];
static queue_head_t         thread_call_internal_queue;
static __inline__ thread_call_t _internal_call_allocate(void);

static __inline__ void      _internal_call_release(
                                thread_call_t           call);

static __inline__ boolean_t _pending_call_enqueue(
                                thread_call_t           call,
                                thread_call_group_t     group),
                            _delayed_call_enqueue(
                                thread_call_t           call,
                                thread_call_group_t     group,
                                uint64_t                deadline),
                            _call_dequeue(
                                thread_call_t           call,
                                thread_call_group_t     group);

static __inline__ void      thread_call_wake(
                                thread_call_group_t     group);

static __inline__ void      _set_delayed_call_timer(
                                thread_call_t           call,
                                thread_call_group_t     group);

static boolean_t            _remove_from_pending_queue(
                                thread_call_func_t      func,
                                thread_call_param_t     param0,
                                boolean_t               remove_all),
                            _remove_from_delayed_queue(
                                thread_call_func_t      func,
                                thread_call_param_t     param0,
                                boolean_t               remove_all);

static void                 thread_call_daemon(
                                thread_call_group_t     group),
                            thread_call_thread(
                                thread_call_group_t     group);

static void                 thread_call_delayed_timer(
                                timer_call_param_t      p0,
                                timer_call_param_t      p1);
#define qe(x)   ((queue_entry_t)(x))
#define TC(x)   ((thread_call_t)(x))
/*
 *  thread_call_initialize:
 *
 *  Initialize this module, called
 *  early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;
    kern_return_t           result;
    thread_t                thread;
    int                     i;
    spl_t                   s;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    simple_lock_init(&thread_call_lock, 0);

    s = splsched();
    simple_lock(&thread_call_lock);

    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queue);

    timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

    wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
    wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

    /* Thread the static internal callout entries onto the free list. */
    queue_init(&thread_call_internal_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[internal_call_count];
            call++) {

        enqueue_tail(&thread_call_internal_queue, qe(call));
    }

    thread_call_daemon_awake = TRUE;

    simple_unlock(&thread_call_lock);
    splx(s);

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}
void
thread_call_setup(
    thread_call_t           call,
    thread_call_func_t      func,
    thread_call_param_t     param0)
{
    call_entry_setup(call, func, param0);
}
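/*
 * Illustrative usage sketch (not part of the original file): a client may
 * embed a thread_call_data_t in its own state and bind it once with
 * thread_call_setup(), avoiding zone allocation.  The names my_driver,
 * my_drain and my_driver_init are hypothetical.
 */
#if 0
struct my_driver {
    thread_call_data_t      drain_call;     /* callout storage owned by the client */
};

static void
my_drain(thread_call_param_t param0, __unused thread_call_param_t param1)
{
    struct my_driver    *drv = (struct my_driver *)param0;

    /* ... deferred work on behalf of drv ... */
}

static void
my_driver_init(struct my_driver *drv)
{
    /* Bind { func, param0 } once; the entry can then be entered repeatedly. */
    thread_call_setup(&drv->drain_call, my_drain, drv);
}
#endif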
/*
 *  _internal_call_allocate:
 *
 *  Allocate an internal callout entry.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
    thread_call_t       call;

    if (queue_empty(&thread_call_internal_queue))
        panic("_internal_call_allocate");

    call = TC(dequeue_head(&thread_call_internal_queue));

    return (call);
}
/*
 *  _internal_call_release:
 *
 *  Release an internal callout entry which
 *  is no longer pending (or delayed).
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t       call)
{
    /* Only entries from the static pool go back on the internal queue. */
    if (call >= internal_call_storage &&
        call < &internal_call_storage[internal_call_count])
        enqueue_head(&thread_call_internal_queue, qe(call));
}
/*
 *  _pending_call_enqueue:
 *
 *  Place an entry at the end of the
 *  pending queue, to be executed soon.
 *
 *  Returns TRUE if the entry was already
 *  on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
    thread_call_t           call,
    thread_call_group_t     group)
{
    queue_t     old_queue;

    old_queue = call_entry_enqueue_tail(call, &group->pending_queue);

    group->pending_count++;

    return (old_queue != NULL);
}
/*
 *  _delayed_call_enqueue:
 *
 *  Place an entry on the delayed queue,
 *  after existing entries with an earlier
 *  (or identical) deadline.
 *
 *  Returns TRUE if the entry was already
 *  on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
    thread_call_t           call,
    thread_call_group_t     group,
    uint64_t                deadline)
{
    queue_t     old_queue;

    old_queue = call_entry_enqueue_deadline(call, &group->delayed_queue, deadline);

    if (old_queue == &group->pending_queue)
        group->pending_count--;

    return (old_queue != NULL);
}
/*
 *  _call_dequeue:
 *
 *  Remove an entry from a queue.
 *
 *  Returns TRUE if the entry was on a queue.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t           call,
    thread_call_group_t     group)
{
    queue_t     old_queue;

    old_queue = call_entry_dequeue(call);

    if (old_queue == &group->pending_queue)
        group->pending_count--;

    return (old_queue != NULL);
}
/*
 *  _set_delayed_call_timer:
 *
 *  Reset the timer so that it
 *  next expires when the entry is due.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
    thread_call_t           call,
    thread_call_group_t     group)
{
    timer_call_enter(&group->delayed_timer, call->deadline);
}
/*
 *  _remove_from_pending_queue:
 *
 *  Remove the first (or all) matching
 *  entries from the pending queue.
 *
 *  Returns TRUE if any matching entries
 *  were found.
 *
 *  Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
    thread_call_func_t      func,
    thread_call_param_t     param0,
    boolean_t               remove_all)
{
    boolean_t               call_removed = FALSE;
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;

    call = TC(queue_first(&group->pending_queue));

    while (!queue_end(&group->pending_queue, qe(call))) {
        if (call->func == func &&
            call->param0 == param0) {
            thread_call_t   next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}
/*
 *  _remove_from_delayed_queue:
 *
 *  Remove the first (or all) matching
 *  entries from the delayed queue.
 *
 *  Returns TRUE if any matching entries
 *  were found.
 *
 *  Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
    thread_call_func_t      func,
    thread_call_param_t     param0,
    boolean_t               remove_all)
{
    boolean_t               call_removed = FALSE;
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->func == func &&
            call->param0 == param0) {
            thread_call_t   next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}
#ifndef __LP64__

/*
 *  thread_call_func:
 *
 *  Enqueue a function callout.
 *
 *  Guarantees { function, argument }
 *  uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t      func,
    thread_call_param_t     param,
    boolean_t               unique_call)
{
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    call = TC(queue_first(&group->pending_queue));

    while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
        if (call->func == func &&
            call->param0 == param) {
            break;
        }

        call = TC(queue_next(qe(call)));
    }

    if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
        call = _internal_call_allocate();
        call->func = func;
        call->param0 = param;
        call->param1 = 0;

        _pending_call_enqueue(call, group);

        if (group->active_count == 0)
            thread_call_wake(group);
    }

    simple_unlock(&thread_call_lock);
    splx(s);
}

#endif /* __LP64__ */
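/*
 * Illustrative usage sketch (hypothetical caller): with unique_call == TRUE,
 * thread_call_func() coalesces duplicate { function, argument } requests, so
 * repeated kicks produce at most one pending callout.
 */
#if 0
static void
my_kick(thread_call_param_t param0, __unused thread_call_param_t param1)
{
    /* ... runs later on a callout thread ... */
}

static void
my_request_service(void *arg)
{
    /* A second enqueue of the same { my_kick, arg } pair while the
     * first is still pending is a no-op. */
    thread_call_func(my_kick, arg, TRUE);
}
#endif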
/*
 *  thread_call_func_delayed:
 *
 *  Enqueue a function callout to
 *  occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t      func,
    thread_call_param_t     param,
    uint64_t                deadline)
{
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    call = _internal_call_allocate();
    call->func = func;
    call->param0 = param;
    call->param1 = 0;

    _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    simple_unlock(&thread_call_lock);
    splx(s);
}
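/*
 * Illustrative usage sketch: deadlines are absolute times, so callers
 * typically derive them with clock_interval_to_deadline().  my_timeout and
 * my_arm_timeout are hypothetical names.
 */
#if 0
static void
my_timeout(thread_call_param_t param0, __unused thread_call_param_t param1)
{
    /* ... timeout processing ... */
}

static void
my_arm_timeout(void *arg)
{
    uint64_t    deadline;

    /* Fire roughly 100 ms from now. */
    clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
    thread_call_func_delayed(my_timeout, arg, deadline);
}
#endif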
/*
 *  thread_call_func_cancel:
 *
 *  Dequeue a function callout.
 *
 *  Removes one (or all) { function, argument }
 *  instance(s) from either (or both)
 *  the pending and the delayed queue,
 *  in that order.
 *
 *  Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t      func,
    thread_call_param_t     param,
    boolean_t               cancel_all)
{
    boolean_t       result;
    spl_t           s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (cancel_all)
        result = _remove_from_pending_queue(func, param, cancel_all) |
                 _remove_from_delayed_queue(func, param, cancel_all);
    else
        result = _remove_from_pending_queue(func, param, cancel_all) ||
                 _remove_from_delayed_queue(func, param, cancel_all);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
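/*
 * Illustrative usage sketch: cancellation is keyed by the { function,
 * argument } pair; cancel_all == TRUE removes every matching instance from
 * both queues, FALSE removes at most one.  Continues the hypothetical
 * my_timeout example above.
 */
#if 0
static void
my_disarm_timeout(void *arg)
{
    (void) thread_call_func_cancel(my_timeout, arg, TRUE);
}
#endif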
/*
 *  thread_call_allocate:
 *
 *  Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t      func,
    thread_call_param_t     param0)
{
    thread_call_t   call = zalloc(thread_call_zone);

    call_entry_setup(call, func, param0);

    return (call);
}
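/*
 * Illustrative usage sketch of the entry-based lifecycle: allocate once,
 * enter as often as needed, cancel before freeing.  thread_call_free()
 * returns FALSE while the entry is still queued, hence the cancel first.
 * my_call, my_start and my_stop are hypothetical; my_timeout is the
 * hypothetical handler from the sketch above.
 */
#if 0
static thread_call_t    my_call;

static void
my_start(void *ctx)
{
    my_call = thread_call_allocate(my_timeout, ctx);
    thread_call_enter(my_call);
}

static void
my_stop(void)
{
    thread_call_cancel(my_call);
    (void) thread_call_free(my_call);
}
#endif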
/*
 *  thread_call_free:
 *
 *  Free a callout entry.
 */
boolean_t
thread_call_free(
    thread_call_t       call)
{
    spl_t   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    /* Refuse to free an entry that is still on a queue. */
    if (call->queue != NULL) {
        simple_unlock(&thread_call_lock);
        splx(s);

        return (FALSE);
    }

    simple_unlock(&thread_call_lock);
    splx(s);

    zfree(thread_call_zone, call);

    return (TRUE);
}
/*
 *  thread_call_enter:
 *
 *  Enqueue a callout entry to occur "soon".
 *
 *  Returns TRUE if the call was
 *  already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t       call)
{
    boolean_t               result = TRUE;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);

        if (group->active_count == 0)
            thread_call_wake(group);
    }

    call->param1 = 0;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
boolean_t
thread_call_enter1(
    thread_call_t           call,
    thread_call_param_t     param1)
{
    boolean_t               result = TRUE;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);

        if (group->active_count == 0)
            thread_call_wake(group);
    }

    call->param1 = param1;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
/*
 *  thread_call_enter_delayed:
 *
 *  Enqueue a callout entry to occur
 *  at the stated time.
 *
 *  Returns TRUE if the call was
 *  already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t       call,
    uint64_t            deadline)
{
    boolean_t               result = TRUE;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    result = _delayed_call_enqueue(call, group, deadline);

    /* Rearm the hardware timer if this entry is now the soonest. */
    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->param1 = 0;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
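/*
 * Illustrative usage sketch: a self-rearming periodic callout built on
 * thread_call_enter_delayed(); the handler reschedules its own entry.
 * my_tick_call and my_tick are hypothetical.
 */
#if 0
static thread_call_t    my_tick_call;

static void
my_tick(thread_call_param_t param0, __unused thread_call_param_t param1)
{
    uint64_t    deadline;

    /* ... periodic work ... */

    /* Rearm for roughly one second from now. */
    clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
    thread_call_enter_delayed(my_tick_call, deadline);
}
#endif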
boolean_t
thread_call_enter1_delayed(
    thread_call_t           call,
    thread_call_param_t     param1,
    uint64_t                deadline)
{
    boolean_t               result = TRUE;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->param1 = param1;

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
/*
 *  thread_call_cancel:
 *
 *  Dequeue a callout entry.
 *
 *  Returns TRUE if the call was
 *  on a queue.
 */
boolean_t
thread_call_cancel(
    thread_call_t       call)
{
    boolean_t               result;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    result = _call_dequeue(call, group);

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}
#ifndef __LP64__

/*
 *  thread_call_is_delayed:
 *
 *  Returns TRUE if the call is
 *  currently on a delayed queue.
 *
 *  Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
    thread_call_t       call,
    uint64_t            *deadline)
{
    boolean_t               result = FALSE;
    thread_call_group_t     group = &thread_call_group0;
    spl_t                   s;

    s = splsched();
    simple_lock(&thread_call_lock);

    if (call->queue == &group->delayed_queue) {
        if (deadline != NULL)
            *deadline = call->deadline;
        result = TRUE;
    }

    simple_unlock(&thread_call_lock);
    splx(s);

    return (result);
}

#endif /* __LP64__ */
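/*
 * Illustrative usage sketch: querying a delayed call, e.g. to pull in the
 * deadline only when the new one is earlier.  my_maybe_expedite is a
 * hypothetical helper.
 */
#if 0
static void
my_maybe_expedite(thread_call_t call, uint64_t new_deadline)
{
    uint64_t    current;

    if (!thread_call_is_delayed(call, &current) || new_deadline < current)
        thread_call_enter_delayed(call, new_deadline);
}
#endif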
/*
 *  thread_call_wake:
 *
 *  Wake a call thread to service
 *  pending call entries.  May wake
 *  the daemon thread in order to
 *  create additional call threads.
 *
 *  Called with thread_call_lock held.
 */
static __inline__ void
thread_call_wake(
    thread_call_group_t     group)
{
    if (group->idle_count > 0 && wait_queue_wakeup_one(&group->idle_wqueue, NULL, THREAD_AWAKENED) == KERN_SUCCESS) {
        group->idle_count--; group->active_count++;
    }
    else
    if (!thread_call_daemon_awake) {
        thread_call_daemon_awake = TRUE;
        wait_queue_wakeup_one(&group->daemon_wqueue, NULL, THREAD_AWAKENED);
    }
}
/*
 *  sched_call_thread:
 *
 *  Call out invoked by the scheduler.
 */
static void
sched_call_thread(
    int                 type,
    __unused thread_t   thread)
{
    thread_call_group_t     group = &thread_call_group0;

    simple_lock(&thread_call_lock);

    switch (type) {

    case SCHED_CALL_BLOCK:
        if (--group->active_count == 0 && group->pending_count > 0)
            thread_call_wake(group);
        break;

    case SCHED_CALL_UNBLOCK:
        group->active_count++;
        break;
    }

    simple_unlock(&thread_call_lock);
}
/*
 *  thread_call_thread:
 */
static void
thread_call_thread(
    thread_call_group_t     group)
{
    thread_t    self = current_thread();

    (void) splsched();
    simple_lock(&thread_call_lock);

    thread_sched_call(self, sched_call_thread);

    while (group->pending_count > 0) {
        thread_call_t           call;
        thread_call_func_t      func;
        thread_call_param_t     param0, param1;

        call = TC(dequeue_head(&group->pending_queue));
        group->pending_count--;

        func = call->func;
        param0 = call->param0;
        param1 = call->param1;

        call->queue = NULL;

        _internal_call_release(call);

        simple_unlock(&thread_call_lock);
        (void) spllo();

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
                func, param0, param1, 0, 0);

        (*func)(param0, param1);

        (void)thread_funnel_set(self->funnel_lock, FALSE);      /* XXX */

        (void) splsched();
        simple_lock(&thread_call_lock);
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    /* Park as an idle callout thread, or terminate if enough are idle. */
    if (group->idle_count < thread_call_thread_min) {
        group->idle_count++;

        wait_queue_assert_wait(&group->idle_wqueue, NULL, THREAD_UNINT, 0);

        simple_unlock(&thread_call_lock);
        (void) spllo();

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
        /* NOTREACHED */
    }

    simple_unlock(&thread_call_lock);
    (void) spllo();

    thread_terminate(self);
    /* NOTREACHED */
}
/*
 *  thread_call_daemon:
 */
static void
thread_call_daemon_continue(
    thread_call_group_t     group)
{
    kern_return_t   result;
    thread_t        thread;

    (void) splsched();
    simple_lock(&thread_call_lock);

    while (group->active_count == 0 && group->pending_count > 0) {
        group->active_count++;

        simple_unlock(&thread_call_lock);
        (void) spllo();

        result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
        if (result != KERN_SUCCESS)
            panic("thread_call_daemon");

        thread_deallocate(thread);

        (void) splsched();
        simple_lock(&thread_call_lock);
    }

    thread_call_daemon_awake = FALSE;
    wait_queue_assert_wait(&group->daemon_wqueue, NULL, THREAD_UNINT, 0);

    simple_unlock(&thread_call_lock);
    (void) spllo();

    thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
    /* NOTREACHED */
}
static void
thread_call_daemon(
    thread_call_group_t     group)
{
    thread_t    self = current_thread();

    self->options |= TH_OPT_VMPRIV;
    vm_page_free_reserve(2);    /* XXX */

    thread_call_daemon_continue(group);
    /* NOTREACHED */
}
static void
thread_call_delayed_timer(
    timer_call_param_t              p0,
    __unused timer_call_param_t     p1)
{
    thread_call_t           call;
    thread_call_group_t     group = p0;
    boolean_t               new_pending = FALSE;
    uint64_t                timestamp;

    simple_lock(&thread_call_lock);

    timestamp = mach_absolute_time();

    /* Move every expired entry onto the pending queue. */
    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->deadline <= timestamp) {
            _pending_call_enqueue(call, group);
            new_pending = TRUE;
        }
        else
            break;

        call = TC(queue_first(&group->delayed_queue));
    }

    if (!queue_end(&group->delayed_queue, qe(call)))
        _set_delayed_call_timer(call, group);

    if (new_pending && group->active_count == 0)
        thread_call_wake(group);

    simple_unlock(&thread_call_lock);
}