/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>

#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif
static zone_t			thread_call_zone;
static struct wait_queue	daemon_wqueue;
struct thread_call_group {
	queue_head_t		pending_queue;
	uint32_t		pending_count;

	queue_head_t		delayed_queue;
	uint32_t		delayed_count;

	timer_call_data_t	delayed_timer;
	timer_call_data_t	dealloc_timer;

	struct wait_queue	idle_wqueue;
	uint32_t		idle_count, active_count;

	integer_t		pri;
	uint32_t		target_thread_count;
	uint64_t		idle_timestamp;

	uint32_t		flags;
	sched_call_t		sched_call;
};

typedef struct thread_call_group	*thread_call_group_t;
#define TCG_PARALLEL		0x01
#define TCG_DEALLOC_ACTIVE	0x02

#define THREAD_CALL_GROUP_COUNT		4
#define THREAD_CALL_THREAD_MIN		4
#define INTERNAL_CALL_COUNT		768
#define THREAD_CALL_DEALLOC_INTERVAL_NS	(5 * 1000 * 1000) /* 5 ms */
#define THREAD_CALL_ADD_RATIO		4
#define THREAD_CALL_MACH_FACTOR_CAP	3
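
/*
 * Rough worked example of how these tunables interact (illustrative
 * numbers only): a parallel group already running 3 active threads asks
 * the daemon for another one only when pending_count exceeds
 * THREAD_CALL_ADD_RATIO * 3 = 12 entries (or the group is still below its
 * target thread count), and even then only while sched_mach_factor stays
 * under THREAD_CALL_MACH_FACTOR_CAP.  Going the other way, a surplus
 * thread is considered for termination only after the group has kept at
 * least one thread idle for a full THREAD_CALL_DEALLOC_INTERVAL_NS (5 ms)
 * window.
 */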
static struct thread_call_group	thread_call_groups[THREAD_CALL_GROUP_COUNT];
static boolean_t		thread_call_daemon_awake;
static thread_call_data_t	internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t		thread_call_internal_queue;
static uint64_t			thread_call_dealloc_interval_abs;
static __inline__ thread_call_t	_internal_call_allocate(void);
static __inline__ void		_internal_call_release(thread_call_t call);
static __inline__ boolean_t	_pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static __inline__ boolean_t	_delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
static __inline__ boolean_t	_call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void		thread_call_wake(thread_call_group_t group);
static __inline__ void		_set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
static boolean_t		_remove_from_pending_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static boolean_t		_remove_from_delayed_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static void			thread_call_daemon(void *arg);
static void			thread_call_thread(thread_call_group_t group, wait_result_t wres);
extern void			thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
static void			thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void			thread_call_group_setup(thread_call_group_t group, thread_call_priority_t pri, uint32_t target_thread_count, boolean_t parallel);
static void			sched_call_thread(int type, thread_t thread);
static void			thread_call_start_deallocate_timer(thread_call_group_t group);
static void			thread_call_wait_locked(thread_call_t call);
#define qe(x)		((queue_entry_t)(x))
#define TC(x)		((thread_call_t)(x))

lck_grp_t		thread_call_queues_lck_grp;
lck_grp_t		thread_call_lck_grp;
lck_attr_t		thread_call_lck_attr;
lck_grp_attr_t		thread_call_lck_grp_attr;
#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t		thread_call_lock_data;
#else
lck_spin_t		thread_call_lock_data;
#endif

#define thread_call_lock_spin()			\
	lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock()			\
	lck_mtx_unlock_always(&thread_call_lock_data)
static inline spl_t
disable_ints_and_lock(void)
{
	spl_t s;

	s = splsched();
	thread_call_lock_spin();

	return s;
}

static inline void
enable_ints_and_unlock(void)
{
	thread_call_unlock();
	(void)spllo();
}
static inline boolean_t
group_isparallel(thread_call_group_t group)
{
	return ((group->flags & TCG_PARALLEL) != 0);
}
static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
	uint32_t thread_count;

	if (!group_isparallel(group)) {
		if (group->pending_count > 0 && group->active_count == 0) {
			return TRUE;
		}

		return FALSE;
	}

	if (group->pending_count > 0) {
		if (group->idle_count > 0) {
			panic("Pending work, but threads are idle?");
		}

		thread_count = group->active_count;

		/*
		 * Add a thread if either there are no threads,
		 * the group has fewer than its target number of
		 * threads, or the amount of work is large relative
		 * to the number of threads.  In the last case, pay attention
		 * to the total load on the system, and back off if
		 * it's high.
		 */
		if ((thread_count == 0) ||
			(thread_count < group->target_thread_count) ||
			((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
			 (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
			return TRUE;
		}
	}

	return FALSE;
}
static inline integer_t
thread_call_priority_to_sched_pri(thread_call_priority_t pri)
{
	switch (pri) {
	case THREAD_CALL_PRIORITY_HIGH:
		return BASEPRI_PREEMPT;
	case THREAD_CALL_PRIORITY_KERNEL:
		return BASEPRI_KERNEL;
	case THREAD_CALL_PRIORITY_USER:
		return BASEPRI_DEFAULT;
	case THREAD_CALL_PRIORITY_LOW:
		return MAXPRI_THROTTLE;
	default:
		panic("Invalid priority.");
	}

	return 0;
}
static inline thread_call_group_t
thread_call_get_group(
		thread_call_t call)
{
	thread_call_priority_t	pri = call->tc_pri;

	assert(pri == THREAD_CALL_PRIORITY_LOW ||
			pri == THREAD_CALL_PRIORITY_USER ||
			pri == THREAD_CALL_PRIORITY_KERNEL ||
			pri == THREAD_CALL_PRIORITY_HIGH);

	return &thread_call_groups[pri];
}
static void
thread_call_group_setup(
		thread_call_group_t		group,
		thread_call_priority_t		pri,
		uint32_t			target_thread_count,
		boolean_t			parallel)
{
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
	timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

	group->target_thread_count = target_thread_count;
	group->pri = thread_call_priority_to_sched_pri(pri);

	group->sched_call = sched_call_thread;
	if (parallel) {
		group->flags |= TCG_PARALLEL;
		group->sched_call = NULL;
	}
}
/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
		thread_call_group_t		group)
{
	thread_t	thread;
	kern_return_t	result;

	result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	if (group->pri < BASEPRI_PREEMPT) {
		/*
		 * New style doesn't get to run to completion in
		 * kernel if there are higher priority threads
		 * available.
		 */
		thread_set_eager_preempt(thread);
	}

	thread_deallocate(thread);
	return KERN_SUCCESS;
}
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	kern_return_t			result;
	thread_t			thread;
	int				i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[INTERNAL_CALL_COUNT];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
void
thread_call_setup(
	thread_call_t			call,
	thread_call_func_t		func,
	thread_call_param_t		param0)
{
	bzero(call, sizeof(*call));
	call_entry_setup((call_entry_t)call, func, param0);
	call->tc_pri = THREAD_CALL_PRIORITY_HIGH; /* Default priority */
}
/*
 *	_internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
	thread_call_t		call;

	if (queue_empty(&thread_call_internal_queue))
		panic("_internal_call_allocate");

	call = TC(dequeue_head(&thread_call_internal_queue));

	return (call);
}
/*
 *	_internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed).
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
	thread_call_t		call)
{
	if (call >= internal_call_storage &&
			call < &internal_call_storage[INTERNAL_CALL_COUNT])
		enqueue_head(&thread_call_internal_queue, qe(call));
}
/*
 *	_pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

	if (old_queue == NULL) {
		call->tc_submit_count++;
	}

	group->pending_count++;

	thread_call_wake(group);

	return (old_queue != NULL);
}
/*
 *	_delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);

	if (old_queue == &group->pending_queue)
		group->pending_count--;
	else if (old_queue == NULL)
		call->tc_submit_count++;

	return (old_queue != NULL);
}
/*
 *	_call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t		*old_queue;

	old_queue = call_entry_dequeue(CE(call));

	if (old_queue != NULL) {
		call->tc_finish_count++;
		if (old_queue == &group->pending_queue)
			group->pending_count--;
	}

	return (old_queue != NULL);
}
/*
 *	_set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
	thread_call_t		call,
	thread_call_group_t	group)
{
	timer_call_enter(&group->delayed_timer, call->tc_call.deadline, 0);
}
/*
 *	_remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t			remove_all)
{
	boolean_t			call_removed = FALSE;
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

	call = TC(queue_first(&group->pending_queue));

	while (!queue_end(&group->pending_queue, qe(call))) {
		if (call->tc_call.func == func &&
				call->tc_call.param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
/*
 *	_remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
	thread_call_func_t		func,
	thread_call_param_t		param0,
	boolean_t			remove_all)
{
	boolean_t			call_removed = FALSE;
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_call.func == func &&
				call->tc_call.param0 == param0) {
			thread_call_t	next = TC(queue_next(qe(call)));

			_call_dequeue(call, group);

			_internal_call_release(call);

			call_removed = TRUE;
			if (!remove_all)
				break;

			call = next;
		}
		else
			call = TC(queue_next(qe(call)));
	}

	return (call_removed);
}
#ifndef	__LP64__

/*
 *	thread_call_func:
 *
 *	Enqueue a function callout.
 *
 *	Guarantees { function, argument }
 *	uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
	thread_call_func_t		func,
	thread_call_param_t		param,
	boolean_t			unique_call)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = TC(queue_first(&group->pending_queue));

	while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
		if (call->tc_call.func == func && call->tc_call.param0 == param) {
			break;
		}

		call = TC(queue_next(qe(call)));
	}

	if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
		call = _internal_call_allocate();
		call->tc_call.func	= func;
		call->tc_call.param0	= param;
		call->tc_call.param1	= NULL;

		_pending_call_enqueue(call, group);
	}

	thread_call_unlock();
	splx(s);
}

#endif	/* __LP64__ */
/*
 *	thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
		thread_call_func_t		func,
		thread_call_param_t		param,
		uint64_t			deadline)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
	spl_t			s;

	s = splsched();
	thread_call_lock_spin();

	call = _internal_call_allocate();
	call->tc_call.func	= func;
	call->tc_call.param0	= param;
	call->tc_call.param1	= 0;

	_delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
	splx(s);
}
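
/*
 * Usage sketch for the function-based interface (illustrative only;
 * my_flush_func and my_arg are hypothetical):  arm a one-shot callout
 * 100 ms from now.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	thread_call_func_delayed(my_flush_func, my_arg, deadline);
 */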
/*
 *	thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
		thread_call_func_t		func,
		thread_call_param_t		param,
		boolean_t			cancel_all)
{
	boolean_t	result;
	spl_t		s;

	s = splsched();
	thread_call_lock_spin();

	if (cancel_all)
		result = _remove_from_pending_queue(func, param, cancel_all) |
			_remove_from_delayed_queue(func, param, cancel_all);
	else
		result = _remove_from_pending_queue(func, param, cancel_all) ||
			_remove_from_delayed_queue(func, param, cancel_all);

	thread_call_unlock();
	splx(s);

	return (result);
}
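
/*
 * Cancellation sketch (illustrative; same hypothetical callout as above):
 * remove every queued instance of { my_flush_func, my_arg }.
 *
 *	if (thread_call_func_cancel(my_flush_func, my_arg, TRUE))
 *		... at least one pending or delayed instance was removed ...
 *
 * Note that this only dequeues entries; an instance that is already
 * executing is not interrupted.
 */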
/*
 * Allocate a thread call with a given priority.  Importances
 * other than THREAD_CALL_PRIORITY_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted
 * by higher-priority threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
		thread_call_func_t		func,
		thread_call_param_t		param0,
		thread_call_priority_t		pri)
{
	thread_call_t call;

	if (pri > THREAD_CALL_PRIORITY_LOW) {
		panic("Invalid pri: %d\n", pri);
	}

	call = thread_call_allocate(func, param0);
	call->tc_pri = pri;

	return call;
}
/*
 *	thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
		thread_call_func_t		func,
		thread_call_param_t		param0)
{
	thread_call_t	call = zalloc(thread_call_zone);

	thread_call_setup(call, func, param0);
	call->tc_refs = 1;
	call->tc_flags = THREAD_CALL_ALLOC;

	return (call);
}
/*
 *	thread_call_free:
 *
 *	Release a callout.  If the callout is currently
 *	executing, it will be freed when all invocations
 *	finish.
 */
boolean_t
thread_call_free(
		thread_call_t		call)
{
	spl_t	s;
	int32_t	refs;

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != NULL) {
		thread_call_unlock();
		splx(s);

		return (FALSE);
	}

	refs = --call->tc_refs;
	if (refs < 0) {
		panic("Refcount negative: %d\n", refs);
	}

	thread_call_unlock();
	splx(s);

	if (refs == 0) {
		zfree(thread_call_zone, call);
	}

	return (TRUE);
}
/*
 *	thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
		thread_call_t		call)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);
	}

	call->tc_call.param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}
boolean_t
thread_call_enter1(
		thread_call_t			call,
		thread_call_param_t		param1)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue != &group->pending_queue) {
		result = _pending_call_enqueue(call, group);
	}

	call->tc_call.param1 = param1;

	thread_call_unlock();
	splx(s);

	return (result);
}
/*
 *	thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
		thread_call_t		call,
		uint64_t		deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->tc_call.param1 = 0;

	thread_call_unlock();
	splx(s);

	return (result);
}
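
/*
 * Deadline sketch (illustrative): deadlines are absolute-time values, so
 * callers typically derive them from "now", e.g.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(30, NSEC_PER_SEC, &deadline);
 *	if (thread_call_enter_delayed(tc, deadline))
 *		... the call was already queued and has been re-armed ...
 */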
boolean_t
thread_call_enter1_delayed(
		thread_call_t			call,
		thread_call_param_t		param1,
		uint64_t			deadline)
{
	boolean_t		result = TRUE;
	thread_call_group_t	group;
	spl_t			s;
	uint64_t		abstime;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();
	abstime = mach_absolute_time();

	result = _delayed_call_enqueue(call, group, deadline);

	if (queue_first(&group->delayed_queue) == qe(call))
		_set_delayed_call_timer(call, group);

	call->tc_call.param1 = param1;

	call->ttd = (deadline > abstime) ? (deadline - abstime) : 0;
#if CONFIG_DTRACE
	DTRACE_TMR4(thread_callout__create, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	thread_call_unlock();
	splx(s);

	return (result);
}
/*
 *	thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
		thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);

	thread_call_unlock();
	splx(s);

#if CONFIG_DTRACE
	DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	return (result);
}
/*
 * Cancel a thread call.  If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish.  Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(
		thread_call_t		call)
{
	boolean_t		result;
	thread_call_group_t	group;

	if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
		panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
	}

	group = thread_call_get_group(call);

	(void) splsched();
	thread_call_lock_spin();

	result = _call_dequeue(call, group);
	if (result == FALSE) {
		thread_call_wait_locked(call);
	}

	thread_call_unlock();
	(void) spllo();

	return result;
}
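
/*
 * Teardown sketch (illustrative): because thread_call_cancel_wait() can
 * block, it must be called from a sleepable context, and only on calls
 * obtained from thread_call_allocate().  A typical teardown sequence is:
 *
 *	thread_call_cancel_wait(tc);	// no earlier invocation survives this
 *	thread_call_free(tc);
 *
 * If other threads may still re-submit tc, stop that re-submission first,
 * otherwise the call can be back on a queue by the time
 * thread_call_free() runs.
 */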
#ifndef	__LP64__

/*
 *	thread_call_is_delayed:
 *
 *	Returns TRUE if the call is
 *	currently on a delayed queue.
 *
 *	Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
	thread_call_t		call,
	uint64_t		*deadline)
{
	boolean_t		result = FALSE;
	thread_call_group_t	group;
	spl_t			s;

	group = thread_call_get_group(call);

	s = splsched();
	thread_call_lock_spin();

	if (call->tc_call.queue == &group->delayed_queue) {
		if (deadline != NULL)
			*deadline = call->tc_call.deadline;
		result = TRUE;
	}

	thread_call_unlock();
	splx(s);

	return (result);
}

#endif	/* __LP64__ */
/*
 *	thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 *
 *	For high-priority group, only does wakeup/creation if there are no threads
 *	running.
 */
static __inline__ void
thread_call_wake(
	thread_call_group_t		group)
{
	/*
	 * New behavior: use threads if you've got 'em.
	 * Traditional behavior: wake only if no threads running.
	 */
	if (group_isparallel(group) || group->active_count == 0) {
		if (wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
			group->idle_count--; group->active_count++;

			if (group->idle_count == 0) {
				timer_call_cancel(&group->dealloc_timer);
				group->flags &= ~TCG_DEALLOC_ACTIVE;
			}
		} else {
			if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
				thread_call_daemon_awake = TRUE;
				wait_queue_wakeup_one(&daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
			}
		}
	}
}
/*
 *	sched_call_thread:
 *
 *	Call out invoked by the scheduler.  Used only for high-priority
 *	thread call group.
 */
static void
sched_call_thread(
		int			type,
		__unused thread_t	thread)
{
	thread_call_group_t		group;

	group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH]; /* XXX */

	thread_call_lock_spin();

	switch (type) {

		case SCHED_CALL_BLOCK:
			--group->active_count;
			if (group->pending_count > 0)
				thread_call_wake(group);
			break;

		case SCHED_CALL_UNBLOCK:
			group->active_count++;
			break;
	}

	thread_call_unlock();
}
/*
 * Interrupts disabled, lock held; returns the same way.
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call)
{
	boolean_t dowake = FALSE;

	call->tc_finish_count++;
	call->tc_refs--;

	if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
		dowake = TRUE;
		call->tc_flags &= ~THREAD_CALL_WAIT;

		/*
		 * Dropping lock here because the sched call for the
		 * high-pri group can take the big lock from under
		 * a thread lock.
		 */
		thread_call_unlock();
		thread_wakeup((event_t)call);
		thread_call_lock_spin();
	}

	if (call->tc_refs == 0) {
		if (dowake) {
			panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
		}

		enable_ints_and_unlock();

		zfree(thread_call_zone, call);

		(void)disable_ints_and_lock();
	}
}
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
		thread_call_group_t		group,
		wait_result_t			wres)
{
	thread_t	self = current_thread();
	boolean_t	canwait;

	if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0)
		(void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

	(void)disable_ints_and_lock();

	thread_sched_call(self, group->sched_call);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_refs++;	/* Delay free until we're done */
		} else
			canwait = FALSE;

		enable_ints_and_unlock();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);	/* XXX */

		(void) disable_ints_and_lock();

		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group_isparallel(group)) {
		/*
		 * For new style of thread group, thread always blocks.
		 * If we have more than the target number of threads,
		 * and this is the first to block, and it isn't active
		 * already, set a timer for deallocating a thread if we
		 * continue to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
				((group->active_count + group->idle_count) > group->target_thread_count)) {
			group->flags |= TCG_DEALLOC_ACTIVE;
			thread_call_start_deallocate_timer(group);
		}

		/* Wait for more work (or termination) */
		wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0);
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}

		enable_ints_and_unlock();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock();

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock();

	thread_terminate(self);
	/* NOTREACHED */
}
/*
 *	thread_call_daemon: walk list of groups, allocating
 *	threads if appropriate (as determined by
 *	thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
	int			i;
	kern_return_t		kr;
	thread_call_group_t	group;

	(void)disable_ints_and_lock();

	/* Starting at zero happens to be high-priority first. */
	for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
		group = &thread_call_groups[i];
		while (thread_call_group_should_add_thread(group)) {
			group->active_count++;

			enable_ints_and_unlock();

			kr = thread_call_thread_create(group);
			if (kr != KERN_SUCCESS) {
				/*
				 * On failure, just pause for a moment and give up.
				 * We can try again later.
				 */
				delay(10000); /* 10 ms */
				(void)disable_ints_and_lock();
				goto out;
			}

			(void)disable_ints_and_lock();
		}
	}

out:
	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

	enable_ints_and_unlock();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
	/* NOTREACHED */
}

static void
thread_call_daemon(
		__unused void	*arg)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;
	vm_page_free_reserve(2);	/* XXX */

	thread_call_daemon_continue(NULL);
	/* NOTREACHED */
}
/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(
		thread_call_group_t group)
{
	uint64_t deadline;
	boolean_t onqueue;

	assert(group->idle_count > 0);

	group->flags |= TCG_DEALLOC_ACTIVE;
	deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
	onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);

	if (onqueue) {
		panic("Deallocate timer already active?");
	}
}
void
thread_call_delayed_timer(
		timer_call_param_t		p0,
		__unused timer_call_param_t	p1)
{
	thread_call_t			call;
	thread_call_group_t		group = p0;
	uint64_t			timestamp;

	thread_call_lock_spin();

	timestamp = mach_absolute_time();

	call = TC(queue_first(&group->delayed_queue));

	while (!queue_end(&group->delayed_queue, qe(call))) {
		if (call->tc_call.deadline <= timestamp) {
			_pending_call_enqueue(call, group);
		}
		else
			break;

		call = TC(queue_first(&group->delayed_queue));
	}

	if (!queue_end(&group->delayed_queue, qe(call)))
		_set_delayed_call_timer(call, group);

	thread_call_unlock();
}
/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
		timer_call_param_t		p0,
		__unused timer_call_param_t	p1)
{
	thread_call_group_t group = (thread_call_group_t)p0;
	uint64_t now;
	kern_return_t res;
	boolean_t terminated = FALSE;

	thread_call_lock_spin();

	now = mach_absolute_time();
	if (group->idle_count > 0) {
		if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
			terminated = TRUE;
			group->idle_count--;
			res = wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTED, -1);
			if (res != KERN_SUCCESS) {
				panic("Unable to wake up idle thread for termination?");
			}
		}
	}

	/*
	 * If we still have an excess of threads, schedule another
	 * invocation of this function.
	 */
	if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
		/*
		 * If we killed someone just now, push out the
		 * next deadline.
		 */
		if (terminated) {
			group->idle_timestamp = now;
		}

		thread_call_start_deallocate_timer(group);
	} else {
		group->flags &= ~TCG_DEALLOC_ACTIVE;
	}

	thread_call_unlock();
}
/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish.  Can only be invoked on thread calls whose storage we manage.
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 *
 * Called with thread_call_lock held.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
	uint64_t	submit_count;
	wait_result_t	res;

	assert(call->tc_flags & THREAD_CALL_ALLOC);

	submit_count = call->tc_submit_count;

	while (call->tc_finish_count < submit_count) {
		call->tc_flags |= THREAD_CALL_WAIT;

		res = assert_wait(call, THREAD_UNINT);
		if (res != THREAD_WAITING) {
			panic("Unable to assert wait?");
		}

		thread_call_unlock();
		(void) spllo();

		res = thread_block(NULL);
		if (res != THREAD_AWAKENED) {
			panic("Awoken with %d?", res);
		}

		(void) splsched();
		thread_call_lock_spin();
	}
}
/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
	boolean_t active;

	disable_ints_and_lock();
	active = (call->tc_submit_count > call->tc_finish_count);
	enable_ints_and_unlock();

	return active;
}