/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>

#include <sys/kdebug.h>
static zone_t               thread_call_zone;
static struct wait_queue    daemon_wqueue;
struct thread_call_group {
    queue_head_t        pending_queue;
    uint32_t            pending_count;

    queue_head_t        delayed_queue;
    uint32_t            delayed_count;

    timer_call_data_t   delayed_timer;
    timer_call_data_t   dealloc_timer;

    struct wait_queue   idle_wqueue;
    uint32_t            idle_count, active_count;

    integer_t           pri;            /* scheduling priority for this group's worker threads */
    uint32_t            target_thread_count;
    uint64_t            idle_timestamp;

    uint32_t            flags;          /* TCG_* flags */
    sched_call_t        sched_call;
};

typedef struct thread_call_group    *thread_call_group_t;
#define TCG_PARALLEL            0x01
#define TCG_DEALLOC_ACTIVE      0x02

#define THREAD_CALL_GROUP_COUNT         4
#define THREAD_CALL_THREAD_MIN          4
#define INTERNAL_CALL_COUNT             768
#define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * 1000 * 1000)  /* 5 ms */
#define THREAD_CALL_ADD_RATIO           4
#define THREAD_CALL_MACH_FACTOR_CAP     3
static struct thread_call_group thread_call_groups[THREAD_CALL_GROUP_COUNT];
static boolean_t                thread_call_daemon_awake;
static thread_call_data_t       internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t             thread_call_internal_queue;
static uint64_t                 thread_call_dealloc_interval_abs;
static __inline__ thread_call_t _internal_call_allocate(void);
static __inline__ void      _internal_call_release(thread_call_t call);
static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static __inline__ boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void      thread_call_wake(thread_call_group_t group);
static __inline__ void      _set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
static boolean_t            _remove_from_pending_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static boolean_t            _remove_from_delayed_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static void                 thread_call_daemon(void *arg);
static void                 thread_call_thread(thread_call_group_t group, wait_result_t wres);
extern void                 thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
static void                 thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void                 thread_call_group_setup(thread_call_group_t group, thread_call_priority_t pri, uint32_t target_thread_count, boolean_t parallel);
static void                 sched_call_thread(int type, thread_t thread);
static void                 thread_call_start_deallocate_timer(thread_call_group_t group);
static void                 thread_call_wait_locked(thread_call_t call);
#define qe(x)   ((queue_entry_t)(x))
#define TC(x)   ((thread_call_t)(x))
lck_grp_t       thread_call_queues_lck_grp;
lck_grp_t       thread_call_lck_grp;
lck_attr_t      thread_call_lck_attr;
lck_grp_attr_t  thread_call_lck_grp_attr;
#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t       thread_call_lock_data;
#else
lck_spin_t      thread_call_lock_data;
#endif

#define thread_call_lock_spin()     \
    lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock()        \
    lck_mtx_unlock_always(&thread_call_lock_data)
static inline spl_t
disable_ints_and_lock(void)
{
    spl_t s = splsched();

    thread_call_lock_spin();

    return s;
}

static inline void
enable_ints_and_unlock(void)
{
    thread_call_unlock();
    (void)spllo();
}
static inline boolean_t
group_isparallel(thread_call_group_t group)
{
    return ((group->flags & TCG_PARALLEL) != 0);
}
static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
    uint32_t thread_count;

    if (!group_isparallel(group)) {
        if (group->pending_count > 0 && group->active_count == 0) {
            return TRUE;
        }

        return FALSE;
    }

    if (group->pending_count > 0) {
        if (group->idle_count > 0) {
            panic("Pending work, but threads are idle?");
        }

        thread_count = group->active_count;

        /*
         * Add a thread if either there are no threads,
         * the group has fewer than its target number of
         * threads, or the amount of work is large relative
         * to the number of threads.  In the last case, pay attention
         * to the total load on the system, and back off if
         * it is high.
         */
        if ((thread_count == 0) ||
            (thread_count < group->target_thread_count) ||
            ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
             (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
            return TRUE;
        }
    }

    return FALSE;
}
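/*
 * Worked example (illustrative, not part of the original source): for a
 * parallel group already at or above its target_thread_count with 3 active
 * threads, the heuristic above adds a fourth thread only once pending_count
 * exceeds THREAD_CALL_ADD_RATIO * 3 = 12, and even then only while
 * sched_mach_factor is below THREAD_CALL_MACH_FACTOR_CAP (3), so a heavily
 * loaded system backs off rather than spawning more workers.
 */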
static inline integer_t
thread_call_priority_to_sched_pri(thread_call_priority_t pri)
{
    switch (pri) {
    case THREAD_CALL_PRIORITY_HIGH:
        return BASEPRI_PREEMPT;
    case THREAD_CALL_PRIORITY_KERNEL:
        return BASEPRI_KERNEL;
    case THREAD_CALL_PRIORITY_USER:
        return BASEPRI_DEFAULT;
    case THREAD_CALL_PRIORITY_LOW:
        return DEPRESSPRI;
    default:
        panic("Invalid priority.");
    }

    return 0;
}
static inline thread_call_group_t
thread_call_get_group(
        thread_call_t       call)
{
    thread_call_priority_t pri = call->tc_pri;

    assert(pri == THREAD_CALL_PRIORITY_LOW ||
           pri == THREAD_CALL_PRIORITY_USER ||
           pri == THREAD_CALL_PRIORITY_KERNEL ||
           pri == THREAD_CALL_PRIORITY_HIGH);

    return &thread_call_groups[pri];
}
static void
thread_call_group_setup(
        thread_call_group_t     group,
        thread_call_priority_t  pri,
        uint32_t                target_thread_count,
        boolean_t               parallel)
{
    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queue);

    timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
    timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

    wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

    group->target_thread_count = target_thread_count;
    group->pri = thread_call_priority_to_sched_pri(pri);

    group->sched_call = sched_call_thread;
    if (parallel) {
        group->flags |= TCG_PARALLEL;
        group->sched_call = NULL;
    }
}
/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
        thread_call_group_t     group)
{
    thread_t        thread;
    kern_return_t   result;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
    if (result != KERN_SUCCESS) {
        return result;
    }

    if (group->pri < BASEPRI_PREEMPT) {
        /*
         * New style doesn't get to run to completion in
         * kernel if there are higher priority threads
         * available.
         */
        thread_set_eager_preempt(thread);
    }

    thread_deallocate(thread);
    return KERN_SUCCESS;
}
/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t   call;
    kern_return_t   result;
    thread_t        thread;
    int             i;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_attr_setdefault(&thread_call_lck_attr);
    lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
    lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

    nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
    wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

    disable_ints_and_lock();

    queue_init(&thread_call_internal_queue);
    for (call = internal_call_storage;
         call < &internal_call_storage[INTERNAL_CALL_COUNT];
         call++) {
        enqueue_tail(&thread_call_internal_queue, qe(call));
    }

    thread_call_daemon_awake = TRUE;

    enable_ints_and_unlock();

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}
void
thread_call_setup(
    thread_call_t           call,
    thread_call_func_t      func,
    thread_call_param_t     param0)
{
    bzero(call, sizeof(*call));
    call_entry_setup((call_entry_t)call, func, param0);
    call->tc_pri = THREAD_CALL_PRIORITY_HIGH;   /* Default priority */
}
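/*
 * Illustrative sketch (not part of the original source): a client that owns
 * its callout storage can embed a thread_call_data_t and initialize it with
 * thread_call_setup() instead of allocating from the zone.  The names below
 * (example_call_data, example_func, example_init) are hypothetical.
 *
 *	static thread_call_data_t	example_call_data;
 *
 *	void
 *	example_init(void)
 *	{
 *		thread_call_setup(&example_call_data, example_func, NULL);
 *	}
 *
 * Calls set up this way lack THREAD_CALL_ALLOC, so they must not be passed
 * to thread_call_free() or thread_call_cancel_wait(), which assume
 * zone-allocated storage.
 */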
/*
 * _internal_call_allocate:
 *
 * Allocate an internal callout entry.
 *
 * Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(void)
{
    thread_call_t   call;

    if (queue_empty(&thread_call_internal_queue))
        panic("_internal_call_allocate");

    call = TC(dequeue_head(&thread_call_internal_queue));

    return (call);
}
/*
 * _internal_call_release:
 *
 * Release an internal callout entry which
 * is no longer pending (or delayed).
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t       call)
{
    if (call >= internal_call_storage &&
        call < &internal_call_storage[INTERNAL_CALL_COUNT])
        enqueue_head(&thread_call_internal_queue, qe(call));
}
/*
 * _pending_call_enqueue:
 *
 * Place an entry at the end of the
 * pending queue, to be executed soon.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
    thread_call_t       call,
    thread_call_group_t group)
{
    queue_head_t    *old_queue;

    old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

    if (old_queue == NULL) {
        call->tc_submit_count++;
    }

    group->pending_count++;

    thread_call_wake(group);

    return (old_queue != NULL);
}
/*
 * _delayed_call_enqueue:
 *
 * Place an entry on the delayed queue,
 * after existing entries with an earlier
 * (or identical) deadline.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
    thread_call_t       call,
    thread_call_group_t group,
    uint64_t            deadline)
{
    queue_head_t    *old_queue;

    old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);

    if (old_queue == &group->pending_queue)
        group->pending_count--;
    else if (old_queue == NULL)
        call->tc_submit_count++;

    return (old_queue != NULL);
}
/*
 * _call_dequeue:
 *
 * Remove an entry from a queue.
 *
 * Returns TRUE if the entry was on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t       call,
    thread_call_group_t group)
{
    queue_head_t    *old_queue;

    old_queue = call_entry_dequeue(CE(call));

    if (old_queue != NULL) {
        call->tc_finish_count++;
        if (old_queue == &group->pending_queue)
            group->pending_count--;
    }

    return (old_queue != NULL);
}
/*
 * _set_delayed_call_timer:
 *
 * Reset the timer so that it
 * next expires when the entry is due.
 *
 * Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
    thread_call_t       call,
    thread_call_group_t group)
{
    timer_call_enter(&group->delayed_timer, call->tc_call.deadline, 0);
}
/*
 * _remove_from_pending_queue:
 *
 * Remove the first (or all) matching
 * entries from the pending queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
    thread_call_func_t      func,
    thread_call_param_t     param0,
    boolean_t               remove_all)
{
    boolean_t           call_removed = FALSE;
    thread_call_t       call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

    call = TC(queue_first(&group->pending_queue));

    while (!queue_end(&group->pending_queue, qe(call))) {
        if (call->tc_call.func == func &&
                call->tc_call.param0 == param0) {
            thread_call_t next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}
/*
 * _remove_from_delayed_queue:
 *
 * Remove the first (or all) matching
 * entries from the delayed queue.
 *
 * Returns TRUE if any matching entries
 * were found.
 *
 * Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
    thread_call_func_t      func,
    thread_call_param_t     param0,
    boolean_t               remove_all)
{
    boolean_t           call_removed = FALSE;
    thread_call_t       call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_call.func == func &&
                call->tc_call.param0 == param0) {
            thread_call_t next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}
#ifndef __LP64__

/*
 * thread_call_func:
 *
 * Enqueue a function callout.
 *
 * Guarantees { function, argument }
 * uniqueness if unique_call is TRUE.
 */
void
thread_call_func(
    thread_call_func_t      func,
    thread_call_param_t     param,
    boolean_t               unique_call)
{
    thread_call_t       call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
    spl_t               s;

    s = splsched();
    thread_call_lock_spin();

    call = TC(queue_first(&group->pending_queue));

    while (unique_call && !queue_end(&group->pending_queue, qe(call))) {
        if (call->tc_call.func == func && call->tc_call.param0 == param) {
            break;
        }

        call = TC(queue_next(qe(call)));
    }

    if (!unique_call || queue_end(&group->pending_queue, qe(call))) {
        call = _internal_call_allocate();
        call->tc_call.func   = func;
        call->tc_call.param0 = param;
        call->tc_call.param1 = NULL;

        _pending_call_enqueue(call, group);
    }

    thread_call_unlock();
    splx(s);
}

#endif /* __LP64__ */
/*
 * thread_call_func_delayed:
 *
 * Enqueue a function callout to
 * occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t      func,
    thread_call_param_t     param,
    uint64_t                deadline)
{
    thread_call_t       call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];
    spl_t               s;

    s = splsched();
    thread_call_lock_spin();

    call = _internal_call_allocate();
    call->tc_call.func   = func;
    call->tc_call.param0 = param;
    call->tc_call.param1 = 0;

    _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    thread_call_unlock();
    splx(s);
}
/*
 * thread_call_func_cancel:
 *
 * Dequeue a function callout.
 *
 * Removes one (or all) { function, argument }
 * instance(s) from either (or both)
 * the pending and the delayed queue,
 * in that order.
 *
 * Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t      func,
    thread_call_param_t     param,
    boolean_t               cancel_all)
{
    boolean_t   result;
    spl_t       s;

    s = splsched();
    thread_call_lock_spin();

    if (cancel_all)
        result = _remove_from_pending_queue(func, param, cancel_all) |
                 _remove_from_delayed_queue(func, param, cancel_all);
    else
        result = _remove_from_pending_queue(func, param, cancel_all) ||
                 _remove_from_delayed_queue(func, param, cancel_all);

    thread_call_unlock();
    splx(s);

    return (result);
}
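/*
 * Illustrative usage (not part of the original source): the function-based
 * interface keys a later cancel on the same { function, argument } pair used
 * at enqueue time.  example_func and example_arg are hypothetical names.
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	thread_call_func_delayed(example_func, example_arg, deadline);
 *	...
 *	(void) thread_call_func_cancel(example_func, example_arg, FALSE);
 */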
/*
 * Allocate a thread call with a given priority.  Importances
 * other than THREAD_CALL_PRIORITY_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted
 * by higher-priority threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
    thread_call_func_t          func,
    thread_call_param_t         param0,
    thread_call_priority_t      pri)
{
    thread_call_t call;

    if (pri > THREAD_CALL_PRIORITY_LOW) {
        panic("Invalid pri: %d\n", pri);
    }

    call = thread_call_allocate(func, param0);
    call->tc_pri = pri;

    return call;
}
/*
 * thread_call_allocate:
 *
 * Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t      func,
    thread_call_param_t     param0)
{
    thread_call_t call = zalloc(thread_call_zone);

    thread_call_setup(call, func, param0);
    call->tc_refs = 1;
    call->tc_flags = THREAD_CALL_ALLOC;

    return (call);
}
/*
 * thread_call_free:
 *
 * Release a callout.  If the callout is currently
 * executing, it will be freed when all invocations
 * finish.
 */
boolean_t
thread_call_free(
    thread_call_t       call)
{
    spl_t   s;
    int32_t refs;

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != NULL) {
        thread_call_unlock();
        splx(s);

        return (FALSE);
    }

    refs = --call->tc_refs;
    if (refs < 0) {
        panic("Refcount negative: %d\n", refs);
    }

    thread_call_unlock();
    splx(s);

    if (refs == 0) {
        zfree(thread_call_zone, call);
    }

    return (TRUE);
}
/*
 * thread_call_enter:
 *
 * Enqueue a callout entry to occur "soon".
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t       call)
{
    boolean_t           result = TRUE;
    thread_call_group_t group;
    spl_t               s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = 0;

    thread_call_unlock();
    splx(s);

    return (result);
}
boolean_t
thread_call_enter1(
    thread_call_t           call,
    thread_call_param_t     param1)
{
    boolean_t           result = TRUE;
    thread_call_group_t group;
    spl_t               s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = param1;

    thread_call_unlock();
    splx(s);

    return (result);
}
/*
 * thread_call_enter_delayed:
 *
 * Enqueue a callout entry to occur
 * at the stated time.
 *
 * Returns TRUE if the call was
 * already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t       call,
    uint64_t            deadline)
{
    boolean_t           result = TRUE;
    thread_call_group_t group;
    spl_t               s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->tc_call.param1 = 0;

    thread_call_unlock();
    splx(s);

    return (result);
}
boolean_t
thread_call_enter1_delayed(
    thread_call_t           call,
    thread_call_param_t     param1,
    uint64_t                deadline)
{
    boolean_t           result = TRUE;
    thread_call_group_t group;
    spl_t               s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

    call->tc_call.param1 = param1;

    thread_call_unlock();
    splx(s);

    return (result);
}
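/*
 * Note (illustrative, not part of the original source): deadlines passed to
 * the *_delayed variants are absolute times in mach_absolute_time() units;
 * a relative interval is typically converted first, e.g.
 *
 *	uint64_t abstime;
 *
 *	nanoseconds_to_absolutetime(500 * NSEC_PER_MSEC, &abstime);
 *	thread_call_enter_delayed(call, mach_absolute_time() + abstime);
 */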
/*
 * thread_call_cancel:
 *
 * Dequeue a callout entry.
 *
 * Returns TRUE if the call was
 * on a queue.
 */
boolean_t
thread_call_cancel(
    thread_call_t       call)
{
    boolean_t           result;
    thread_call_group_t group;
    spl_t               s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    result = _call_dequeue(call, group);

    thread_call_unlock();
    splx(s);

    return (result);
}
/*
 * Cancel a thread call.  If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish.  Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(
    thread_call_t       call)
{
    boolean_t           result;
    thread_call_group_t group;

    if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
        panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
    }

    group = thread_call_get_group(call);

    (void) splsched();
    thread_call_lock_spin();

    result = _call_dequeue(call, group);
    if (result == FALSE) {
        thread_call_wait_locked(call);
    }

    thread_call_unlock();
    (void) spllo();

    return result;
}
#ifndef __LP64__

/*
 * thread_call_is_delayed:
 *
 * Returns TRUE if the call is
 * currently on a delayed queue.
 *
 * Optionally returns the expiration time.
 */
boolean_t
thread_call_is_delayed(
    thread_call_t       call,
    uint64_t            *deadline)
{
    boolean_t           result = FALSE;
    thread_call_group_t group;
    spl_t               s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue == &group->delayed_queue) {
        if (deadline != NULL)
            *deadline = call->tc_call.deadline;
        result = TRUE;
    }

    thread_call_unlock();
    splx(s);

    return (result);
}

#endif /* __LP64__ */
/*
 * thread_call_wake:
 *
 * Wake a call thread to service
 * pending call entries.  May wake
 * the daemon thread in order to
 * create additional call threads.
 *
 * Called with thread_call_lock held.
 *
 * For high-priority group, only does wakeup/creation if there are no threads
 * running.
 */
static __inline__ void
thread_call_wake(
    thread_call_group_t     group)
{
    /*
     * New behavior: use threads if you've got 'em.
     * Traditional behavior: wake only if no threads running.
     */
    if (group_isparallel(group) || group->active_count == 0) {
        if (wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
            group->idle_count--; group->active_count++;

            if (group->idle_count == 0) {
                timer_call_cancel(&group->dealloc_timer);
                group->flags &= ~TCG_DEALLOC_ACTIVE;
            }
        } else {
            if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
                thread_call_daemon_awake = TRUE;
                wait_queue_wakeup_one(&daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
            }
        }
    }
}
/*
 * sched_call_thread:
 *
 * Call out invoked by the scheduler.  Used only for high-priority
 * thread call group.
 */
static void
sched_call_thread(
    int                     type,
    __unused thread_t       thread)
{
    thread_call_group_t     group;

    group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];     /* XXX */

    thread_call_lock_spin();

    switch (type) {

    case SCHED_CALL_BLOCK:
        --group->active_count;
        if (group->pending_count > 0)
            thread_call_wake(group);
        break;

    case SCHED_CALL_UNBLOCK:
        group->active_count++;
        break;
    }

    thread_call_unlock();
}
/*
 * Interrupts disabled, lock held; returns the same way.
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call)
{
    boolean_t dowake = FALSE;

    call->tc_finish_count++;
    call->tc_refs--;

    if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
        dowake = TRUE;
        call->tc_flags &= ~THREAD_CALL_WAIT;

        /*
         * Dropping lock here because the sched call for the
         * high-pri group can take the big lock from under
         * a thread lock.
         */
        thread_call_unlock();
        thread_wakeup((event_t)call);
        thread_call_lock_spin();
    }

    if (call->tc_refs == 0) {
        if (dowake) {
            panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
        }

        enable_ints_and_unlock();

        zfree(thread_call_zone, call);

        (void)disable_ints_and_lock();
    }
}
/*
 * thread_call_thread:
 */
static void
thread_call_thread(
    thread_call_group_t     group,
    wait_result_t           wres)
{
    thread_t    self = current_thread();
    boolean_t   canwait;

    /*
     * A wakeup with THREAD_INTERRUPTED indicates that
     * we should terminate.
     */
    if (wres == THREAD_INTERRUPTED) {
        thread_terminate(self);

        /* NOTREACHED */
        panic("thread_terminate() returned?");
    }

    (void)disable_ints_and_lock();

    thread_sched_call(self, group->sched_call);

    while (group->pending_count > 0) {
        thread_call_t           call;
        thread_call_func_t      func;
        thread_call_param_t     param0, param1;

        call = TC(dequeue_head(&group->pending_queue));
        group->pending_count--;

        func = call->tc_call.func;
        param0 = call->tc_call.param0;
        param1 = call->tc_call.param1;

        call->tc_call.queue = NULL;

        _internal_call_release(call);

        /*
         * Can only do wakeups for thread calls whose storage
         * we control.
         */
        if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
            canwait = TRUE;
            call->tc_refs++;    /* Delay free until we're done */
        } else
            canwait = FALSE;

        enable_ints_and_unlock();

        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

        (*func)(param0, param1);

        if (get_preemption_level() != 0) {
            int pl = get_preemption_level();
            panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
                    pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
        }

        (void)thread_funnel_set(self->funnel_lock, FALSE);     /* XXX */

        (void) disable_ints_and_lock();

        if (canwait) {
            /* Frees if so desired */
            thread_call_finish(call);
        }
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    if (group_isparallel(group)) {
        /*
         * For new style of thread group, thread always blocks.
         * If we have more than the target number of threads,
         * and this is the first to block, and it isn't active
         * already, set a timer for deallocating a thread if we
         * continue to have a surplus.
         */
        group->idle_count++;

        if (group->idle_count == 1) {
            group->idle_timestamp = mach_absolute_time();
        }

        if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
                ((group->active_count + group->idle_count) > group->target_thread_count)) {
            group->flags |= TCG_DEALLOC_ACTIVE;
            thread_call_start_deallocate_timer(group);
        }

        /* Wait for more work (or termination) */
        wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0);
        if (wres != THREAD_WAITING) {
            panic("kcall worker unable to assert wait?");
        }

        enable_ints_and_unlock();

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
        /* NOTREACHED */
    } else {
        if (group->idle_count < group->target_thread_count) {
            group->idle_count++;

            wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

            enable_ints_and_unlock();

            thread_block_parameter((thread_continue_t)thread_call_thread, group);
            /* NOTREACHED */
        }
    }

    enable_ints_and_unlock();

    thread_terminate(self);
    /* NOTREACHED */
}
/*
 * thread_call_daemon: walk list of groups, allocating
 * threads if appropriate (as determined by
 * thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
    int                 i;
    kern_return_t       kr;
    thread_call_group_t group;

    (void)disable_ints_and_lock();

    /* Starting at zero happens to be high-priority first. */
    for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
        group = &thread_call_groups[i];
        while (thread_call_group_should_add_thread(group)) {
            group->active_count++;

            enable_ints_and_unlock();

            kr = thread_call_thread_create(group);
            if (kr != KERN_SUCCESS) {
                /*
                 * On failure, just pause for a moment and give up.
                 * We can try again later.
                 */
                delay(10000); /* 10 ms */
                (void)disable_ints_and_lock();
                goto out;
            }

            (void)disable_ints_and_lock();
        }
    }

out:
    thread_call_daemon_awake = FALSE;
    wait_queue_assert_wait(&daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

    enable_ints_and_unlock();

    thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
    /* NOTREACHED */
}
static void
thread_call_daemon(
    __unused void   *arg)
{
    thread_t self = current_thread();

    self->options |= TH_OPT_VMPRIV;
    vm_page_free_reserve(2);    /* XXX */

    thread_call_daemon_continue(NULL);
    /* NOTREACHED */
}
/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(
    thread_call_group_t group)
{
    uint64_t    deadline;
    boolean_t   onqueue;

    assert(group->idle_count > 0);

    group->flags |= TCG_DEALLOC_ACTIVE;
    deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
    onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);

    if (onqueue) {
        panic("Deallocate timer already active?");
    }
}
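/*
 * Illustrative timeline (not part of the original source): with
 * THREAD_CALL_DEALLOC_INTERVAL_NS at 5 ms, a group whose idle_timestamp is T
 * arms the dealloc timer for T + 5 ms; when it fires,
 * thread_call_dealloc_timer() terminates at most one idle thread, and only
 * if the group still has an idle thread and that interval has fully elapsed.
 */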
void
thread_call_delayed_timer(
    timer_call_param_t              p0,
    __unused timer_call_param_t     p1)
{
    thread_call_t       call;
    thread_call_group_t group = p0;
    uint64_t            timestamp;

    thread_call_lock_spin();

    timestamp = mach_absolute_time();

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_call.deadline <= timestamp) {
            _pending_call_enqueue(call, group);
        }
        else
            break;

        call = TC(queue_first(&group->delayed_queue));
    }

    if (!queue_end(&group->delayed_queue, qe(call)))
        _set_delayed_call_timer(call, group);

    thread_call_unlock();
}
/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
    timer_call_param_t              p0,
    __unused timer_call_param_t     p1)
{
    thread_call_group_t group = (thread_call_group_t)p0;
    uint64_t            now;
    kern_return_t       res;
    boolean_t           terminated = FALSE;

    thread_call_lock_spin();

    now = mach_absolute_time();
    if (group->idle_count > 0) {
        if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
            terminated = TRUE;
            group->idle_count--;
            res = wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTED, -1);
            if (res != KERN_SUCCESS) {
                panic("Unable to wake up idle thread for termination?");
            }
        }
    }

    /*
     * If we still have an excess of threads, schedule another
     * invocation of this function.
     */
    if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
        /*
         * If we killed someone just now, push out the
         * next deadline.
         */
        if (terminated) {
            group->idle_timestamp = now;
        }

        thread_call_start_deallocate_timer(group);
    } else {
        group->flags &= ~TCG_DEALLOC_ACTIVE;
    }

    thread_call_unlock();
}
/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish.  Can only be invoked on thread calls whose storage we manage.
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
    uint64_t        submit_count;
    wait_result_t   res;

    assert(call->tc_flags & THREAD_CALL_ALLOC);

    submit_count = call->tc_submit_count;

    while (call->tc_finish_count < submit_count) {
        call->tc_flags |= THREAD_CALL_WAIT;

        res = assert_wait(call, THREAD_UNINT);
        if (res != THREAD_WAITING) {
            panic("Unable to assert wait?");
        }

        thread_call_unlock();
        (void) spllo();

        res = thread_block(NULL);
        if (res != THREAD_AWAKENED) {
            panic("Awoken with %d?", res);
        }

        (void) splsched();
        thread_call_lock_spin();
    }
}
/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
    boolean_t active;

    disable_ints_and_lock();
    active = (call->tc_submit_count > call->tc_finish_count);
    enable_ints_and_unlock();

    return active;
}