/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  Author: Avadis Tevanian, Jr.
 *
 *  Scheduling primitives
 */
#include <simple_clock.h>

#include <ddb/db_output.h>
#include <mach/machine.h>
#include <machine/machine_routines.h>
#include <machine/sched_param.h>

#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/etap_macros.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <kern/mk_sp.h>     /*** ??? fix so this can be removed ***/
#include <sys/kdebug.h>
#define     DEFAULT_PREEMPTION_RATE     100     /* (1/s) */
int         default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define     MAX_UNSAFE_QUANTA           800
int         max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define     MAX_POLL_QUANTA             2
int         max_poll_quanta = MAX_POLL_QUANTA;

#define     SCHED_POLL_YIELD_SHIFT      4       /* 1/16 */
int         sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint32_t    std_quantum_us;

uint64_t    max_unsafe_computation;
uint32_t    sched_safe_duration;
uint64_t    max_poll_computation;

uint32_t    std_quantum;
uint32_t    min_std_quantum;

uint32_t    max_rt_quantum;
uint32_t    min_rt_quantum;

static uint32_t     sched_tick_interval;
#endif  /* SIMPLE_CLOCK */

void            wait_queues_init(void);

static thread_t choose_thread(
                    processor_set_t     pset,
                    processor_t         processor);

static void     do_thread_scan(void);

boolean_t       thread_runnable(
                    thread_t            thread);

/*
 *  States are combinations of:
 *    W   waiting (or on wait queue)
 *    N   non-interruptible
 *
 *        assert_wait   thread_block    clear_wait      swapout swapin
 *
 *    R   RW, RWN       R;   setrun     -               -
 *    RN  RWN           RN;  setrun     -               -
 */

/*
 *  Waiting protocols and implementation:
 *
 *  Each thread may be waiting for exactly one event; this event
 *  is set using assert_wait().  That thread may be awakened either
 *  by performing a thread_wakeup_prim() on its event,
 *  or by directly waking that thread up with clear_wait().
 *
 *  The implementation of wait events uses a hash table.  Each
 *  bucket is a queue of threads having the same hash function
 *  value; the chain for the queue (linked list) is the run queue
 *  field.  [It is not possible to be waiting and runnable at the
 *  same time.]
 *
 *  Locks on both the thread and on the hash buckets govern the
 *  wait event field and the queue chain field.  Because wakeup
 *  operations only have the event as an argument, the event hash
 *  bucket must be locked before any thread.
 *
 *  Scheduling operations may also occur at interrupt level; therefore,
 *  interrupts below splsched() must be prevented when holding
 *  thread or hash bucket locks.
 *
 *  The wait event hash table declarations are as follows:
 */

struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
    ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
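/*
 * Illustrative note (values hypothetical): an event is typically just the
 * address of the object being waited on, so wait_hash() folds a pointer
 * into a bucket index.  Unrelated events can share a bucket, which is why
 * wakeup operations match threads by their recorded wait event rather than
 * by bucket membership alone.
 */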
    /*
     *  Calculate the timeslicing quantum.
     */
    if (default_preemption_rate < 1)
        default_preemption_rate = DEFAULT_PREEMPTION_RATE;
    std_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard timeslicing quantum is %d us\n", std_quantum_us);

    sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) *
                                            (1 << SCHED_TICK_SHIFT);

    pset_sys_bootstrap();       /* initialize processor mgmt. */
#endif  /* SIMPLE_CLOCK */
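/*
 * Worked example (using the defaults above): with default_preemption_rate
 * of 100 preemptions per second, std_quantum_us is 1000000 / 100 = 10000 us,
 * i.e. a 10 ms standard timeslice, and sched_safe_duration evaluates to
 * (2 * 800 / 100) * (1 << SCHED_TICK_SHIFT) = 16 << SCHED_TICK_SHIFT
 * scheduler ticks.
 */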
void
sched_timebase_init(void)
{
    uint64_t    abstime;

    /* standard timeslicing quantum */
    clock_interval_to_absolutetime_interval(
                            std_quantum_us, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    std_quantum = abstime;

    /* smallest remaining quantum (250 us) */
    clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_std_quantum = abstime;

    /* smallest realtime computation (50 us) */
    clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    min_rt_quantum = abstime;

    /* maximum realtime computation (50 ms) */
    clock_interval_to_absolutetime_interval(
                            50, 1000*NSEC_PER_USEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    max_rt_quantum = abstime;

    /* scheduler tick interval */
    clock_interval_to_absolutetime_interval(1000 >> SCHED_TICK_SHIFT,
                                                    USEC_PER_SEC, &abstime);
    assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
    sched_tick_interval = abstime;

    max_unsafe_computation = max_unsafe_quanta * std_quantum;
    max_poll_computation = max_poll_quanta * std_quantum;
}
void
wait_queues_init(void)
{
    int     i;

    for (i = 0; i < NUMQUEUES; i++) {
        wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
    }
}
/*
 *  Thread wait timer expiration.
 */
void
thread_timer_expire(
    timer_call_param_t      p0,
    timer_call_param_t      p1)
{
    thread_t        thread = p0;

    if (--thread->wait_timer_active == 1) {
        if (thread->wait_timer_is_set) {
            thread->wait_timer_is_set = FALSE;
            clear_wait_internal(thread, THREAD_TIMED_OUT);
        }
    }
    thread_unlock(thread);
}
/*
 *  Set a timer for the current thread, if the thread
 *  is ready to wait.  Must be called between assert_wait()
 *  and thread_block().
 */
void
thread_set_timer(
    uint32_t        interval,
    uint32_t        scale_factor)
{
    thread_t        thread = current_thread();
    uint64_t        deadline;

    if ((thread->state & TH_WAIT) != 0) {
        clock_interval_to_deadline(interval, scale_factor, &deadline);
        timer_call_enter(&thread->wait_timer, deadline);
        assert(!thread->wait_timer_is_set);
        thread->wait_timer_active++;
        thread->wait_timer_is_set = TRUE;
    }
    thread_unlock(thread);
}
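/*
 * Typical usage of the wait-timer interfaces above (illustrative only):
 *
 *  assert_wait(event, THREAD_UNINT);
 *  thread_set_timer(10, 1000*NSEC_PER_USEC);       -- 10 ms timeout
 *  wr = thread_block(THREAD_CONTINUE_NULL);
 *  if (wr != THREAD_TIMED_OUT)
 *      thread_cancel_timer();
 *
 * This mirrors the pattern used by thread_sleep_mutex_deadline() below.
 */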
void
thread_set_timer_deadline(
    uint64_t        deadline)
{
    thread_t        thread = current_thread();

    if ((thread->state & TH_WAIT) != 0) {
        timer_call_enter(&thread->wait_timer, deadline);
        assert(!thread->wait_timer_is_set);
        thread->wait_timer_active++;
        thread->wait_timer_is_set = TRUE;
    }
    thread_unlock(thread);
}
void
thread_cancel_timer(void)
{
    thread_t        thread = current_thread();

    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
        thread->wait_timer_is_set = FALSE;
    }
    thread_unlock(thread);
}
/*
 *  Set up thread timeout element when thread is created.
 */
    extern void     thread_depress_expire(
                        timer_call_param_t  p0,
                        timer_call_param_t  p1);

    timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
    thread->wait_timer_is_set = FALSE;
    thread->wait_timer_active = 1;

    timer_call_setup(&thread->depress_timer, thread_depress_expire, thread);
    thread->depress_timer_active = 1;
void
thread_timer_terminate(void)
{
    thread_t        thread = current_thread();

    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
        thread->wait_timer_is_set = FALSE;
    }

    thread->wait_timer_active--;

    while (thread->wait_timer_active > 0) {
        thread_unlock(thread);
    }

    thread->depress_timer_active--;

    while (thread->depress_timer_active > 0) {
        thread_unlock(thread);
    }

    thread_unlock(thread);

    thread_deallocate(thread);
}
/*
 *  Routine:    thread_go_locked
 *
 *      Start a thread running.
 *
 *      thread lock held, IPC locks may be held.
 *      thread must have been pulled from wait queue under same lock hold.
 *
 *      KERN_SUCCESS        - Thread was set running
 *      KERN_NOT_WAITING    - Thread was not waiting
 */
kern_return_t
thread_go_locked(
    thread_t        thread,
    wait_result_t   wresult)
{
    assert(thread->at_safe_point == FALSE);
    assert(thread->wait_event == NO_EVENT64);
    assert(thread->wait_queue == WAIT_QUEUE_NULL);

    if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
        thread_roust_t      roust_hint;

        thread->state &= ~(TH_WAIT|TH_UNINT);
        _mk_sp_thread_unblock(thread);

        roust_hint = thread->roust;
        thread->roust = NULL;
        if (    roust_hint != NULL              &&
                (*roust_hint)(thread, wresult)      ) {
            if (thread->wait_timer_is_set) {
                if (timer_call_cancel(&thread->wait_timer))
                    thread->wait_timer_active--;
                thread->wait_timer_is_set = FALSE;
            }

            return (KERN_SUCCESS);
        }

        thread->wait_result = wresult;

        if (!(thread->state & TH_RUN)) {
            thread->state |= TH_RUN;

            if (thread->active_callout)
                call_thread_unblock();

            pset_run_incr(thread->processor_set);
            if (thread->sched_mode & TH_MODE_TIMESHARE)
                pset_share_incr(thread->processor_set);

            thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
        }

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
                    (int)thread, (int)thread->sched_pri, 0, 0, 0);

        return (KERN_SUCCESS);
    }

    return (KERN_NOT_WAITING);
}
/*
 *  Routine:    thread_mark_wait_locked
 *
 *      Mark a thread as waiting.  If, given the circumstances,
 *      it doesn't want to wait (i.e. already aborted), then
 *      indicate that in the return value.
 *
 *      at splsched() and thread is locked.
 */
__private_extern__ wait_result_t
thread_mark_wait_locked(
    thread_t            thread,
    wait_interrupt_t    interruptible)
{
    boolean_t       at_safe_point;

    /*
     *  The thread may have certain types of interrupts/aborts masked
     *  off.  Even if the wait location says these types of interrupts
     *  are OK, we have to honor mask settings (outer-scoped code may
     *  not be able to handle aborts at the moment).
     */
    if (interruptible > thread->interrupt_level)
        interruptible = thread->interrupt_level;

    at_safe_point = (interruptible == THREAD_ABORTSAFE);

    if (    interruptible == THREAD_UNINT           ||
            !(thread->state & TH_ABORT)             ||
            (!at_safe_point &&
             (thread->state & TH_ABORT_SAFELY))     ) {
        thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
        thread->at_safe_point = at_safe_point;
        thread->sleep_stamp = sched_tick;
        return (thread->wait_result = THREAD_WAITING);
    }

    if (thread->state & TH_ABORT_SAFELY)
        thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);

    return (thread->wait_result = THREAD_INTERRUPTED);
}
/*
 *  Routine:    thread_interrupt_level
 *
 *      Set the maximum interruptible state for the
 *      current thread.  The effective value of any
 *      interruptible flag passed into assert_wait
 *      will never exceed this.
 *
 *      Useful for code that must not be interrupted,
 *      but which calls code that doesn't know that.
 *
 *      Returns the old interrupt level for the thread.
 */
wait_interrupt_t
thread_interrupt_level(
    wait_interrupt_t    new_level)
{
    thread_t            thread = current_thread();
    wait_interrupt_t    result = thread->interrupt_level;

    thread->interrupt_level = new_level;

    return result;
}
/*
 *  Routine:    assert_wait_timeout
 *
 *      Assert that the thread intends to block,
 *      waiting for a timeout (no user known event).
 */
unsigned int assert_wait_timeout_event;

wait_result_t
assert_wait_timeout(
    mach_msg_timeout_t      msecs,
    wait_interrupt_t        interruptible)
{
    wait_result_t   res;

    res = assert_wait((event_t)&assert_wait_timeout_event, interruptible);
    if (res == THREAD_WAITING)
        thread_set_timer(msecs, 1000*NSEC_PER_USEC);

    return res;
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this condition).
 */
boolean_t
assert_wait_possible(void)
{
    thread_t    thread;

    extern unsigned int debug_mode;
    if (debug_mode) return TRUE;    /* Always succeed in debug mode */

    thread = current_thread();

    return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *  assert_wait:
 *
 *  Assert that the current thread is about to go to
 *  sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
    event_t             event,
    wait_interrupt_t    interruptible)
{
    register wait_queue_t   wq;
    register int            index;

    assert(event != NO_EVENT);

    index = wait_hash(event);
    wq = &wait_queues[index];
    return wait_queue_assert_wait(wq, event, interruptible);
}
wait_queue_t
wait_event_wait_queue(
    event_t         event)
{
    assert(event != NO_EVENT);

    return (&wait_queues[wait_hash(event)]);
}
    thread_roust_t      roust_hint,
    wait_interrupt_t    interruptible)
{
    thread_t        thread = current_thread();
    wait_result_t   wresult;
    wait_queue_t    wq;

    assert(event != NO_EVENT);

    wq = &wait_queues[wait_hash(event)];

    wresult = wait_queue_assert_wait64_locked(wq, (uint32_t)event,
                                                interruptible, thread);
    if (wresult == THREAD_WAITING) {
        if (roust_hint != NULL)
            thread->roust = roust_hint;

        timer_call_enter(&thread->wait_timer, deadline);
        assert(!thread->wait_timer_is_set);
        thread->wait_timer_active++;
        thread->wait_timer_is_set = TRUE;
    }
    thread_unlock(thread);
    wait_queue_unlock(wq);
/*
 * thread_sleep_fast_usimple_lock:
 *
 *  Cause the current thread to wait until the specified event
 *  occurs.  The specified simple_lock is unlocked before releasing
 *  the cpu and re-acquired as part of waking up.
 *
 *  This is the simple lock sleep interface for components that use a
 *  faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
    event_t             event,
    simple_lock_t       lock,
    wait_interrupt_t    interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        simple_unlock(lock);
        res = thread_block(THREAD_CONTINUE_NULL);
        simple_lock(lock);
    }
    return res;
}
/*
 * thread_sleep_usimple_lock:
 *
 *  Cause the current thread to wait until the specified event
 *  occurs.  The specified usimple_lock is unlocked before releasing
 *  the cpu and re-acquired as part of waking up.
 *
 *  This is the simple lock sleep interface for components where
 *  simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
    event_t             event,
    usimple_lock_t      lock,
    wait_interrupt_t    interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        usimple_unlock(lock);
        res = thread_block(THREAD_CONTINUE_NULL);
        usimple_lock(lock);
    }
    return res;
}
/*
 * thread_sleep_mutex:
 *
 *  Cause the current thread to wait until the specified event
 *  occurs.  The specified mutex is unlocked before releasing
 *  the cpu.  The mutex will be re-acquired before returning.
 *
 *  JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex(
    event_t             event,
    mutex_t             *mutex,
    wait_interrupt_t    interruptible)
{
    wait_result_t   res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        mutex_unlock(mutex);
        res = thread_block(THREAD_CONTINUE_NULL);
        mutex_lock(mutex);
    }
    return res;
}
/*
 * thread_sleep_mutex_deadline:
 *
 *  Cause the current thread to wait until the specified event
 *  (or deadline) occurs.  The specified mutex is unlocked before
 *  releasing the cpu.  The mutex will be re-acquired before returning.
 *
 *  JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex_deadline(
    event_t             event,
    mutex_t             *mutex,
    uint64_t            deadline,
    wait_interrupt_t    interruptible)
{
    wait_result_t   res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        mutex_unlock(mutex);
        thread_set_timer_deadline(deadline);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (res != THREAD_TIMED_OUT)
            thread_cancel_timer();
        mutex_lock(mutex);
    }
    return res;
}
/*
 * thread_sleep_lock_write:
 *
 *  Cause the current thread to wait until the specified event
 *  occurs.  The specified (write) lock is unlocked before releasing
 *  the cpu.  The (write) lock will be re-acquired before returning.
 *
 *  JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_lock_write(
    event_t             event,
    lock_t              *lock,
    wait_interrupt_t    interruptible)
{
    wait_result_t   res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lock_write_done(lock);
        res = thread_block(THREAD_CONTINUE_NULL);
        lock_write(lock);
    }
    return res;
}
/*
 * thread_sleep_funnel:
 *
 *  Cause the current thread to wait until the specified event
 *  occurs.  If the thread is funnelled, the funnel will be released
 *  before giving up the cpu.  The funnel will be re-acquired before returning.
 *
 *  JMM - Right now the funnel is dropped and re-acquired inside
 *        thread_block().  At some point, this may give thread_block() a hint.
 */
wait_result_t
thread_sleep_funnel(
    event_t             event,
    wait_interrupt_t    interruptible)
{
    wait_result_t   res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        res = thread_block(THREAD_CONTINUE_NULL);
    }
    return res;
}
/*
 *  thread_[un]stop(thread)
 *  Once a thread has blocked interruptibly (via assert_wait) prevent
 *  it from running until thread_unstop.
 *
 *  If someone else has already stopped the thread, wait for the
 *  stop to be cleared, and then stop it again.
 *
 *  Return FALSE if interrupted.
 *
 *  NOTE: thread_hold/thread_suspend should be called on the activation
 *  before calling thread_stop.  TH_SUSP is only recognized when
 *  a thread blocks and only prevents clear_wait/thread_wakeup
 *  from restarting an interruptible wait.  The wake_active flag is
 *  used to indicate that someone is waiting on the thread.
 */
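/*
 * Illustrative sequence (a sketch of the protocol described above, not code
 * from this file): a controlling thread typically does
 *
 *  thread_hold(act);       -- keep the activation from running user code
 *  thread_stop(thread);    -- set TH_SUSP, wait for TH_RUN to clear
 *  ... examine or manipulate the stopped thread ...
 *  thread_unstop(thread);  -- clear TH_SUSP, resume an interruptible wait
 */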
boolean_t
thread_stop(
    thread_t        thread)
{
    spl_t       s = splsched();

    while (thread->state & TH_SUSP) {
        wait_result_t   result;

        thread->wake_active = TRUE;
        result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);

        if (result == THREAD_WAITING)
            result = thread_block(THREAD_CONTINUE_NULL);

        if (result != THREAD_AWAKENED)
            return (FALSE);
    }

    thread->state |= TH_SUSP;

    while (thread->state & TH_RUN) {
        wait_result_t   result;
        processor_t     processor = thread->last_processor;

        if (    processor != PROCESSOR_NULL             &&
                processor->state == PROCESSOR_RUNNING   &&
                processor->active_thread == thread      )
            cause_ast_check(processor);
        thread_unlock(thread);

        thread->wake_active = TRUE;
        result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);

        if (result == THREAD_WAITING)
            result = thread_block(THREAD_CONTINUE_NULL);

        if (result != THREAD_AWAKENED) {
            thread_unstop(thread);
            return (FALSE);
        }
    }

    thread_unlock(thread);

    return (TRUE);
}
/*
 *  Clear TH_SUSP and if the thread has been stopped and is now runnable,
 *  put it back on the run queue.
 */
void
thread_unstop(
    thread_t        thread)
{
    spl_t       s = splsched();

    if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
        thread->state &= ~TH_SUSP;
        thread->state |= TH_RUN;

        _mk_sp_thread_unblock(thread);

        pset_run_incr(thread->processor_set);
        if (thread->sched_mode & TH_MODE_TIMESHARE)
            pset_share_incr(thread->processor_set);

        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
                    (int)thread, (int)thread->sched_pri, 0, 0, 0);
    }
    else
    if (thread->state & TH_SUSP) {
        thread->state &= ~TH_SUSP;

        if (thread->wake_active) {
            thread->wake_active = FALSE;
            thread_unlock(thread);

            thread_wakeup(&thread->wake_active);
            return;
        }
    }

    thread_unlock(thread);
}
/*
 *  Wait for the thread's RUN bit to clear.
 */
    spl_t       s = splsched();

    while (thread->state & TH_RUN) {
        wait_result_t   result;
        processor_t     processor = thread->last_processor;

        if (    processor != PROCESSOR_NULL             &&
                processor->state == PROCESSOR_RUNNING   &&
                processor->active_thread == thread      )
            cause_ast_check(processor);
        thread_unlock(thread);

        thread->wake_active = TRUE;
        result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);

        if (result == THREAD_WAITING)
            result = thread_block(THREAD_CONTINUE_NULL);

        if (result != THREAD_AWAKENED)
            return (FALSE);

        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
/*
 *  Routine:    clear_wait_internal
 *
 *      Clear the wait condition for the specified thread.
 *      Start the thread executing if that is appropriate.
 *
 *      thread      thread to awaken
 *      result      Wakeup result the thread should see
 *
 *      the thread is locked.
 *
 *      KERN_SUCCESS        thread was rousted out a wait
 *      KERN_FAILURE        thread was waiting but could not be rousted
 *      KERN_NOT_WAITING    thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
    thread_t        thread,
    wait_result_t   wresult)
{
    wait_queue_t    wq = thread->wait_queue;
    int             i = LockTimeOut;

    do {
        if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
            return (KERN_FAILURE);

        if (wq != WAIT_QUEUE_NULL) {
            if (wait_queue_lock_try(wq)) {
                wait_queue_pull_thread_locked(wq, thread, TRUE);
                /* wait queue unlocked, thread still locked */
            }
            else {
                thread_unlock(thread);

                thread_lock(thread);
                if (wq != thread->wait_queue)
                    return (KERN_NOT_WAITING);

                continue;
            }
        }

        return (thread_go_locked(thread, wresult));
    } while (--i > 0);

    panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n",
            thread, wq, cpu_number());

    return (KERN_FAILURE);
}
/*
 *  clear_wait:
 *
 *  Clear the wait condition for the specified thread.  Start the thread
 *  executing if that is appropriate.
 *
 *      thread      thread to awaken
 *      result      Wakeup result the thread should see
 */
kern_return_t
clear_wait(
    thread_t        thread,
    wait_result_t   result)
{
    kern_return_t   ret;

    thread_lock(thread);
    ret = clear_wait_internal(thread, result);
    thread_unlock(thread);

    return ret;
}
/*
 *  thread_wakeup_prim:
 *
 *  Common routine for thread_wakeup, thread_wakeup_with_result,
 *  and thread_wakeup_one.
 */
kern_return_t
thread_wakeup_prim(
    event_t         event,
    boolean_t       one_thread,
    wait_result_t   result)
{
    register wait_queue_t   wq;
    register int            index;

    index = wait_hash(event);
    wq = &wait_queues[index];
    if (one_thread)
        return (wait_queue_wakeup_one(wq, event, result));
    else
        return (wait_queue_wakeup_all(wq, event, result));
}
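/*
 * Illustrative note: callers normally use the wrappers rather than this
 * primitive -- thread_wakeup(event) wakes every thread waiting on the event,
 * while thread_wakeup_one(event) releases a single waiter.  (The wrappers
 * are macros defined elsewhere; see the #undef thread_wakeup near the end
 * of this file.)
 */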
/*
 *  thread_bind:
 *
 *  Force a thread to execute on the specified processor.
 *
 *  Returns the previous binding.  PROCESSOR_NULL means
 *  not bound.
 *
 *  XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
    register thread_t   thread,
    processor_t         processor)
{
    processor_t     prev;
    run_queue_t     runq = RUN_QUEUE_NULL;

    thread_lock(thread);
    prev = thread->bound_processor;
    if (prev != PROCESSOR_NULL)
        runq = run_queue_remove(thread);

    thread->bound_processor = processor;

    if (runq != RUN_QUEUE_NULL)
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
    thread_unlock(thread);

    return (prev);
}
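/*
 * Illustrative usage (hypothetical caller, not from this file): kernel code
 * that must run on a particular CPU typically binds itself, blocks so the
 * binding takes effect, does its work, then restores the previous binding:
 *
 *  prev = thread_bind(current_thread(), chosen_processor);
 *  thread_block(THREAD_CONTINUE_NULL);
 *  ...
 *  thread_bind(current_thread(), prev);
 */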
/* fields of the dispatch_counts statistics structure (see uses below) */
    uint32_t    idle_pset_last,
    uint32_t    realtime_self,
    uint32_t    missed_realtime,
/*
 *  thread_select:
 *
 *  Select a thread for the current processor to run.
 *
 *  May select the current thread, which must be locked.
 */
static thread_t
thread_select(
    register processor_t    processor)
{
    register thread_t       thread;
    processor_set_t         pset;
    boolean_t               other_runnable;

    /*
     *  Check for other non-idle runnable threads.
     */
    pset = processor->processor_set;
    thread = processor->active_thread;

    /* Update the thread's priority */
    if (thread->sched_stamp != sched_tick)
        update_priority(thread);

    processor->current_pri = thread->sched_pri;

    simple_lock(&pset->sched_lock);

    other_runnable = processor->runq.count > 0 || pset->runq.count > 0;

    if (    thread->state == TH_RUN                     &&
            thread->processor_set == pset               &&
            (thread->bound_processor == PROCESSOR_NULL  ||
             thread->bound_processor == processor)          ) {
        if (    thread->sched_pri >= BASEPRI_RTQUEUES   &&
                first_timeslice(processor)              ) {
            if (pset->runq.highq >= BASEPRI_RTQUEUES) {
                register run_queue_t    runq = &pset->runq;
                register queue_t        q;

                q = runq->queues + runq->highq;
                if (((thread_t)q->next)->realtime.deadline <
                                                processor->deadline) {
                    thread = (thread_t)q->next;
                    ((queue_entry_t)thread)->next->prev = q;
                    q->next = ((queue_entry_t)thread)->next;
                    thread->runq = RUN_QUEUE_NULL;
                    assert(thread->sched_mode & TH_MODE_PREEMPT);
                    runq->count--; runq->urgency--;
                    if (queue_empty(q)) {
                        if (runq->highq != IDLEPRI)
                            clrbit(MAXPRI - runq->highq, runq->bitmap);
                        runq->highq = MAXPRI - ffsbit(runq->bitmap);
                    }
                }
            }

            processor->deadline = thread->realtime.deadline;

            simple_unlock(&pset->sched_lock);

            return (thread);
        }

        if (    (!other_runnable                                ||
                 (processor->runq.highq < thread->sched_pri     &&
                  pset->runq.highq < thread->sched_pri))            ) {

            /* I am the highest priority runnable (non-idle) thread */

            processor->deadline = UINT64_MAX;

            simple_unlock(&pset->sched_lock);

            return (thread);
        }
    }

    if (other_runnable)
        thread = choose_thread(pset, processor);
    else {
        /*
         *  Nothing is runnable, so set this processor idle if it
         *  was running.  Return its idle thread.
         */
        if (processor->state == PROCESSOR_RUNNING) {
            remqueue(&pset->active_queue, (queue_entry_t)processor);
            processor->state = PROCESSOR_IDLE;

            enqueue_tail(&pset->idle_queue, (queue_entry_t)processor);
        }

        processor->deadline = UINT64_MAX;

        thread = processor->idle_thread;
    }

    simple_unlock(&pset->sched_lock);

    return (thread);
}
/*
 *  Perform a context switch and start executing the new thread.
 *
 *  If continuation is non-zero, resume the old (current) thread
 *  next by executing at continuation on a new stack, in lieu
 *  of returning.
 *
 *  Returns TRUE if the hand-off succeeds.
 *
 *  Called at splsched.
 */

#define funnel_release_check(thread, debug)                 \
MACRO_BEGIN                                                 \
    if ((thread)->funnel_state & TH_FN_OWNED) {             \
        (thread)->funnel_state = TH_FN_REFUNNEL;            \
        KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,             \
                (thread)->funnel_lock, (debug), 0, 0, 0);   \
        funnel_unlock((thread)->funnel_lock);               \
    }                                                       \
MACRO_END

#define funnel_refunnel_check(thread, debug)                \
MACRO_BEGIN                                                 \
    if ((thread)->funnel_state & TH_FN_REFUNNEL) {          \
        kern_return_t   result = (thread)->wait_result;     \
                                                            \
        (thread)->funnel_state = 0;                         \
        KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,             \
                (thread)->funnel_lock, (debug), 0, 0, 0);   \
        funnel_lock((thread)->funnel_lock);                 \
        KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,             \
                (thread)->funnel_lock, (debug), 0, 0, 0);   \
        (thread)->funnel_state = TH_FN_OWNED;               \
        (thread)->wait_result = result;                     \
    }                                                       \
MACRO_END
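/*
 * Informal sketch of how the two macros pair up around a block:
 * funnel_release_check() drops the funnel and records that fact in
 * funnel_state before the thread gives up the processor;
 * funnel_refunnel_check() re-takes the funnel after the thread resumes,
 * preserving the wait result across the funnel_lock() call.
 */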
static thread_t
__current_thread(void)
{
    return (current_thread());
}
static boolean_t
thread_invoke(
    register thread_t   old_thread,
    register thread_t   new_thread,
    ast_t               reason,
    thread_continue_t   old_cont)
{
    thread_continue_t   new_cont;
    processor_t         processor;

    if (get_preemption_level() != 0)
        panic("thread_invoke: preemption_level %d\n",
                                get_preemption_level());

    /*
     *  Mark thread interruptible.
     */
    thread_lock(new_thread);
    new_thread->state &= ~TH_UNINT;

    assert(thread_runnable(new_thread));

    assert(old_thread->continuation == NULL);

    /*
     *  Allow time constraint threads to hang onto
     *  a stack.
     */
    if (    (old_thread->sched_mode & TH_MODE_REALTIME)    &&
                    !old_thread->reserved_stack             ) {
        old_thread->reserved_stack = old_thread->kernel_stack;
    }

    if (old_cont != NULL) {
        if (new_thread->state & TH_STACK_HANDOFF) {
            /*
             *  If the old thread is using a privileged stack,
             *  check to see whether we can exchange it with
             *  that of the new thread.
             */
            if (    old_thread->kernel_stack == old_thread->reserved_stack &&
                            !new_thread->reserved_stack)
                goto need_stack;

            new_thread->state &= ~TH_STACK_HANDOFF;
            new_cont = new_thread->continuation;
            new_thread->continuation = NULL;

            /*
             *  Set up ast context of new thread and switch
             *  to its timer.
             */
            processor = current_processor();
            processor->active_thread = new_thread;
            processor->current_pri = new_thread->sched_pri;
            new_thread->last_processor = processor;
            ast_context(new_thread->top_act, processor->slot_num);
            timer_switch(&new_thread->system_timer);
            thread_unlock(new_thread);

            current_task()->csw++;

            old_thread->reason = reason;
            old_thread->continuation = old_cont;

            _mk_sp_thread_done(old_thread, new_thread, processor);

            machine_stack_handoff(old_thread, new_thread);

            _mk_sp_thread_begin(new_thread, processor);

            wake_lock(old_thread);
            thread_lock(old_thread);

            /*
             *  Inline thread_dispatch but
             *  don't free the stack.
             */
            switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {

            case TH_RUN | TH_UNINT:
                /*
                 *  Still running, put back
                 *  onto a run queue.
                 */
                old_thread->state |= TH_STACK_HANDOFF;
                _mk_sp_thread_dispatch(old_thread);

                thread_unlock(old_thread);
                wake_unlock(old_thread);
                break;

            case TH_RUN | TH_WAIT | TH_UNINT:
            case TH_RUN | TH_WAIT:
            {
                boolean_t   term, wake, callout;

                old_thread->sleep_stamp = sched_tick;
                old_thread->state |= TH_STACK_HANDOFF;
                old_thread->state &= ~TH_RUN;

                term = (old_thread->state & TH_TERMINATE)? TRUE: FALSE;
                callout = old_thread->active_callout;
                wake = old_thread->wake_active;
                old_thread->wake_active = FALSE;

                if (old_thread->sched_mode & TH_MODE_TIMESHARE)
                    pset_share_decr(old_thread->processor_set);
                pset_run_decr(old_thread->processor_set);

                thread_unlock(old_thread);
                wake_unlock(old_thread);

                if (callout)
                    call_thread_block();

                if (wake)
                    thread_wakeup((event_t)&old_thread->wake_active);

                if (term)
                    thread_reaper_enqueue(old_thread);
                break;
            }

            case TH_RUN | TH_IDLE:
                /*
                 *  The idle threads don't go
                 *  onto a run queue.
                 */
                old_thread->state |= TH_STACK_HANDOFF;
                thread_unlock(old_thread);
                wake_unlock(old_thread);
                break;

            default:
                panic("thread_invoke: state 0x%x\n", old_thread->state);
            }

            counter_always(c_thread_invoke_hits++);

            funnel_refunnel_check(new_thread, 2);

            call_continuation(new_cont);
            /*NOTREACHED*/
        }
        else
        if (new_thread->state & TH_STACK_ALLOC) {
            /*
             *  Waiting for a stack.
             */
            counter_always(c_thread_invoke_misses++);
            thread_unlock(new_thread);
            return (FALSE);
        }
        else
        if (new_thread == old_thread) {
            /* same thread but with continuation */
            counter(++c_thread_invoke_same);
            thread_unlock(new_thread);

            funnel_refunnel_check(new_thread, 3);

            call_continuation(old_cont);
            /*NOTREACHED*/
        }
    }
    else {
        /*
         *  Check that the new thread has a stack.
         */
        if (new_thread->state & TH_STACK_HANDOFF) {
need_stack:
            if (!stack_alloc_try(new_thread, thread_continue)) {
                counter_always(c_thread_invoke_misses++);
                thread_swapin(new_thread);
                return (FALSE);
            }

            new_thread->state &= ~TH_STACK_HANDOFF;
        }
        else
        if (new_thread->state & TH_STACK_ALLOC) {
            /*
             *  Waiting for a stack.
             */
            counter_always(c_thread_invoke_misses++);
            thread_unlock(new_thread);
            return (FALSE);
        }
        else
        if (old_thread == new_thread) {
            counter(++c_thread_invoke_same);
            thread_unlock(new_thread);
            return (TRUE);
        }
    }

    /*
     *  Set up ast context of new thread and switch to its timer.
     */
    processor = current_processor();
    processor->active_thread = new_thread;
    processor->current_pri = new_thread->sched_pri;
    new_thread->last_processor = processor;
    ast_context(new_thread->top_act, processor->slot_num);
    timer_switch(&new_thread->system_timer);
    assert(thread_runnable(new_thread));
    thread_unlock(new_thread);

    counter_always(c_thread_invoke_csw++);
    current_task()->csw++;

    assert(old_thread->runq == RUN_QUEUE_NULL);
    old_thread->reason = reason;
    old_thread->continuation = old_cont;

    _mk_sp_thread_done(old_thread, new_thread, processor);

    /*
     *  Here is where we actually change register context,
     *  and address space if required.  Note that control
     *  will not return here immediately.
     */
    old_thread = machine_switch_context(old_thread, old_cont, new_thread);

    /* Now on new thread's stack.  Set a local variable to refer to it. */
    new_thread = __current_thread();
    assert(old_thread != new_thread);

    assert(thread_runnable(new_thread));
    _mk_sp_thread_begin(new_thread, new_thread->last_processor);

    /*
     *  We're back.  Now old_thread is the thread that resumed
     *  us, and we have to dispatch it.
     */
    thread_dispatch(old_thread);

    if (old_cont) {
        funnel_refunnel_check(new_thread, 3);

        call_continuation(old_cont);
        /*NOTREACHED*/
    }

    return (TRUE);
}
/*
 *  thread_continue:
 *
 *  Called at splsched when a thread first receives
 *  a new stack after a continuation.
 */
void
thread_continue(
    register thread_t   old_thread)
{
    register thread_t           self = current_thread();
    register thread_continue_t  continuation;

    continuation = self->continuation;
    self->continuation = NULL;

    _mk_sp_thread_begin(self, self->last_processor);

    /*
     *  We must dispatch the old thread and then
     *  call the current thread's continuation.
     *  There might not be an old thread, if we are
     *  the first thread to run on this processor.
     */
    if (old_thread != THREAD_NULL)
        thread_dispatch(old_thread);

    funnel_refunnel_check(self, 4);

    call_continuation(continuation);
}
/*
 *  thread_block_reason:
 *
 *  Forces a reschedule, blocking the caller if a wait
 *  has been asserted.
 *
 *  If a continuation is specified, then thread_invoke will
 *  attempt to discard the thread's kernel stack.  When the
 *  thread resumes, it will execute the continuation function
 *  on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)

wait_result_t
thread_block_reason(
    thread_continue_t   continuation,
    ast_t               reason)
{
    register thread_t       thread = current_thread();
    register processor_t    processor;
    register thread_t       new_thread;

    counter(++c_thread_block_calls);

    check_simple_locks();

    if (!(reason & AST_PREEMPT))
        funnel_release_check(thread, 2);

    processor = current_processor();

    /* If we're explicitly yielding, force a subsequent quantum */
    if (reason & AST_YIELD)
        processor->timeslice = 0;

    /* We're handling all scheduling AST's */
    ast_off(AST_SCHEDULING);

    thread_lock(thread);
    new_thread = thread_select(processor);
    assert(new_thread && thread_runnable(new_thread));
    thread_unlock(thread);
    while (!thread_invoke(thread, new_thread, reason, continuation)) {
        thread_lock(thread);
        new_thread = thread_select(processor);
        assert(new_thread && thread_runnable(new_thread));
        thread_unlock(thread);
    }

    funnel_refunnel_check(thread, 5);

    return (thread->wait_result);
}

/*
 *  thread_block:
 *
 *  Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
    thread_continue_t   continuation)
{
    return thread_block_reason(continuation, AST_NONE);
}
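/*
 * Illustrative usage (hypothetical event and continuation names, not from
 * this file): the classic assert_wait/thread_block pairing, optionally with
 * a continuation so the kernel stack can be reclaimed while the thread
 * sleeps:
 *
 *  assert_wait((event_t)&some_object, THREAD_UNINT);
 *  thread_block(some_object_continue);   -- resumes in some_object_continue()
 *
 * or, without a continuation:
 *
 *  wait_result_t wr = assert_wait((event_t)&some_object, THREAD_UNINT);
 *  if (wr == THREAD_WAITING)
 *      wr = thread_block(THREAD_CONTINUE_NULL);
 */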
/*
 *  thread_run:
 *
 *  Switch directly from the current (old) thread to the
 *  new thread, handing off our quantum if appropriate.
 *
 *  New thread must be runnable, and not on a run queue.
 *
 *  Called at splsched.
 */
int
thread_run(
    thread_t            old_thread,
    thread_continue_t   continuation,
    thread_t            new_thread)
{
    ast_t       handoff = AST_HANDOFF;

    assert(old_thread == current_thread());

    funnel_release_check(old_thread, 3);

    while (!thread_invoke(old_thread, new_thread, handoff, continuation)) {
        register processor_t    processor = current_processor();

        thread_lock(old_thread);
        new_thread = thread_select(processor);
        thread_unlock(old_thread);
    }

    funnel_refunnel_check(old_thread, 6);

    return (old_thread->wait_result);
}
/*
 *  thread_dispatch:
 *
 *  Dispatches a running thread that is not on a
 *  run queue.
 *
 *  Called at splsched.
 */
void
thread_dispatch(
    register thread_t   thread)
{
    thread_lock(thread);

    /*
     *  If we are discarding the thread's stack, we must do it
     *  before the thread has a chance to run.
     */
    if (thread->continuation != NULL) {
        assert((thread->state & TH_STACK_STATE) == 0);
        thread->state |= TH_STACK_HANDOFF;
    }

    switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {

    case TH_RUN | TH_UNINT:
        /*
         *  No reason to stop.  Put back on a run queue.
         */
        _mk_sp_thread_dispatch(thread);
        break;

    case TH_RUN | TH_WAIT | TH_UNINT:
    case TH_RUN | TH_WAIT:
    {
        boolean_t   term, wake, callout;

        thread->sleep_stamp = sched_tick;
        thread->state &= ~TH_RUN;

        term = (thread->state & TH_TERMINATE)? TRUE: FALSE;
        callout = thread->active_callout;
        wake = thread->wake_active;
        thread->wake_active = FALSE;

        if (thread->sched_mode & TH_MODE_TIMESHARE)
            pset_share_decr(thread->processor_set);
        pset_run_decr(thread->processor_set);

        thread_unlock(thread);
        wake_unlock(thread);

        if (callout)
            call_thread_block();

        if (wake)
            thread_wakeup((event_t)&thread->wake_active);

        if (term)
            thread_reaper_enqueue(thread);

        return;
    }

    case TH_RUN | TH_IDLE:
        /*
         *  The idle threads don't go
         *  onto a run queue.
         */
        break;

    default:
        panic("thread_dispatch: state 0x%x\n", thread->state);
    }

    thread_unlock(thread);
    wake_unlock(thread);
}
/*
 *  Enqueue thread on run queue.  Thread must be locked,
 *  and not already be on a run queue.  Returns TRUE if a
 *  preemption is indicated based on the state of the run queue.
 *
 *  Run queue must be locked, see run_queue_remove()
 *  for more info.
 */
static boolean_t
run_queue_enqueue(
    register run_queue_t    rq,
    register thread_t       thread,
    integer_t               options)
{
    register int        whichq = thread->sched_pri;
    register queue_t    queue = &rq->queues[whichq];
    boolean_t           result = FALSE;

    assert(whichq >= MINPRI && whichq <= MAXPRI);

    assert(thread->runq == RUN_QUEUE_NULL);
    if (queue_empty(queue)) {
        enqueue_tail(queue, (queue_entry_t)thread);

        setbit(MAXPRI - whichq, rq->bitmap);
        if (whichq > rq->highq) {
            rq->highq = whichq;
            result = TRUE;
        }
    }
    else {
        if (options & SCHED_HEADQ)
            enqueue_head(queue, (queue_entry_t)thread);
        else
            enqueue_tail(queue, (queue_entry_t)thread);
    }
    if (thread->sched_mode & TH_MODE_PREEMPT)
        result = TRUE;
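/*
 * Note on the data structure (informal): each run queue keeps one FIFO per
 * priority plus a bitmap of non-empty priorities; highq caches the highest
 * occupied priority so the dequeue path (see choose_thread() below) can find
 * the next thread with a single ffsbit() instead of scanning every queue.
 */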
/*
 *  Enqueue a thread for realtime execution, similar
 *  to above.  Handles preemption directly.
 */
static void
realtime_schedule_insert(
    register processor_set_t    pset,
    register thread_t           thread)
{
    register run_queue_t    rq = &pset->runq;
    register int            whichq = thread->sched_pri;
    register queue_t        queue = &rq->queues[whichq];
    uint64_t                deadline = thread->realtime.deadline;
    boolean_t               try_preempt = FALSE;

    assert(whichq >= BASEPRI_REALTIME && whichq <= MAXPRI);

    assert(thread->runq == RUN_QUEUE_NULL);
    if (queue_empty(queue)) {
        enqueue_tail(queue, (queue_entry_t)thread);

        setbit(MAXPRI - whichq, rq->bitmap);
        if (whichq > rq->highq)
            rq->highq = whichq;
        try_preempt = TRUE;
    }
    else {
        register thread_t   entry = (thread_t)queue_first(queue);

        while (TRUE) {
            if (    queue_end(queue, (queue_entry_t)entry)  ||
                        deadline < entry->realtime.deadline     ) {
                entry = (thread_t)queue_prev((queue_entry_t)entry);
                break;
            }

            entry = (thread_t)queue_next((queue_entry_t)entry);
        }

        if ((queue_entry_t)entry == queue)
            try_preempt = TRUE;

        insque((queue_entry_t)thread, (queue_entry_t)entry);
    }

    assert(thread->sched_mode & TH_MODE_PREEMPT);
    rq->count++; rq->urgency++;

    if (try_preempt) {
        register processor_t    processor;

        processor = current_processor();
        if (    pset == processor->processor_set               &&
                (thread->sched_pri > processor->current_pri    ||
                    deadline < processor->deadline              ) ) {
            dispatch_counts.realtime_self++;
            simple_unlock(&pset->sched_lock);

            ast_on(AST_PREEMPT | AST_URGENT);
            return;
        }

        if (    pset->processor_count > 1           ||
                pset != processor->processor_set    ) {
            processor_t     myprocessor, lastprocessor;
            queue_entry_t   next;

            myprocessor = processor;
            processor = thread->last_processor;
            if (    processor != myprocessor                       &&
                    processor != PROCESSOR_NULL                    &&
                    processor->processor_set == pset               &&
                    processor->state == PROCESSOR_RUNNING          &&
                    (thread->sched_pri > processor->current_pri    ||
                        deadline < processor->deadline              ) ) {
                dispatch_counts.realtime_last++;
                cause_ast_check(processor);
                simple_unlock(&pset->sched_lock);
                return;
            }

            lastprocessor = processor;
            queue = &pset->active_queue;
            processor = (processor_t)queue_first(queue);
            while (!queue_end(queue, (queue_entry_t)processor)) {
                next = queue_next((queue_entry_t)processor);

                if (    processor != myprocessor                       &&
                        processor != lastprocessor                     &&
                        (thread->sched_pri > processor->current_pri    ||
                            deadline < processor->deadline              ) ) {
                    if (!queue_end(queue, next)) {
                        remqueue(queue, (queue_entry_t)processor);
                        enqueue_tail(queue, (queue_entry_t)processor);
                    }
                    dispatch_counts.realtime_other++;
                    cause_ast_check(processor);
                    simple_unlock(&pset->sched_lock);
                    return;
                }

                processor = (processor_t)next;
            }
        }
    }

    simple_unlock(&pset->sched_lock);
}
/*
 *  thread_setrun:
 *
 *  Dispatch thread for execution, directly onto an idle
 *  processor if possible.  Else put on appropriate run
 *  queue. (local if bound, else processor set)
 *
 *  Thread must be locked.
 */
void
thread_setrun(
    register thread_t   new_thread,
    integer_t           options)
{
    register processor_t        processor;
    register processor_set_t    pset;
    register thread_t           thread;
    ast_t                       preempt = (options & SCHED_PREEMPT)?
                                                    AST_PREEMPT: AST_NONE;

    assert(thread_runnable(new_thread));

    /*
     *  Update priority if needed.
     */
    if (new_thread->sched_stamp != sched_tick)
        update_priority(new_thread);

    /*
     *  Check for urgent preemption.
     */
    if (new_thread->sched_mode & TH_MODE_PREEMPT)
        preempt = (AST_PREEMPT | AST_URGENT);

    assert(new_thread->runq == RUN_QUEUE_NULL);

    if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
        /*
         *  First try to dispatch on
         *  the last processor.
         */
        pset = new_thread->processor_set;
        processor = new_thread->last_processor;
        if (    pset->processor_count > 1           &&
                processor != PROCESSOR_NULL         &&
                processor->state == PROCESSOR_IDLE  ) {
            processor_lock(processor);
            simple_lock(&pset->sched_lock);
            if (    processor->processor_set == pset    &&
                    processor->state == PROCESSOR_IDLE  ) {
                remqueue(&pset->idle_queue, (queue_entry_t)processor);
                processor->next_thread = new_thread;
                if (new_thread->sched_pri >= BASEPRI_RTQUEUES)
                    processor->deadline = new_thread->realtime.deadline;
                else
                    processor->deadline = UINT64_MAX;
                processor->state = PROCESSOR_DISPATCHING;
                dispatch_counts.idle_pset_last++;
                simple_unlock(&pset->sched_lock);
                processor_unlock(processor);
                if (processor != current_processor())
                    machine_signal_idle(processor);
                return;
            }
            processor_unlock(processor);
        }
        else
            simple_lock(&pset->sched_lock);

        /*
         *  Next pick any idle processor
         *  in the processor set.
         */
        if (pset->idle_count > 0) {
            processor = (processor_t)dequeue_head(&pset->idle_queue);
            processor->next_thread = new_thread;
            if (new_thread->sched_pri >= BASEPRI_RTQUEUES)
                processor->deadline = new_thread->realtime.deadline;
            else
                processor->deadline = UINT64_MAX;
            processor->state = PROCESSOR_DISPATCHING;
            dispatch_counts.idle_pset_any++;
            simple_unlock(&pset->sched_lock);
            if (processor != current_processor())
                machine_signal_idle(processor);
            return;
        }

        if (new_thread->sched_pri >= BASEPRI_RTQUEUES)
            realtime_schedule_insert(pset, new_thread);
        else {
            if (!run_queue_enqueue(&pset->runq, new_thread, options))
                preempt = AST_NONE;

            /*
             *  Update the timesharing quanta.
             */
            timeshare_quanta_update(pset);

            if (preempt != AST_NONE) {
                /*
                 *  First try the current processor
                 *  if it is a member of the correct
                 *  processor set.
                 */
                processor = current_processor();
                thread = processor->active_thread;
                if (    pset == processor->processor_set    &&
                        csw_needed(thread, processor)       ) {
                    dispatch_counts.pset_self++;
                    simple_unlock(&pset->sched_lock);

                    ast_on(preempt);
                    return;
                }

                /*
                 *  If that failed and we have other
                 *  processors available keep trying.
                 */
                if (    pset->processor_count > 1           ||
                        pset != processor->processor_set    ) {
                    queue_t         queue = &pset->active_queue;
                    processor_t     myprocessor, lastprocessor;
                    queue_entry_t   next;

                    /*
                     *  Next try the last processor
                     *  dispatched on.
                     */
                    myprocessor = processor;
                    processor = new_thread->last_processor;
                    if (    processor != myprocessor                        &&
                            processor != PROCESSOR_NULL                     &&
                            processor->processor_set == pset                &&
                            processor->state == PROCESSOR_RUNNING           &&
                            new_thread->sched_pri > processor->current_pri  ) {
                        dispatch_counts.pset_last++;
                        cause_ast_check(processor);
                        simple_unlock(&pset->sched_lock);
                        return;
                    }

                    /*
                     *  Lastly, pick any other
                     *  available processor.
                     */
                    lastprocessor = processor;
                    processor = (processor_t)queue_first(queue);
                    while (!queue_end(queue, (queue_entry_t)processor)) {
                        next = queue_next((queue_entry_t)processor);

                        if (    processor != myprocessor        &&
                                processor != lastprocessor      &&
                                new_thread->sched_pri >
                                        processor->current_pri  ) {
                            if (!queue_end(queue, next)) {
                                remqueue(queue, (queue_entry_t)processor);
                                enqueue_tail(queue, (queue_entry_t)processor);
                            }
                            dispatch_counts.pset_other++;
                            cause_ast_check(processor);
                            simple_unlock(&pset->sched_lock);
                            return;
                        }

                        processor = (processor_t)next;
                    }
                }
            }

            simple_unlock(&pset->sched_lock);
        }
    }
    else {
        /*
         *  Bound, can only run on bound processor.  Have to lock
         *  processor here because it may not be the current one.
         */
        processor_lock(processor);
        pset = processor->processor_set;
        if (pset != PROCESSOR_SET_NULL) {
            simple_lock(&pset->sched_lock);
            if (processor->state == PROCESSOR_IDLE) {
                remqueue(&pset->idle_queue, (queue_entry_t)processor);
                processor->next_thread = new_thread;
                processor->deadline = UINT64_MAX;
                processor->state = PROCESSOR_DISPATCHING;
                dispatch_counts.idle_bound++;
                simple_unlock(&pset->sched_lock);
                processor_unlock(processor);
                if (processor != current_processor())
                    machine_signal_idle(processor);
                return;
            }
        }

        if (!run_queue_enqueue(&processor->runq, new_thread, options))
            preempt = AST_NONE;

        if (preempt != AST_NONE) {
            if (processor == current_processor()) {
                thread = processor->active_thread;
                if (csw_needed(thread, processor)) {
                    dispatch_counts.bound_self++;
                    ast_on(preempt);
                }
            }
            else
            if (    processor->state == PROCESSOR_RUNNING           &&
                    new_thread->sched_pri > processor->current_pri  ) {
                dispatch_counts.bound_other++;
                cause_ast_check(processor);
            }
        }

        if (pset != PROCESSOR_SET_NULL)
            simple_unlock(&pset->sched_lock);

        processor_unlock(processor);
    }
}
/*
 *  Check for a possible preemption point in
 *  the (current) thread.
 *
 *  Called at splsched.
 */
ast_t
csw_check(
    thread_t        thread,
    processor_t     processor)
{
    int             current_pri = thread->sched_pri;
    ast_t           result = AST_NONE;
    run_queue_t     runq;

    if (first_timeslice(processor)) {
        runq = &processor->processor_set->runq;
        if (runq->highq >= BASEPRI_RTQUEUES)
            return (AST_PREEMPT | AST_URGENT);

        if (runq->highq > current_pri) {
            if (runq->urgency > 0)
                return (AST_PREEMPT | AST_URGENT);

            result |= AST_PREEMPT;
        }

        runq = &processor->runq;
        if (runq->highq > current_pri) {
            if (runq->urgency > 0)
                return (AST_PREEMPT | AST_URGENT);

            result |= AST_PREEMPT;
        }
    }
    else {
        runq = &processor->processor_set->runq;
        if (runq->highq >= current_pri) {
            if (runq->urgency > 0)
                return (AST_PREEMPT | AST_URGENT);

            result |= AST_PREEMPT;
        }

        runq = &processor->runq;
        if (runq->highq >= current_pri) {
            if (runq->urgency > 0)
                return (AST_PREEMPT | AST_URGENT);

            result |= AST_PREEMPT;
        }
    }

    if (result != AST_NONE)
        return (result);

    if (thread->state & TH_SUSP)
        result |= AST_PREEMPT;

    return (result);
}
/*
 *  set_sched_pri:
 *
 *  Set the scheduled priority of the specified thread.
 *
 *  This may cause the thread to change queues.
 *
 *  Thread must be locked.
 */
void
set_sched_pri(
    thread_t        thread,
    int             priority)
{
    register struct run_queue   *rq = run_queue_remove(thread);

    if (    !(thread->sched_mode & TH_MODE_TIMESHARE)          &&
            (priority >= BASEPRI_PREEMPT                    ||
             (thread->task_priority < MINPRI_KERNEL        &&
              thread->task_priority >= BASEPRI_BACKGROUND  &&
              priority > thread->task_priority)                ||
             (thread->sched_mode & TH_MODE_FORCEDPREEMPT)      )   )
        thread->sched_mode |= TH_MODE_PREEMPT;
    else
        thread->sched_mode &= ~TH_MODE_PREEMPT;

    thread->sched_pri = priority;
    if (rq != RUN_QUEUE_NULL)
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
    else
    if (thread->state & TH_RUN) {
        processor_t     processor = thread->last_processor;

        if (thread == current_thread()) {
            ast_t       preempt = csw_check(thread, processor);

            if (preempt != AST_NONE)
                ast_on(preempt);
            processor->current_pri = priority;
        }
        else
        if (    processor != PROCESSOR_NULL             &&
                processor->active_thread == thread      )
            cause_ast_check(processor);
    }
}
/*
 *  run_queue_remove:
 *
 *  Remove a thread from its current run queue and
 *  return the run queue if successful.
 *
 *  Thread must be locked.
 */
run_queue_t
run_queue_remove(
    thread_t        thread)
{
    register run_queue_t    rq = thread->runq;

    /*
     *  If rq is RUN_QUEUE_NULL, the thread will stay out of the
     *  run queues because the caller locked the thread.  Otherwise
     *  the thread is on a run queue, but could be chosen for dispatch
     *  and removed.
     */
    if (rq != RUN_QUEUE_NULL) {
        processor_set_t     pset = thread->processor_set;
        processor_t         processor = thread->bound_processor;

        /*
         *  The run queues are locked by the pset scheduling
         *  lock, except when a processor is off-line the
         *  local run queue is locked by the processor lock.
         */
        if (processor != PROCESSOR_NULL) {
            processor_lock(processor);
            pset = processor->processor_set;
        }

        if (pset != PROCESSOR_SET_NULL)
            simple_lock(&pset->sched_lock);

        if (rq == thread->runq) {
            /*
             *  Thread is on a run queue and we have a lock on
             *  that run queue.
             */
            remqueue(&rq->queues[0], (queue_entry_t)thread);
            rq->count--;
            if (thread->sched_mode & TH_MODE_PREEMPT)
                rq->urgency--;
            assert(rq->urgency >= 0);

            if (queue_empty(rq->queues + thread->sched_pri)) {
                /* update run queue status */
                if (thread->sched_pri != IDLEPRI)
                    clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
                rq->highq = MAXPRI - ffsbit(rq->bitmap);
            }

            thread->runq = RUN_QUEUE_NULL;
        }
        else {
            /*
             *  The thread left the run queue before we could
             *  lock the run queue.
             */
            assert(thread->runq == RUN_QUEUE_NULL);
            rq = RUN_QUEUE_NULL;
        }

        if (pset != PROCESSOR_SET_NULL)
            simple_unlock(&pset->sched_lock);

        if (processor != PROCESSOR_NULL)
            processor_unlock(processor);
    }

    return (rq);
}
/*
 *  choose_thread:
 *
 *  Remove a thread to execute from the run queues
 *  and return it.
 *
 *  Called with pset scheduling lock held.
 */
static thread_t
choose_thread(
    processor_set_t     pset,
    processor_t         processor)
{
    register run_queue_t    runq;
    register thread_t       thread;
    register queue_t        q;

    runq = &processor->runq;

    if (runq->count > 0 && runq->highq >= pset->runq.highq) {
        q = runq->queues + runq->highq;

        thread = (thread_t)q->next;
        ((queue_entry_t)thread)->next->prev = q;
        q->next = ((queue_entry_t)thread)->next;
        thread->runq = RUN_QUEUE_NULL;
        runq->count--;
        if (thread->sched_mode & TH_MODE_PREEMPT)
            runq->urgency--;
        assert(runq->urgency >= 0);
        if (queue_empty(q)) {
            if (runq->highq != IDLEPRI)
                clrbit(MAXPRI - runq->highq, runq->bitmap);
            runq->highq = MAXPRI - ffsbit(runq->bitmap);
        }

        processor->deadline = UINT64_MAX;

        return (thread);
    }

    runq = &pset->runq;

    assert(runq->count > 0);
    q = runq->queues + runq->highq;

    thread = (thread_t)q->next;
    ((queue_entry_t)thread)->next->prev = q;
    q->next = ((queue_entry_t)thread)->next;
    thread->runq = RUN_QUEUE_NULL;
    if (runq->highq >= BASEPRI_RTQUEUES)
        processor->deadline = thread->realtime.deadline;
    else
        processor->deadline = UINT64_MAX;
    runq->count--;
    if (thread->sched_mode & TH_MODE_PREEMPT)
        runq->urgency--;
    assert(runq->urgency >= 0);
    if (queue_empty(q)) {
        if (runq->highq != IDLEPRI)
            clrbit(MAXPRI - runq->highq, runq->bitmap);
        runq->highq = MAXPRI - ffsbit(runq->bitmap);
    }

    timeshare_quanta_update(pset);

    return (thread);
}
/*
 *  no_dispatch_count counts number of times processors go non-idle
 *  without being dispatched.  This should be very rare.
 */
int     no_dispatch_count = 0;

/*
 *  This is the idle thread, which just looks for other threads
 *  to execute.
 */
void
idle_thread_continue(void)
{
    register processor_t        processor;
    register volatile thread_t  *threadp;
    register volatile int       *gcount;
    register volatile int       *lcount;
    register thread_t           new_thread;
    register int                state;
    register processor_set_t    pset;
    int                         mycpu;

    mycpu = cpu_number();
    processor = cpu_to_processor(mycpu);
    threadp = (volatile thread_t *) &processor->next_thread;
    lcount = (volatile int *) &processor->runq.count;

    gcount = (volatile int *)&processor->processor_set->runq.count;

    while (    (*threadp == (volatile thread_t)THREAD_NULL)   &&
                (*gcount == 0) && (*lcount == 0)               ) {

        /* check for ASTs while we wait */
        if (need_ast[mycpu] &~ (    AST_SCHEDULING | AST_BSD   )) {
            /* no ASTs for us */
            need_ast[mycpu] &= AST_NONE;
        }
    }

    /*
     *  This is not a switch statement to avoid the
     *  bounds checking code in the common case.
     */
    pset = processor->processor_set;
    simple_lock(&pset->sched_lock);

    state = processor->state;
    if (state == PROCESSOR_DISPATCHING) {
        /*
         *  Common case -- cpu dispatched.
         */
        new_thread = *threadp;
        *threadp = (volatile thread_t) THREAD_NULL;
        processor->state = PROCESSOR_RUNNING;
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);

        if (    pset->runq.highq >= BASEPRI_RTQUEUES        &&
                new_thread->sched_pri >= BASEPRI_RTQUEUES   ) {
            register run_queue_t    runq = &pset->runq;
            register queue_t        q;

            q = runq->queues + runq->highq;
            if (((thread_t)q->next)->realtime.deadline <
                                            processor->deadline) {
                thread_t    thread = new_thread;

                new_thread = (thread_t)q->next;
                ((queue_entry_t)new_thread)->next->prev = q;
                q->next = ((queue_entry_t)new_thread)->next;
                new_thread->runq = RUN_QUEUE_NULL;
                processor->deadline = new_thread->realtime.deadline;
                assert(new_thread->sched_mode & TH_MODE_PREEMPT);
                runq->count--; runq->urgency--;
                if (queue_empty(q)) {
                    if (runq->highq != IDLEPRI)
                        clrbit(MAXPRI - runq->highq, runq->bitmap);
                    runq->highq = MAXPRI - ffsbit(runq->bitmap);
                }
                dispatch_counts.missed_realtime++;
                simple_unlock(&pset->sched_lock);

                thread_lock(thread);
                thread_setrun(thread, SCHED_HEADQ);
                thread_unlock(thread);

                counter(c_idle_thread_handoff++);
                thread_run(processor->idle_thread,
                                    idle_thread_continue, new_thread);
            }
            simple_unlock(&pset->sched_lock);

            counter(c_idle_thread_handoff++);
            thread_run(processor->idle_thread,
                                idle_thread_continue, new_thread);
        }

        if (    processor->runq.highq > new_thread->sched_pri   ||
                pset->runq.highq > new_thread->sched_pri         ) {
            thread_t    thread = new_thread;

            new_thread = choose_thread(pset, processor);
            dispatch_counts.missed_other++;
            simple_unlock(&pset->sched_lock);

            thread_lock(thread);
            thread_setrun(thread, SCHED_HEADQ);
            thread_unlock(thread);

            counter(c_idle_thread_handoff++);
            thread_run(processor->idle_thread,
                                idle_thread_continue, new_thread);
        }
        else {
            simple_unlock(&pset->sched_lock);

            counter(c_idle_thread_handoff++);
            thread_run(processor->idle_thread,
                                idle_thread_continue, new_thread);
        }
    }
    else
    if (state == PROCESSOR_IDLE) {
        /*
         *  Processor was not dispatched (Rare).
         *  Set it running again and force a
         *  reschedule.
         */
        no_dispatch_count++;

        remqueue(&pset->idle_queue, (queue_entry_t)processor);
        processor->state = PROCESSOR_RUNNING;
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
        simple_unlock(&pset->sched_lock);

        counter(c_idle_thread_block++);
        thread_block(idle_thread_continue);
    }
    else
    if (state == PROCESSOR_SHUTDOWN) {
        /*
         *  Going off-line.  Force a
         *  reschedule.
         */
        if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
            *threadp = (volatile thread_t) THREAD_NULL;
            processor->deadline = UINT64_MAX;
            simple_unlock(&pset->sched_lock);

            thread_lock(new_thread);
            thread_setrun(new_thread, SCHED_HEADQ);
            thread_unlock(new_thread);
        }
        else
            simple_unlock(&pset->sched_lock);

        counter(c_idle_thread_block++);
        thread_block(idle_thread_continue);
    }
    else {
        simple_unlock(&pset->sched_lock);

        panic("idle_thread: state %d\n", cpu_state(mycpu));
    }

    counter(c_idle_thread_block++);
    thread_block(idle_thread_continue);
;
2627 void sched_tick_thread(void);
2630 sched_tick_init(void)
2632 kernel_thread_with_priority(sched_tick_thread
, MAXPRI_STANDARD
);
2638 * Perform periodic bookkeeping functions about ten
2642 sched_tick_thread_continue(void)
2647 #endif /* SIMPLE_CLOCK */
2649 abstime
= mach_absolute_time();
2651 sched_tick
++; /* age usage one more time */
2654 * Compensate for clock drift. sched_usec is an
2655 * exponential average of the number of microseconds in
2656 * a second. It decays in the same fashion as cpu_usage.
2658 new_usec
= sched_usec_elapsed();
2659 sched_usec
= (5*sched_usec
+ 3*new_usec
)/8;
2660 #endif /* SIMPLE_CLOCK */
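/*
 * Informal note on the decay above: sched_usec is a weighted average in
 * which each new sample contributes 3/8 and the running value keeps 5/8,
 * so a one-time clock-drift spike fades over a few scheduler ticks rather
 * than shifting the average permanently.
 */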
    /*
     *  Compute the scheduler load factors.
     */
    compute_mach_factor();

    /*
     *  Scan the run queues for timesharing threads which
     *  may need to have their priorities recalculated.
     */
    do_thread_scan();

    clock_deadline_for_periodic_event(sched_tick_interval, abstime,
                                                    &sched_tick_deadline);

    assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
    thread_set_timer_deadline(sched_tick_deadline);
    thread_block(sched_tick_thread_continue);
}

void
sched_tick_thread(void)
{
    sched_tick_deadline = mach_absolute_time();

    thread_block(sched_tick_thread_continue);
}
/*
 *  do_thread_scan:
 *
 *  Scan the run queues for timesharing threads which need
 *  to be aged, possibly adjusting their priorities upwards.
 *
 *  Scanner runs in two passes.  Pass one squirrels likely
 *  threads away in an array (takes out references for them).
 *  Pass two does the priority updates.  This is necessary because
 *  the run queue lock is required for the candidate scan, but
 *  cannot be held during updates.
 *
 *  Array length should be enough so that restart isn't necessary,
 *  but restart logic is included.
 */
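/*
 * Informal sketch of the two passes described above: pass one walks each run
 * queue under its lock and stashes up to MAX_STUCK_THREADS candidate threads
 * (taking a reference on each); pass two drops the run queue lock, then locks
 * and updates each stashed thread individually before releasing its
 * reference.  If the array fills, the scan simply restarts.
 */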
#define MAX_STUCK_THREADS   128

static thread_t     stuck_threads[MAX_STUCK_THREADS];
static int          stuck_count = 0;

/*
 *  do_runq_scan is the guts of pass 1.  It scans a runq for
 *  stuck threads.  A boolean is returned indicating whether
 *  a retry is needed.
 */
static boolean_t
do_runq_scan(
    run_queue_t         runq)
{
    register queue_t    q;
    register thread_t   thread;
    register int        count;
    boolean_t           result = FALSE;

    if ((count = runq->count) > 0) {
        q = runq->queues + runq->highq;

        queue_iterate(q, thread, thread_t, links) {
            if (    thread->sched_stamp != sched_tick           &&
                    (thread->sched_mode & TH_MODE_TIMESHARE)    ) {
                /*
                 *  Stuck, save its id for later.
                 */
                if (stuck_count == MAX_STUCK_THREADS) {
                    /*
                     *  !@#$% No more room.
                     */
                    return (TRUE);
                }

                if (thread_lock_try(thread)) {
                    thread->ref_count++;
                    thread_unlock(thread);
                    stuck_threads[stuck_count++] = thread;
                }
                else
                    result = TRUE;
            }
        }
    }

    return (result);
}
boolean_t   thread_scan_enabled = TRUE;

static void
do_thread_scan(void)
{
    register boolean_t          restart_needed = FALSE;
    register thread_t           thread;
    register processor_set_t    pset = &default_pset;
    register processor_t        processor;

    if (!thread_scan_enabled)
        return;

    do {
        simple_lock(&pset->sched_lock);
        restart_needed = do_runq_scan(&pset->runq);
        simple_unlock(&pset->sched_lock);

        if (!restart_needed) {
            simple_lock(&pset->sched_lock);
            processor = (processor_t)queue_first(&pset->processors);
            while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
                if (restart_needed = do_runq_scan(&processor->runq))
                    break;

                thread = processor->idle_thread;
                if (thread->sched_stamp != sched_tick) {
                    if (stuck_count == MAX_STUCK_THREADS) {
                        restart_needed = TRUE;
                        break;
                    }

                    stuck_threads[stuck_count++] = thread;
                }

                processor = (processor_t)queue_next(&processor->processors);
            }
            simple_unlock(&pset->sched_lock);
        }

        /*
         *  Ok, we now have a collection of candidates -- fix them.
         */
        while (stuck_count > 0) {
            boolean_t       idle_thread;

            thread = stuck_threads[--stuck_count];
            stuck_threads[stuck_count] = THREAD_NULL;

            thread_lock(thread);
            idle_thread = (thread->state & TH_IDLE) != 0;
            if (    !(thread->state & (TH_WAIT|TH_SUSP))    &&
                    thread->sched_stamp != sched_tick       )
                update_priority(thread);
            thread_unlock(thread);

            if (!idle_thread)
                thread_deallocate(thread);
        }
    } while (restart_needed);
}
/*
 *  Just in case someone doesn't use the macro
 */
#undef  thread_wakeup
void
thread_wakeup(
    event_t     x)
{
    thread_wakeup_with_result(x, THREAD_AWAKENED);
}

boolean_t
thread_runnable(
    thread_t    thread)
{
    return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
}
#if     MACH_KDB
#include <ddb/db_output.h>
#define printf      kdbprintf
extern int          db_indent;
void                db_sched(void);

void
db_sched(void)
{
    iprintf("Scheduling Statistics:\n");
    iprintf("Thread invocations:  csw %d same %d\n",
                c_thread_invoke_csw, c_thread_invoke_same);
#if     MACH_COUNTERS
    iprintf("Thread block:  calls %d\n",
                c_thread_block_calls);
    iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
                c_idle_thread_handoff,
                c_idle_thread_block, no_dispatch_count);
    iprintf("Sched thread blocks:  %d\n", c_sched_thread_block);
#endif  /* MACH_COUNTERS */
}
#include <ddb/db_output.h>
void        db_show_thread_log(void);

void
db_show_thread_log(void)
{
}
#endif  /* MACH_KDB */