/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 */
#include <ddb/db_output.h>

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <sys/kdebug.h>
#define		DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define		MAX_UNSAFE_QUANTA			800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define		MAX_POLL_QUANTA				2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define		SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t	max_unsafe_computation;
uint32_t	sched_safe_duration;
uint64_t	max_poll_computation;

uint32_t	std_quantum;
uint32_t	min_std_quantum;

uint32_t	std_quantum_us;

uint32_t	max_rt_quantum;
uint32_t	min_rt_quantum;

uint32_t	sched_cswtime;

static uint32_t		delay_idle_limit, delay_idle_spin;
static processor_t	delay_idle(
						processor_t		processor,
						thread_t		self);

uint32_t	sched_tick_interval;

uint32_t	sched_pri_shift;

void		wait_queues_init(void);

static void		load_shift_init(void);

static thread_t	choose_thread(
					processor_set_t		pset,
					processor_t			processor);

static void		thread_update_scan(void);

static boolean_t	thread_runnable(
						thread_t		thread);
/*
 *	states are combinations of:
 *		W	waiting (or on wait queue)
 *		N	non-interruptible
 *
 *		assert_wait		thread_block	clear_wait		swapout	swapin
 *
 *	R	RW, RWN			R;   setrun		-				-
 *	RN	RWN				RN;  setrun		-				-
 */
/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */
struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
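/*
 * A quick sketch of how this hash is used (the same pattern appears in
 * assert_wait() and thread_wakeup_prim() below): an arbitrary event
 * address is folded to a non-negative value and reduced modulo NUMQUEUES
 * to pick a bucket, and all queueing for that event happens on the bucket:
 *
 *	wait_queue_t wq = &wait_queues[wait_hash(event)];
 *	wait_queue_assert_wait(wq, event, interruptible, 0);
 */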
int8_t		sched_load_shifts[NRQS];
void
sched_init(void)
{
	/*
	 *	Calculate the timeslicing quantum.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) *
											(1 << SCHED_TICK_SHIFT);

	pset_init(&default_pset);
}
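/*
 * Worked example of the arithmetic above, assuming the default settings
 * and that SCHED_TICK_SHIFT is 3 (its usual value in kern/sched.h): with
 * default_preemption_rate = 100, std_quantum_us = 1000000 / 100 = 10000 us,
 * i.e. a 10 ms timeslice; with max_unsafe_quanta = 800,
 * sched_safe_duration = (2 * 800 / 100) * 8 = 128 scheduler ticks.
 */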
void
sched_timebase_init(void)
{
	uint64_t	abstime;
	uint32_t	shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							std_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = abstime;

	/* smallest rt computation (50 us) */
	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_rt_quantum = abstime;

	/* maximum rt computation (50 ms) */
	clock_interval_to_absolutetime_interval(
							50, 1000*NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	max_rt_quantum = abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
													NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_tick_interval = abstime;

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
		abstime >>= 1;
	sched_pri_shift = shift;

	max_unsafe_computation = max_unsafe_quanta * std_quantum;
	max_poll_computation = max_poll_quanta * std_quantum;

	/* delay idle constant(s) (60, 1 us) */
	clock_interval_to_absolutetime_interval(60, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	delay_idle_limit = abstime;

	clock_interval_to_absolutetime_interval(1, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	delay_idle_spin = abstime;
}
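/*
 * Sketch of what the shift loop above computes: the scheduler tick
 * interval (in absolute-time units) is scaled by 5/3 and then halved
 * until it no longer exceeds BASEPRI_DEFAULT, so sched_pri_shift ends up
 * as the power-of-two shift that maps accumulated CPU usage over one
 * aging period down into the timesharing priority range.  Roughly
 * speaking, later priority code can use (usage >> sched_pri_shift) as the
 * priority decrement without any division.
 */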
void
wait_queues_init(void)
{
	register int	i;

	for (i = 0; i < NUMQUEUES; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
/*
 *	Set up values for timeshare
 *	loading factors.
 */
static void
load_shift_init(void)
{
	int8_t		k, *p = sched_load_shifts;
	uint32_t	i, j;

	*p++ = INT8_MIN; *p++ = 0;

	for (i = j = 2, k = 1; i < NRQS; ++k) {
		for (j <<= 1; i < j; ++i)
			*p++ = k;
	}
}
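/*
 * Effect of the loop above (a sketch, assuming the NRQS run-queue levels
 * declared elsewhere): entry 0 is a sentinel (INT8_MIN), entry 1 is 0, and
 * sched_load_shifts[n] is roughly floor(log2(n)) for n >= 2 -- entries
 * 2..3 hold 1, 4..7 hold 2, 8..15 hold 3, and so on.  The table gives the
 * timeshare code a cheap stand-in for log2 of the run-queue load.
 */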
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
	void			*p0,
	__unused void	*p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (--thread->wait_timer_active == 0) {
		if (thread->wait_timer_is_set) {
			thread->wait_timer_is_set = FALSE;
			clear_wait_internal(thread, THREAD_TIMED_OUT);
		}
	}
	thread_unlock(thread);
	splx(s);
}
/*
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	thread_t		thread = current_thread();
	uint64_t		deadline;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		if (!timer_call_enter(&thread->wait_timer, deadline))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	splx(s);
}
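/*
 * Illustrative caller sketch for the wait-timer interface, following the
 * ordering the comment above requires (event and the 10 ms timeout are
 * made up for the example):
 *
 *	assert_wait(event, THREAD_UNINT);
 *	thread_set_timer(10, 1000*NSEC_PER_USEC);	// 10 ms
 *	result = thread_block(THREAD_CONTINUE_NULL);
 *	if (result != THREAD_TIMED_OUT)
 *		thread_cancel_timer();
 */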
void
thread_set_timer_deadline(
	uint64_t		deadline)
{
	thread_t		thread = current_thread();
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		if (!timer_call_enter(&thread->wait_timer, deadline))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	splx(s);
}
void
thread_cancel_timer(void)
{
	thread_t		thread = current_thread();
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
	thread_unlock(thread);
	splx(s);
}
/*
 *	thread_unblock:
 *
 *	Unblock thread on wake up.
 *
 *	Returns TRUE if the thread is still running.
 *
 *	Thread must be locked.
 */
boolean_t
thread_unblock(
	thread_t		thread,
	wait_result_t	wresult)
{
	boolean_t		result = FALSE;

	/*
	 *	Set wait_result.
	 */
	thread->wait_result = wresult;

	/*
	 *	Cancel pending wait timer.
	 */
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	/*
	 *	Update scheduling state.
	 */
	thread->state &= ~(TH_WAIT|TH_UNINT);

	if (!(thread->state & TH_RUN)) {
		thread->state |= TH_RUN;

		/*
		 *	Mark unblocked if call out.
		 */
		if (thread->options & TH_OPT_CALLOUT)
			call_thread_unblock();

		/*
		 *	Update pset run counts.
		 */
		pset_run_incr(thread->processor_set);
		if (thread->sched_mode & TH_MODE_TIMESHARE)
			pset_share_incr(thread->processor_set);
	}
	else
		result = TRUE;

	/*
	 *	Calculate deadline for real-time threads.
	 */
	if (thread->sched_mode & TH_MODE_REALTIME) {
		thread->realtime.deadline = mach_absolute_time();
		thread->realtime.deadline += thread->realtime.constraint;
	}

	/*
	 *	Clear old quantum, fail-safe computation, etc.
	 */
	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
					(int)thread, (int)thread->sched_pri, 0, 0, 0);

	return (result);
}
/*
 *	Routine:	thread_go
 *	Purpose:
 *		Unblock and dispatch thread.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *	Returns:
 *		KERN_SUCCESS - Thread was set running
 *		KERN_NOT_WAITING - Thread was not waiting
 */
kern_return_t
thread_go(
	thread_t		thread,
	wait_result_t	wresult)
{
	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT64);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
		if (!thread_unblock(thread, wresult))
			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

		return (KERN_SUCCESS);
	}

	return (KERN_NOT_WAITING);
}
/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
	thread_t			thread,
	wait_interrupt_t	interruptible)
{
	boolean_t		at_safe_point;

	/*
	 *	The thread may have certain types of interrupts/aborts masked
	 *	off.  Even if the wait location says these types of interrupts
	 *	are OK, we have to honor mask settings (outer-scoped code may
	 *	not be able to handle aborts at the moment).
	 */
	if (interruptible > (thread->options & TH_OPT_INTMASK))
		interruptible = thread->options & TH_OPT_INTMASK;

	at_safe_point = (interruptible == THREAD_ABORTSAFE);

	if (	interruptible == THREAD_UNINT			||
			!(thread->state & TH_ABORT)				||
			(!at_safe_point &&
			 (thread->state & TH_ABORT_SAFELY))		) {
		thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
		thread->at_safe_point = at_safe_point;
		return (thread->wait_result = THREAD_WAITING);
	}
	else
	if (thread->state & TH_ABORT_SAFELY)
		thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);

	return (thread->wait_result = THREAD_INTERRUPTED);
}
/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
	wait_interrupt_t new_level)
{
	thread_t thread = current_thread();
	wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

	thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

	return result;
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t thread;

	if(debug_mode) return TRUE;		/* Always succeed in debug mode */

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
	event_t				event,
	wait_interrupt_t	interruptible)
{
	register wait_queue_t	wq;
	register int			index;

	assert(event != NO_EVENT);

	index = wait_hash(event);
	wq = &wait_queues[index];
	return wait_queue_assert_wait(wq, event, interruptible, 0);
}
wait_result_t
assert_wait_timeout(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint32_t			interval,
	uint32_t			scale_factor)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	uint64_t			deadline;
	spl_t				s;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	s = splsched();
	wait_queue_lock(wqueue);
	thread_lock(thread);

	clock_interval_to_deadline(interval, scale_factor, &deadline);
	wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event,
												interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);
	splx(s);

	return (wresult);
}
wait_result_t
assert_wait_deadline(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint64_t			deadline)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	spl_t				s;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	s = splsched();
	wait_queue_lock(wqueue);
	thread_lock(thread);

	wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event,
												interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);
	splx(s);

	return (wresult);
}
/*
 *	thread_sleep_fast_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified simple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components that use a
 *	faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
	event_t				event,
	simple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		simple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		simple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified usimple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components where
 *	simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
	event_t				event,
	usimple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		usimple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		usimple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_mutex:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified mutex is unlocked before releasing
 *	the cpu.  The mutex will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex(
	event_t				event,
	mutex_t				*mutex,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		mutex_unlock(mutex);
		res = thread_block(THREAD_CONTINUE_NULL);
		mutex_lock(mutex);
	}
	return res;
}
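/*
 * Hypothetical caller sketch for the sleep/wakeup pair (obj and its ready
 * flag are illustrative only).  The consumer sleeps with the mutex held and
 * the producer wakes it after setting the condition:
 *
 *	mutex_lock(&obj->lock);
 *	while (!obj->ready)
 *		thread_sleep_mutex((event_t)&obj->ready, &obj->lock, THREAD_UNINT);
 *	mutex_unlock(&obj->lock);
 *
 *	// producer, with obj->lock held:
 *	obj->ready = TRUE;
 *	thread_wakeup((event_t)&obj->ready);
 */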
/*
 *	thread_sleep_mutex_deadline:
 *
 *	Cause the current thread to wait until the specified event
 *	(or deadline) occurs.  The specified mutex is unlocked before
 *	releasing the cpu.  The mutex will be re-acquired before returning.
 */
wait_result_t
thread_sleep_mutex_deadline(
	event_t				event,
	mutex_t				*mutex,
	uint64_t			deadline,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		mutex_unlock(mutex);
		res = thread_block(THREAD_CONTINUE_NULL);
		mutex_lock(mutex);
	}
	return res;
}
/*
 *	thread_sleep_lock_write:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified (write) lock is unlocked before releasing
 *	the cpu.  The (write) lock will be re-acquired before returning.
 */
wait_result_t
thread_sleep_lock_write(
	event_t				event,
	lock_t				*lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lock_write_done(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		lock_write(lock);
	}
	return res;
}
/*
 *	thread_stop:
 *
 *	Force a preemption point for a thread and wait
 *	for it to stop running.  Arbitrates access among
 *	multiple stop requests. (released by unstop)
 *
 *	The thread must enter a wait state and stop via a
 *	separate means.
 *
 *	Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);

	while (thread->state & TH_SUSP) {
		thread->wake_active = TRUE;
		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED)
			return (FALSE);

		s = splsched();
		wake_lock(thread);
	}

	thread_lock(thread);
	thread->state |= TH_SUSP;

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);

		thread->wake_active = TRUE;
		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED) {
			thread_unstop(thread);
			return (FALSE);
		}

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);

	return (TRUE);
}
/*
 *	thread_unstop:
 *
 *	Release a previous stop request and set
 *	the thread running if appropriate.
 *
 *	Use only after a successful stop operation.
 */
void
thread_unstop(
	thread_t		thread)
{
	spl_t		s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
		thread->state &= ~TH_SUSP;
		thread_unblock(thread, THREAD_AWAKENED);

		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}
	else
	if (thread->state & TH_SUSP) {
		thread->state &= ~TH_SUSP;

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);
			wake_unlock(thread);
			splx(s);

			thread_wakeup(&thread->wake_active);
			return;
		}
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	thread_wait:
 *
 *	Wait for a thread to stop running. (non-interruptible)
 */
void
thread_wait(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_RUNNING	&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
		thread_unlock(thread);

		thread->wake_active = TRUE;
		wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			thread_block(THREAD_CONTINUE_NULL);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
	thread_t		thread,
	wait_result_t	wresult)
{
	wait_queue_t	wq = thread->wait_queue;
	int				i = LockTimeOut;

	do {
		if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
			return (KERN_FAILURE);

		if (wq != WAIT_QUEUE_NULL) {
			if (wait_queue_lock_try(wq)) {
				wait_queue_pull_thread_locked(wq, thread, TRUE);
				/* wait queue unlocked, thread still locked */
			}
			else {
				thread_unlock(thread);
				delay(1);

				thread_lock(thread);
				if (wq != thread->wait_queue)
					return (KERN_NOT_WAITING);

				continue;
			}
		}

		return (thread_go(thread, wresult));
	} while (--i > 0);

	panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n",
		  thread, wq, cpu_number());

	return (KERN_FAILURE);
}
/*
 *	clear_wait:
 *
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
	thread_t		thread,
	wait_result_t	result)
{
	kern_return_t	ret;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	ret = clear_wait_internal(thread, result);
	thread_unlock(thread);
	splx(s);
	return ret;
}
/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 */
kern_return_t
thread_wakeup_prim(
	event_t			event,
	boolean_t		one_thread,
	wait_result_t	result)
{
	register wait_queue_t	wq;
	register int			index;

	index = wait_hash(event);
	wq = &wait_queues[index];
	if (one_thread)
		return (wait_queue_wakeup_one(wq, event, result));
	else
		return (wait_queue_wakeup_all(wq, event, result));
}
/*
 *	thread_bind:
 *
 *	Force a thread to execute on the specified processor.
 *
 *	Returns the previous binding.  PROCESSOR_NULL means
 *	not bound.
 *
 *	XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
	register thread_t	thread,
	processor_t			processor)
{
	processor_t		prev;
	run_queue_t		runq = RUN_QUEUE_NULL;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	prev = thread->bound_processor;
	if (prev != PROCESSOR_NULL)
		runq = run_queue_remove(thread);

	thread->bound_processor = processor;

	if (runq != RUN_QUEUE_NULL)
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	thread_unlock(thread);
	splx(s);

	return (prev);
}
struct {
	uint32_t	idle_pset_last,
				idle_pset_any,
				idle_bound;

	uint32_t	pset_self,
				pset_last,
				pset_other,
				bound_self,
				bound_other;

	uint32_t	realtime_self,
				realtime_last,
				realtime_other;

	uint32_t	missed_realtime,
				missed_other;
} dispatch_counts;
/*
 *	thread_select:
 *
 *	Select a thread for the current processor to run.
 *
 *	May select the current thread, which must be locked.
 */
static thread_t
thread_select(
	register processor_t	processor)
{
	register thread_t		thread;
	processor_set_t			pset;
	boolean_t				other_runnable;

	/*
	 *	Check for other non-idle runnable threads.
	 */
	pset = processor->processor_set;
	thread = processor->active_thread;

	/* Update the thread's priority */
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);

	processor->current_pri = thread->sched_pri;

	simple_lock(&pset->sched_lock);

	other_runnable = processor->runq.count > 0 || pset->runq.count > 0;

	if (	thread->state == TH_RUN						&&
			thread->processor_set == pset				&&
			(thread->bound_processor == PROCESSOR_NULL	||
			 thread->bound_processor == processor)			) {
		if (	thread->sched_pri >= BASEPRI_RTQUEUES	&&
				first_timeslice(processor)				) {
			if (pset->runq.highq >= BASEPRI_RTQUEUES) {
				register run_queue_t	runq = &pset->runq;
				register queue_t		q;

				q = runq->queues + runq->highq;
				if (((thread_t)q->next)->realtime.deadline <
												processor->deadline) {
					thread = (thread_t)q->next;
					((queue_entry_t)thread)->next->prev = q;
					q->next = ((queue_entry_t)thread)->next;
					thread->runq = RUN_QUEUE_NULL;
					assert(thread->sched_mode & TH_MODE_PREEMPT);
					runq->count--; runq->urgency--;
					if (queue_empty(q)) {
						if (runq->highq != IDLEPRI)
							clrbit(MAXPRI - runq->highq, runq->bitmap);
						runq->highq = MAXPRI - ffsbit(runq->bitmap);
					}
				}
			}

			processor->deadline = thread->realtime.deadline;

			simple_unlock(&pset->sched_lock);

			return (thread);
		}

		if (	(!other_runnable							||
				 (processor->runq.highq < thread->sched_pri	&&
				  pset->runq.highq < thread->sched_pri))		) {

			/* I am the highest priority runnable (non-idle) thread */

			processor->deadline = UINT64_MAX;

			simple_unlock(&pset->sched_lock);

			return (thread);
		}
	}

	if (other_runnable)
		thread = choose_thread(pset, processor);
	else {
		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.  Return its idle thread.
		 */
		if (processor->state == PROCESSOR_RUNNING) {
			remqueue(&pset->active_queue, (queue_entry_t)processor);
			processor->state = PROCESSOR_IDLE;

			enqueue_tail(&pset->idle_queue, (queue_entry_t)processor);
			pset->idle_count++;
		}

		processor->deadline = UINT64_MAX;

		thread = processor->idle_thread;
	}

	simple_unlock(&pset->sched_lock);

	return (thread);
}
/*
 *	thread_invoke:
 *
 *	Perform a context switch and start executing the new thread.
 *
 *	Returns FALSE on failure, and the thread is re-dispatched.
 *
 *	Called at splsched.
 */

#define funnel_release_check(thread, debug)						\
MACRO_BEGIN														\
	if ((thread)->funnel_state & TH_FN_OWNED) {					\
		(thread)->funnel_state = TH_FN_REFUNNEL;				\
		KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,					\
			(thread)->funnel_lock, (debug), 0, 0, 0);			\
		funnel_unlock((thread)->funnel_lock);					\
	}															\
MACRO_END

#define funnel_refunnel_check(thread, debug)					\
MACRO_BEGIN														\
	if ((thread)->funnel_state & TH_FN_REFUNNEL) {				\
		kern_return_t	result = (thread)->wait_result;			\
																\
		(thread)->funnel_state = 0;								\
		KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,					\
			(thread)->funnel_lock, (debug), 0, 0, 0);			\
		funnel_lock((thread)->funnel_lock);						\
		KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,					\
			(thread)->funnel_lock, (debug), 0, 0, 0);			\
		(thread)->funnel_state = TH_FN_OWNED;					\
		(thread)->wait_result = result;							\
	}															\
MACRO_END
static boolean_t
thread_invoke(
	register thread_t	old_thread,
	register thread_t	new_thread,
	ast_t				reason)
{
	thread_continue_t	new_cont, continuation = old_thread->continuation;
	void				*new_param, *parameter = old_thread->parameter;
	processor_t			processor;
	thread_t			prev_thread;

	if (get_preemption_level() != 0)
		panic("thread_invoke: preemption_level %d\n",
								get_preemption_level());

	assert(old_thread == current_thread());

	/*
	 * Mark thread interruptible.
	 */
	thread_lock(new_thread);
	new_thread->state &= ~TH_UNINT;

	assert(thread_runnable(new_thread));

	/*
	 * Allow time constraint threads to hang onto
	 * a stack.
	 */
	if (	(old_thread->sched_mode & TH_MODE_REALTIME)	&&
			!old_thread->reserved_stack					) {
		old_thread->reserved_stack = old_thread->kernel_stack;
	}

	if (continuation != NULL) {
		if (!new_thread->kernel_stack) {
			/*
			 *	If the old thread is using a privileged stack,
			 *	check to see whether we can exchange it with
			 *	that of the new thread.
			 */
			if (	old_thread->kernel_stack == old_thread->reserved_stack	&&
					!new_thread->reserved_stack)
				goto need_stack;

			/*
			 * Context switch by performing a stack handoff.
			 */
			new_cont = new_thread->continuation;
			new_thread->continuation = NULL;
			new_param = new_thread->parameter;
			new_thread->parameter = NULL;

			processor = current_processor();
			processor->active_thread = new_thread;
			processor->current_pri = new_thread->sched_pri;
			new_thread->last_processor = processor;
			ast_context(new_thread);
			thread_unlock(new_thread);

			current_task()->csw++;

			old_thread->reason = reason;

			processor->last_dispatch = mach_absolute_time();
			timer_event((uint32_t)processor->last_dispatch,
									&new_thread->system_timer);

			thread_done(old_thread, new_thread, processor);

			machine_stack_handoff(old_thread, new_thread);

			thread_begin(new_thread, processor);

			/*
			 * Now dispatch the old thread.
			 */
			thread_dispatch(old_thread);

			counter_always(c_thread_invoke_hits++);

			funnel_refunnel_check(new_thread, 2);
			(void) spllo();

			call_continuation(new_cont, new_param, new_thread->wait_result);
			/*NOTREACHED*/
		}
		else
		if (new_thread == old_thread) {
			/* same thread but with continuation */
			counter(++c_thread_invoke_same);
			thread_unlock(new_thread);

			funnel_refunnel_check(new_thread, 3);
			(void) spllo();

			call_continuation(continuation, parameter, new_thread->wait_result);
			/*NOTREACHED*/
		}
	}
	else {
		/*
		 * Check that the new thread has a stack
		 */
		if (!new_thread->kernel_stack) {
need_stack:
			if (!stack_alloc_try(new_thread)) {
				counter_always(c_thread_invoke_misses++);
				thread_unlock(new_thread);
				thread_stack_enqueue(new_thread);
				return (FALSE);
			}
		}
		else
		if (new_thread == old_thread) {
			counter(++c_thread_invoke_same);
			thread_unlock(new_thread);
			return (TRUE);
		}
	}

	/*
	 * Context switch by full context save.
	 */
	processor = current_processor();
	processor->active_thread = new_thread;
	processor->current_pri = new_thread->sched_pri;
	new_thread->last_processor = processor;
	ast_context(new_thread);
	assert(thread_runnable(new_thread));
	thread_unlock(new_thread);

	counter_always(c_thread_invoke_csw++);
	current_task()->csw++;

	assert(old_thread->runq == RUN_QUEUE_NULL);
	old_thread->reason = reason;

	processor->last_dispatch = mach_absolute_time();
	timer_event((uint32_t)processor->last_dispatch, &new_thread->system_timer);

	thread_done(old_thread, new_thread, processor);

	/*
	 * This is where we actually switch register context,
	 * and address space if required.  Control will not
	 * return here immediately.
	 */
	prev_thread = machine_switch_context(old_thread, continuation, new_thread);

	/*
	 * We are still old_thread, possibly on a different processor,
	 * and new_thread is now stale.
	 */
	thread_begin(old_thread, old_thread->last_processor);

	/*
	 * Now dispatch the thread which resumed us.
	 */
	thread_dispatch(prev_thread);

	if (continuation) {
		funnel_refunnel_check(old_thread, 3);
		(void) spllo();

		call_continuation(continuation, parameter, old_thread->wait_result);
		/*NOTREACHED*/
	}

	return (TRUE);
}
/*
 *	thread_done:
 *
 *	Perform calculations for thread
 *	finishing execution on the current processor.
 *
 *	Called at splsched.
 */
void
thread_done(
	thread_t		old_thread,
	thread_t		new_thread,
	processor_t		processor)
{
	if (!(old_thread->state & TH_IDLE)) {
		/*
		 * Compute remainder of current quantum.
		 */
		if (	first_timeslice(processor)							&&
				processor->quantum_end > processor->last_dispatch	)
			old_thread->current_quantum =
					(processor->quantum_end - processor->last_dispatch);
		else
			old_thread->current_quantum = 0;

		if (old_thread->sched_mode & TH_MODE_REALTIME) {
			/*
			 * Cancel the deadline if the thread has
			 * consumed the entire quantum.
			 */
			if (old_thread->current_quantum == 0) {
				old_thread->realtime.deadline = UINT64_MAX;
				old_thread->reason |= AST_QUANTUM;
			}
		}
		else {
			/*
			 * For non-realtime threads treat a tiny
			 * remaining quantum as an expired quantum
			 * but include what's left next time.
			 */
			if (old_thread->current_quantum < min_std_quantum) {
				old_thread->reason |= AST_QUANTUM;
				old_thread->current_quantum += std_quantum;
			}
		}

		/*
		 * If we are doing a direct handoff then
		 * give the remainder of our quantum to
		 * the next thread.
		 */
		if ((old_thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
			new_thread->current_quantum = old_thread->current_quantum;
			old_thread->reason |= AST_QUANTUM;
			old_thread->current_quantum = 0;
		}

		old_thread->last_switch = processor->last_dispatch;

		old_thread->computation_metered +=
				(old_thread->last_switch - old_thread->computation_epoch);
	}
}
/*
 *	thread_begin:
 *
 *	Set up for thread beginning execution on
 *	the current processor.
 *
 *	Called at splsched.
 */
void
thread_begin(
	thread_t		thread,
	processor_t		processor)
{
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Give the thread a new quantum
		 * if none remaining.
		 */
		if (thread->current_quantum == 0)
			thread_quantum_init(thread);

		/*
		 * Set up quantum timer and timeslice.
		 */
		processor->quantum_end =
				(processor->last_dispatch + thread->current_quantum);
		timer_call_enter1(&processor->quantum_timer,
							thread, processor->quantum_end);

		processor_timeslice_setup(processor, thread);

		thread->last_switch = processor->last_dispatch;

		thread->computation_epoch = thread->last_switch;
	}
	else {
		timer_call_cancel(&processor->quantum_timer);

		processor->timeslice = 1;
	}
}
/*
 *	thread_dispatch:
 *
 *	Handle previous thread at context switch.  Re-dispatch
 *	if still running, otherwise update run state and perform
 *	special actions.
 *
 *	Called at splsched.
 */
void
thread_dispatch(
	register thread_t	thread)
{
	/*
	 *	If blocked at a continuation, discard
	 *	the stack.
	 */
	if (thread->continuation != NULL && thread->kernel_stack)
		stack_free(thread);

	if (!(thread->state & TH_IDLE)) {
		wake_lock(thread);
		thread_lock(thread);

		if (!(thread->state & TH_WAIT)) {
			/*
			 *	Still running.
			 */
			if (thread->reason & AST_QUANTUM)
				thread_setrun(thread, SCHED_TAILQ);
			else
			if (thread->reason & AST_PREEMPT)
				thread_setrun(thread, SCHED_HEADQ);
			else
				thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

			thread->reason = AST_NONE;

			thread_unlock(thread);
			wake_unlock(thread);
		}
		else {
			boolean_t		wake;

			/*
			 *	Waiting.
			 */
			thread->state &= ~TH_RUN;

			wake = thread->wake_active;
			thread->wake_active = FALSE;

			if (thread->sched_mode & TH_MODE_TIMESHARE)
				pset_share_decr(thread->processor_set);
			pset_run_decr(thread->processor_set);

			thread_unlock(thread);
			wake_unlock(thread);

			if (thread->options & TH_OPT_CALLOUT)
				call_thread_block();

			if (wake)
				thread_wakeup((event_t)&thread->wake_active);

			if (thread->state & TH_TERMINATE)
				thread_terminate_enqueue(thread);
		}
	}
}
/*
 *	thread_block_reason:
 *
 *	Forces a reschedule, blocking the caller if a wait
 *	has been asserted.
 *
 *	If a continuation is specified, then thread_invoke will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
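/*
 * Sketch of the continuation pattern the comment above describes (the
 * callback name is illustrative only): instead of blocking with a live
 * kernel stack, a caller names the function that should run when it is
 * next woken, and the stack may be discarded in the meantime.
 *
 *	static void my_continue(void *param, wait_result_t wresult);	// hypothetical
 *
 *	assert_wait(event, THREAD_UNINT);
 *	thread_block_parameter((thread_continue_t)my_continue, param);
 *	// control does not return here if the stack was discarded;
 *	// my_continue() runs on a fresh stack instead.
 */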
counter(mach_counter_t  c_thread_block_calls = 0;)

wait_result_t
thread_block_reason(
	thread_continue_t	continuation,
	void				*parameter,
	ast_t				reason)
{
	register thread_t		self = current_thread();
	register processor_t	processor;
	register thread_t		new_thread;
	spl_t					s;

	counter(++c_thread_block_calls);

	if (!(reason & AST_PREEMPT))
		funnel_release_check(self, 2);

	s = splsched();

	processor = current_processor();

	/*
	 *	Delay switching to the idle thread under certain conditions.
	 */
	if (s != FALSE && (self->state & (TH_IDLE|TH_TERMINATE|TH_WAIT)) == TH_WAIT) {
		if (	processor->processor_set->processor_count > 1	&&
				processor->processor_set->runq.count == 0		&&
				processor->runq.count == 0						)
			processor = delay_idle(processor, self);
	}

	/* If we're explicitly yielding, force a subsequent quantum */
	if (reason & AST_YIELD)
		processor->timeslice = 0;

	/* We're handling all scheduling AST's */
	ast_off(AST_SCHEDULING);

	self->continuation = continuation;
	self->parameter = parameter;

	thread_lock(self);
	new_thread = thread_select(processor);
	assert(new_thread && thread_runnable(new_thread));
	thread_unlock(self);
	while (!thread_invoke(self, new_thread, reason)) {
		thread_lock(self);
		new_thread = thread_select(processor);
		assert(new_thread && thread_runnable(new_thread));
		thread_unlock(self);
	}

	funnel_refunnel_check(self, 5);
	splx(s);

	return (self->wait_result);
}
/*
 *	thread_block:
 *
 *	Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
	thread_continue_t	continuation)
{
	return thread_block_reason(continuation, NULL, AST_NONE);
}

wait_result_t
thread_block_parameter(
	thread_continue_t	continuation,
	void				*parameter)
{
	return thread_block_reason(continuation, parameter, AST_NONE);
}
/*
 *	thread_run:
 *
 *	Switch directly from the current thread to the
 *	new thread, handing off our quantum if appropriate.
 *
 *	New thread must be runnable, and not on a run queue.
 *
 *	Called at splsched.
 */
int
thread_run(
	thread_t			self,
	thread_continue_t	continuation,
	void				*parameter,
	thread_t			new_thread)
{
	ast_t		handoff = AST_HANDOFF;

	funnel_release_check(self, 3);

	self->continuation = continuation;
	self->parameter = parameter;

	while (!thread_invoke(self, new_thread, handoff)) {
		register processor_t		processor = current_processor();

		thread_lock(self);
		new_thread = thread_select(processor);
		thread_unlock(self);
		handoff = AST_NONE;
	}

	funnel_refunnel_check(self, 6);

	return (self->wait_result);
}
/*
 *	thread_continue:
 *
 *	Called at splsched when a thread first receives
 *	a new stack after a continuation.
 */
void
thread_continue(
	register thread_t	old_thread)
{
	register thread_t			self = current_thread();
	register thread_continue_t	continuation;
	register void				*parameter;

	continuation = self->continuation;
	self->continuation = NULL;
	parameter = self->parameter;
	self->parameter = NULL;

	thread_begin(self, self->last_processor);

	if (old_thread != THREAD_NULL)
		thread_dispatch(old_thread);

	funnel_refunnel_check(self, 4);

	if (old_thread != THREAD_NULL)
		(void) spllo();

	call_continuation(continuation, parameter, self->wait_result);
	/*NOTREACHED*/
}
/*
 *	run_queue_enqueue:
 *
 *	Enqueue thread on run queue.  Thread must be locked,
 *	and not already be on a run queue.  Returns TRUE
 *	if a preemption is indicated based on the state
 *	of the run queue.
 *
 *	Run queue must be locked, see run_queue_remove()
 *	for more info.
 */
static boolean_t
run_queue_enqueue(
	register run_queue_t	rq,
	register thread_t		thread,
	integer_t				options)
{
	register int			whichq = thread->sched_pri;
	register queue_t		queue = &rq->queues[whichq];
	boolean_t				result = FALSE;

	assert(whichq >= MINPRI && whichq <= MAXPRI);

	assert(thread->runq == RUN_QUEUE_NULL);
	if (queue_empty(queue)) {
		enqueue_tail(queue, (queue_entry_t)thread);

		setbit(MAXPRI - whichq, rq->bitmap);
		if (whichq > rq->highq) {
			rq->highq = whichq;
			result = TRUE;
		}
	}
	else {
		if (options & SCHED_HEADQ)
			enqueue_head(queue, (queue_entry_t)thread);
		else
			enqueue_tail(queue, (queue_entry_t)thread);
	}
	thread->runq = rq;
	if (thread->sched_mode & TH_MODE_PREEMPT)
		result = TRUE;
	rq->count++;

	return (result);
}
/*
 *	realtime_schedule_insert:
 *
 *	Enqueue a thread for realtime execution, similar
 *	to above.  Handles preemption directly.
 */
static void
realtime_schedule_insert(
	register processor_set_t	pset,
	register thread_t			thread)
{
	register run_queue_t	rq = &pset->runq;
	register int			whichq = thread->sched_pri;
	register queue_t		queue = &rq->queues[whichq];
	uint64_t				deadline = thread->realtime.deadline;
	boolean_t				try_preempt = FALSE;

	assert(whichq >= BASEPRI_REALTIME && whichq <= MAXPRI);

	assert(thread->runq == RUN_QUEUE_NULL);
	if (queue_empty(queue)) {
		enqueue_tail(queue, (queue_entry_t)thread);

		setbit(MAXPRI - whichq, rq->bitmap);
		if (whichq > rq->highq)
			rq->highq = whichq;
		try_preempt = TRUE;
	}
	else {
		register thread_t	entry = (thread_t)queue_first(queue);

		while (TRUE) {
			if (	queue_end(queue, (queue_entry_t)entry)	||
					deadline < entry->realtime.deadline		) {
				entry = (thread_t)queue_prev((queue_entry_t)entry);
				break;
			}

			entry = (thread_t)queue_next((queue_entry_t)entry);
		}

		if ((queue_entry_t)entry == queue)
			try_preempt = TRUE;

		insque((queue_entry_t)thread, (queue_entry_t)entry);
	}

	thread->runq = rq;
	assert(thread->sched_mode & TH_MODE_PREEMPT);
	rq->count++; rq->urgency++;

	if (try_preempt) {
		register processor_t	processor;

		processor = current_processor();
		if (	pset == processor->processor_set				&&
				(thread->sched_pri > processor->current_pri	||
				 deadline < processor->deadline)				) {
			dispatch_counts.realtime_self++;
			simple_unlock(&pset->sched_lock);

			ast_on(AST_PREEMPT | AST_URGENT);
			return;
		}

		if (	pset->processor_count > 1			||
				pset != processor->processor_set	) {
			processor_t		myprocessor, lastprocessor;
			queue_entry_t	next;

			myprocessor = processor;
			processor = thread->last_processor;
			if (	processor != myprocessor						&&
					processor != PROCESSOR_NULL						&&
					processor->processor_set == pset				&&
					processor->state == PROCESSOR_RUNNING			&&
					(thread->sched_pri > processor->current_pri	||
					 deadline < processor->deadline)				) {
				dispatch_counts.realtime_last++;
				cause_ast_check(processor);
				simple_unlock(&pset->sched_lock);
				return;
			}

			lastprocessor = processor;
			queue = &pset->active_queue;
			processor = (processor_t)queue_first(queue);
			while (!queue_end(queue, (queue_entry_t)processor)) {
				next = queue_next((queue_entry_t)processor);

				if (	processor != myprocessor						&&
						processor != lastprocessor						&&
						(thread->sched_pri > processor->current_pri	||
						 deadline < processor->deadline)				) {
					if (!queue_end(queue, next)) {
						remqueue(queue, (queue_entry_t)processor);
						enqueue_tail(queue, (queue_entry_t)processor);
					}
					dispatch_counts.realtime_other++;
					cause_ast_check(processor);
					simple_unlock(&pset->sched_lock);
					return;
				}

				processor = (processor_t)next;
			}
		}
	}

	simple_unlock(&pset->sched_lock);
}
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, directly onto an idle
 *	processor if possible.  Else put on appropriate run
 *	queue. (local if bound, else processor set)
 *
 *	Thread must be locked.
 */
void
thread_setrun(
	register thread_t			new_thread,
	integer_t					options)
{
	register processor_t		processor;
	register processor_set_t	pset;
	register thread_t			thread;
	ast_t						preempt = (options & SCHED_PREEMPT)?
													AST_PREEMPT: AST_NONE;

	assert(thread_runnable(new_thread));

	/*
	 *	Update priority if needed.
	 */
	if (new_thread->sched_stamp != sched_tick)
		update_priority(new_thread);

	/*
	 *	Check for urgent preemption.
	 */
	if (new_thread->sched_mode & TH_MODE_PREEMPT)
		preempt = (AST_PREEMPT | AST_URGENT);

	assert(new_thread->runq == RUN_QUEUE_NULL);

	if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
		/*
		 *	First try to dispatch on
		 *	the last processor.
		 */
		pset = new_thread->processor_set;
		processor = new_thread->last_processor;
		if (	pset->processor_count > 1				&&
				processor != PROCESSOR_NULL				&&
				processor->state == PROCESSOR_IDLE		) {
			processor_lock(processor);
			simple_lock(&pset->sched_lock);
			if (	processor->processor_set == pset		&&
					processor->state == PROCESSOR_IDLE		) {
				remqueue(&pset->idle_queue, (queue_entry_t)processor);
				pset->idle_count--;
				processor->next_thread = new_thread;
				if (new_thread->sched_pri >= BASEPRI_RTQUEUES)
					processor->deadline = new_thread->realtime.deadline;
				else
					processor->deadline = UINT64_MAX;
				processor->state = PROCESSOR_DISPATCHING;
				dispatch_counts.idle_pset_last++;
				simple_unlock(&pset->sched_lock);
				processor_unlock(processor);
				if (processor != current_processor())
					machine_signal_idle(processor);
				return;
			}
			processor_unlock(processor);
		}
		else
		simple_lock(&pset->sched_lock);

		/*
		 *	Next pick any idle processor
		 *	in the processor set.
		 */
		if (pset->idle_count > 0) {
			processor = (processor_t)dequeue_head(&pset->idle_queue);
			pset->idle_count--;
			processor->next_thread = new_thread;
			if (new_thread->sched_pri >= BASEPRI_RTQUEUES)
				processor->deadline = new_thread->realtime.deadline;
			else
				processor->deadline = UINT64_MAX;
			processor->state = PROCESSOR_DISPATCHING;
			dispatch_counts.idle_pset_any++;
			simple_unlock(&pset->sched_lock);
			if (processor != current_processor())
				machine_signal_idle(processor);
			return;
		}

		if (new_thread->sched_pri >= BASEPRI_RTQUEUES)
			realtime_schedule_insert(pset, new_thread);
		else {
			if (!run_queue_enqueue(&pset->runq, new_thread, options))
				preempt = AST_NONE;

			/*
			 *	Update the timesharing quanta.
			 */
			timeshare_quanta_update(pset);

			/*
			 *	Preempt check.
			 */
			if (preempt != AST_NONE) {
				/*
				 * First try the current processor
				 * if it is a member of the correct
				 * processor set.
				 */
				processor = current_processor();
				thread = processor->active_thread;
				if (	pset == processor->processor_set	&&
						csw_needed(thread, processor)		) {
					dispatch_counts.pset_self++;
					simple_unlock(&pset->sched_lock);

					ast_on(preempt);
					return;
				}

				/*
				 * If that failed and we have other
				 * processors available keep trying.
				 */
				if (	pset->processor_count > 1			||
						pset != processor->processor_set	) {
					queue_t			queue = &pset->active_queue;
					processor_t		myprocessor, lastprocessor;
					queue_entry_t	next;

					/*
					 * Next try the last processor
					 * dispatched on.
					 */
					myprocessor = processor;
					processor = new_thread->last_processor;
					if (	processor != myprocessor						&&
							processor != PROCESSOR_NULL						&&
							processor->processor_set == pset				&&
							processor->state == PROCESSOR_RUNNING			&&
							new_thread->sched_pri > processor->current_pri	) {
						dispatch_counts.pset_last++;
						cause_ast_check(processor);
						simple_unlock(&pset->sched_lock);
						return;
					}

					/*
					 * Lastly, pick any other
					 * available processor.
					 */
					lastprocessor = processor;
					processor = (processor_t)queue_first(queue);
					while (!queue_end(queue, (queue_entry_t)processor)) {
						next = queue_next((queue_entry_t)processor);

						if (	processor != myprocessor		&&
								processor != lastprocessor		&&
								new_thread->sched_pri >
											processor->current_pri	) {
							if (!queue_end(queue, next)) {
								remqueue(queue, (queue_entry_t)processor);
								enqueue_tail(queue, (queue_entry_t)processor);
							}
							dispatch_counts.pset_other++;
							cause_ast_check(processor);
							simple_unlock(&pset->sched_lock);
							return;
						}

						processor = (processor_t)next;
					}
				}
			}

			simple_unlock(&pset->sched_lock);
		}
	}
	else {
		/*
		 *	Bound, can only run on bound processor.  Have to lock
		 *	processor here because it may not be the current one.
		 */
		processor_lock(processor);
		pset = processor->processor_set;
		if (pset != PROCESSOR_SET_NULL) {
			simple_lock(&pset->sched_lock);
			if (processor->state == PROCESSOR_IDLE) {
				remqueue(&pset->idle_queue, (queue_entry_t)processor);
				pset->idle_count--;
				processor->next_thread = new_thread;
				processor->deadline = UINT64_MAX;
				processor->state = PROCESSOR_DISPATCHING;
				dispatch_counts.idle_bound++;
				simple_unlock(&pset->sched_lock);
				processor_unlock(processor);
				if (processor != current_processor())
					machine_signal_idle(processor);
				return;
			}
		}

		if (!run_queue_enqueue(&processor->runq, new_thread, options))
			preempt = AST_NONE;

		if (preempt != AST_NONE) {
			if (processor == current_processor()) {
				thread = processor->active_thread;
				if (csw_needed(thread, processor)) {
					dispatch_counts.bound_self++;
					ast_on(preempt);
				}
			}
			else
			if (	processor->state == PROCESSOR_RUNNING			&&
					new_thread->sched_pri > processor->current_pri	) {
				dispatch_counts.bound_other++;
				cause_ast_check(processor);
			}
		}

		if (pset != PROCESSOR_SET_NULL)
			simple_unlock(&pset->sched_lock);

		processor_unlock(processor);
	}
}
/*
 *	csw_check:
 *
 *	Check for a possible preemption point in
 *	the (current) thread.
 *
 *	Called at splsched.
 */
ast_t
csw_check(
	thread_t		thread,
	processor_t		processor)
{
	int				current_pri = thread->sched_pri;
	ast_t			result = AST_NONE;
	run_queue_t		runq;

	if (first_timeslice(processor)) {
		runq = &processor->processor_set->runq;
		if (runq->highq >= BASEPRI_RTQUEUES)
			return (AST_PREEMPT | AST_URGENT);

		if (runq->highq > current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}

		runq = &processor->runq;
		if (runq->highq > current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}
	}
	else {
		runq = &processor->processor_set->runq;
		if (runq->highq >= current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}

		runq = &processor->runq;
		if (runq->highq >= current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}
	}

	if (result != AST_NONE)
		return (result);

	if (thread->state & TH_SUSP)
		result |= AST_PREEMPT;

	return (result);
}
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
void
set_sched_pri(
	thread_t		thread,
	int				priority)
{
	register struct run_queue	*rq = run_queue_remove(thread);

	if (	!(thread->sched_mode & TH_MODE_TIMESHARE)			&&
			(priority >= BASEPRI_PREEMPT					||
			 (thread->task_priority < MINPRI_KERNEL		&&
			  thread->task_priority >= BASEPRI_BACKGROUND	&&
			  priority > thread->task_priority)				)	)
		thread->sched_mode |= TH_MODE_PREEMPT;
	else
		thread->sched_mode &= ~TH_MODE_PREEMPT;

	thread->sched_pri = priority;
	if (rq != RUN_QUEUE_NULL)
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	else
	if (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (thread == current_thread()) {
			ast_t		preempt = csw_check(thread, processor);

			if (preempt != AST_NONE)
				ast_on(preempt);
			processor->current_pri = priority;
		}
		else
		if (	processor != PROCESSOR_NULL				&&
				processor->active_thread == thread		)
			cause_ast_check(processor);
	}
}
static void
run_queue_check(
	run_queue_t		rq,
	thread_t		thread)
{
	queue_t			q;
	queue_entry_t	qe;

	if (rq != thread->runq)
		panic("run_queue_check: thread runq");

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
		panic("run_queue_check: thread sched_pri");

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread)
			return;

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}
/*
 *	run_queue_remove:
 *
 *	Remove a thread from its current run queue and
 *	return the run queue if successful.
 *
 *	Thread must be locked.
 */
run_queue_t
run_queue_remove(
	thread_t		thread)
{
	register run_queue_t	rq = thread->runq;

	/*
	 *	If rq is RUN_QUEUE_NULL, the thread will stay out of the
	 *	run queues because the caller locked the thread.  Otherwise
	 *	the thread is on a run queue, but could be chosen for dispatch
	 *	and removed.
	 */
	if (rq != RUN_QUEUE_NULL) {
		processor_set_t		pset = thread->processor_set;
		processor_t			processor = thread->bound_processor;

		/*
		 *	The run queues are locked by the pset scheduling
		 *	lock, except when a processor is off-line the
		 *	local run queue is locked by the processor lock.
		 */
		if (processor != PROCESSOR_NULL) {
			processor_lock(processor);
			pset = processor->processor_set;
		}

		if (pset != PROCESSOR_SET_NULL)
			simple_lock(&pset->sched_lock);

		if (rq == thread->runq) {
			/*
			 *	Thread is on a run queue and we have a lock on
			 *	that run queue.
			 */
			remqueue(&rq->queues[0], (queue_entry_t)thread);
			rq->count--;
			if (thread->sched_mode & TH_MODE_PREEMPT)
				rq->urgency--;
			assert(rq->urgency >= 0);

			if (queue_empty(rq->queues + thread->sched_pri)) {
				/* update run queue status */
				if (thread->sched_pri != IDLEPRI)
					clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
				rq->highq = MAXPRI - ffsbit(rq->bitmap);
			}

			thread->runq = RUN_QUEUE_NULL;
		}
		else {
			/*
			 *	The thread left the run queue before we could
			 *	lock the run queue.
			 */
			assert(thread->runq == RUN_QUEUE_NULL);
			rq = RUN_QUEUE_NULL;
		}

		if (pset != PROCESSOR_SET_NULL)
			simple_unlock(&pset->sched_lock);

		if (processor != PROCESSOR_NULL)
			processor_unlock(processor);
	}

	return (rq);
}
/*
 *	choose_thread:
 *
 *	Remove a thread to execute from the run queues
 *	and return it.
 *
 *	Called with pset scheduling lock held.
 */
static thread_t
choose_thread(
	processor_set_t		pset,
	processor_t			processor)
{
	register run_queue_t	runq;
	register thread_t		thread;
	register queue_t		q;

	runq = &processor->runq;

	if (runq->count > 0 && runq->highq >= pset->runq.highq) {
		q = runq->queues + runq->highq;

		thread = (thread_t)q->next;
		((queue_entry_t)thread)->next->prev = q;
		q->next = ((queue_entry_t)thread)->next;
		thread->runq = RUN_QUEUE_NULL;
		runq->count--;
		if (thread->sched_mode & TH_MODE_PREEMPT)
			runq->urgency--;
		assert(runq->urgency >= 0);
		if (queue_empty(q)) {
			if (runq->highq != IDLEPRI)
				clrbit(MAXPRI - runq->highq, runq->bitmap);
			runq->highq = MAXPRI - ffsbit(runq->bitmap);
		}

		processor->deadline = UINT64_MAX;

		return (thread);
	}

	runq = &pset->runq;

	assert(runq->count > 0);
	q = runq->queues + runq->highq;

	thread = (thread_t)q->next;
	((queue_entry_t)thread)->next->prev = q;
	q->next = ((queue_entry_t)thread)->next;
	thread->runq = RUN_QUEUE_NULL;
	if (runq->highq >= BASEPRI_RTQUEUES)
		processor->deadline = thread->realtime.deadline;
	else
		processor->deadline = UINT64_MAX;
	runq->count--;
	if (thread->sched_mode & TH_MODE_PREEMPT)
		runq->urgency--;
	assert(runq->urgency >= 0);
	if (queue_empty(q)) {
		if (runq->highq != IDLEPRI)
			clrbit(MAXPRI - runq->highq, runq->bitmap);
		runq->highq = MAXPRI - ffsbit(runq->bitmap);
	}

	timeshare_quanta_update(pset);

	return (thread);
}
static processor_t
delay_idle(
	processor_t		processor,
	thread_t		self)
{
	int				*gcount, *lcount;
	uint64_t		abstime, spin, limit;

	lcount = &processor->runq.count;
	gcount = &processor->processor_set->runq.count;

	abstime = mach_absolute_time();
	limit = abstime + delay_idle_limit;
	spin = abstime + delay_idle_spin;

	timer_event((uint32_t)abstime, &processor->idle_thread->system_timer);

	self->options |= TH_OPT_DELAYIDLE;

	while (	*gcount == 0 && *lcount == 0		&&
			(self->state & TH_WAIT)	!= 0		&&
			abstime < limit						) {
		if (abstime >= spin) {
			(void)spllo();

			(void)splsched();
			processor = current_processor();
			lcount = &processor->runq.count;
			gcount = &processor->processor_set->runq.count;

			abstime = mach_absolute_time();
			spin = abstime + delay_idle_spin;

			timer_event((uint32_t)abstime, &processor->idle_thread->system_timer);
		}
		else
			abstime = mach_absolute_time();
	}

	timer_event((uint32_t)abstime, &self->system_timer);

	self->options &= ~TH_OPT_DELAYIDLE;

	return (processor);
}
/*
 *	no_dispatch_count counts number of times processors go non-idle
 *	without being dispatched.  This should be very rare.
 */
int	no_dispatch_count = 0;
/*
 *	This is the idle processor thread, which just looks for other threads
 *	to execute.
 */
void
idle_thread(void)
{
	register processor_t		processor;
	register thread_t			*threadp;
	register int				*gcount;
	register int				*lcount;
	register thread_t			new_thread;
	register int				state;
	register processor_set_t	pset;
	ast_t						*myast = ast_pending();

	processor = current_processor();

	threadp = &processor->next_thread;
	lcount = &processor->runq.count;
	gcount = &processor->processor_set->runq.count;

	(void)splsched();			/* Turn interruptions off */

	pmsDown();					/* Step power down. Note: interruptions must be disabled for this call */

	while (	(*threadp == THREAD_NULL)			&&
			(*gcount == 0) && (*lcount == 0)	) {

		/* check for ASTs while we wait */
		if (*myast &~ (AST_SCHEDULING | AST_BSD)) {
			/* no ASTs for us */
			*myast &= AST_NONE;
			(void)spllo();
		}
		else
			machine_idle();

		(void)splsched();
	}

	/*
	 *	This is not a switch statement to avoid the
	 *	bounds checking code in the common case.
	 */
	pset = processor->processor_set;
	simple_lock(&pset->sched_lock);

	pmsStep(0);					/* Step up out of idle power, may start timer for next step */

	state = processor->state;
	if (state == PROCESSOR_DISPATCHING) {
		/*
		 *	Commmon case -- cpu dispatched.
		 */
		new_thread = *threadp;
		*threadp = (volatile thread_t) THREAD_NULL;
		processor->state = PROCESSOR_RUNNING;
		enqueue_tail(&pset->active_queue, (queue_entry_t)processor);

		if (	pset->runq.highq >= BASEPRI_RTQUEUES		&&
				new_thread->sched_pri >= BASEPRI_RTQUEUES	) {
			register run_queue_t	runq = &pset->runq;
			register queue_t		q;

			q = runq->queues + runq->highq;
			if (((thread_t)q->next)->realtime.deadline <
											processor->deadline) {
				thread_t	thread = new_thread;

				new_thread = (thread_t)q->next;
				((queue_entry_t)new_thread)->next->prev = q;
				q->next = ((queue_entry_t)new_thread)->next;
				new_thread->runq = RUN_QUEUE_NULL;
				processor->deadline = new_thread->realtime.deadline;
				assert(new_thread->sched_mode & TH_MODE_PREEMPT);
				runq->count--; runq->urgency--;
				if (queue_empty(q)) {
					if (runq->highq != IDLEPRI)
						clrbit(MAXPRI - runq->highq, runq->bitmap);
					runq->highq = MAXPRI - ffsbit(runq->bitmap);
				}
				dispatch_counts.missed_realtime++;
				simple_unlock(&pset->sched_lock);

				thread_lock(thread);
				thread_setrun(thread, SCHED_HEADQ);
				thread_unlock(thread);

				counter(c_idle_thread_handoff++);
				thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
				/*NOTREACHED*/
			}
			simple_unlock(&pset->sched_lock);

			counter(c_idle_thread_handoff++);
			thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
			/*NOTREACHED*/
		}

		if (	processor->runq.highq > new_thread->sched_pri	||
				pset->runq.highq > new_thread->sched_pri		) {
			thread_t	thread = new_thread;

			new_thread = choose_thread(pset, processor);
			dispatch_counts.missed_other++;
			simple_unlock(&pset->sched_lock);

			thread_lock(thread);
			thread_setrun(thread, SCHED_HEADQ);
			thread_unlock(thread);

			counter(c_idle_thread_handoff++);
			thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
			/*NOTREACHED*/
		}
		else {
			simple_unlock(&pset->sched_lock);

			counter(c_idle_thread_handoff++);
			thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
			/*NOTREACHED*/
		}
	}
	else
	if (state == PROCESSOR_IDLE) {
		/*
		 *	Processor was not dispatched (Rare).
		 *	Set it running again and force a
		 *	reschedule.
		 */
		no_dispatch_count++;
		pset->idle_count--;
		remqueue(&pset->idle_queue, (queue_entry_t)processor);
		processor->state = PROCESSOR_RUNNING;
		enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
		simple_unlock(&pset->sched_lock);

		counter(c_idle_thread_block++);
		thread_block((thread_continue_t)idle_thread);
		/*NOTREACHED*/
	}
	else
	if (state == PROCESSOR_SHUTDOWN) {
		/*
		 *	Going off-line.  Force a
		 *	reschedule.
		 */
		if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
			*threadp = (volatile thread_t) THREAD_NULL;
			processor->deadline = UINT64_MAX;
			simple_unlock(&pset->sched_lock);

			thread_lock(new_thread);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);
		}
		else
			simple_unlock(&pset->sched_lock);

		counter(c_idle_thread_block++);
		thread_block((thread_continue_t)idle_thread);
		/*NOTREACHED*/
	}

	simple_unlock(&pset->sched_lock);

	panic("idle_thread: state %d\n", processor->state);
	/*NOTREACHED*/
}
kern_return_t
idle_thread_create(
	processor_t		processor)
{
	kern_return_t	result;
	thread_t		thread;
	spl_t			s;

	result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	s = splsched();
	thread_lock(thread);
	thread->bound_processor = processor;
	processor->idle_thread = thread;
	thread->sched_pri = thread->priority = IDLEPRI;
	thread->state = (TH_RUN | TH_IDLE);
	thread_unlock(thread);
	splx(s);

	thread_deallocate(thread);

	return (KERN_SUCCESS);
}
static uint64_t		sched_tick_deadline;

/*
 *	sched_startup:
 *
 *	Kicks off scheduler services.
 *
 *	Called at splsched.
 */
void
sched_startup(void)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("sched_startup");

	thread_deallocate(thread);

	/*
	 * Yield to the sched_tick_thread while it times
	 * a series of context switches back.  It stores
	 * the baseline value in sched_cswtime.
	 *
	 * The current thread is the only other thread
	 * active at this point.
	 */
	while (sched_cswtime == 0)
		thread_block(THREAD_CONTINUE_NULL);

	thread_daemon_init();

	thread_call_initialize();
}
/*
 *	sched_tick_thread:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
static void
sched_tick_continue(void)
{
	uint64_t			abstime = mach_absolute_time();

	sched_tick++;

	/*
	 *  Compute various averages.
	 */
	compute_averages();

	/*
	 *  Scan the run queues for threads which
	 *  may need to be updated.
	 */
	thread_update_scan();

	clock_deadline_for_periodic_event(sched_tick_interval, abstime,
													&sched_tick_deadline);

	assert_wait_deadline((event_t)sched_tick_thread, THREAD_UNINT, sched_tick_deadline);
	thread_block((thread_continue_t)sched_tick_continue);
	/*NOTREACHED*/
}
/*
 *	Time a series of context switches to determine
 *	a baseline.  Toss the high and low and return
 *	the one-way value.
 */
static uint32_t
time_cswitch(void)
{
	uint32_t	new, hi, low, accum;
	uint64_t	abstime;
	int			i, tries = 7;

	accum = hi = low = 0;
	for (i = 0; i < tries; ++i) {
		abstime = mach_absolute_time();
		thread_block(THREAD_CONTINUE_NULL);

		new = mach_absolute_time() - abstime;

		if (i == 0)
			accum = hi = low = new;
		else {
			if (new < low)
				low = new;
			else
			if (new > hi)
				hi = new;
			accum += new;
		}
	}

	return ((accum - hi - low) / (2 * (tries - 2)));
}
void
sched_tick_thread(void)
{
	sched_cswtime = time_cswitch();

	sched_tick_deadline = mach_absolute_time();

	sched_tick_continue();
	/*NOTREACHED*/
}
/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for timesharing threads
 *	which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */
#define	THREAD_UPDATE_SIZE		128

static thread_t		thread_update_array[THREAD_UPDATE_SIZE];
static int			thread_update_count = 0;
/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 */
static boolean_t
runq_scan(
	run_queue_t				runq)
{
	register int			count;
	register queue_t		q;
	register thread_t		thread;

	if ((count = runq->count) > 0) {
		q = runq->queues + runq->highq;
		while (count > 0) {
			queue_iterate(q, thread, thread_t, links) {
				if (	thread->sched_stamp != sched_tick			&&
						(thread->sched_mode & TH_MODE_TIMESHARE)	) {
					if (thread_update_count == THREAD_UPDATE_SIZE)
						return (TRUE);

					thread_update_array[thread_update_count++] = thread;
					thread_reference_internal(thread);
				}

				count--;
			}

			q--;
		}
	}

	return (FALSE);
}
static void
thread_update_scan(void)
{
	register boolean_t			restart_needed;
	register processor_set_t	pset = &default_pset;
	register processor_t		processor;
	register thread_t			thread;
	spl_t						s;

	do {
		s = splsched();
		simple_lock(&pset->sched_lock);
		restart_needed = runq_scan(&pset->runq);
		simple_unlock(&pset->sched_lock);

		if (!restart_needed) {
			simple_lock(&pset->sched_lock);
			processor = (processor_t)queue_first(&pset->processors);
			while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
				if ((restart_needed = runq_scan(&processor->runq)) != 0)
					break;

				thread = processor->idle_thread;
				if (thread->sched_stamp != sched_tick) {
					if (thread_update_count == THREAD_UPDATE_SIZE) {
						restart_needed = TRUE;
						break;
					}

					thread_update_array[thread_update_count++] = thread;
					thread_reference_internal(thread);
				}

				processor = (processor_t)queue_next(&processor->processors);
			}
			simple_unlock(&pset->sched_lock);
		}
		splx(s);

		/*
		 *	Ok, we now have a collection of candidates -- fix them.
		 */
		while (thread_update_count > 0) {
			thread = thread_update_array[--thread_update_count];
			thread_update_array[thread_update_count] = THREAD_NULL;

			s = splsched();
			thread_lock(thread);
			if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
					thread->sched_stamp != sched_tick		)
				update_priority(thread);
			thread_unlock(thread);
			splx(s);

			thread_deallocate(thread);
		}
	} while (restart_needed);
}
/*
 *	Just in case someone doesn't use the macro
 */
#undef	thread_wakeup
void
thread_wakeup(
	event_t		x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}
boolean_t
preemption_enabled(void)
{
	return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
}
static boolean_t
thread_runnable(
	thread_t	thread)
{
	return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
}
#if	MACH_COUNTERS
#include <ddb/db_output.h>
#define	printf		kdbprintf
void			db_sched(void);

void
db_sched(void)
{
	iprintf("Scheduling Statistics:\n");
	iprintf("Thread invocations:  csw %d same %d\n",
		c_thread_invoke_csw, c_thread_invoke_same);
	iprintf("Thread block:  calls %d\n",
		c_thread_block_calls);
	iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
		c_idle_thread_handoff,
		c_idle_thread_block, no_dispatch_count);
	iprintf("Sched thread blocks:  %d\n", c_sched_thread_block);
}
#endif	/* MACH_COUNTERS */
#if	MACH_KDB
#include <ddb/db_output.h>
void		db_show_thread_log(void);

void
db_show_thread_log(void)
{
}
#endif	/* MACH_KDB */