/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 *
 */
#include <ddb/db_output.h>

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <sys/kdebug.h>

#include <kern/pms.h>
#define		DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define		MAX_UNSAFE_QUANTA			800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define		MAX_POLL_QUANTA				2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define		SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t	max_unsafe_computation;
uint32_t	sched_safe_duration;
uint64_t	max_poll_computation;

uint32_t	std_quantum;
uint32_t	min_std_quantum;

uint32_t	std_quantum_us;

uint32_t	max_rt_quantum;
uint32_t	min_rt_quantum;

uint32_t	sched_cswtime;

static uint32_t		delay_idle_limit, delay_idle_spin;
static processor_t	delay_idle(
						processor_t		processor,
						thread_t		self);

uint32_t	sched_tick_interval;

uint32_t	sched_pri_shift;
void			wait_queues_init(void);

static void		load_shift_init(void);

static thread_t	choose_thread(
					processor_set_t		pset,
					processor_t			processor);

static void		thread_update_scan(void);

boolean_t		thread_runnable(
					thread_t			thread);
/*
 *	Thread states are combinations of:
 *	  R	running
 *	  W	waiting (or on wait queue)
 *	  N	non-interruptible
 *
 *	init	action
 *		assert_wait	thread_block	clear_wait	swapout	swapin
 *
 *	R	RW, RWN		R; setrun	-		-
 *	RN	RWN		RN; setrun	-		-
 */
/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */
struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)

int8_t		sched_load_shifts[NRQS];
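/*
 *	The wait_hash() macro maps an arbitrary event address onto a bucket
 *	index: negative values are complemented so the remainder is always a
 *	valid, non-negative index.  Colliding events simply share a bucket;
 *	wakeups still match by event within that bucket.  The sketch below is
 *	an illustrative user-space check of the hashing, not part of this
 *	file's build (the NUMQUEUES value here is assumed for display only),
 *	so it is guarded out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define NUMQUEUES 59	/* illustrative bucket count; the kernel defines its own */

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)

int
main(void)
{
	int a, b;

	/* Any unique address serves as an event; squeeze the pointers into
	 * ints the same way the macro's (int) cast does. */
	int ev_a = (int)(intptr_t)&a;
	int ev_b = (int)(intptr_t)&b;

	printf("bucket for &a: %d\n", wait_hash(ev_a));
	printf("bucket for &b: %d\n", wait_hash(ev_b));
	return 0;
}
#endif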
void
sched_init(void)
{
	/*
	 * Calculate the timeslicing quantum.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) *
											(1 << SCHED_TICK_SHIFT);

	wait_queues_init();
	load_shift_init();
	pset_init(&default_pset);
}
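/*
 *	With the defaults above, 100 preemptions per second give a 10000 us
 *	(10 ms) standard quantum.  The guarded user-space sketch below just
 *	reruns the same arithmetic; the SCHED_TICK_SHIFT value is an assumption
 *	for illustration, since the kernel defines it elsewhere.
 */
#if 0
#include <stdio.h>

#define DEFAULT_PREEMPTION_RATE	100	/* preemptions per second */
#define MAX_UNSAFE_QUANTA	800
#define SCHED_TICK_SHIFT	3	/* assumed here for illustration */

int
main(void)
{
	int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	int std_quantum_us = (1000 * 1000) / default_preemption_rate;
	int sched_safe_duration = (2 * MAX_UNSAFE_QUANTA / default_preemption_rate) *
								(1 << SCHED_TICK_SHIFT);

	/* 100 preemptions/s -> 10000 us (10 ms) per standard quantum. */
	printf("std_quantum_us = %d\n", std_quantum_us);

	/* Fail-safe window, expressed in scheduler ticks. */
	printf("sched_safe_duration = %d ticks\n", sched_safe_duration);
	return 0;
}
#endif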
void
sched_timebase_init(void)
{
	uint64_t	abstime;
	uint32_t	shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							std_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = abstime;

	/* smallest rt computation (50 us) */
	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_rt_quantum = abstime;

	/* maximum rt computation (50 ms) */
	clock_interval_to_absolutetime_interval(
							50, 1000*NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	max_rt_quantum = abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
													NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_tick_interval = abstime;

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
		abstime >>= 1;
	sched_pri_shift = shift;

	max_unsafe_computation = max_unsafe_quanta * std_quantum;
	max_poll_computation = max_poll_quanta * std_quantum;

	/* delay idle constant(s) (60, 1 us) */
	clock_interval_to_absolutetime_interval(60, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	delay_idle_limit = abstime;

	clock_interval_to_absolutetime_interval(1, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	delay_idle_spin = abstime;
}
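/*
 *	The conversion-factor loop scales the scheduler tick interval by 5/3
 *	and then finds the power-of-two shift that brings it down into the
 *	priority range; the resulting sched_pri_shift is used when converting
 *	a thread's measured CPU usage into timesharing priority adjustments.
 *	The guarded sketch below reruns just that loop; the tick length and
 *	BASEPRI_DEFAULT value are assumptions chosen only for illustration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Assumed values: a 125 ms scheduler tick expressed in nanosecond-like
	 * absolute-time units, and a default timeshare base priority of 31. */
	uint64_t	tick_abs = 125000000ULL;
	const uint64_t	BASEPRI_DEFAULT = 31;

	uint64_t	abstime = (tick_abs * 5) / 3;
	uint32_t	shift;

	/* Smallest shift that brings one scaled tick of usage into the
	 * priority band. */
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
		abstime >>= 1;

	printf("sched_pri_shift would be %u\n", shift);
	return 0;
}
#endif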
void
wait_queues_init(void)
{
	register int	i;

	for (i = 0; i < NUMQUEUES; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}

/*
 * Set up values for timeshare
 * loading factors.
 */
static void
load_shift_init(void)
{
	int8_t		k, *p = sched_load_shifts;
	uint32_t	i, j;

	*p++ = INT8_MIN; *p++ = 0;

	for (i = j = 2, k = 1; i < NRQS; ++k) {
		for (j <<= 1; i < j; ++i)
			*p++ = k;
	}
}
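/*
 *	The nested loop above fills sched_load_shifts[] so that entry i holds
 *	floor(log2(i)): entries 2..3 get 1, 4..7 get 2, 8..15 get 3, and so on,
 *	with entry 0 pinned to INT8_MIN and entry 1 to 0.  The guarded sketch
 *	below runs the same fill stand-alone, with a small table size assumed
 *	purely so the result is easy to print.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define NRQS 16		/* assumed display size; the kernel's NRQS is larger */

int
main(void)
{
	int8_t		sched_load_shifts[NRQS];
	int8_t		k, *p = sched_load_shifts;
	uint32_t	i, j;

	/* First two entries mirror the kernel table: INT8_MIN for 0, 0 for 1. */
	*p++ = INT8_MIN; *p++ = 0;

	/* Each pass doubles j, so every index in [2^k, 2^(k+1)) gets shift k. */
	for (i = j = 2, k = 1; i < NRQS; ++k) {
		for (j <<= 1; i < j; ++i)
			*p++ = k;
	}

	for (i = 0; i < NRQS; ++i)
		printf("index %2u -> shift %d\n", i, sched_load_shifts[i]);
	return 0;
}
#endif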
326 * Thread wait timer expiration.
333 thread_t thread
= p0
;
338 if (--thread
->wait_timer_active
== 0) {
339 if (thread
->wait_timer_is_set
) {
340 thread
->wait_timer_is_set
= FALSE
;
341 clear_wait_internal(thread
, THREAD_TIMED_OUT
);
344 thread_unlock(thread
);
351 * Set a timer for the current thread, if the thread
352 * is ready to wait. Must be called between assert_wait()
353 * and thread_block().
358 uint32_t scale_factor
)
360 thread_t thread
= current_thread();
366 if ((thread
->state
& TH_WAIT
) != 0) {
367 clock_interval_to_deadline(interval
, scale_factor
, &deadline
);
368 if (!timer_call_enter(&thread
->wait_timer
, deadline
))
369 thread
->wait_timer_active
++;
370 thread
->wait_timer_is_set
= TRUE
;
372 thread_unlock(thread
);
377 thread_set_timer_deadline(
380 thread_t thread
= current_thread();
385 if ((thread
->state
& TH_WAIT
) != 0) {
386 if (!timer_call_enter(&thread
->wait_timer
, deadline
))
387 thread
->wait_timer_active
++;
388 thread
->wait_timer_is_set
= TRUE
;
390 thread_unlock(thread
);
395 thread_cancel_timer(void)
397 thread_t thread
= current_thread();
402 if (thread
->wait_timer_is_set
) {
403 if (timer_call_cancel(&thread
->wait_timer
))
404 thread
->wait_timer_active
--;
405 thread
->wait_timer_is_set
= FALSE
;
407 thread_unlock(thread
);
414 * Unblock thread on wake up.
416 * Returns TRUE if the thread is still running.
418 * Thread must be locked.
423 wait_result_t wresult
)
425 boolean_t result
= FALSE
;
430 thread
->wait_result
= wresult
;
433 * Cancel pending wait timer.
435 if (thread
->wait_timer_is_set
) {
436 if (timer_call_cancel(&thread
->wait_timer
))
437 thread
->wait_timer_active
--;
438 thread
->wait_timer_is_set
= FALSE
;
442 * Update scheduling state.
444 thread
->state
&= ~(TH_WAIT
|TH_UNINT
);
446 if (!(thread
->state
& TH_RUN
)) {
447 thread
->state
|= TH_RUN
;
450 * Mark unblocked if call out.
452 if (thread
->options
& TH_OPT_CALLOUT
)
453 call_thread_unblock();
456 * Update pset run counts.
458 pset_run_incr(thread
->processor_set
);
459 if (thread
->sched_mode
& TH_MODE_TIMESHARE
)
460 pset_share_incr(thread
->processor_set
);
466 * Calculate deadline for real-time threads.
468 if (thread
->sched_mode
& TH_MODE_REALTIME
) {
469 thread
->realtime
.deadline
= mach_absolute_time();
470 thread
->realtime
.deadline
+= thread
->realtime
.constraint
;
474 * Clear old quantum, fail-safe computation, etc.
476 thread
->current_quantum
= 0;
477 thread
->computation_metered
= 0;
478 thread
->reason
= AST_NONE
;
480 KERNEL_DEBUG_CONSTANT(
481 MACHDBG_CODE(DBG_MACH_SCHED
,MACH_MAKE_RUNNABLE
) | DBG_FUNC_NONE
,
482 (int)thread
, (int)thread
->sched_pri
, 0, 0, 0);
490 * Unblock and dispatch thread.
492 * thread lock held, IPC locks may be held.
493 * thread must have been pulled from wait queue under same lock hold.
495 * KERN_SUCCESS - Thread was set running
496 * KERN_NOT_WAITING - Thread was not waiting
501 wait_result_t wresult
)
503 assert(thread
->at_safe_point
== FALSE
);
504 assert(thread
->wait_event
== NO_EVENT64
);
505 assert(thread
->wait_queue
== WAIT_QUEUE_NULL
);
507 if ((thread
->state
& (TH_WAIT
|TH_TERMINATE
)) == TH_WAIT
) {
508 if (!thread_unblock(thread
, wresult
))
509 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
511 return (KERN_SUCCESS
);
514 return (KERN_NOT_WAITING
);
518 * Routine: thread_mark_wait_locked
520 * Mark a thread as waiting. If, given the circumstances,
521 * it doesn't want to wait (i.e. already aborted), then
522 * indicate that in the return value.
524 * at splsched() and thread is locked.
528 thread_mark_wait_locked(
530 wait_interrupt_t interruptible
)
532 boolean_t at_safe_point
;
535 * The thread may have certain types of interrupts/aborts masked
536 * off. Even if the wait location says these types of interrupts
537 * are OK, we have to honor mask settings (outer-scoped code may
538 * not be able to handle aborts at the moment).
540 if (interruptible
> (thread
->options
& TH_OPT_INTMASK
))
541 interruptible
= thread
->options
& TH_OPT_INTMASK
;
543 at_safe_point
= (interruptible
== THREAD_ABORTSAFE
);
545 if ( interruptible
== THREAD_UNINT
||
546 !(thread
->state
& TH_ABORT
) ||
548 (thread
->state
& TH_ABORT_SAFELY
)) ) {
549 thread
->state
|= (interruptible
) ? TH_WAIT
: (TH_WAIT
| TH_UNINT
);
550 thread
->at_safe_point
= at_safe_point
;
551 return (thread
->wait_result
= THREAD_WAITING
);
554 if (thread
->state
& TH_ABORT_SAFELY
)
555 thread
->state
&= ~(TH_ABORT
|TH_ABORT_SAFELY
);
557 return (thread
->wait_result
= THREAD_INTERRUPTED
);
561 * Routine: thread_interrupt_level
563 * Set the maximum interruptible state for the
564 * current thread. The effective value of any
565 * interruptible flag passed into assert_wait
566 * will never exceed this.
568 * Useful for code that must not be interrupted,
569 * but which calls code that doesn't know that.
571 * The old interrupt level for the thread.
575 thread_interrupt_level(
576 wait_interrupt_t new_level
)
578 thread_t thread
= current_thread();
579 wait_interrupt_t result
= thread
->options
& TH_OPT_INTMASK
;
581 thread
->options
= (thread
->options
& ~TH_OPT_INTMASK
) | (new_level
& TH_OPT_INTMASK
);
587 * Check to see if an assert wait is possible, without actually doing one.
588 * This is used by debug code in locks and elsewhere to verify that it is
589 * always OK to block when trying to take a blocking lock (since waiting
590 * for the actual assert_wait to catch the case may make it hard to detect
594 assert_wait_possible(void)
600 if(debug_mode
) return TRUE
; /* Always succeed in debug mode */
603 thread
= current_thread();
605 return (thread
== NULL
|| wait_queue_assert_possible(thread
));
611 * Assert that the current thread is about to go to
612 * sleep until the specified event occurs.
617 wait_interrupt_t interruptible
)
619 register wait_queue_t wq
;
622 assert(event
!= NO_EVENT
);
624 index
= wait_hash(event
);
625 wq
= &wait_queues
[index
];
626 return wait_queue_assert_wait(wq
, event
, interruptible
, 0);
632 wait_interrupt_t interruptible
,
634 uint32_t scale_factor
)
636 thread_t thread
= current_thread();
637 wait_result_t wresult
;
642 assert(event
!= NO_EVENT
);
643 wqueue
= &wait_queues
[wait_hash(event
)];
646 wait_queue_lock(wqueue
);
649 clock_interval_to_deadline(interval
, scale_factor
, &deadline
);
650 wresult
= wait_queue_assert_wait64_locked(wqueue
, (uint32_t)event
,
651 interruptible
, deadline
, thread
);
653 thread_unlock(thread
);
654 wait_queue_unlock(wqueue
);
661 assert_wait_deadline(
663 wait_interrupt_t interruptible
,
666 thread_t thread
= current_thread();
667 wait_result_t wresult
;
671 assert(event
!= NO_EVENT
);
672 wqueue
= &wait_queues
[wait_hash(event
)];
675 wait_queue_lock(wqueue
);
678 wresult
= wait_queue_assert_wait64_locked(wqueue
, (uint32_t)event
,
679 interruptible
, deadline
, thread
);
681 thread_unlock(thread
);
682 wait_queue_unlock(wqueue
);
689 * thread_sleep_fast_usimple_lock:
691 * Cause the current thread to wait until the specified event
692 * occurs. The specified simple_lock is unlocked before releasing
693 * the cpu and re-acquired as part of waking up.
695 * This is the simple lock sleep interface for components that use a
696 * faster version of simple_lock() than is provided by usimple_lock().
698 __private_extern__ wait_result_t
699 thread_sleep_fast_usimple_lock(
702 wait_interrupt_t interruptible
)
706 res
= assert_wait(event
, interruptible
);
707 if (res
== THREAD_WAITING
) {
709 res
= thread_block(THREAD_CONTINUE_NULL
);
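/*
 *	All of these sleep wrappers share one shape: assert the wait on the
 *	event while the caller's lock is still held, drop the lock, block, and
 *	re-take the lock on wakeup.  Asserting before unlocking is what closes
 *	the window for a lost wakeup.  The guarded sketch below is a user-space
 *	mock of that call order only; the lock and primitive functions are
 *	stubs, not the kernel interfaces.
 */
#if 0
#include <stdio.h>

typedef int	wait_result_t;
typedef void	*event_t;
#define THREAD_WAITING		1
#define THREAD_AWAKENED		2
#define THREAD_UNINT		0
#define THREAD_CONTINUE_NULL	((void *)0)

/* Stubbed stand-ins, just to show the ordering. */
static wait_result_t assert_wait(event_t e, int interruptible)
{ (void)e; (void)interruptible; puts("assert_wait: queued on event"); return THREAD_WAITING; }
static wait_result_t thread_block(void *cont)
{ (void)cont; puts("thread_block: switched away, later awakened"); return THREAD_AWAKENED; }
static void lock_take(void)    { puts("lock taken"); }
static void lock_release(void) { puts("lock released"); }

/* Same shape as thread_sleep_usimple_lock(): assert, then unlock, then
 * block, then re-lock on the way out. */
static wait_result_t
sleep_on_event(event_t event)
{
	wait_result_t res;

	res = assert_wait(event, THREAD_UNINT);
	if (res == THREAD_WAITING) {
		lock_release();
		res = thread_block(THREAD_CONTINUE_NULL);
		lock_take();
	}
	return res;
}

int
main(void)
{
	int event;

	lock_take();
	sleep_on_event(&event);
	lock_release();
	return 0;
}
#endif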
717 * thread_sleep_usimple_lock:
719 * Cause the current thread to wait until the specified event
720 * occurs. The specified usimple_lock is unlocked before releasing
721 * the cpu and re-acquired as part of waking up.
723 * This is the simple lock sleep interface for components where
724 * simple_lock() is defined in terms of usimple_lock().
727 thread_sleep_usimple_lock(
730 wait_interrupt_t interruptible
)
734 res
= assert_wait(event
, interruptible
);
735 if (res
== THREAD_WAITING
) {
736 usimple_unlock(lock
);
737 res
= thread_block(THREAD_CONTINUE_NULL
);
744 * thread_sleep_mutex:
746 * Cause the current thread to wait until the specified event
747 * occurs. The specified mutex is unlocked before releasing
748 * the cpu. The mutex will be re-acquired before returning.
750 * JMM - Add hint to make sure mutex is available before rousting
756 wait_interrupt_t interruptible
)
760 res
= assert_wait(event
, interruptible
);
761 if (res
== THREAD_WAITING
) {
763 res
= thread_block(THREAD_CONTINUE_NULL
);
770 * thread_sleep_mutex_deadline:
772 * Cause the current thread to wait until the specified event
773 * (or deadline) occurs. The specified mutex is unlocked before
774 * releasing the cpu. The mutex will be re-acquired before returning.
777 thread_sleep_mutex_deadline(
781 wait_interrupt_t interruptible
)
785 res
= assert_wait_deadline(event
, interruptible
, deadline
);
786 if (res
== THREAD_WAITING
) {
788 res
= thread_block(THREAD_CONTINUE_NULL
);
795 * thread_sleep_lock_write:
797 * Cause the current thread to wait until the specified event
798 * occurs. The specified (write) lock is unlocked before releasing
799 * the cpu. The (write) lock will be re-acquired before returning.
802 thread_sleep_lock_write(
805 wait_interrupt_t interruptible
)
809 res
= assert_wait(event
, interruptible
);
810 if (res
== THREAD_WAITING
) {
811 lock_write_done(lock
);
812 res
= thread_block(THREAD_CONTINUE_NULL
);
821 * Force a preemption point for a thread and wait
822 * for it to stop running. Arbitrates access among
823 * multiple stop requests. (released by unstop)
825 * The thread must enter a wait state and stop via a
828 * Returns FALSE if interrupted.
834 wait_result_t wresult
;
840 while (thread
->state
& TH_SUSP
) {
841 thread
->wake_active
= TRUE
;
842 wresult
= assert_wait(&thread
->wake_active
, THREAD_ABORTSAFE
);
846 if (wresult
== THREAD_WAITING
)
847 wresult
= thread_block(THREAD_CONTINUE_NULL
);
849 if (wresult
!= THREAD_AWAKENED
)
857 thread
->state
|= TH_SUSP
;
859 while (thread
->state
& TH_RUN
) {
860 processor_t processor
= thread
->last_processor
;
862 if ( processor
!= PROCESSOR_NULL
&&
863 processor
->state
== PROCESSOR_RUNNING
&&
864 processor
->active_thread
== thread
)
865 cause_ast_check(processor
);
866 thread_unlock(thread
);
868 thread
->wake_active
= TRUE
;
869 wresult
= assert_wait(&thread
->wake_active
, THREAD_ABORTSAFE
);
873 if (wresult
== THREAD_WAITING
)
874 wresult
= thread_block(THREAD_CONTINUE_NULL
);
876 if (wresult
!= THREAD_AWAKENED
) {
877 thread_unstop(thread
);
886 thread_unlock(thread
);
896 * Release a previous stop request and set
897 * the thread running if appropriate.
899 * Use only after a successful stop operation.
905 spl_t s
= splsched();
910 if ((thread
->state
& (TH_RUN
|TH_WAIT
|TH_SUSP
)) == TH_SUSP
) {
911 thread
->state
&= ~TH_SUSP
;
912 thread_unblock(thread
, THREAD_AWAKENED
);
914 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
917 if (thread
->state
& TH_SUSP
) {
918 thread
->state
&= ~TH_SUSP
;
920 if (thread
->wake_active
) {
921 thread
->wake_active
= FALSE
;
922 thread_unlock(thread
);
926 thread_wakeup(&thread
->wake_active
);
931 thread_unlock(thread
);
939 * Wait for a thread to stop running. (non-interruptible)
946 wait_result_t wresult
;
947 spl_t s
= splsched();
952 while (thread
->state
& TH_RUN
) {
953 processor_t processor
= thread
->last_processor
;
955 if ( processor
!= PROCESSOR_NULL
&&
956 processor
->state
== PROCESSOR_RUNNING
&&
957 processor
->active_thread
== thread
)
958 cause_ast_check(processor
);
959 thread_unlock(thread
);
961 thread
->wake_active
= TRUE
;
962 wresult
= assert_wait(&thread
->wake_active
, THREAD_UNINT
);
966 if (wresult
== THREAD_WAITING
)
967 thread_block(THREAD_CONTINUE_NULL
);
974 thread_unlock(thread
);
980 * Routine: clear_wait_internal
982 * Clear the wait condition for the specified thread.
983 * Start the thread executing if that is appropriate.
985 * thread thread to awaken
986 * result Wakeup result the thread should see
989 * the thread is locked.
991 * KERN_SUCCESS thread was rousted out a wait
992 * KERN_FAILURE thread was waiting but could not be rousted
993 * KERN_NOT_WAITING thread was not waiting
995 __private_extern__ kern_return_t
998 wait_result_t wresult
)
1000 wait_queue_t wq
= thread
->wait_queue
;
1001 int i
= LockTimeOut
;
1004 if (wresult
== THREAD_INTERRUPTED
&& (thread
->state
& TH_UNINT
))
1005 return (KERN_FAILURE
);
1007 if (wq
!= WAIT_QUEUE_NULL
) {
1008 if (wait_queue_lock_try(wq
)) {
1009 wait_queue_pull_thread_locked(wq
, thread
, TRUE
);
1010 /* wait queue unlocked, thread still locked */
1013 thread_unlock(thread
);
1016 thread_lock(thread
);
1017 if (wq
!= thread
->wait_queue
)
1018 return (KERN_NOT_WAITING
);
1024 return (thread_go(thread
, wresult
));
1027 panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n",
1028 thread
, wq
, cpu_number());
1030 return (KERN_FAILURE
);
1037 * Clear the wait condition for the specified thread. Start the thread
1038 * executing if that is appropriate.
1041 * thread thread to awaken
1042 * result Wakeup result the thread should see
1047 wait_result_t result
)
1053 thread_lock(thread
);
1054 ret
= clear_wait_internal(thread
, result
);
1055 thread_unlock(thread
);
1062 * thread_wakeup_prim:
1064 * Common routine for thread_wakeup, thread_wakeup_with_result,
1065 * and thread_wakeup_one.
1071 boolean_t one_thread
,
1072 wait_result_t result
)
1074 register wait_queue_t wq
;
1077 index
= wait_hash(event
);
1078 wq
= &wait_queues
[index
];
1080 return (wait_queue_wakeup_one(wq
, event
, result
));
1082 return (wait_queue_wakeup_all(wq
, event
, result
));
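/*
 *	thread_wakeup_prim() hashes the event exactly as assert_wait() did, so
 *	a wakeup reaches the threads that asserted a wait on the same event
 *	address; one_thread selects between waking a single waiter and all of
 *	them.  Callers normally go through wrapper macros along the lines of
 *	those in the guarded sketch below, which only demonstrates the calling
 *	pattern with stubbed types.
 */
#if 0
#include <stdio.h>

typedef void	*event_t;
typedef int	wait_result_t;
typedef int	kern_return_t;
#define THREAD_AWAKENED	2
#define TRUE	1
#define FALSE	0

/* Stub that mirrors the kernel entry point; only the call pattern matters. */
static kern_return_t
thread_wakeup_prim(event_t e, int one_thread, wait_result_t r)
{
	printf("wakeup %s waiter(s) on %p with result %d\n",
	    one_thread ? "one" : "all", e, r);
	return 0;
}

/* Wrapper macros in the style the kernel uses. */
#define thread_wakeup(x)	thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
#define thread_wakeup_one(x)	thread_wakeup_prim((x), TRUE,  THREAD_AWAKENED)

int
main(void)
{
	int resource_available;	/* its address is the event both sides agree on */

	thread_wakeup(&resource_available);	/* rouse every waiter */
	thread_wakeup_one(&resource_available);	/* rouse at most one */
	return 0;
}
#endif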
1088 * Force a thread to execute on the specified processor.
1090 * Returns the previous binding. PROCESSOR_NULL means
1093 * XXX - DO NOT export this to users - XXX
1097 register thread_t thread
,
1098 processor_t processor
)
1101 run_queue_t runq
= RUN_QUEUE_NULL
;
1105 thread_lock(thread
);
1106 prev
= thread
->bound_processor
;
1107 if (prev
!= PROCESSOR_NULL
)
1108 runq
= run_queue_remove(thread
);
1110 thread
->bound_processor
= processor
;
1112 if (runq
!= RUN_QUEUE_NULL
)
1113 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
1114 thread_unlock(thread
);
1121 uint32_t idle_pset_last
,
1131 uint32_t realtime_self
,
1135 uint32_t missed_realtime
,
1140 * Select a thread for the current processor to run.
1142 * May select the current thread, which must be locked.
1146 register processor_t processor
)
1148 register thread_t thread
;
1149 processor_set_t pset
;
1150 boolean_t other_runnable
;
1153 * Check for other non-idle runnable threads.
1155 pset
= processor
->processor_set
;
1156 thread
= processor
->active_thread
;
1158 /* Update the thread's priority */
1159 if (thread
->sched_stamp
!= sched_tick
)
1160 update_priority(thread
);
1162 processor
->current_pri
= thread
->sched_pri
;
1164 simple_lock(&pset
->sched_lock
);
1166 other_runnable
= processor
->runq
.count
> 0 || pset
->runq
.count
> 0;
1168 if ( thread
->state
== TH_RUN
&&
1169 thread
->processor_set
== pset
&&
1170 (thread
->bound_processor
== PROCESSOR_NULL
||
1171 thread
->bound_processor
== processor
) ) {
1172 if ( thread
->sched_pri
>= BASEPRI_RTQUEUES
&&
1173 first_timeslice(processor
) ) {
1174 if (pset
->runq
.highq
>= BASEPRI_RTQUEUES
) {
1175 register run_queue_t runq
= &pset
->runq
;
1178 q
= runq
->queues
+ runq
->highq
;
1179 if (((thread_t
)q
->next
)->realtime
.deadline
<
1180 processor
->deadline
) {
1181 thread
= (thread_t
)q
->next
;
1182 ((queue_entry_t
)thread
)->next
->prev
= q
;
1183 q
->next
= ((queue_entry_t
)thread
)->next
;
1184 thread
->runq
= RUN_QUEUE_NULL
;
1185 assert(thread
->sched_mode
& TH_MODE_PREEMPT
);
1186 runq
->count
--; runq
->urgency
--;
1187 if (queue_empty(q
)) {
1188 if (runq
->highq
!= IDLEPRI
)
1189 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
1190 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
1195 processor
->deadline
= thread
->realtime
.deadline
;
1197 simple_unlock(&pset
->sched_lock
);
1202 if ( (!other_runnable
||
1203 (processor
->runq
.highq
< thread
->sched_pri
&&
1204 pset
->runq
.highq
< thread
->sched_pri
)) ) {
1206 /* I am the highest priority runnable (non-idle) thread */
1208 processor
->deadline
= UINT64_MAX
;
1210 simple_unlock(&pset
->sched_lock
);
1217 thread
= choose_thread(pset
, processor
);
1220 * Nothing is runnable, so set this processor idle if it
1221 * was running. Return its idle thread.
1223 if (processor
->state
== PROCESSOR_RUNNING
) {
1224 remqueue(&pset
->active_queue
, (queue_entry_t
)processor
);
1225 processor
->state
= PROCESSOR_IDLE
;
1227 enqueue_tail(&pset
->idle_queue
, (queue_entry_t
)processor
);
1231 processor
->deadline
= UINT64_MAX
;
1233 thread
= processor
->idle_thread
;
1236 simple_unlock(&pset
->sched_lock
);
1242 * Perform a context switch and start executing the new thread.
1244 * Returns FALSE on failure, and the thread is re-dispatched.
1246 * Called at splsched.
1249 #define funnel_release_check(thread, debug) \
1251 if ((thread)->funnel_state & TH_FN_OWNED) { \
1252 (thread)->funnel_state = TH_FN_REFUNNEL; \
1253 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, \
1254 (thread)->funnel_lock, (debug), 0, 0, 0); \
1255 funnel_unlock((thread)->funnel_lock); \
1259 #define funnel_refunnel_check(thread, debug) \
1261 if ((thread)->funnel_state & TH_FN_REFUNNEL) { \
1262 kern_return_t result = (thread)->wait_result; \
1264 (thread)->funnel_state = 0; \
1265 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, \
1266 (thread)->funnel_lock, (debug), 0, 0, 0); \
1267 funnel_lock((thread)->funnel_lock); \
1268 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, \
1269 (thread)->funnel_lock, (debug), 0, 0, 0); \
1270 (thread)->funnel_state = TH_FN_OWNED; \
1271 (thread)->wait_result = result; \
1277 register thread_t old_thread
,
1278 register thread_t new_thread
,
1281 thread_continue_t new_cont
, continuation
= old_thread
->continuation
;
1282 void *new_param
, *parameter
= old_thread
->parameter
;
1283 processor_t processor
;
1284 thread_t prev_thread
;
1286 if (get_preemption_level() != 0)
1287 panic("thread_invoke: preemption_level %d\n",
1288 get_preemption_level());
1290 assert(old_thread
== current_thread());
1293 * Mark thread interruptible.
1295 thread_lock(new_thread
);
1296 new_thread
->state
&= ~TH_UNINT
;
1298 assert(thread_runnable(new_thread
));
1301 * Allow time constraint threads to hang onto
1304 if ( (old_thread
->sched_mode
& TH_MODE_REALTIME
) &&
1305 !old_thread
->reserved_stack
) {
1306 old_thread
->reserved_stack
= old_thread
->kernel_stack
;
1309 if (continuation
!= NULL
) {
1310 if (!new_thread
->kernel_stack
) {
1312 * If the old thread is using a privileged stack,
1313 * check to see whether we can exchange it with
1314 * that of the new thread.
1316 if ( old_thread
->kernel_stack
== old_thread
->reserved_stack
&&
1317 !new_thread
->reserved_stack
)
1321 * Context switch by performing a stack handoff.
1323 new_cont
= new_thread
->continuation
;
1324 new_thread
->continuation
= NULL
;
1325 new_param
= new_thread
->parameter
;
1326 new_thread
->parameter
= NULL
;
1328 processor
= current_processor();
1329 processor
->active_thread
= new_thread
;
1330 processor
->current_pri
= new_thread
->sched_pri
;
1331 new_thread
->last_processor
= processor
;
1332 ast_context(new_thread
);
1333 thread_unlock(new_thread
);
1335 current_task()->csw
++;
1337 old_thread
->reason
= reason
;
1339 processor
->last_dispatch
= mach_absolute_time();
1340 timer_event((uint32_t)processor
->last_dispatch
,
1341 &new_thread
->system_timer
);
1343 thread_done(old_thread
, new_thread
, processor
);
1345 machine_stack_handoff(old_thread
, new_thread
);
1347 thread_begin(new_thread
, processor
);
1350 * Now dispatch the old thread.
1352 thread_dispatch(old_thread
);
1354 counter_always(c_thread_invoke_hits
++);
1356 funnel_refunnel_check(new_thread
, 2);
1360 call_continuation(new_cont
, new_param
, new_thread
->wait_result
);
1364 if (new_thread
== old_thread
) {
1365 /* same thread but with continuation */
1366 counter(++c_thread_invoke_same
);
1367 thread_unlock(new_thread
);
1369 funnel_refunnel_check(new_thread
, 3);
1372 call_continuation(continuation
, parameter
, new_thread
->wait_result
);
1378 * Check that the new thread has a stack
1380 if (!new_thread
->kernel_stack
) {
1382 if (!stack_alloc_try(new_thread
)) {
1383 counter_always(c_thread_invoke_misses
++);
1384 thread_unlock(new_thread
);
1385 thread_stack_enqueue(new_thread
);
1390 if (new_thread
== old_thread
) {
1391 counter(++c_thread_invoke_same
);
1392 thread_unlock(new_thread
);
1398 * Context switch by full context save.
1400 processor
= current_processor();
1401 processor
->active_thread
= new_thread
;
1402 processor
->current_pri
= new_thread
->sched_pri
;
1403 new_thread
->last_processor
= processor
;
1404 ast_context(new_thread
);
1405 assert(thread_runnable(new_thread
));
1406 thread_unlock(new_thread
);
1408 counter_always(c_thread_invoke_csw
++);
1409 current_task()->csw
++;
1411 assert(old_thread
->runq
== RUN_QUEUE_NULL
);
1412 old_thread
->reason
= reason
;
1414 processor
->last_dispatch
= mach_absolute_time();
1415 timer_event((uint32_t)processor
->last_dispatch
, &new_thread
->system_timer
);
1417 thread_done(old_thread
, new_thread
, processor
);
1420 * This is where we actually switch register context,
1421 * and address space if required. Control will not
1422 * return here immediately.
1424 prev_thread
= machine_switch_context(old_thread
, continuation
, new_thread
);
1427 * We are still old_thread, possibly on a different processor,
1428 * and new_thread is now stale.
1430 thread_begin(old_thread
, old_thread
->last_processor
);
1433 * Now dispatch the thread which resumed us.
1435 thread_dispatch(prev_thread
);
1438 funnel_refunnel_check(old_thread
, 3);
1441 call_continuation(continuation
, parameter
, old_thread
->wait_result
);
1451 * Perform calculations for thread
1452 * finishing execution on the current processor.
1454 * Called at splsched.
1458 thread_t old_thread
,
1459 thread_t new_thread
,
1460 processor_t processor
)
1462 if (!(old_thread
->state
& TH_IDLE
)) {
1464 * Compute remainder of current quantum.
1466 if ( first_timeslice(processor
) &&
1467 processor
->quantum_end
> processor
->last_dispatch
)
1468 old_thread
->current_quantum
=
1469 (processor
->quantum_end
- processor
->last_dispatch
);
1471 old_thread
->current_quantum
= 0;
1473 if (old_thread
->sched_mode
& TH_MODE_REALTIME
) {
1475 * Cancel the deadline if the thread has
1476 * consumed the entire quantum.
1478 if (old_thread
->current_quantum
== 0) {
1479 old_thread
->realtime
.deadline
= UINT64_MAX
;
1480 old_thread
->reason
|= AST_QUANTUM
;
1485 * For non-realtime threads treat a tiny
1486 * remaining quantum as an expired quantum
1487 * but include what's left next time.
1489 if (old_thread
->current_quantum
< min_std_quantum
) {
1490 old_thread
->reason
|= AST_QUANTUM
;
1491 old_thread
->current_quantum
+= std_quantum
;
1496 * If we are doing a direct handoff then
1497 * give the remainder of our quantum to
1500 if ((old_thread
->reason
& (AST_HANDOFF
|AST_QUANTUM
)) == AST_HANDOFF
) {
1501 new_thread
->current_quantum
= old_thread
->current_quantum
;
1502 old_thread
->reason
|= AST_QUANTUM
;
1503 old_thread
->current_quantum
= 0;
1506 old_thread
->last_switch
= processor
->last_dispatch
;
1508 old_thread
->computation_metered
+=
1509 (old_thread
->last_switch
- old_thread
->computation_epoch
);
1516 * Set up for thread beginning execution on
1517 * the current processor.
1519 * Called at splsched.
1524 processor_t processor
)
1526 if (!(thread
->state
& TH_IDLE
)) {
1528 * Give the thread a new quantum
1529 * if none remaining.
1531 if (thread
->current_quantum
== 0)
1532 thread_quantum_init(thread
);
1535 * Set up quantum timer and timeslice.
1537 processor
->quantum_end
=
1538 (processor
->last_dispatch
+ thread
->current_quantum
);
1539 timer_call_enter1(&processor
->quantum_timer
,
1540 thread
, processor
->quantum_end
);
1542 processor_timeslice_setup(processor
, thread
);
1544 thread
->last_switch
= processor
->last_dispatch
;
1546 thread
->computation_epoch
= thread
->last_switch
;
1549 timer_call_cancel(&processor
->quantum_timer
);
1550 processor
->timeslice
= 1;
1557 * Handle previous thread at context switch. Re-dispatch
1558 * if still running, otherwise update run state and perform
1561 * Called at splsched.
1565 register thread_t thread
)
1568 * If blocked at a continuation, discard
1571 if (thread
->continuation
!= NULL
&& thread
->kernel_stack
)
1574 if (!(thread
->state
& TH_IDLE
)) {
1576 thread_lock(thread
);
1578 if (!(thread
->state
& TH_WAIT
)) {
1582 if (thread
->reason
& AST_QUANTUM
)
1583 thread_setrun(thread
, SCHED_TAILQ
);
1585 if (thread
->reason
& AST_PREEMPT
)
1586 thread_setrun(thread
, SCHED_HEADQ
);
1588 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
1590 thread
->reason
= AST_NONE
;
1592 thread_unlock(thread
);
1593 wake_unlock(thread
);
1601 thread
->state
&= ~TH_RUN
;
1603 wake
= thread
->wake_active
;
1604 thread
->wake_active
= FALSE
;
1606 if (thread
->sched_mode
& TH_MODE_TIMESHARE
)
1607 pset_share_decr(thread
->processor_set
);
1608 pset_run_decr(thread
->processor_set
);
1610 thread_unlock(thread
);
1611 wake_unlock(thread
);
1613 if (thread
->options
& TH_OPT_CALLOUT
)
1614 call_thread_block();
1617 thread_wakeup((event_t
)&thread
->wake_active
);
1619 if (thread
->state
& TH_TERMINATE
)
1620 thread_terminate_enqueue(thread
);
1626 * thread_block_reason:
1628 * Forces a reschedule, blocking the caller if a wait
1629 * has been asserted.
1631 * If a continuation is specified, then thread_invoke will
1632 * attempt to discard the thread's kernel stack. When the
1633 * thread resumes, it will execute the continuation function
1634 * on a new kernel stack.
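/*
 *	The continuation mechanism reduces a blocked thread to a function
 *	pointer plus one parameter so its kernel stack can be discarded; when
 *	the thread next runs, the continuation is started on a fresh stack
 *	instead of resuming mid-call.  The guarded sketch below is only a toy
 *	model of that idea with made-up types, not the kernel's thread layout.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

typedef void (*thread_continue_t)(void *parameter, int wait_result);

struct toy_thread {
	thread_continue_t	continuation;
	void			*parameter;
	int			wait_result;
};

static void
after_io(void *parameter, int wait_result)
{
	printf("resumed via continuation: parameter=%s, wait_result=%d\n",
	    (const char *)parameter, wait_result);
}

/* Analogous to thread_block_reason() stashing self->continuation. */
static void
toy_block(struct toy_thread *self, thread_continue_t cont, void *param)
{
	self->continuation = cont;
	self->parameter = param;
}

/* Analogous to thread_invoke()/call_continuation() starting the saved
 * continuation on a fresh stack; in the kernel this never returns. */
static void
toy_resume(struct toy_thread *t, int wait_result)
{
	thread_continue_t	cont = t->continuation;
	void			*param = t->parameter;

	t->continuation = NULL;
	t->parameter = NULL;
	t->wait_result = wait_result;
	cont(param, wait_result);
}

int
main(void)
{
	struct toy_thread t;

	toy_block(&t, after_io, "disk request");
	toy_resume(&t, 2 /* e.g. THREAD_AWAKENED */);
	return 0;
}
#endif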
1636 counter(mach_counter_t c_thread_block_calls
= 0;)
1639 thread_block_reason(
1640 thread_continue_t continuation
,
1644 register thread_t self
= current_thread();
1645 register processor_t processor
;
1646 register thread_t new_thread
;
1649 counter(++c_thread_block_calls
);
1656 extern void db_chkpmgr(void);
1657 db_chkpmgr(); /* (BRINGUP) See if pm config changed */
1663 if (!(reason
& AST_PREEMPT
))
1664 funnel_release_check(self
, 2);
1666 processor
= current_processor();
1669 * Delay switching to the idle thread under certain conditions.
1671 if (s
!= FALSE
&& (self
->state
& (TH_IDLE
|TH_TERMINATE
|TH_WAIT
)) == TH_WAIT
) {
1672 if ( processor
->processor_set
->processor_count
> 1 &&
1673 processor
->processor_set
->runq
.count
== 0 &&
1674 processor
->runq
.count
== 0 )
1675 processor
= delay_idle(processor
, self
);
1678 /* If we're explicitly yielding, force a subsequent quantum */
1679 if (reason
& AST_YIELD
)
1680 processor
->timeslice
= 0;
1682 /* We're handling all scheduling AST's */
1683 ast_off(AST_SCHEDULING
);
1685 self
->continuation
= continuation
;
1686 self
->parameter
= parameter
;
1689 new_thread
= thread_select(processor
);
1690 assert(new_thread
&& thread_runnable(new_thread
));
1691 thread_unlock(self
);
1692 while (!thread_invoke(self
, new_thread
, reason
)) {
1694 new_thread
= thread_select(processor
);
1695 assert(new_thread
&& thread_runnable(new_thread
));
1696 thread_unlock(self
);
1699 funnel_refunnel_check(self
, 5);
1702 return (self
->wait_result
);
1708 * Block the current thread if a wait has been asserted.
1712 thread_continue_t continuation
)
1714 return thread_block_reason(continuation
, NULL
, AST_NONE
);
1718 thread_block_parameter(
1719 thread_continue_t continuation
,
1722 return thread_block_reason(continuation
, parameter
, AST_NONE
);
1728 * Switch directly from the current thread to the
1729 * new thread, handing off our quantum if appropriate.
1731 * New thread must be runnable, and not on a run queue.
1733 * Called at splsched.
1738 thread_continue_t continuation
,
1740 thread_t new_thread
)
1742 ast_t handoff
= AST_HANDOFF
;
1744 funnel_release_check(self
, 3);
1746 self
->continuation
= continuation
;
1747 self
->parameter
= parameter
;
1749 while (!thread_invoke(self
, new_thread
, handoff
)) {
1750 register processor_t processor
= current_processor();
1753 new_thread
= thread_select(processor
);
1754 thread_unlock(self
);
1758 funnel_refunnel_check(self
, 6);
1760 return (self
->wait_result
);
1766 * Called at splsched when a thread first receives
1767 * a new stack after a continuation.
1771 register thread_t old_thread
)
1773 register thread_t self
= current_thread();
1774 register thread_continue_t continuation
;
1775 register void *parameter
;
1777 continuation
= self
->continuation
;
1778 self
->continuation
= NULL
;
1779 parameter
= self
->parameter
;
1780 self
->parameter
= NULL
;
1782 thread_begin(self
, self
->last_processor
);
1784 if (old_thread
!= THREAD_NULL
)
1785 thread_dispatch(old_thread
);
1787 funnel_refunnel_check(self
, 4);
1789 if (old_thread
!= THREAD_NULL
)
1792 call_continuation(continuation
, parameter
, self
->wait_result
);
1797 * Enqueue thread on run queue. Thread must be locked,
1798 * and not already be on a run queue. Returns TRUE
1799 * if a preemption is indicated based on the state
1802 * Run queue must be locked, see run_queue_remove()
1807 register run_queue_t rq
,
1808 register thread_t thread
,
1811 register int whichq
= thread
->sched_pri
;
1812 register queue_t queue
= &rq
->queues
[whichq
];
1813 boolean_t result
= FALSE
;
1815 assert(whichq
>= MINPRI
&& whichq
<= MAXPRI
);
1817 assert(thread
->runq
== RUN_QUEUE_NULL
);
1818 if (queue_empty(queue
)) {
1819 enqueue_tail(queue
, (queue_entry_t
)thread
);
1821 setbit(MAXPRI
- whichq
, rq
->bitmap
);
1822 if (whichq
> rq
->highq
) {
1828 if (options
& SCHED_HEADQ
)
1829 enqueue_head(queue
, (queue_entry_t
)thread
);
1831 enqueue_tail(queue
, (queue_entry_t
)thread
);
1834 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
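/*
 *	Besides the per-priority queues, the run queue keeps a bitmap with one
 *	bit per priority level and a cached highq, so picking the next thread
 *	is a find-first-set over the bitmap rather than a scan of every level;
 *	that is what the setbit()/clrbit()/ffsbit() calls in this file maintain.
 *	The guarded sketch below models the same bookkeeping with a single
 *	32-bit word (the real queue covers NRQS levels with a wider bitmap).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define MAXPRI 31	/* one 32-bit word of priorities, for illustration only */

static uint32_t	bitmap;		/* bit (MAXPRI - pri) set => level non-empty */
static int	highq = 0;	/* cached highest non-empty priority */

static void
rq_mark_nonempty(int pri)
{
	bitmap |= 1u << (MAXPRI - pri);
	if (pri > highq)
		highq = pri;
}

static void
rq_mark_empty(int pri)
{
	bitmap &= ~(1u << (MAXPRI - pri));

	/* Recompute highq by find-first-set, as highq = MAXPRI - ffsbit(bitmap)
	 * does in the kernel.  Lower bit index means higher priority. */
	highq = 0;
	for (int b = 0; b <= MAXPRI; b++)
		if (bitmap & (1u << b)) { highq = MAXPRI - b; break; }
}

int
main(void)
{
	rq_mark_nonempty(10);
	rq_mark_nonempty(31);
	printf("highest runnable priority: %d\n", highq);	/* 31 */
	rq_mark_empty(31);
	printf("after emptying level 31: %d\n", highq);		/* 10 */
	return 0;
}
#endif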
1842 * Enqueue a thread for realtime execution, similar
1843 * to above. Handles preemption directly.
1846 realtime_schedule_insert(
1847 register processor_set_t pset
,
1848 register thread_t thread
)
1850 register run_queue_t rq
= &pset
->runq
;
1851 register int whichq
= thread
->sched_pri
;
1852 register queue_t queue
= &rq
->queues
[whichq
];
1853 uint64_t deadline
= thread
->realtime
.deadline
;
1854 boolean_t try_preempt
= FALSE
;
1856 assert(whichq
>= BASEPRI_REALTIME
&& whichq
<= MAXPRI
);
1858 assert(thread
->runq
== RUN_QUEUE_NULL
);
1859 if (queue_empty(queue
)) {
1860 enqueue_tail(queue
, (queue_entry_t
)thread
);
1862 setbit(MAXPRI
- whichq
, rq
->bitmap
);
1863 if (whichq
> rq
->highq
)
1868 register thread_t entry
= (thread_t
)queue_first(queue
);
1871 if ( queue_end(queue
, (queue_entry_t
)entry
) ||
1872 deadline
< entry
->realtime
.deadline
) {
1873 entry
= (thread_t
)queue_prev((queue_entry_t
)entry
);
1877 entry
= (thread_t
)queue_next((queue_entry_t
)entry
);
1880 if ((queue_entry_t
)entry
== queue
)
1883 insque((queue_entry_t
)thread
, (queue_entry_t
)entry
);
1887 assert(thread
->sched_mode
& TH_MODE_PREEMPT
);
1888 rq
->count
++; rq
->urgency
++;
1891 register processor_t processor
;
1893 processor
= current_processor();
1894 if ( pset
== processor
->processor_set
&&
1895 (thread
->sched_pri
> processor
->current_pri
||
1896 deadline
< processor
->deadline
) ) {
1897 dispatch_counts
.realtime_self
++;
1898 simple_unlock(&pset
->sched_lock
);
1900 ast_on(AST_PREEMPT
| AST_URGENT
);
1904 if ( pset
->processor_count
> 1 ||
1905 pset
!= processor
->processor_set
) {
1906 processor_t myprocessor
, lastprocessor
;
1909 myprocessor
= processor
;
1910 processor
= thread
->last_processor
;
1911 if ( processor
!= myprocessor
&&
1912 processor
!= PROCESSOR_NULL
&&
1913 processor
->processor_set
== pset
&&
1914 processor
->state
== PROCESSOR_RUNNING
&&
1915 (thread
->sched_pri
> processor
->current_pri
||
1916 deadline
< processor
->deadline
) ) {
1917 dispatch_counts
.realtime_last
++;
1918 cause_ast_check(processor
);
1919 simple_unlock(&pset
->sched_lock
);
1923 lastprocessor
= processor
;
1924 queue
= &pset
->active_queue
;
1925 processor
= (processor_t
)queue_first(queue
);
1926 while (!queue_end(queue
, (queue_entry_t
)processor
)) {
1927 next
= queue_next((queue_entry_t
)processor
);
1929 if ( processor
!= myprocessor
&&
1930 processor
!= lastprocessor
&&
1931 (thread
->sched_pri
> processor
->current_pri
||
1932 deadline
< processor
->deadline
) ) {
1933 if (!queue_end(queue
, next
)) {
1934 remqueue(queue
, (queue_entry_t
)processor
);
1935 enqueue_tail(queue
, (queue_entry_t
)processor
);
1937 dispatch_counts
.realtime_other
++;
1938 cause_ast_check(processor
);
1939 simple_unlock(&pset
->sched_lock
);
1943 processor
= (processor_t
)next
;
1948 simple_unlock(&pset
->sched_lock
);
1954 * Dispatch thread for execution, directly onto an idle
1955 * processor if possible. Else put on appropriate run
1956 * queue. (local if bound, else processor set)
1958 * Thread must be locked.
1962 register thread_t new_thread
,
1965 register processor_t processor
;
1966 register processor_set_t pset
;
1967 register thread_t thread
;
1968 ast_t preempt
= (options
& SCHED_PREEMPT
)?
1969 AST_PREEMPT
: AST_NONE
;
1971 assert(thread_runnable(new_thread
));
1974 * Update priority if needed.
1976 if (new_thread
->sched_stamp
!= sched_tick
)
1977 update_priority(new_thread
);
1980 * Check for urgent preemption.
1982 if (new_thread
->sched_mode
& TH_MODE_PREEMPT
)
1983 preempt
= (AST_PREEMPT
| AST_URGENT
);
1985 assert(new_thread
->runq
== RUN_QUEUE_NULL
);
1987 if ((processor
= new_thread
->bound_processor
) == PROCESSOR_NULL
) {
1989 * First try to dispatch on
1990 * the last processor.
1992 pset
= new_thread
->processor_set
;
1993 processor
= new_thread
->last_processor
;
1994 if ( pset
->processor_count
> 1 &&
1995 processor
!= PROCESSOR_NULL
&&
1996 processor
->state
== PROCESSOR_IDLE
) {
1997 processor_lock(processor
);
1998 simple_lock(&pset
->sched_lock
);
1999 if ( processor
->processor_set
== pset
&&
2000 processor
->state
== PROCESSOR_IDLE
) {
2001 remqueue(&pset
->idle_queue
, (queue_entry_t
)processor
);
2003 processor
->next_thread
= new_thread
;
2004 if (new_thread
->sched_pri
>= BASEPRI_RTQUEUES
)
2005 processor
->deadline
= new_thread
->realtime
.deadline
;
2007 processor
->deadline
= UINT64_MAX
;
2008 processor
->state
= PROCESSOR_DISPATCHING
;
2009 dispatch_counts
.idle_pset_last
++;
2010 simple_unlock(&pset
->sched_lock
);
2011 processor_unlock(processor
);
2012 if (processor
!= current_processor())
2013 machine_signal_idle(processor
);
2016 processor_unlock(processor
);
2019 simple_lock(&pset
->sched_lock
);
2022 * Next pick any idle processor
2023 * in the processor set.
2025 if (pset
->idle_count
> 0) {
2026 processor
= (processor_t
)dequeue_head(&pset
->idle_queue
);
2028 processor
->next_thread
= new_thread
;
2029 if (new_thread
->sched_pri
>= BASEPRI_RTQUEUES
)
2030 processor
->deadline
= new_thread
->realtime
.deadline
;
2032 processor
->deadline
= UINT64_MAX
;
2033 processor
->state
= PROCESSOR_DISPATCHING
;
2034 dispatch_counts
.idle_pset_any
++;
2035 simple_unlock(&pset
->sched_lock
);
2036 if (processor
!= current_processor())
2037 machine_signal_idle(processor
);
2041 if (new_thread
->sched_pri
>= BASEPRI_RTQUEUES
)
2042 realtime_schedule_insert(pset
, new_thread
);
2044 if (!run_queue_enqueue(&pset
->runq
, new_thread
, options
))
2048 * Update the timesharing quanta.
2050 timeshare_quanta_update(pset
);
2055 if (preempt
!= AST_NONE
) {
2057 * First try the current processor
2058 * if it is a member of the correct
2061 processor
= current_processor();
2062 thread
= processor
->active_thread
;
2063 if ( pset
== processor
->processor_set
&&
2064 csw_needed(thread
, processor
) ) {
2065 dispatch_counts
.pset_self
++;
2066 simple_unlock(&pset
->sched_lock
);
2073 * If that failed and we have other
2074 * processors available keep trying.
2076 if ( pset
->processor_count
> 1 ||
2077 pset
!= processor
->processor_set
) {
2078 queue_t queue
= &pset
->active_queue
;
2079 processor_t myprocessor
, lastprocessor
;
2083 * Next try the last processor
2086 myprocessor
= processor
;
2087 processor
= new_thread
->last_processor
;
2088 if ( processor
!= myprocessor
&&
2089 processor
!= PROCESSOR_NULL
&&
2090 processor
->processor_set
== pset
&&
2091 processor
->state
== PROCESSOR_RUNNING
&&
2092 new_thread
->sched_pri
> processor
->current_pri
) {
2093 dispatch_counts
.pset_last
++;
2094 cause_ast_check(processor
);
2095 simple_unlock(&pset
->sched_lock
);
2100 * Lastly, pick any other
2101 * available processor.
2103 lastprocessor
= processor
;
2104 processor
= (processor_t
)queue_first(queue
);
2105 while (!queue_end(queue
, (queue_entry_t
)processor
)) {
2106 next
= queue_next((queue_entry_t
)processor
);
2108 if ( processor
!= myprocessor
&&
2109 processor
!= lastprocessor
&&
2110 new_thread
->sched_pri
>
2111 processor
->current_pri
) {
2112 if (!queue_end(queue
, next
)) {
2113 remqueue(queue
, (queue_entry_t
)processor
);
2114 enqueue_tail(queue
, (queue_entry_t
)processor
);
2116 dispatch_counts
.pset_other
++;
2117 cause_ast_check(processor
);
2118 simple_unlock(&pset
->sched_lock
);
2122 processor
= (processor_t
)next
;
2127 simple_unlock(&pset
->sched_lock
);
2132 * Bound, can only run on bound processor. Have to lock
2133 * processor here because it may not be the current one.
2135 processor_lock(processor
);
2136 pset
= processor
->processor_set
;
2137 if (pset
!= PROCESSOR_SET_NULL
) {
2138 simple_lock(&pset
->sched_lock
);
2139 if (processor
->state
== PROCESSOR_IDLE
) {
2140 remqueue(&pset
->idle_queue
, (queue_entry_t
)processor
);
2142 processor
->next_thread
= new_thread
;
2143 processor
->deadline
= UINT64_MAX
;
2144 processor
->state
= PROCESSOR_DISPATCHING
;
2145 dispatch_counts
.idle_bound
++;
2146 simple_unlock(&pset
->sched_lock
);
2147 processor_unlock(processor
);
2148 if (processor
!= current_processor())
2149 machine_signal_idle(processor
);
2154 if (!run_queue_enqueue(&processor
->runq
, new_thread
, options
))
2157 if (preempt
!= AST_NONE
) {
2158 if (processor
== current_processor()) {
2159 thread
= processor
->active_thread
;
2160 if (csw_needed(thread
, processor
)) {
2161 dispatch_counts
.bound_self
++;
2166 if ( processor
->state
== PROCESSOR_RUNNING
&&
2167 new_thread
->sched_pri
> processor
->current_pri
) {
2168 dispatch_counts
.bound_other
++;
2169 cause_ast_check(processor
);
2173 if (pset
!= PROCESSOR_SET_NULL
)
2174 simple_unlock(&pset
->sched_lock
);
2176 processor_unlock(processor
);
2181 * Check for a possible preemption point in
2182 * the (current) thread.
2184 * Called at splsched.
2189 processor_t processor
)
2191 int current_pri
= thread
->sched_pri
;
2192 ast_t result
= AST_NONE
;
2195 if (first_timeslice(processor
)) {
2196 runq
= &processor
->processor_set
->runq
;
2197 if (runq
->highq
>= BASEPRI_RTQUEUES
)
2198 return (AST_PREEMPT
| AST_URGENT
);
2200 if (runq
->highq
> current_pri
) {
2201 if (runq
->urgency
> 0)
2202 return (AST_PREEMPT
| AST_URGENT
);
2204 result
|= AST_PREEMPT
;
2207 runq
= &processor
->runq
;
2208 if (runq
->highq
> current_pri
) {
2209 if (runq
->urgency
> 0)
2210 return (AST_PREEMPT
| AST_URGENT
);
2212 result
|= AST_PREEMPT
;
2216 runq
= &processor
->processor_set
->runq
;
2217 if (runq
->highq
>= current_pri
) {
2218 if (runq
->urgency
> 0)
2219 return (AST_PREEMPT
| AST_URGENT
);
2221 result
|= AST_PREEMPT
;
2224 runq
= &processor
->runq
;
2225 if (runq
->highq
>= current_pri
) {
2226 if (runq
->urgency
> 0)
2227 return (AST_PREEMPT
| AST_URGENT
);
2229 result
|= AST_PREEMPT
;
2233 if (result
!= AST_NONE
)
2236 if (thread
->state
& TH_SUSP
)
2237 result
|= AST_PREEMPT
;
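/*
 *	Stripped of the two run queues (processor-local and processor set) and
 *	the first-timeslice distinction, the preemption check boils down to:
 *	preempt urgently when real-time or urgent work is waiting, preempt
 *	non-urgently when anything of higher priority is runnable, and also
 *	preempt a thread that is being suspended.  The guarded sketch below is
 *	a deliberately simplified mirror of that policy; the flag values and
 *	the real-time floor are illustrative, not the kernel's constants.
 */
#if 0
#include <stdio.h>

#define AST_NONE	0x0
#define AST_PREEMPT	0x1
#define AST_URGENT	0x2

/* One run queue summarized by its highest runnable priority and whether any
 * waiting thread is marked urgent. */
static int
preemption_check(int current_pri, int runq_highq, int runq_urgency, int rt_floor)
{
	if (runq_highq >= rt_floor)		/* real-time work pending */
		return (AST_PREEMPT | AST_URGENT);

	if (runq_highq > current_pri) {
		if (runq_urgency > 0)
			return (AST_PREEMPT | AST_URGENT);
		return AST_PREEMPT;
	}
	return AST_NONE;
}

int
main(void)
{
	/* A priority-50 thread with a priority-60 waiter: plain preemption. */
	printf("ast = 0x%x\n", preemption_check(50, 60, 0, 96));
	/* A waiter at or above the assumed real-time floor: urgent preemption. */
	printf("ast = 0x%x\n", preemption_check(50, 97, 0, 96));
	return 0;
}
#endif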
2245 * Set the scheduled priority of the specified thread.
2247 * This may cause the thread to change queues.
2249 * Thread must be locked.
2256 register struct run_queue
*rq
= run_queue_remove(thread
);
2258 if ( !(thread
->sched_mode
& TH_MODE_TIMESHARE
) &&
2259 (priority
>= BASEPRI_PREEMPT
||
2260 (thread
->task_priority
< MINPRI_KERNEL
&&
2261 thread
->task_priority
>= BASEPRI_BACKGROUND
&&
2262 priority
> thread
->task_priority
) ) )
2263 thread
->sched_mode
|= TH_MODE_PREEMPT
;
2265 thread
->sched_mode
&= ~TH_MODE_PREEMPT
;
2267 thread
->sched_pri
= priority
;
2268 if (rq
!= RUN_QUEUE_NULL
)
2269 thread_setrun(thread
, SCHED_PREEMPT
| SCHED_TAILQ
);
2271 if (thread
->state
& TH_RUN
) {
2272 processor_t processor
= thread
->last_processor
;
2274 if (thread
== current_thread()) {
2275 ast_t preempt
= csw_check(thread
, processor
);
2277 if (preempt
!= AST_NONE
)
2279 processor
->current_pri
= priority
;
2282 if ( processor
!= PROCESSOR_NULL
&&
2283 processor
->active_thread
== thread
)
2284 cause_ast_check(processor
);
2298 if (rq
!= thread
->runq
)
2299 panic("run_queue_check: thread runq");
2301 if (thread
->sched_pri
> MAXPRI
|| thread
->sched_pri
< MINPRI
)
2302 panic("run_queue_check: thread sched_pri");
2304 q
= &rq
->queues
[thread
->sched_pri
];
2305 qe
= queue_first(q
);
2306 while (!queue_end(q
, qe
)) {
2307 if (qe
== (queue_entry_t
)thread
)
2310 qe
= queue_next(qe
);
2313 panic("run_queue_check: end");
2321 * Remove a thread from its current run queue and
2322 * return the run queue if successful.
2324 * Thread must be locked.
2330 register run_queue_t rq
= thread
->runq
;
2333 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
2334 * run queues because the caller locked the thread. Otherwise
2335 * the thread is on a run queue, but could be chosen for dispatch
2338 if (rq
!= RUN_QUEUE_NULL
) {
2339 processor_set_t pset
= thread
->processor_set
;
2340 processor_t processor
= thread
->bound_processor
;
2343 * The run queues are locked by the pset scheduling
2344 * lock, except when a processor is off-line the
2345 * local run queue is locked by the processor lock.
2347 if (processor
!= PROCESSOR_NULL
) {
2348 processor_lock(processor
);
2349 pset
= processor
->processor_set
;
2352 if (pset
!= PROCESSOR_SET_NULL
)
2353 simple_lock(&pset
->sched_lock
);
2355 if (rq
== thread
->runq
) {
2357 * Thread is on a run queue and we have a lock on
2360 remqueue(&rq
->queues
[0], (queue_entry_t
)thread
);
2362 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
2364 assert(rq
->urgency
>= 0);
2366 if (queue_empty(rq
->queues
+ thread
->sched_pri
)) {
2367 /* update run queue status */
2368 if (thread
->sched_pri
!= IDLEPRI
)
2369 clrbit(MAXPRI
- thread
->sched_pri
, rq
->bitmap
);
2370 rq
->highq
= MAXPRI
- ffsbit(rq
->bitmap
);
2373 thread
->runq
= RUN_QUEUE_NULL
;
2377 * The thread left the run queue before we could
2378 * lock the run queue.
2380 assert(thread
->runq
== RUN_QUEUE_NULL
);
2381 rq
= RUN_QUEUE_NULL
;
2384 if (pset
!= PROCESSOR_SET_NULL
)
2385 simple_unlock(&pset
->sched_lock
);
2387 if (processor
!= PROCESSOR_NULL
)
2388 processor_unlock(processor
);
2397 * Remove a thread to execute from the run queues
2400 * Called with pset scheduling lock held.
2404 processor_set_t pset
,
2405 processor_t processor
)
2407 register run_queue_t runq
;
2408 register thread_t thread
;
2411 runq
= &processor
->runq
;
2413 if (runq
->count
> 0 && runq
->highq
>= pset
->runq
.highq
) {
2414 q
= runq
->queues
+ runq
->highq
;
2416 thread
= (thread_t
)q
->next
;
2417 ((queue_entry_t
)thread
)->next
->prev
= q
;
2418 q
->next
= ((queue_entry_t
)thread
)->next
;
2419 thread
->runq
= RUN_QUEUE_NULL
;
2421 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
2423 assert(runq
->urgency
>= 0);
2424 if (queue_empty(q
)) {
2425 if (runq
->highq
!= IDLEPRI
)
2426 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
2427 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
2430 processor
->deadline
= UINT64_MAX
;
2437 assert(runq
->count
> 0);
2438 q
= runq
->queues
+ runq
->highq
;
2440 thread
= (thread_t
)q
->next
;
2441 ((queue_entry_t
)thread
)->next
->prev
= q
;
2442 q
->next
= ((queue_entry_t
)thread
)->next
;
2443 thread
->runq
= RUN_QUEUE_NULL
;
2445 if (runq
->highq
>= BASEPRI_RTQUEUES
)
2446 processor
->deadline
= thread
->realtime
.deadline
;
2448 processor
->deadline
= UINT64_MAX
;
2449 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
2451 assert(runq
->urgency
>= 0);
2452 if (queue_empty(q
)) {
2453 if (runq
->highq
!= IDLEPRI
)
2454 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
2455 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
2458 timeshare_quanta_update(pset
);
2465 processor_t processor
,
2468 int *gcount
, *lcount
;
2469 uint64_t abstime
, spin
, limit
;
2471 lcount
= &processor
->runq
.count
;
2472 gcount
= &processor
->processor_set
->runq
.count
;
2474 abstime
= mach_absolute_time();
2475 limit
= abstime
+ delay_idle_limit
;
2476 spin
= abstime
+ delay_idle_spin
;
2478 timer_event((uint32_t)abstime
, &processor
->idle_thread
->system_timer
);
2480 self
->options
|= TH_OPT_DELAYIDLE
;
2482 while ( *gcount
== 0 && *lcount
== 0 &&
2483 (self
->state
& TH_WAIT
) != 0 &&
2485 if (abstime
>= spin
) {
2489 processor
= current_processor();
2490 lcount
= &processor
->runq
.count
;
2491 gcount
= &processor
->processor_set
->runq
.count
;
2493 abstime
= mach_absolute_time();
2494 spin
= abstime
+ delay_idle_spin
;
2496 timer_event((uint32_t)abstime
, &processor
->idle_thread
->system_timer
);
2500 abstime
= mach_absolute_time();
2504 timer_event((uint32_t)abstime
, &self
->system_timer
);
2506 self
->options
&= ~TH_OPT_DELAYIDLE
;
2512 * no_dispatch_count counts number of times processors go non-idle
2513 * without being dispatched. This should be very rare.
2515 int no_dispatch_count
= 0;
2518 * This is the idle processor thread, which just looks for other threads
2524 register processor_t processor
;
2525 register thread_t
*threadp
;
2526 register int *gcount
;
2527 register int *lcount
;
2528 register thread_t new_thread
;
2530 register processor_set_t pset
;
2531 ast_t
*myast
= ast_pending();
2533 processor
= current_processor();
2535 threadp
= &processor
->next_thread
;
2536 lcount
= &processor
->runq
.count
;
2537 gcount
= &processor
->processor_set
->runq
.count
;
2540 (void)splsched(); /* Turn interruptions off */
2542 pmsDown(); /* Step power down. Note: interruptions must be disabled for this call */
2544 while ( (*threadp
== THREAD_NULL
) &&
2545 (*gcount
== 0) && (*lcount
== 0) ) {
2547 /* check for ASTs while we wait */
2548 if (*myast
&~ (AST_SCHEDULING
| AST_BSD
)) {
2549 /* no ASTs for us */
2560 * This is not a switch statement to avoid the
2561 * bounds checking code in the common case.
2563 pset
= processor
->processor_set
;
2564 simple_lock(&pset
->sched_lock
);
2566 pmsStep(0); /* Step up out of idle power, may start timer for next step */
2568 state
= processor
->state
;
2569 if (state
== PROCESSOR_DISPATCHING
) {
* Common case -- cpu dispatched.
2573 new_thread
= *threadp
;
2574 *threadp
= (volatile thread_t
) THREAD_NULL
;
2575 processor
->state
= PROCESSOR_RUNNING
;
2576 enqueue_tail(&pset
->active_queue
, (queue_entry_t
)processor
);
2578 if ( pset
->runq
.highq
>= BASEPRI_RTQUEUES
&&
2579 new_thread
->sched_pri
>= BASEPRI_RTQUEUES
) {
2580 register run_queue_t runq
= &pset
->runq
;
2583 q
= runq
->queues
+ runq
->highq
;
2584 if (((thread_t
)q
->next
)->realtime
.deadline
<
2585 processor
->deadline
) {
2586 thread_t thread
= new_thread
;
2588 new_thread
= (thread_t
)q
->next
;
2589 ((queue_entry_t
)new_thread
)->next
->prev
= q
;
2590 q
->next
= ((queue_entry_t
)new_thread
)->next
;
2591 new_thread
->runq
= RUN_QUEUE_NULL
;
2592 processor
->deadline
= new_thread
->realtime
.deadline
;
2593 assert(new_thread
->sched_mode
& TH_MODE_PREEMPT
);
2594 runq
->count
--; runq
->urgency
--;
2595 if (queue_empty(q
)) {
2596 if (runq
->highq
!= IDLEPRI
)
2597 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
2598 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
2600 dispatch_counts
.missed_realtime
++;
2601 simple_unlock(&pset
->sched_lock
);
2603 thread_lock(thread
);
2604 thread_setrun(thread
, SCHED_HEADQ
);
2605 thread_unlock(thread
);
2607 counter(c_idle_thread_handoff
++);
2608 thread_run(processor
->idle_thread
, (thread_continue_t
)idle_thread
, NULL
, new_thread
);
2611 simple_unlock(&pset
->sched_lock
);
2613 counter(c_idle_thread_handoff
++);
2614 thread_run(processor
->idle_thread
, (thread_continue_t
)idle_thread
, NULL
, new_thread
);
2618 if ( processor
->runq
.highq
> new_thread
->sched_pri
||
2619 pset
->runq
.highq
> new_thread
->sched_pri
) {
2620 thread_t thread
= new_thread
;
2622 new_thread
= choose_thread(pset
, processor
);
2623 dispatch_counts
.missed_other
++;
2624 simple_unlock(&pset
->sched_lock
);
2626 thread_lock(thread
);
2627 thread_setrun(thread
, SCHED_HEADQ
);
2628 thread_unlock(thread
);
2630 counter(c_idle_thread_handoff
++);
2631 thread_run(processor
->idle_thread
, (thread_continue_t
)idle_thread
, NULL
, new_thread
);
2635 simple_unlock(&pset
->sched_lock
);
2637 counter(c_idle_thread_handoff
++);
2638 thread_run(processor
->idle_thread
, (thread_continue_t
)idle_thread
, NULL
, new_thread
);
2643 if (state
== PROCESSOR_IDLE
) {
2645 * Processor was not dispatched (Rare).
2646 * Set it running again and force a
2649 no_dispatch_count
++;
2651 remqueue(&pset
->idle_queue
, (queue_entry_t
)processor
);
2652 processor
->state
= PROCESSOR_RUNNING
;
2653 enqueue_tail(&pset
->active_queue
, (queue_entry_t
)processor
);
2654 simple_unlock(&pset
->sched_lock
);
2656 counter(c_idle_thread_block
++);
2657 thread_block((thread_continue_t
)idle_thread
);
2661 if (state
== PROCESSOR_SHUTDOWN
) {
2663 * Going off-line. Force a
2666 if ((new_thread
= (thread_t
)*threadp
) != THREAD_NULL
) {
2667 *threadp
= (volatile thread_t
) THREAD_NULL
;
2668 processor
->deadline
= UINT64_MAX
;
2669 simple_unlock(&pset
->sched_lock
);
2671 thread_lock(new_thread
);
2672 thread_setrun(new_thread
, SCHED_HEADQ
);
2673 thread_unlock(new_thread
);
2676 simple_unlock(&pset
->sched_lock
);
2678 counter(c_idle_thread_block
++);
2679 thread_block((thread_continue_t
)idle_thread
);
2683 simple_unlock(&pset
->sched_lock
);
2685 panic("idle_thread: state %d\n", processor
->state
);
2691 processor_t processor
)
2693 kern_return_t result
;
2697 result
= kernel_thread_create((thread_continue_t
)idle_thread
, NULL
, MAXPRI_KERNEL
, &thread
);
2698 if (result
!= KERN_SUCCESS
)
2702 thread_lock(thread
);
2703 thread
->bound_processor
= processor
;
2704 processor
->idle_thread
= thread
;
2705 thread
->sched_pri
= thread
->priority
= IDLEPRI
;
2706 thread
->state
= (TH_RUN
| TH_IDLE
);
2707 thread_unlock(thread
);
2710 thread_deallocate(thread
);
2712 return (KERN_SUCCESS
);
2715 static uint64_t sched_tick_deadline
;
2720 * Kicks off scheduler services.
2722 * Called at splsched.
2727 kern_return_t result
;
2730 result
= kernel_thread_start_priority((thread_continue_t
)sched_tick_thread
, NULL
, MAXPRI_KERNEL
, &thread
);
2731 if (result
!= KERN_SUCCESS
)
2732 panic("sched_startup");
2734 thread_deallocate(thread
);
2737 * Yield to the sched_tick_thread while it times
2738 * a series of context switches back. It stores
2739 * the baseline value in sched_cswtime.
2741 * The current thread is the only other thread
2742 * active at this point.
2744 while (sched_cswtime
== 0)
2745 thread_block(THREAD_CONTINUE_NULL
);
2747 thread_daemon_init();
2749 thread_call_initialize();
2753 * sched_tick_thread:
2755 * Perform periodic bookkeeping functions about ten
2759 sched_tick_continue(void)
2761 uint64_t abstime
= mach_absolute_time();
2766 * Compute various averages.
2771 * Scan the run queues for threads which
2772 * may need to be updated.
2774 thread_update_scan();
2776 clock_deadline_for_periodic_event(sched_tick_interval
, abstime
,
2777 &sched_tick_deadline
);
2779 assert_wait_deadline((event_t
)sched_tick_thread
, THREAD_UNINT
, sched_tick_deadline
);
2780 thread_block((thread_continue_t
)sched_tick_continue
);
2785 * Time a series of context switches to determine
2786 * a baseline. Toss the high and low and return
2787 * the one-way value.
2792 uint32_t new, hi
, low
, accum
;
2796 accum
= hi
= low
= 0;
2797 for (i
= 0; i
< tries
; ++i
) {
2798 abstime
= mach_absolute_time();
2799 thread_block(THREAD_CONTINUE_NULL
);
2801 new = mach_absolute_time() - abstime
;
2804 accum
= hi
= low
= new;
2815 return ((accum
- hi
- low
) / (2 * (tries
- 2)));
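/*
 *	The return expression discards the highest and lowest samples and
 *	halves the remaining average: each thread_block() sample measures a
 *	round trip (switch away and back), while sched_cswtime is the one-way
 *	cost.  The guarded sketch below reruns the arithmetic on hypothetical
 *	round-trip samples; the sample values are made up for illustration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t samples[] = { 900, 1000, 1100, 1000, 5000, 800, 1000 };
	uint32_t tries = sizeof(samples) / sizeof(samples[0]);
	uint32_t accum = 0, hi = 0, low = 0;

	for (uint32_t i = 0; i < tries; i++) {
		uint32_t new = samples[i];

		if (i == 0)
			accum = hi = low = new;
		else {
			if (new > hi)
				hi = new;
			else if (new < low)
				low = new;
			accum += new;
		}
	}

	/* Drop the extremes, halve for the one-way value: here 500. */
	printf("one-way estimate: %u\n", (accum - hi - low) / (2 * (tries - 2)));
	return 0;
}
#endif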
2819 sched_tick_thread(void)
2821 sched_cswtime
= time_cswitch();
2823 sched_tick_deadline
= mach_absolute_time();
2825 sched_tick_continue();
2830 * thread_update_scan / runq_scan:
2832 * Scan the run queues to account for timesharing threads
2833 * which need to be updated.
2835 * Scanner runs in two passes. Pass one squirrels likely
2836 * threads away in an array, pass two does the update.
2838 * This is necessary because the run queue is locked for
2839 * the candidate scan, but the thread is locked for the update.
2841 * Array should be sized to make forward progress, without
2842 * disabling preemption for long periods.
2845 #define THREAD_UPDATE_SIZE 128
2847 static thread_t thread_update_array
[THREAD_UPDATE_SIZE
];
2848 static int thread_update_count
= 0;
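/*
 *	The two passes exist because of lock ordering: pass one walks the run
 *	queues under the run-queue (pset) lock and merely stashes references to
 *	stale timesharing threads in thread_update_array[]; pass two walks that
 *	array with only each thread's own lock held and calls update_priority().
 *	The guarded sketch below shows the generic "collect under one lock,
 *	process under another" pattern with plain data; the locks themselves
 *	are omitted and the array size is shrunk for display.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define UPDATE_SIZE 8	/* mirrors THREAD_UPDATE_SIZE, shrunk for the example */

struct item { int id; int stamp; };

static struct item	*update_array[UPDATE_SIZE];
static int		update_count;

/* Pass 1: would run under the queue lock; only collects candidates.
 * Returns 1 if the array filled up and the caller must restart, as
 * runq_scan() does. */
static int
collect_stale(struct item *items, int n, int current_stamp)
{
	for (int i = 0; i < n; i++) {
		if (items[i].stamp != current_stamp) {
			if (update_count == UPDATE_SIZE)
				return 1;
			update_array[update_count++] = &items[i];
		}
	}
	return 0;
}

/* Pass 2: would run with only the per-item lock held; does the update. */
static void
process_collected(int current_stamp)
{
	while (update_count > 0) {
		struct item *it = update_array[--update_count];

		update_array[update_count] = NULL;
		it->stamp = current_stamp;	/* stands in for update_priority() */
	}
}

int
main(void)
{
	struct item items[4] = { {1, 3}, {2, 7}, {3, 3}, {4, 7} };
	int restart = collect_stale(items, 4, 7);

	process_collected(7);
	printf("restart=%d, item1 stamp=%d\n", restart, items[0].stamp);
	return 0;
}
#endif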
2851 * Scan a runq for candidate threads.
2853 * Returns TRUE if retry is needed.
2861 register thread_t thread
;
2863 if ((count
= runq
->count
) > 0) {
2864 q
= runq
->queues
+ runq
->highq
;
2866 queue_iterate(q
, thread
, thread_t
, links
) {
2867 if ( thread
->sched_stamp
!= sched_tick
&&
2868 (thread
->sched_mode
& TH_MODE_TIMESHARE
) ) {
2869 if (thread_update_count
== THREAD_UPDATE_SIZE
)
2872 thread_update_array
[thread_update_count
++] = thread
;
2873 thread_reference_internal(thread
);
2887 thread_update_scan(void)
2889 register boolean_t restart_needed
;
2890 register processor_set_t pset
= &default_pset
;
2891 register processor_t processor
;
2892 register thread_t thread
;
2897 simple_lock(&pset
->sched_lock
);
2898 restart_needed
= runq_scan(&pset
->runq
);
2899 simple_unlock(&pset
->sched_lock
);
2901 if (!restart_needed
) {
2902 simple_lock(&pset
->sched_lock
);
2903 processor
= (processor_t
)queue_first(&pset
->processors
);
2904 while (!queue_end(&pset
->processors
, (queue_entry_t
)processor
)) {
2905 if ((restart_needed
= runq_scan(&processor
->runq
)) != 0)
2908 thread
= processor
->idle_thread
;
2909 if (thread
->sched_stamp
!= sched_tick
) {
2910 if (thread_update_count
== THREAD_UPDATE_SIZE
) {
2911 restart_needed
= TRUE
;
2915 thread_update_array
[thread_update_count
++] = thread
;
2916 thread_reference_internal(thread
);
2919 processor
= (processor_t
)queue_next(&processor
->processors
);
2921 simple_unlock(&pset
->sched_lock
);
2926 * Ok, we now have a collection of candidates -- fix them.
2928 while (thread_update_count
> 0) {
2929 thread
= thread_update_array
[--thread_update_count
];
2930 thread_update_array
[thread_update_count
] = THREAD_NULL
;
2933 thread_lock(thread
);
2934 if ( !(thread
->state
& (TH_WAIT
|TH_SUSP
)) &&
2935 thread
->sched_stamp
!= sched_tick
)
2936 update_priority(thread
);
2937 thread_unlock(thread
);
2940 thread_deallocate(thread
);
2942 } while (restart_needed
);
2946 * Just in case someone doesn't use the macro
2948 #undef thread_wakeup
2957 thread_wakeup_with_result(x
, THREAD_AWAKENED
);
2961 preemption_enabled(void)
2963 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
2971 return ((thread
->state
& (TH_RUN
|TH_WAIT
)) == TH_RUN
);
2976 #include <ddb/db_output.h>
2977 #define printf kdbprintf
2978 void db_sched(void);
2983 iprintf("Scheduling Statistics:\n");
2985 iprintf("Thread invocations: csw %d same %d\n",
2986 c_thread_invoke_csw
, c_thread_invoke_same
);
2988 iprintf("Thread block: calls %d\n",
2989 c_thread_block_calls
);
2990 iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
2991 c_idle_thread_handoff
,
2992 c_idle_thread_block
, no_dispatch_count
);
2993 iprintf("Sched thread blocks: %d\n", c_sched_thread_block
);
2994 #endif /* MACH_COUNTERS */
2998 #include <ddb/db_output.h>
2999 void db_show_thread_log(void);
3002 db_show_thread_log(void)
3005 #endif /* MACH_KDB */