/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 */
#include <simple_clock.h>
#include <power_save.h>
#include <task_swapper.h>

#include <ddb/db_output.h>
#include <mach/machine.h>
#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/etap_macros.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <sys/kdebug.h>
#if	TASK_SWAPPER
#include <kern/task_swap.h>
extern int	task_swap_on;
#endif	/* TASK_SWAPPER */

#define		DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define		MAX_UNSAFE_QUANTA			800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define		MAX_POLL_QUANTA				2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define		SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint32_t	std_quantum_us;

#endif	/* SIMPLE_CLOCK */
/* Forward declarations */
void		wait_queues_init(void);

thread_t	choose_pset_thread(
				processor_t			myprocessor,
				processor_set_t		pset);

thread_t	choose_thread(
				processor_t			myprocessor);

boolean_t	run_queue_enqueue(
				run_queue_t			runq,
				thread_t			thread,
				boolean_t			tail);

void		do_thread_scan(void);

void		dump_run_queues(/* ... */);

void		dump_run_queue_struct(/* ... */);

void		dump_processor_set(
				processor_set_t		ps);

boolean_t	thread_runnable(
				thread_t			thread);
/*
 *	Thread states are combinations of:
 *		R	running
 *		W	waiting (or on wait queue)
 *		N	non-interruptible
 *
 *			assert_wait		thread_block	clear_wait		swapout	swapin
 *
 *	R		RW, RWN			R;   setrun		-				-
 *	RN		RWN				RN;  setrun		-				-
 */
/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */

struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
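/*
 * Illustration (a standalone sketch, not kernel code): an "event" is
 * just an address, and wait_hash() folds it into one of NUMQUEUES
 * buckets so that waiters and wakers using the same address meet on
 * the same wait queue.  The NUMQUEUES value of 64 below is an
 * arbitrary stand-in for the kernel's value, and the fake address is
 * only a placeholder; the (int) cast mirrors the 32-bit pointers of
 * this code base.
 */
#if 0
#include <stdio.h>

#define NUMQUEUES	64
#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)

int
main(void)
{
	long	fake_addr = 0x5f3759df;		/* stands in for an object address */

	/* every thread naming this address hashes to the same bucket */
	printf("event 0x%lx hashes to bucket %d\n",
		   fake_addr, wait_hash(fake_addr));
	return 0;
}
#endif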
	/*
	 *	Calculate the timeslicing quantum.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	pset_sys_bootstrap();		/* initialize processor mgmt. */
#endif	/* SIMPLE_CLOCK */
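/*
 * Worked example (illustrative): with the default preemption rate of
 * 100 per second, std_quantum_us = 1,000,000 / 100 = 10,000 us, i.e. a
 * 10 ms standard timeslice.  Raising default_preemption_rate shortens
 * the quantum proportionally (1000/s would give a 1 ms quantum).
 */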
void
wait_queues_init(void)
{
	register int	i;

	for (i = 0; i < NUMQUEUES; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	thread_t		thread = p0;

	if (--thread->wait_timer_active == 1) {
		if (thread->wait_timer_is_set) {
			thread->wait_timer_is_set = FALSE;
			clear_wait_internal(thread, THREAD_TIMED_OUT);
			thread_unlock(thread);
		}
	}
	else
	if (thread->wait_timer_active == 0)
		thread_wakeup_one(&thread->wait_timer_active);
}
/*
 *	thread_set_timer:
 *
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	thread_t		thread = current_thread();
	uint64_t		deadline;

	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		timer_call_enter(&thread->wait_timer, deadline);
		assert(!thread->wait_timer_is_set);
		thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
}
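/*
 * Typical usage (an illustrative sketch, not a quote of a caller in
 * this file; `my_event' is a placeholder): the timer is armed only
 * after assert_wait() has marked the thread as waiting, and cancelled
 * when the wait finished for some reason other than the timeout.
 */
#if 0
	static int		my_event;
	wait_result_t	wres;

	wres = assert_wait((event_t)&my_event, THREAD_UNINT);
	if (wres == THREAD_WAITING) {
		thread_set_timer(10, 1000 * NSEC_PER_USEC);		/* ~10 ms */
		wres = thread_block(THREAD_CONTINUE_NULL);
		if (wres != THREAD_TIMED_OUT)
			thread_cancel_timer();
	}
#endif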
void
thread_set_timer_deadline(
	uint64_t		deadline)
{
	thread_t		thread = current_thread();

	if ((thread->state & TH_WAIT) != 0) {
		timer_call_enter(&thread->wait_timer, deadline);
		assert(!thread->wait_timer_is_set);
		thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
}
void
thread_cancel_timer(void)
{
	thread_t		thread = current_thread();

	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
}
/*
 *	Set up thread timeout element when thread is created.
 */
	extern void	thread_depress_expire(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

	timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
	thread->wait_timer_is_set = FALSE;
	thread->wait_timer_active = 1;

	timer_call_setup(&thread->depress_timer, thread_depress_expire, thread);
	thread->depress_timer_active = 1;
void
thread_timer_terminate(void)
{
	thread_t		thread = current_thread();
	wait_result_t	res;

	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	thread->wait_timer_active--;

	while (thread->wait_timer_active > 0) {
		res = assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
		assert(res == THREAD_WAITING);

		res = thread_block(THREAD_CONTINUE_NULL);
		assert(res == THREAD_AWAKENED);
	}

	thread->depress_timer_active--;

	while (thread->depress_timer_active > 0) {
		res = assert_wait((event_t)&thread->depress_timer_active, THREAD_UNINT);
		assert(res == THREAD_WAITING);

		res = thread_block(THREAD_CONTINUE_NULL);
		assert(res == THREAD_AWAKENED);
	}

	thread_deallocate(thread);
}
/*
 *	Routine:	thread_go_locked
 *	Purpose:
 *		Start a thread running.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *	Returns:
 *		KERN_SUCCESS		- Thread was set running
 *		KERN_NOT_WAITING	- Thread was not waiting
 */
kern_return_t
thread_go_locked(
	thread_t		thread,
	wait_result_t	result)
{
	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT64);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
		thread->state &= ~(TH_WAIT|TH_UNINT);
		if (!(thread->state & TH_RUN)) {
			thread->state |= TH_RUN;

			if (thread->active_callout)
				call_thread_unblock();

			if (!(thread->state & TH_IDLE)) {
				_mk_sp_thread_unblock(thread);
				hw_atomic_add(&thread->processor_set->run_count, 1);
			}
		}

		thread->wait_result = result;
		return KERN_SUCCESS;
	}

	return KERN_NOT_WAITING;
}
/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
wait_result_t
thread_mark_wait_locked(
	thread_t			thread,
	wait_interrupt_t	interruptible)
{
	wait_result_t	wait_result;
	boolean_t		at_safe_point;

	assert(thread == current_thread());

	/*
	 *	The thread may have certain types of interrupts/aborts masked
	 *	off.  Even if the wait location says these types of interrupts
	 *	are OK, we have to honor mask settings (outer-scoped code may
	 *	not be able to handle aborts at the moment).
	 */
	if (interruptible > thread->interrupt_level)
		interruptible = thread->interrupt_level;

	at_safe_point = (interruptible == THREAD_ABORTSAFE);

	if (	(interruptible == THREAD_UNINT)			||
			!(thread->state & TH_ABORT)				||
			(!at_safe_point && (thread->state & TH_ABORT_SAFELY))) {
		thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
		thread->at_safe_point = at_safe_point;
		thread->sleep_stamp = sched_tick;
		return (thread->wait_result = THREAD_WAITING);
	}
	else if (thread->state & TH_ABORT_SAFELY) {
		thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
	}

	return (thread->wait_result = THREAD_INTERRUPTED);
}
/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
wait_interrupt_t
thread_interrupt_level(
	wait_interrupt_t new_level)
{
	thread_t			thread = current_thread();
	wait_interrupt_t	result = thread->interrupt_level;

	thread->interrupt_level = new_level;
	return result;
}
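/*
 * Typical save/restore usage (illustrative sketch, following the
 * routine's stated purpose and return value):
 */
#if 0
	wait_interrupt_t	old_level;

	old_level = thread_interrupt_level(THREAD_UNINT);
	/* ... code that must not be aborted, but may call code that waits ... */
	(void) thread_interrupt_level(old_level);
#endif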
/*
 *	Routine:	assert_wait_timeout
 *	Purpose:
 *		Assert that the thread intends to block,
 *		waiting for a timeout (no user known event).
 */
unsigned int assert_wait_timeout_event;

wait_result_t
assert_wait_timeout(
	mach_msg_timeout_t		msecs,
	wait_interrupt_t		interruptible)
{
	wait_result_t	res;

	res = assert_wait((event_t)&assert_wait_timeout_event, interruptible);
	if (res == THREAD_WAITING)
		thread_set_timer(msecs, 1000*NSEC_PER_USEC);
	return res;
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t thread;
	extern unsigned int debug_mode;

	if (debug_mode)
		return TRUE;		/* Always succeed in debug mode */

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
	event_t				event,
	wait_interrupt_t	interruptible)
{
	register wait_queue_t	wq;
	register int			index;

	assert(event != NO_EVENT);
	assert(assert_wait_possible());

	index = wait_hash(event);
	wq = &wait_queues[index];
	return wait_queue_assert_wait(wq, event, interruptible);
}
/*
 *	thread_sleep_fast_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified simple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components that use a
 *	faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
	event_t				event,
	simple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		simple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		simple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified usimple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components where
 *	simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
	event_t				event,
	usimple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		usimple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		usimple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_mutex:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified mutex is unlocked before releasing
 *	the cpu.  The mutex will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex(
	event_t				event,
	mutex_t				*mutex,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		mutex_unlock(mutex);
		res = thread_block(THREAD_CONTINUE_NULL);
		mutex_lock(mutex);
	}
	return res;
}
/*
 *	thread_sleep_mutex_deadline:
 *
 *	Cause the current thread to wait until the specified event
 *	(or deadline) occurs.  The specified mutex is unlocked before
 *	releasing the cpu.  The mutex will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex_deadline(
	event_t				event,
	mutex_t				*mutex,
	uint64_t			deadline,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		mutex_unlock(mutex);
		thread_set_timer_deadline(deadline);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (res != THREAD_TIMED_OUT)
			thread_cancel_timer();
		mutex_lock(mutex);
	}
	return res;
}
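/*
 * Illustrative caller of the mutex sleep interface (a sketch, not a
 * quote from this file; `my_mutex' and `my_condition' are placeholder
 * names).  The mutex must be held on entry; it is dropped while
 * blocked and held again when thread_sleep_mutex() returns, so the
 * condition is re-checked in a loop.
 */
#if 0
	static mutex_t		my_mutex;
	static boolean_t	my_condition;

	mutex_lock(&my_mutex);
	while (!my_condition) {
		(void) thread_sleep_mutex((event_t)&my_condition,
								  &my_mutex, THREAD_UNINT);
		/* mutex is held again here; re-check the condition */
	}
	mutex_unlock(&my_mutex);
#endif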
/*
 *	thread_sleep_lock_write:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified (write) lock is unlocked before releasing
 *	the cpu.  The (write) lock will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_lock_write(
	event_t				event,
	lock_t				*lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lock_write_done(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		lock_write(lock);
	}
	return res;
}
/*
 *	thread_sleep_funnel:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  If the thread is funnelled, the funnel will be released
 *	before giving up the cpu.  The funnel will be re-acquired before returning.
 *
 *	JMM - Right now the funnel is dropped and re-acquired inside
 *		  thread_block().  At some point, this may give thread_block() a hint.
 */
wait_result_t
thread_sleep_funnel(
	event_t				event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		res = thread_block(THREAD_CONTINUE_NULL);
	}
	return res;
}
/*
 * thread_[un]stop(thread)
 *	Once a thread has blocked interruptibly (via assert_wait) prevent
 *	it from running until thread_unstop.
 *
 *	If someone else has already stopped the thread, wait for the
 *	stop to be cleared, and then stop it again.
 *
 *	Return FALSE if interrupted.
 *
 * NOTE: thread_hold/thread_suspend should be called on the activation
 *	before calling thread_stop.  TH_SUSP is only recognized when
 *	a thread blocks and only prevents clear_wait/thread_wakeup
 *	from restarting an interruptible wait.  The wake_active flag is
 *	used to indicate that someone is waiting on the thread.
 */
boolean_t
thread_stop(
	thread_t	thread)
{
	spl_t		s = splsched();

	while (thread->state & TH_SUSP) {
		wait_result_t	result;

		thread->wake_active = TRUE;
		result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		if (result != THREAD_AWAKENED)
			return (FALSE);
	}

	thread->state |= TH_SUSP;

	while (thread->state & TH_RUN) {
		wait_result_t	result;
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->cpu_data->active_thread == thread	)
			cause_ast_check(processor);
		thread_unlock(thread);

		thread->wake_active = TRUE;
		result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		if (result != THREAD_AWAKENED) {
			thread_unstop(thread);
			return (FALSE);
		}
	}

	thread_unlock(thread);

	return (TRUE);
}
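/*
 * Illustrative pairing (a sketch based on the NOTE above, not a quote
 * from a caller): the activation is held first so the thread cannot
 * keep running in user space, then thread_stop() waits for its RUN bit
 * to clear.  `act' is a placeholder for the thread's activation, and
 * thread_release() is shown as the assumed counterpart of thread_hold().
 */
#if 0
	thread_hold(act);			/* per the NOTE: hold the activation first */
	if (thread_stop(thread)) {
		/* thread is now stopped; inspect or manipulate it here */
		thread_unstop(thread);
	}
	thread_release(act);
#endif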
/*
 *	Clear TH_SUSP and if the thread has been stopped and is now runnable,
 *	put it back on the run queue.
 */
void
thread_unstop(
	thread_t	thread)
{
	spl_t		s = splsched();

	if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
		thread->state &= ~TH_SUSP;
		thread->state |= TH_RUN;

		assert(!(thread->state & TH_IDLE));
		_mk_sp_thread_unblock(thread);
		hw_atomic_add(&thread->processor_set->run_count, 1);
	}
	else
	if (thread->state & TH_SUSP) {
		thread->state &= ~TH_SUSP;

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);

			thread_wakeup(&thread->wake_active);
			return;
		}
	}

	thread_unlock(thread);
}
/*
 *	Wait for the thread's RUN bit to clear.
 */
boolean_t
thread_wait(
	thread_t	thread)
{
	spl_t		s = splsched();

	while (thread->state & TH_RUN) {
		wait_result_t	result;
		processor_t		processor = thread->last_processor;

		if (	processor != PROCESSOR_NULL					&&
				processor->state == PROCESSOR_RUNNING		&&
				processor->cpu_data->active_thread == thread	)
			cause_ast_check(processor);
		thread_unlock(thread);

		thread->wake_active = TRUE;
		result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);

		if (result == THREAD_WAITING)
			result = thread_block(THREAD_CONTINUE_NULL);

		if (result != THREAD_AWAKENED)
			return (FALSE);
	}

	thread_unlock(thread);

	return (TRUE);
}
/*
 *	Routine:	clear_wait_internal
 *	Purpose:
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
	thread_t		thread,
	wait_result_t	result)
{
	wait_queue_t	wq = thread->wait_queue;
	kern_return_t	ret;
	int				loop_count = 0;

	do {
		if ((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))
			return (KERN_FAILURE);

		if (wq != WAIT_QUEUE_NULL) {
			if (wait_queue_lock_try(wq)) {
				wait_queue_pull_thread_locked(wq, thread, TRUE);
				/* wait queue unlocked, thread still locked */
			}
			else {
				thread_unlock(thread);

				if (wq != thread->wait_queue)
					return (KERN_NOT_WAITING);	/* we know it moved */

				continue;
			}
		}

		ret = thread_go_locked(thread, result);
		return (ret);
	} while (++loop_count < LockTimeOut);

	panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n",
		  thread, wq, cpu_number());

	return (KERN_FAILURE);
}
/*
 *	Routine:	clear_wait
 *	Purpose:
 *		Clear the wait condition for the specified thread.  Start the thread
 *		executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
	thread_t		thread,
	wait_result_t	result)
{
	kern_return_t	ret;

	thread_lock(thread);
	ret = clear_wait_internal(thread, result);
	thread_unlock(thread);
	return ret;
}
/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 */
kern_return_t
thread_wakeup_prim(
	event_t			event,
	boolean_t		one_thread,
	wait_result_t	result)
{
	register wait_queue_t	wq;
	register int			index;

	index = wait_hash(event);
	wq = &wait_queues[index];
	if (one_thread)
		return (wait_queue_wakeup_one(wq, event, result));
	else
		return (wait_queue_wakeup_all(wq, event, result));
}
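/*
 * Illustrative wait/wakeup pairing (a sketch; `data_ready' is a
 * placeholder).  Waiter and waker must name the same event address so
 * both sides hash to the same wait queue bucket; thread_wakeup() is
 * the macro form over thread_wakeup_prim() used elsewhere in this file.
 */
#if 0
	static boolean_t	data_ready;

	/* Waiter: */
	if (assert_wait((event_t)&data_ready, THREAD_UNINT) == THREAD_WAITING)
		thread_block(THREAD_CONTINUE_NULL);

	/* Waker (typically another thread or CPU): */
	data_ready = TRUE;
	thread_wakeup((event_t)&data_ready);
#endif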
/*
 *	thread_bind:
 *
 *	Force a thread to execute on the specified processor.
 *	If the thread is currently executing, it may wait until its
 *	time slice is up before switching onto the specified processor.
 *
 *	A processor of PROCESSOR_NULL causes the thread to be unbound.
 *	xxx - DO NOT export this to users.
 */
void
thread_bind(
	register thread_t	thread,
	processor_t			processor)
{
	thread_lock(thread);
	thread_bind_locked(thread, processor);
	thread_unlock(thread);
}
/*
 *	Select a thread for this processor (the current processor) to run.
 *	May select the current thread, which must already be locked.
 */
thread_t
thread_select(
	register processor_t	myprocessor)
{
	register thread_t		thread;
	processor_set_t			pset;
	register run_queue_t	runq = &myprocessor->runq;
	boolean_t				other_runnable;

	/*
	 *	Check for other non-idle runnable threads.
	 */
	pset = myprocessor->processor_set;
	thread = myprocessor->cpu_data->active_thread;

	/* Update the thread's priority */
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);

	myprocessor->current_pri = thread->sched_pri;

	simple_lock(&runq->lock);
	simple_lock(&pset->runq.lock);

	other_runnable = runq->count > 0 || pset->runq.count > 0;

	if (	thread->state == TH_RUN							&&
			(!other_runnable							||
			 (runq->highq < thread->sched_pri				&&
			  pset->runq.highq < thread->sched_pri))		&&
			thread->processor_set == pset					&&
			(thread->bound_processor == PROCESSOR_NULL		||
			 thread->bound_processor == myprocessor)			) {

		/* I am the highest priority runnable (non-idle) thread */
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);

		myprocessor->slice_quanta =
				(thread->sched_mode & TH_MODE_TIMESHARE)? pset->set_quanta: 1;
	}
	else
	if (other_runnable)
		thread = choose_thread(myprocessor);
	else {
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);

		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.  If it was in an assignment or shutdown,
		 *	leave it alone.  Return its idle thread.
		 */
		simple_lock(&pset->sched_lock);
		if (myprocessor->state == PROCESSOR_RUNNING) {
			remqueue(&pset->active_queue, (queue_entry_t)myprocessor);
			myprocessor->state = PROCESSOR_IDLE;

			if (myprocessor == master_processor)
				enqueue_tail(&pset->idle_queue, (queue_entry_t)myprocessor);
			else
				enqueue_head(&pset->idle_queue, (queue_entry_t)myprocessor);
		}
		simple_unlock(&pset->sched_lock);

		thread = myprocessor->idle_thread;
	}

	return (thread);
}
/*
 *	Stop running the current thread and start running the new thread.
 *	If continuation is non-zero, and the current thread is blocked,
 *	then it will resume by executing continuation on a new stack.
 *	Returns TRUE if the hand-off succeeds.
 */
static thread_t
__current_thread(void)
{
	return (current_thread());
}
1160 register thread_t old_thread
,
1161 register thread_t new_thread
,
1163 thread_continue_t old_cont
)
1165 thread_continue_t new_cont
;
1166 processor_t processor
;
1168 if (get_preemption_level() != 0)
1169 panic("thread_invoke: preemption_level %d\n",
1170 get_preemption_level());
1173 * Mark thread interruptible.
1175 thread_lock(new_thread
);
1176 new_thread
->state
&= ~TH_UNINT
;
1178 assert(thread_runnable(new_thread
));
1180 assert(old_thread
->continuation
== NULL
);
1183 * Allow time constraint threads to hang onto
1186 if ( (old_thread
->sched_mode
& TH_MODE_REALTIME
) &&
1187 !old_thread
->stack_privilege
) {
1188 old_thread
->stack_privilege
= old_thread
->kernel_stack
;
1191 if (old_cont
!= NULL
) {
1192 if (new_thread
->state
& TH_STACK_HANDOFF
) {
1194 * If the old thread is using a privileged stack,
1195 * check to see whether we can exchange it with
1196 * that of the new thread.
1198 if ( old_thread
->kernel_stack
== old_thread
->stack_privilege
&&
1199 !new_thread
->stack_privilege
)
1202 new_thread
->state
&= ~TH_STACK_HANDOFF
;
1203 new_cont
= new_thread
->continuation
;
1204 new_thread
->continuation
= NULL
;
1207 * Set up ast context of new thread and switch
1210 processor
= current_processor();
1211 new_thread
->last_processor
= processor
;
1212 processor
->current_pri
= new_thread
->sched_pri
;
1213 ast_context(new_thread
->top_act
, processor
->slot_num
);
1214 timer_switch(&new_thread
->system_timer
);
1215 thread_unlock(new_thread
);
1217 current_task()->csw
++;
1219 old_thread
->reason
= reason
;
1220 old_thread
->continuation
= old_cont
;
1222 _mk_sp_thread_done(old_thread
, new_thread
, processor
);
1224 stack_handoff(old_thread
, new_thread
);
1226 _mk_sp_thread_begin(new_thread
, processor
);
1228 wake_lock(old_thread
);
1229 thread_lock(old_thread
);
1232 * Inline thread_dispatch but
1236 switch (old_thread
->state
& (TH_RUN
|TH_WAIT
|TH_UNINT
|TH_IDLE
)) {
1238 case TH_RUN
| TH_UNINT
:
1241 * Still running, put back
1244 old_thread
->state
|= TH_STACK_HANDOFF
;
1245 _mk_sp_thread_dispatch(old_thread
);
1247 thread_unlock(old_thread
);
1248 wake_unlock(old_thread
);
1251 case TH_RUN
| TH_WAIT
| TH_UNINT
:
1252 case TH_RUN
| TH_WAIT
:
1254 boolean_t reap
, wake
, callblock
;
1259 old_thread
->sleep_stamp
= sched_tick
;
1260 old_thread
->state
|= TH_STACK_HANDOFF
;
1261 old_thread
->state
&= ~TH_RUN
;
1262 hw_atomic_sub(&old_thread
->processor_set
->run_count
, 1);
1263 callblock
= old_thread
->active_callout
;
1264 wake
= old_thread
->wake_active
;
1265 old_thread
->wake_active
= FALSE
;
1266 reap
= (old_thread
->state
& TH_TERMINATE
)? TRUE
: FALSE
;
1268 thread_unlock(old_thread
);
1269 wake_unlock(old_thread
);
1272 call_thread_block();
1275 thread_wakeup((event_t
)&old_thread
->wake_active
);
1278 thread_reaper_enqueue(old_thread
);
1282 case TH_RUN
| TH_IDLE
:
1284 * The idle threads don't go
1287 old_thread
->state
|= TH_STACK_HANDOFF
;
1288 thread_unlock(old_thread
);
1289 wake_unlock(old_thread
);
1293 panic("thread_invoke: state 0x%x\n", old_thread
->state
);
1296 counter_always(c_thread_invoke_hits
++);
1298 if (new_thread
->funnel_state
& TH_FN_REFUNNEL
) {
1299 kern_return_t wait_result
= new_thread
->wait_result
;
1301 new_thread
->funnel_state
= 0;
1302 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
,
1303 new_thread
->funnel_lock
, 2, 0, 0, 0);
1304 funnel_lock(new_thread
->funnel_lock
);
1305 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
,
1306 new_thread
->funnel_lock
, 2, 0, 0, 0);
1307 new_thread
->funnel_state
= TH_FN_OWNED
;
1308 new_thread
->wait_result
= wait_result
;
1313 call_continuation(new_cont
);
1318 if (new_thread
->state
& TH_STACK_ALLOC
) {
1320 * Waiting for a stack
1322 counter_always(c_thread_invoke_misses
++);
1323 thread_unlock(new_thread
);
1327 if (new_thread
== old_thread
) {
1328 /* same thread but with continuation */
1329 counter(++c_thread_invoke_same
);
1330 thread_unlock(new_thread
);
1332 if (new_thread
->funnel_state
& TH_FN_REFUNNEL
) {
1333 kern_return_t wait_result
= new_thread
->wait_result
;
1335 new_thread
->funnel_state
= 0;
1336 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
,
1337 new_thread
->funnel_lock
, 3, 0, 0, 0);
1338 funnel_lock(new_thread
->funnel_lock
);
1339 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
,
1340 new_thread
->funnel_lock
, 3, 0, 0, 0);
1341 new_thread
->funnel_state
= TH_FN_OWNED
;
1342 new_thread
->wait_result
= wait_result
;
1345 call_continuation(old_cont
);
1351 * Check that the new thread has a stack
1353 if (new_thread
->state
& TH_STACK_HANDOFF
) {
1355 if (!stack_alloc_try(new_thread
, thread_continue
)) {
1356 counter_always(c_thread_invoke_misses
++);
1357 thread_swapin(new_thread
);
1361 new_thread
->state
&= ~TH_STACK_HANDOFF
;
1364 if (new_thread
->state
& TH_STACK_ALLOC
) {
1366 * Waiting for a stack
1368 counter_always(c_thread_invoke_misses
++);
1369 thread_unlock(new_thread
);
1373 if (old_thread
== new_thread
) {
1374 counter(++c_thread_invoke_same
);
1375 thread_unlock(new_thread
);
1381 * Set up ast context of new thread and switch to its timer.
1383 processor
= current_processor();
1384 new_thread
->last_processor
= processor
;
1385 processor
->current_pri
= new_thread
->sched_pri
;
1386 ast_context(new_thread
->top_act
, processor
->slot_num
);
1387 timer_switch(&new_thread
->system_timer
);
1388 assert(thread_runnable(new_thread
));
1389 thread_unlock(new_thread
);
1391 counter_always(c_thread_invoke_csw
++);
1392 current_task()->csw
++;
1394 assert(old_thread
->runq
== RUN_QUEUE_NULL
);
1395 old_thread
->reason
= reason
;
1396 old_thread
->continuation
= old_cont
;
1398 _mk_sp_thread_done(old_thread
, new_thread
, processor
);
1401 * switch_context is machine-dependent. It does the
1402 * machine-dependent components of a context-switch, like
1403 * changing address spaces. It updates active_threads.
1405 old_thread
= switch_context(old_thread
, old_cont
, new_thread
);
1407 /* Now on new thread's stack. Set a local variable to refer to it. */
1408 new_thread
= __current_thread();
1409 assert(old_thread
!= new_thread
);
1411 assert(thread_runnable(new_thread
));
1412 _mk_sp_thread_begin(new_thread
, new_thread
->last_processor
);
1415 * We're back. Now old_thread is the thread that resumed
1416 * us, and we have to dispatch it.
1418 thread_dispatch(old_thread
);
1421 if (new_thread
->funnel_state
& TH_FN_REFUNNEL
) {
1422 kern_return_t wait_result
= new_thread
->wait_result
;
1424 new_thread
->funnel_state
= 0;
1425 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
,
1426 new_thread
->funnel_lock
, 3, 0, 0, 0);
1427 funnel_lock(new_thread
->funnel_lock
);
1428 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
,
1429 new_thread
->funnel_lock
, 3, 0, 0, 0);
1430 new_thread
->funnel_state
= TH_FN_OWNED
;
1431 new_thread
->wait_result
= wait_result
;
1434 call_continuation(old_cont
);
1444 * Called when a thread gets a new stack, at splsched();
1448 register thread_t old_thread
)
1450 register thread_t self
= current_thread();
1451 register thread_continue_t continuation
;
1453 continuation
= self
->continuation
;
1454 self
->continuation
= NULL
;
1456 _mk_sp_thread_begin(self
, self
->last_processor
);
1459 * We must dispatch the old thread and then
1460 * call the current thread's continuation.
1461 * There might not be an old thread, if we are
1462 * the first thread to run on this processor.
1464 if (old_thread
!= THREAD_NULL
)
1465 thread_dispatch(old_thread
);
1467 if (self
->funnel_state
& TH_FN_REFUNNEL
) {
1468 kern_return_t wait_result
= self
->wait_result
;
1470 self
->funnel_state
= 0;
1471 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
, self
->funnel_lock
, 4, 0, 0, 0);
1472 funnel_lock(self
->funnel_lock
);
1473 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
, self
->funnel_lock
, 4, 0, 0, 0);
1474 self
->funnel_state
= TH_FN_OWNED
;
1475 self
->wait_result
= wait_result
;
1478 assert(continuation
);
1479 call_continuation(continuation
);
1483 #if MACH_LDEBUG || MACH_KDB
1485 #define THREAD_LOG_SIZE 300
1499 } thread_log
[THREAD_LOG_SIZE
];
1501 int thread_log_index
;
1503 void check_thread_time(long n
);
1506 int check_thread_time_crash
;
1510 check_thread_time(long us
)
1514 if (!check_thread_time_crash
)
1517 temp
= thread_log
[0].stamp
;
1518 cyctm05_diff (&thread_log
[1].stamp
, &thread_log
[0].stamp
, &temp
);
1520 if (temp
.l
>= us
&& thread_log
[1].info
!= 0x49) /* HACK!!! */
1521 panic ("check_thread_time");
1526 log_thread_action(char * action
, long info1
, long info2
, long info3
)
1530 static unsigned int tstamp
;
1534 for (i
= THREAD_LOG_SIZE
-1; i
> 0; i
--) {
1535 thread_log
[i
] = thread_log
[i
-1];
1538 thread_log
[0].stamp
.h
= 0;
1539 thread_log
[0].stamp
.l
= tstamp
++;
1540 thread_log
[0].thread
= current_thread();
1541 thread_log
[0].info1
= info1
;
1542 thread_log
[0].info2
= info2
;
1543 thread_log
[0].info3
= info3
;
1544 thread_log
[0].action
= action
;
1545 /* strcpy (&thread_log[0].action[0], action);*/
1549 #endif /* MACH_LDEBUG || MACH_KDB */
1552 #include <ddb/db_output.h>
1553 void db_show_thread_log(void);
1556 db_show_thread_log(void)
1560 db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
1561 " Info3 ", " Timestamp ", "Action");
1563 for (i
= 0; i
< THREAD_LOG_SIZE
; i
++) {
1564 db_printf ("%08x %08x %08x %08x %08x/%08x %s\n",
1565 thread_log
[i
].thread
,
1566 thread_log
[i
].info1
,
1567 thread_log
[i
].info2
,
1568 thread_log
[i
].info3
,
1569 thread_log
[i
].stamp
.h
,
1570 thread_log
[i
].stamp
.l
,
1571 thread_log
[i
].action
);
1574 #endif /* MACH_KDB */
/*
 *	thread_block_reason:
 *
 *	Block the current thread if a wait has been asserted,
 *	otherwise unconditionally yield the remainder of the
 *	current quantum unless reason contains AST_BLOCK.
 *
 *	If a continuation is specified, then thread_block will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)
1591 thread_block_reason(
1592 thread_continue_t continuation
,
1595 register thread_t thread
= current_thread();
1596 register processor_t myprocessor
;
1597 register thread_t new_thread
;
1600 counter(++c_thread_block_calls
);
1602 check_simple_locks();
1604 machine_clock_assist();
1608 if ((thread
->funnel_state
& TH_FN_OWNED
) && !(reason
& AST_PREEMPT
)) {
1609 thread
->funnel_state
= TH_FN_REFUNNEL
;
1611 0x603242c | DBG_FUNC_NONE
, thread
->funnel_lock
, 2, 0, 0, 0);
1612 funnel_unlock(thread
->funnel_lock
);
1615 myprocessor
= current_processor();
1617 /* If we're explicitly yielding, force a subsequent quantum */
1618 if (reason
& AST_YIELD
)
1619 myprocessor
->slice_quanta
= 0;
1621 /* We're handling all scheduling AST's */
1622 ast_off(AST_SCHEDULING
);
1624 thread_lock(thread
);
1625 new_thread
= thread_select(myprocessor
);
1626 assert(new_thread
&& thread_runnable(new_thread
));
1627 thread_unlock(thread
);
1628 while (!thread_invoke(thread
, new_thread
, reason
, continuation
)) {
1629 thread_lock(thread
);
1630 new_thread
= thread_select(myprocessor
);
1631 assert(new_thread
&& thread_runnable(new_thread
));
1632 thread_unlock(thread
);
1635 if (thread
->funnel_state
& TH_FN_REFUNNEL
) {
1636 kern_return_t wait_result
= thread
->wait_result
;
1638 thread
->funnel_state
= 0;
1640 0x6032428 | DBG_FUNC_NONE
, thread
->funnel_lock
, 5, 0, 0, 0);
1641 funnel_lock(thread
->funnel_lock
);
1643 0x6032430 | DBG_FUNC_NONE
, thread
->funnel_lock
, 5, 0, 0, 0);
1644 thread
->funnel_state
= TH_FN_OWNED
;
1645 thread
->wait_result
= wait_result
;
1650 return (thread
->wait_result
);
1656 * Block the current thread if a wait has been asserted.
1660 thread_continue_t continuation
)
1662 return thread_block_reason(continuation
, AST_NONE
);
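/*
 * Continuation-style blocking (an illustrative sketch; `my_continue'
 * and `my_event' are placeholders).  Because the kernel stack may be
 * discarded while blocked, the continuation cannot rely on local
 * variables from before the block; it starts fresh on a new stack,
 * as the idle and sched_tick threads in this file do.
 */
#if 0
static int	my_event;

static void
my_continue(void)
{
	/* resumes here on a new kernel stack after the wakeup */
	/* ... re-derive any needed state from globals or the thread ... */
	thread_block(THREAD_CONTINUE_NULL);
	/*NOTREACHED*/
}

	/* at the blocking point: */
	assert_wait((event_t)&my_event, THREAD_UNINT);
	thread_block(my_continue);
	/*NOTREACHED*/
#endif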
1668 * Switch directly from the current (old) thread to the
1669 * specified thread, handing off our quantum if possible.
1671 * New thread must be runnable, and not on a run queue.
1678 thread_t old_thread
,
1679 thread_continue_t continuation
,
1680 thread_t new_thread
)
1682 ast_t handoff
= AST_HANDOFF
;
1684 assert(old_thread
== current_thread());
1686 machine_clock_assist();
1688 if (old_thread
->funnel_state
& TH_FN_OWNED
) {
1689 old_thread
->funnel_state
= TH_FN_REFUNNEL
;
1691 0x603242c | DBG_FUNC_NONE
, old_thread
->funnel_lock
, 3, 0, 0, 0);
1692 funnel_unlock(old_thread
->funnel_lock
);
1695 while (!thread_invoke(old_thread
, new_thread
, handoff
, continuation
)) {
1696 register processor_t myprocessor
= current_processor();
1698 thread_lock(old_thread
);
1699 new_thread
= thread_select(myprocessor
);
1700 thread_unlock(old_thread
);
1704 /* if we fell thru */
1705 if (old_thread
->funnel_state
& TH_FN_REFUNNEL
) {
1706 kern_return_t wait_result
= old_thread
->wait_result
;
1708 old_thread
->funnel_state
= 0;
1710 0x6032428 | DBG_FUNC_NONE
, old_thread
->funnel_lock
, 6, 0, 0, 0);
1711 funnel_lock(old_thread
->funnel_lock
);
1713 0x6032430 | DBG_FUNC_NONE
, old_thread
->funnel_lock
, 6, 0, 0, 0);
1714 old_thread
->funnel_state
= TH_FN_OWNED
;
1715 old_thread
->wait_result
= wait_result
;
1718 return (old_thread
->wait_result
);
1722 * Dispatches a running thread that is not on a runq.
1723 * Called at splsched.
1727 register thread_t thread
)
1730 thread_lock(thread
);
1733 * If we are discarding the thread's stack, we must do it
1734 * before the thread has a chance to run.
1737 if (thread
->continuation
!= NULL
) {
1738 assert((thread
->state
& TH_STACK_STATE
) == 0);
1739 thread
->state
|= TH_STACK_HANDOFF
;
1744 switch (thread
->state
& (TH_RUN
|TH_WAIT
|TH_UNINT
|TH_IDLE
)) {
1746 case TH_RUN
| TH_UNINT
:
1749 * No reason to stop. Put back on a run queue.
1751 _mk_sp_thread_dispatch(thread
);
1754 case TH_RUN
| TH_WAIT
| TH_UNINT
:
1755 case TH_RUN
| TH_WAIT
:
1757 boolean_t reap
, wake
, callblock
;
1762 thread
->sleep_stamp
= sched_tick
;
1763 thread
->state
&= ~TH_RUN
;
1764 hw_atomic_sub(&thread
->processor_set
->run_count
, 1);
1765 callblock
= thread
->active_callout
;
1766 wake
= thread
->wake_active
;
1767 thread
->wake_active
= FALSE
;
1768 reap
= (thread
->state
& TH_TERMINATE
)? TRUE
: FALSE
;
1770 thread_unlock(thread
);
1771 wake_unlock(thread
);
1774 call_thread_block();
1777 thread_wakeup((event_t
)&thread
->wake_active
);
1780 thread_reaper_enqueue(thread
);
1785 case TH_RUN
| TH_IDLE
:
1787 * The idle threads don't go
1793 panic("thread_dispatch: bad thread state 0x%x\n", thread
->state
);
1796 thread_unlock(thread
);
1797 wake_unlock(thread
);
1801 * Enqueue thread on run queue. Thread must be locked,
1802 * and not already be on a run queue. Returns TRUE iff
1803 * the particular queue level was empty beforehand.
1807 register run_queue_t rq
,
1808 register thread_t thread
,
1811 register int whichq
= thread
->sched_pri
;
1812 register queue_t queue
= &rq
->queues
[whichq
];
1813 boolean_t result
= FALSE
;
1815 assert(whichq
>= MINPRI
&& whichq
<= MAXPRI
);
1817 simple_lock(&rq
->lock
);
1818 assert(thread
->runq
== RUN_QUEUE_NULL
);
1819 if (queue_empty(queue
)) {
1820 enqueue_tail(queue
, (queue_entry_t
)thread
);
1822 setbit(MAXPRI
- whichq
, rq
->bitmap
);
1823 if (whichq
> rq
->highq
)
1829 enqueue_tail(queue
, (queue_entry_t
)thread
);
1831 enqueue_head(queue
, (queue_entry_t
)thread
);
1834 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
1838 thread_check(thread
, rq
);
1840 simple_unlock(&rq
->lock
);
1846 uint32_t pset_idle_last
,
1859 * Dispatch thread for execution, directly onto an idle
1860 * processor if possible. Else put on appropriate run
1861 * queue. (local if bound, else processor set)
1863 * Thread must be locked.
1865 * The tail parameter indicates the proper placement of
1866 * the thread on a run queue.
1870 register thread_t new_thread
,
1873 register processor_t processor
;
1874 register processor_set_t pset
;
1875 register thread_t thread
;
1876 boolean_t try_preempt
= FALSE
;
1877 ast_t preempt
= AST_BLOCK
;
1879 assert(thread_runnable(new_thread
));
1882 * Update priority if needed.
1884 if (new_thread
->sched_stamp
!= sched_tick
)
1885 update_priority(new_thread
);
1888 * Check for urgent preemption.
1890 if (new_thread
->sched_mode
& TH_MODE_PREEMPT
)
1891 preempt
|= AST_URGENT
;
1893 assert(new_thread
->runq
== RUN_QUEUE_NULL
);
1895 if ((processor
= new_thread
->bound_processor
) == PROCESSOR_NULL
) {
1897 * First try to dispatch on
1898 * the last processor.
1900 pset
= new_thread
->processor_set
;
1901 processor
= new_thread
->last_processor
;
1902 if ( pset
->processor_count
> 1 &&
1903 processor
!= PROCESSOR_NULL
&&
1904 processor
->state
== PROCESSOR_IDLE
) {
1905 simple_lock(&processor
->lock
);
1906 simple_lock(&pset
->sched_lock
);
1907 if ( processor
->processor_set
== pset
&&
1908 processor
->state
== PROCESSOR_IDLE
) {
1909 remqueue(&pset
->idle_queue
, (queue_entry_t
)processor
);
1911 processor
->next_thread
= new_thread
;
1912 processor
->state
= PROCESSOR_DISPATCHING
;
1913 simple_unlock(&pset
->sched_lock
);
1914 simple_unlock(&processor
->lock
);
1915 if (processor
!= current_processor())
1916 machine_signal_idle(processor
);
1917 dispatch_counts
.pset_idle_last
++;
1920 simple_unlock(&processor
->lock
);
1923 simple_lock(&pset
->sched_lock
);
1926 * Next pick any idle processor
1927 * in the processor set.
1929 if (pset
->idle_count
> 0) {
1930 processor
= (processor_t
)dequeue_head(&pset
->idle_queue
);
1932 processor
->next_thread
= new_thread
;
1933 processor
->state
= PROCESSOR_DISPATCHING
;
1934 simple_unlock(&pset
->sched_lock
);
1935 if (processor
!= current_processor())
1936 machine_signal_idle(processor
);
1937 dispatch_counts
.pset_idle_any
++;
1942 * Place thread on run queue.
1944 if (run_queue_enqueue(&pset
->runq
, new_thread
, tail
))
1948 * Update the timesharing quanta.
1950 pset_quanta_update(pset
);
1955 processor
= current_processor();
1956 thread
= processor
->cpu_data
->active_thread
;
1959 * First try the current processor
1960 * if it is a member of the correct
1963 if ( pset
== processor
->processor_set
&&
1964 csw_needed(thread
, processor
) ) {
1965 simple_unlock(&pset
->sched_lock
);
1968 dispatch_counts
.pset_self
++;
1973 * If that failed and we have other
1974 * processors available keep trying.
1976 if ( pset
->processor_count
> 1 ||
1977 pset
!= processor
->processor_set
) {
1978 queue_t active
= &pset
->active_queue
;
1979 processor_t myprocessor
, lastprocessor
;
1983 * Next try the last processor
1986 myprocessor
= processor
;
1987 processor
= new_thread
->last_processor
;
1988 if ( processor
!= myprocessor
&&
1989 processor
!= PROCESSOR_NULL
&&
1990 processor
->processor_set
== pset
&&
1991 processor
->state
== PROCESSOR_RUNNING
&&
1992 new_thread
->sched_pri
> processor
->current_pri
) {
1993 cause_ast_check(processor
);
1994 simple_unlock(&pset
->sched_lock
);
1995 dispatch_counts
.pset_last
++;
2000 * Lastly, pick any other
2001 * available processor.
2003 lastprocessor
= processor
;
2004 processor
= (processor_t
)queue_first(active
);
2005 while (!queue_end(active
, (queue_entry_t
)processor
)) {
2006 next
= queue_next((queue_entry_t
)processor
);
2008 if ( processor
!= myprocessor
&&
2009 processor
!= lastprocessor
&&
2010 new_thread
->sched_pri
> processor
->current_pri
) {
2011 if (!queue_end(active
, next
)) {
2012 remqueue(active
, (queue_entry_t
)processor
);
2013 enqueue_tail(active
, (queue_entry_t
)processor
);
2015 cause_ast_check(processor
);
2016 simple_unlock(&pset
->sched_lock
);
2017 dispatch_counts
.pset_other
++;
2021 processor
= (processor_t
)next
;
2026 simple_unlock(&pset
->sched_lock
);
2030 * Bound, can only run on bound processor. Have to lock
2031 * processor here because it may not be the current one.
2033 if (processor
->state
== PROCESSOR_IDLE
) {
2034 simple_lock(&processor
->lock
);
2035 pset
= processor
->processor_set
;
2036 simple_lock(&pset
->sched_lock
);
2037 if (processor
->state
== PROCESSOR_IDLE
) {
2038 remqueue(&pset
->idle_queue
, (queue_entry_t
)processor
);
2040 processor
->next_thread
= new_thread
;
2041 processor
->state
= PROCESSOR_DISPATCHING
;
2042 simple_unlock(&pset
->sched_lock
);
2043 simple_unlock(&processor
->lock
);
2044 if (processor
!= current_processor())
2045 machine_signal_idle(processor
);
2046 dispatch_counts
.bound_idle
++;
2049 simple_unlock(&pset
->sched_lock
);
2050 simple_unlock(&processor
->lock
);
2053 if (run_queue_enqueue(&processor
->runq
, new_thread
, tail
))
2056 if (processor
== current_processor()) {
2058 thread
= processor
->cpu_data
->active_thread
;
2059 if (csw_needed(thread
, processor
)) {
2061 dispatch_counts
.bound_self
++;
2067 if ( processor
->state
== PROCESSOR_RUNNING
&&
2068 new_thread
->sched_pri
> processor
->current_pri
) {
2069 cause_ast_check(processor
);
2070 dispatch_counts
.bound_other
++;
2075 if (processor
->state
== PROCESSOR_IDLE
) {
2076 machine_signal_idle(processor
);
2077 dispatch_counts
.bound_idle
++;
2084 * Called at splsched by a thread on itself.
2089 processor_t processor
)
2091 int current_pri
= thread
->sched_pri
;
2092 ast_t result
= AST_NONE
;
2095 if (first_quantum(processor
)) {
2096 runq
= &processor
->processor_set
->runq
;
2097 if (runq
->highq
> current_pri
) {
2098 if (runq
->urgency
> 0)
2099 return (AST_BLOCK
| AST_URGENT
);
2101 result
|= AST_BLOCK
;
2104 runq
= &processor
->runq
;
2105 if (runq
->highq
> current_pri
) {
2106 if (runq
->urgency
> 0)
2107 return (AST_BLOCK
| AST_URGENT
);
2109 result
|= AST_BLOCK
;
2113 runq
= &processor
->processor_set
->runq
;
2114 if (runq
->highq
>= current_pri
) {
2115 if (runq
->urgency
> 0)
2116 return (AST_BLOCK
| AST_URGENT
);
2118 result
|= AST_BLOCK
;
2121 runq
= &processor
->runq
;
2122 if (runq
->highq
>= current_pri
) {
2123 if (runq
->urgency
> 0)
2124 return (AST_BLOCK
| AST_URGENT
);
2126 result
|= AST_BLOCK
;
2130 if (result
!= AST_NONE
)
2133 if (thread
->state
& TH_SUSP
)
2134 result
|= AST_BLOCK
;
2142 * Set the current scheduled priority of the specified thread.
2143 * This may cause the thread to change queues.
2145 * The thread *must* be locked by the caller.
2152 register struct run_queue
*rq
= rem_runq(thread
);
2154 if ( !(thread
->sched_mode
& TH_MODE_TIMESHARE
) &&
2155 (priority
>= BASEPRI_PREEMPT
||
2156 (thread
->task_priority
< MINPRI_KERNEL
&&
2157 thread
->task_priority
>= BASEPRI_BACKGROUND
&&
2158 priority
> thread
->task_priority
) ||
2159 (thread
->sched_mode
& TH_MODE_FORCEDPREEMPT
) ) )
2160 thread
->sched_mode
|= TH_MODE_PREEMPT
;
2162 thread
->sched_mode
&= ~TH_MODE_PREEMPT
;
2164 thread
->sched_pri
= priority
;
2165 if (rq
!= RUN_QUEUE_NULL
)
2166 thread_setrun(thread
, TAIL_Q
);
2168 if ((thread
->state
& (TH_RUN
|TH_WAIT
)) == TH_RUN
) {
2169 processor_t processor
= thread
->last_processor
;
2171 if (thread
== current_thread()) {
2172 ast_t preempt
= csw_check(thread
, processor
);
2174 if (preempt
!= AST_NONE
)
2176 processor
->current_pri
= priority
;
2179 if ( processor
!= PROCESSOR_NULL
&&
2180 processor
->cpu_data
->active_thread
== thread
)
2181 cause_ast_check(processor
);
2188 * Remove a thread from its run queue.
2189 * The run queue that the process was on is returned
2190 * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
2191 * before calling this routine. Unusual locking protocol on runq
2192 * field in thread structure makes this code interesting; see thread.h.
2198 register struct run_queue
*rq
;
2202 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
2203 * run_queues because the caller locked the thread. Otherwise
2204 * the thread is on a runq, but could leave.
2206 if (rq
!= RUN_QUEUE_NULL
) {
2207 simple_lock(&rq
->lock
);
2208 if (rq
== thread
->runq
) {
2210 * Thread is in a runq and we have a lock on
2214 thread_check(thread
, rq
);
2216 remqueue(&rq
->queues
[0], (queue_entry_t
)thread
);
2218 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
2220 assert(rq
->urgency
>= 0);
2222 if (queue_empty(rq
->queues
+ thread
->sched_pri
)) {
2223 /* update run queue status */
2224 if (thread
->sched_pri
!= IDLEPRI
)
2225 clrbit(MAXPRI
- thread
->sched_pri
, rq
->bitmap
);
2226 rq
->highq
= MAXPRI
- ffsbit(rq
->bitmap
);
2228 thread
->runq
= RUN_QUEUE_NULL
;
2229 simple_unlock(&rq
->lock
);
2233 * The thread left the runq before we could
2234 * lock the runq. It is not on a runq now, and
2235 * can't move again because this routine's
2236 * caller locked the thread.
2238 assert(thread
->runq
== RUN_QUEUE_NULL
);
2239 simple_unlock(&rq
->lock
);
2240 rq
= RUN_QUEUE_NULL
;
2250 * Choose a thread to execute. The thread chosen is removed
2251 * from its run queue. Note that this requires only that the runq
2255 * Check processor runq first; if anything found, run it.
2256 * Else check pset runq; if nothing found, return idle thread.
2258 * Second line of strategy is implemented by choose_pset_thread.
2260 * Called with both the local & pset run queues locked, returned
2265 processor_t myprocessor
)
2269 register run_queue_t runq
;
2270 processor_set_t pset
;
2272 runq
= &myprocessor
->runq
;
2273 pset
= myprocessor
->processor_set
;
2275 if (runq
->count
> 0 && runq
->highq
>= pset
->runq
.highq
) {
2276 simple_unlock(&pset
->runq
.lock
);
2277 q
= runq
->queues
+ runq
->highq
;
2279 if (!queue_empty(q
)) {
2280 #endif /*MACH_ASSERT*/
2281 thread
= (thread_t
)q
->next
;
2282 ((queue_entry_t
)thread
)->next
->prev
= q
;
2283 q
->next
= ((queue_entry_t
)thread
)->next
;
2284 thread
->runq
= RUN_QUEUE_NULL
;
2286 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
2288 assert(runq
->urgency
>= 0);
2289 if (queue_empty(q
)) {
2290 if (runq
->highq
!= IDLEPRI
)
2291 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
2292 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
2294 simple_unlock(&runq
->lock
);
2298 panic("choose_thread");
2299 #endif /*MACH_ASSERT*/
2302 simple_unlock(&myprocessor
->runq
.lock
);
2304 return (choose_pset_thread(myprocessor
, pset
));
2308 * choose_pset_thread: choose a thread from processor_set runq or
2309 * set processor idle and choose its idle thread.
2311 * This routine chooses and removes a thread from the runq if there
2312 * is one (and returns it), else it sets the processor idle and
2313 * returns its idle thread.
2315 * Called with both local & pset run queues locked, returned
2320 register processor_t myprocessor
,
2321 processor_set_t pset
)
2323 register run_queue_t runq
;
2324 register thread_t thread
;
2328 if (runq
->count
> 0) {
2329 q
= runq
->queues
+ runq
->highq
;
2331 if (!queue_empty(q
)) {
2332 #endif /*MACH_ASSERT*/
2333 thread
= (thread_t
)q
->next
;
2334 ((queue_entry_t
)thread
)->next
->prev
= q
;
2335 q
->next
= ((queue_entry_t
)thread
)->next
;
2336 thread
->runq
= RUN_QUEUE_NULL
;
2338 if (thread
->sched_mode
& TH_MODE_PREEMPT
)
2340 assert(runq
->urgency
>= 0);
2341 if (queue_empty(q
)) {
2342 if (runq
->highq
!= IDLEPRI
)
2343 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
2344 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
2346 pset_quanta_update(pset
);
2347 simple_unlock(&runq
->lock
);
2351 panic("choose_pset_thread");
2352 #endif /*MACH_ASSERT*/
2355 simple_unlock(&runq
->lock
);
2358 * Nothing is runnable, so set this processor idle if it
2359 * was running. If it was in an assignment or shutdown,
2360 * leave it alone. Return its idle thread.
2362 simple_lock(&pset
->sched_lock
);
2363 if (myprocessor
->state
== PROCESSOR_RUNNING
) {
2364 remqueue(&pset
->active_queue
, (queue_entry_t
)myprocessor
);
2365 myprocessor
->state
= PROCESSOR_IDLE
;
2367 if (myprocessor
== master_processor
)
2368 enqueue_tail(&pset
->idle_queue
, (queue_entry_t
)myprocessor
);
2370 enqueue_head(&pset
->idle_queue
, (queue_entry_t
)myprocessor
);
2374 simple_unlock(&pset
->sched_lock
);
2376 return (myprocessor
->idle_thread
);
2380 * no_dispatch_count counts number of times processors go non-idle
2381 * without being dispatched. This should be very rare.
2383 int no_dispatch_count
= 0;
2386 * This is the idle thread, which just looks for other threads
2390 idle_thread_continue(void)
2392 register processor_t myprocessor
;
2393 register volatile thread_t
*threadp
;
2394 register volatile int *gcount
;
2395 register volatile int *lcount
;
2396 register thread_t new_thread
;
2398 register processor_set_t pset
;
2401 mycpu
= cpu_number();
2402 myprocessor
= cpu_to_processor(mycpu
);
2403 threadp
= (volatile thread_t
*) &myprocessor
->next_thread
;
2404 lcount
= (volatile int *) &myprocessor
->runq
.count
;
2407 gcount
= (volatile int *)&myprocessor
->processor_set
->runq
.count
;
2410 while ( (*threadp
== (volatile thread_t
)THREAD_NULL
) &&
2411 (*gcount
== 0) && (*lcount
== 0) ) {
2413 /* check for ASTs while we wait */
2414 if (need_ast
[mycpu
] &~ ( AST_SCHEDULING
| AST_BSD
)) {
2415 /* don't allow scheduling ASTs */
2416 need_ast
[mycpu
] &= ~( AST_SCHEDULING
| AST_BSD
);
2417 ast_taken(AST_ALL
, TRUE
); /* back at spllo */
2425 machine_clock_assist();
2431 * This is not a switch statement to avoid the
2432 * bounds checking code in the common case.
2434 pset
= myprocessor
->processor_set
;
2435 simple_lock(&pset
->sched_lock
);
2437 state
= myprocessor
->state
;
2438 if (state
== PROCESSOR_DISPATCHING
) {
2440 * Commmon case -- cpu dispatched.
2442 new_thread
= *threadp
;
2443 *threadp
= (volatile thread_t
) THREAD_NULL
;
2444 myprocessor
->state
= PROCESSOR_RUNNING
;
2445 enqueue_tail(&pset
->active_queue
, (queue_entry_t
)myprocessor
);
2446 simple_unlock(&pset
->sched_lock
);
2448 if ( myprocessor
->runq
.highq
> new_thread
->sched_pri
||
2449 pset
->runq
.highq
> new_thread
->sched_pri
) {
2450 thread_lock(new_thread
);
2451 thread_setrun(new_thread
, HEAD_Q
);
2452 thread_unlock(new_thread
);
2454 counter(c_idle_thread_block
++);
2455 thread_block(idle_thread_continue
);
2459 counter(c_idle_thread_handoff
++);
2460 thread_run(myprocessor
->idle_thread
,
2461 idle_thread_continue
, new_thread
);
2466 if (state
== PROCESSOR_IDLE
) {
2467 if (myprocessor
->state
!= PROCESSOR_IDLE
) {
2469 * Something happened, try again.
2474 * Processor was not dispatched (Rare).
2475 * Set it running again.
2477 no_dispatch_count
++;
2479 remqueue(&pset
->idle_queue
, (queue_entry_t
)myprocessor
);
2480 myprocessor
->state
= PROCESSOR_RUNNING
;
2481 enqueue_tail(&pset
->active_queue
, (queue_entry_t
)myprocessor
);
2482 simple_unlock(&pset
->sched_lock
);
2484 counter(c_idle_thread_block
++);
2485 thread_block(idle_thread_continue
);
2489 if ( state
== PROCESSOR_ASSIGN
||
2490 state
== PROCESSOR_SHUTDOWN
) {
2492 * Changing processor sets, or going off-line.
2493 * Release next_thread if there is one. Actual
2494 * thread to run is on a runq.
2496 if ((new_thread
= (thread_t
)*threadp
) != THREAD_NULL
) {
2497 *threadp
= (volatile thread_t
) THREAD_NULL
;
2498 simple_unlock(&pset
->sched_lock
);
2500 thread_lock(new_thread
);
2501 thread_setrun(new_thread
, TAIL_Q
);
2502 thread_unlock(new_thread
);
2505 simple_unlock(&pset
->sched_lock
);
2507 counter(c_idle_thread_block
++);
2508 thread_block(idle_thread_continue
);
2512 simple_unlock(&pset
->sched_lock
);
2514 panic("idle_thread: bad processor state %d\n", cpu_state(mycpu
));
2524 thread_t self
= current_thread();
2527 stack_privilege(self
);
2531 self
->priority
= IDLEPRI
;
2532 set_sched_pri(self
, self
->priority
);
2533 thread_unlock(self
);
2536 counter(c_idle_thread_block
++);
2537 thread_block(idle_thread_continue
);
2541 static uint64_t sched_tick_interval
, sched_tick_deadline
;
2543 void sched_tick_thread(void);
2546 sched_tick_init(void)
2548 kernel_thread_with_priority(
2549 kernel_task
, MAXPRI_STANDARD
,
2550 sched_tick_thread
, TRUE
, TRUE
);
2556 * Update the priorities of all threads periodically.
2559 sched_tick_thread_continue(void)
2564 #endif /* SIMPLE_CLOCK */
2566 clock_get_uptime(&abstime
);
2568 sched_tick
++; /* age usage one more time */
	/*
	 *	Compensate for clock drift.  sched_usec is an
	 *	exponential average of the number of microseconds in
	 *	a second.  It decays in the same fashion as cpu_usage.
	 */
	new_usec = sched_usec_elapsed();
	sched_usec = (5*sched_usec + 3*new_usec)/8;
#endif	/* SIMPLE_CLOCK */
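/*
 * Worked example (illustrative): the update above is an exponential
 * moving average with weight 5/8 on the old estimate and 3/8 on the
 * new sample.  If sched_usec is 1,000,000 and a drifting clock
 * measures new_usec = 1,040,000, the next value is
 * (5*1000000 + 3*1040000)/8 = 1,015,000, i.e. the estimate moves 3/8
 * of the way toward the new measurement each tick.
 */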
2580 * Compute the scheduler load factors.
2582 compute_mach_factor();
2585 * Scan the run queues for runnable threads that need to
2586 * have their priorities recalculated.
2590 clock_deadline_for_periodic_event(sched_tick_interval
, abstime
,
2591 &sched_tick_deadline
);
2593 assert_wait((event_t
)sched_tick_thread_continue
, THREAD_INTERRUPTIBLE
);
2594 thread_set_timer_deadline(sched_tick_deadline
);
2595 thread_block(sched_tick_thread_continue
);
2600 sched_tick_thread(void)
2602 thread_t self
= current_thread();
2606 stack_privilege(self
);
2608 rate
= (1000 >> SCHED_TICK_SHIFT
);
2609 clock_interval_to_absolutetime_interval(rate
, USEC_PER_SEC
,
2610 &sched_tick_interval
);
2611 clock_get_uptime(&sched_tick_deadline
);
2613 thread_block(sched_tick_thread_continue
);
#define	MAX_STUCK_THREADS	128

/*
 *	do_thread_scan: scan for stuck threads.  A thread is stuck if
 *	it is runnable but its priority is so low that it has not
 *	run for several seconds.  Its priority should be higher, but
 *	won't be until it runs and calls update_priority.  The scanner
 *	finds these threads and does the updates.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	thread ids away in an array (takes out references for them).
 *	Pass two does the priority updates.  This is necessary because
 *	the run queue lock is required for the candidate scan, but
 *	cannot be held during updates.
 *
 *	Array length should be enough so that restart isn't necessary,
 *	but restart logic is included.
 */
thread_t	stuck_threads[MAX_STUCK_THREADS];
int			stuck_count = 0;
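/*
 * Illustrative sketch (an assumption, not part of the original file) of the
 * two-pass structure described above in its most generic form: pass one
 * gathers candidates into a bounded array while the container's lock is
 * held and reports overflow so the caller can restart; pass two is then
 * free to run without the lock.  The sketch_* names are hypothetical.
 */
#define	SKETCH_MAX_CANDIDATES	128

/* Pass 1: caller holds the lock protecting items[]; returns 1 on overflow. */
static int
sketch_collect_candidates(int *items, int nitems,
							int *candidates, int *candidate_count)
{
	int i;

	for (i = 0; i < nitems; i++) {
		if (items[i] < 0) {				/* stand-in for the "stuck" test */
			if (*candidate_count == SKETCH_MAX_CANDIDATES)
				return 1;				/* no more room: restart needed */
			candidates[(*candidate_count)++] = items[i];
		}
	}
	return 0;
}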
/*
 *	do_runq_scan is the guts of pass 1.  It scans a runq for
 *	stuck threads.  A boolean is returned indicating whether
 *	a retry is needed.
 */
boolean_t
do_runq_scan(
	run_queue_t				runq)
{
	register queue_t		q;
	register thread_t		thread;
	register int			count;
	spl_t					s;
	boolean_t				result = FALSE;

	s = splsched();
	simple_lock(&runq->lock);
	if ((count = runq->count) > 0) {
		q = runq->queues + runq->highq;
		while (count > 0) {
			queue_iterate(q, thread, thread_t, links) {
				if (	!(thread->state & (TH_WAIT|TH_SUSP))		&&
						(thread->sched_mode & TH_MODE_TIMESHARE)		) {
					if (thread->sched_stamp != sched_tick) {
						/*
						 *	Stuck, save its id for later.
						 */
						if (stuck_count == MAX_STUCK_THREADS) {
							/*
							 *	!@#$% No more room.
							 */
							simple_unlock(&runq->lock);
							splx(s);

							return (TRUE);
						}

						/*
						 * Inline version of thread_reference
						 * XXX - lock ordering problem here:
						 * thread locks should be taken before runq
						 * locks: just try and get the thread's locks
						 * and ignore this thread if we fail, we might
						 * have better luck next time.
						 */
						if (thread_lock_try(thread)) {
							thread->ref_count++;
							thread_unlock(thread);
							stuck_threads[stuck_count++] = thread;
						}
						else
							result = TRUE;
					}
				}

				count--;
			}

			q--;
		}
	}
	simple_unlock(&runq->lock);
	splx(s);

	return (result);
}
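/*
 * Illustrative userspace sketch (an assumption, not from the original
 * source) of the try-lock idiom in the XXX comment above: when the lock
 * order says "thread lock before run-queue lock" but the run-queue lock is
 * already held, taking the inner lock with a non-blocking try and simply
 * skipping the item on failure avoids a potential deadlock, at the cost of
 * occasionally deferring the update to a later pass.  A pthread mutex
 * stands in for the kernel lock here.
 */
#include <pthread.h>

/* Returns 1 if the reference was taken, 0 if the lock was contended. */
static int
sketch_take_ref_if_uncontended(pthread_mutex_t *inner_lock, int *ref_count)
{
	if (pthread_mutex_trylock(inner_lock) != 0)
		return 0;			/* contended: skip, better luck next time */
	(*ref_count)++;
	pthread_mutex_unlock(inner_lock);
	return 1;
}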
boolean_t	thread_scan_enabled = TRUE;

void
do_thread_scan(void)
{
	register boolean_t			restart_needed = FALSE;
	register thread_t			thread;
	register processor_set_t	pset = &default_pset;
	register processor_t		processor;
	spl_t						s;

	if (!thread_scan_enabled)
		return;

	do {
		restart_needed = do_runq_scan(&pset->runq);
		if (!restart_needed) {
			simple_lock(&pset->processors_lock);
			processor = (processor_t)queue_first(&pset->processors);
			while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
				if (restart_needed = do_runq_scan(&processor->runq))
					break;

				thread = processor->idle_thread;
				if (thread->sched_stamp != sched_tick) {
					if (stuck_count == MAX_STUCK_THREADS) {
						restart_needed = TRUE;
						break;
					}

					stuck_threads[stuck_count++] = thread;
				}

				processor = (processor_t)queue_next(&processor->processors);
			}
			simple_unlock(&pset->processors_lock);
		}

		/*
		 *	Ok, we now have a collection of candidates -- fix them.
		 */
		while (stuck_count > 0) {
			thread = stuck_threads[--stuck_count];
			stuck_threads[stuck_count] = THREAD_NULL;

			s = splsched();
			thread_lock(thread);
			if (	(thread->sched_mode & TH_MODE_TIMESHARE)	||
						(thread->state & TH_IDLE)					) {
				if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
						thread->sched_stamp != sched_tick			)
					update_priority(thread);
			}
			thread_unlock(thread);
			splx(s);

			if (!(thread->state & TH_IDLE))
				thread_deallocate(thread);
		}
	} while (restart_needed);
}
/*
 *	Just in case someone doesn't use the macro
 */
#undef	thread_wakeup

void
thread_wakeup(
	event_t		x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}

boolean_t
thread_runnable(
	thread_t	thread)
{
	return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
}
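/*
 * Illustrative note (not part of the original file): thread_runnable() masks
 * both TH_RUN and TH_WAIT out of the state word and then compares the result
 * against TH_RUN alone, so it is true only when TH_RUN is set *and* TH_WAIT
 * is clear.  The hypothetical helper below shows the same "exactly these
 * bits within a mask" test with plain integers.
 */
static inline int
flag_set_and_other_clear(unsigned int word, unsigned int flag, unsigned int other)
{
	return ((word & (flag | other)) == flag);
}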
2799 printf("processor_set: %08x\n",ps
);
2800 printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
2801 ps
->idle_queue
.next
,ps
->idle_queue
.prev
,ps
->idle_count
);
2802 printf("processors: %08x %08x, processor_count: 0x%x\n",
2803 ps
->processors
.next
,ps
->processors
.prev
,ps
->processor_count
);
2804 printf("tasks: %08x %08x, task_count: 0x%x\n",
2805 ps
->tasks
.next
,ps
->tasks
.prev
,ps
->task_count
);
2806 printf("threads: %08x %08x, thread_count: 0x%x\n",
2807 ps
->threads
.next
,ps
->threads
.prev
,ps
->thread_count
);
2808 printf("ref_count: 0x%x, active: %x\n",
2809 ps
->ref_count
,ps
->active
);
2810 printf("pset_self: %08x, pset_name_self: %08x\n",ps
->pset_self
, ps
->pset_name_self
);
2811 printf("set_quanta: 0x%x\n", ps
->set_quanta
);
#define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])

void
dump_processor(
	processor_t	p)
{
	char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
					"ASSIGN","SHUTDOWN"};

	printf("processor: %08x\n",p);
	printf("processor_queue: %08x %08x\n",
		p->processor_queue.next,p->processor_queue.prev);
	printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
		processor_state(p->state), p->next_thread, p->idle_thread);
	printf("slice_quanta: %x\n", p->slice_quanta);
	printf("processor_set: %08x, processor_set_next: %08x\n",
		p->processor_set, p->processor_set_next);
	printf("processors: %08x %08x\n", p->processors.next,p->processors.prev);
	printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num);
}
void
dump_run_queue_struct(
	run_queue_t	rq)
{
	char	dump_buf[80];
	int		i;

	for( i=0; i < NRQS; ) {
		int j;

		printf("%6s",(i==0)?"runq:":"");
		for( j=0; (j<8) && (i < NRQS); j++,i++ ) {
			if( rq->queues[i].next == &rq->queues[i] )
				printf( " --------");
			else
				printf(" %08x",rq->queues[i].next);
		}
		printf("\n");
	}
	for( i=0; i < NRQBM; ) {
		register unsigned int	mask;
		char					*d = dump_buf;

		mask = ~0;
		mask ^= (mask>>1);		/* leave only the most significant bit set */

		do {
			*d++ = ((rq->bitmap[i]&mask)?'r':'e');
			mask >>= 1;
		} while( mask );
		*d = '\0';
		printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf);
		i++;
	}
	printf("highq: 0x%x, count: %u\n", rq->highq, rq->count);
}
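/*
 * Illustrative sketch (not from the original source) of the mask trick used
 * by the bitmap loop above: starting from ~0 and XOR-ing it with its own
 * right shift by one leaves only the most significant bit set, and shifting
 * that single bit right each iteration walks the word from MSB to LSB.  The
 * helper below renders one word the same way, one character per bit.
 */
static void
sketch_render_bits_msb_first(unsigned int word, char buf[sizeof(unsigned int)*8 + 1])
{
	unsigned int	mask = ~0u;
	char			*d = buf;

	mask ^= (mask >> 1);			/* isolate the most significant bit */
	do {
		*d++ = (word & mask) ? 'r' : 'e';
		mask >>= 1;
	} while (mask);
	*d = '\0';
}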
void
dump_run_queues(
	run_queue_t	runq)
{
	register queue_t		q1;
	register int			i;
	register queue_entry_t	e;

	q1 = runq->queues;
	for (i = 0; i < NRQS; i++) {
		if (q1->next != q1) {
			int t_cnt;

			printf("[%u]:",i);
			for (t_cnt=0, e = q1->next; e != q1; e = e->next) {
				printf("\t0x%08x",e);
				if( (t_cnt = ++t_cnt%4) == 0 )
					printf("\n");
			}
			if( t_cnt )
				printf("\n");
		}
		else
			printf("[%u]\t<empty>\n",i);

		q1++;
	}
}
void
checkrq(
	run_queue_t	rq,
	char		*msg)
{
	register queue_t		q1;
	register int			i, j;
	register queue_entry_t	e;
	register int			highq;

	j = 0;
	highq = -1;
	for (i = MAXPRI; i >= 0; i--) {
		q1 = &rq->queues[i];
		if (q1->next == q1) {
			if (q1->prev != q1)
				panic("checkrq: empty at %s", msg);
		}
		else {
			if (highq == -1)
				highq = i;

			for (e = q1->next; e != q1; e = e->next) {
				j++;
				if (e->next->prev != e)
					panic("checkrq-2 at %s", msg);
				if (e->prev->next != e)
					panic("checkrq-3 at %s", msg);
			}
		}
	}
	if (j != rq->count)
		panic("checkrq: count wrong at %s", msg);
	if (rq->count != 0 && highq > rq->highq)
		panic("checkrq: highq wrong at %s", msg);
}
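/*
 * Illustrative sketch (an assumption, not part of the original file) of the
 * invariants checkrq() enforces on each circular doubly linked queue: an
 * empty queue is a head that points at itself in both directions, and for
 * every element e, e->next->prev == e and e->prev->next == e.  The minimal
 * standalone node type below expresses the same checks without panicking.
 */
struct sketch_qnode {
	struct sketch_qnode	*next, *prev;
};

/* Returns 1 if the circular list rooted at head is consistent, 0 otherwise. */
static int
sketch_queue_is_consistent(struct sketch_qnode *head)
{
	struct sketch_qnode	*e;

	if (head->next == head)
		return (head->prev == head);

	for (e = head->next; e != head; e = e->next)
		if (e->next->prev != e || e->prev->next != e)
			return 0;

	return 1;
}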
void
thread_check(
	register thread_t		thread,
	register run_queue_t	rq)
{
	register int			whichq = thread->sched_pri;
	register queue_entry_t	queue, entry;

	if (whichq < MINPRI || whichq > MAXPRI)
		panic("thread_check: bad pri");

	queue = &rq->queues[whichq];
	entry = queue_first(queue);
	while (!queue_end(queue, entry)) {
		if (entry == (queue_entry_t)thread)
			return;

		entry = queue_next(entry);
	}

	panic("thread_check: not found");
}
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf		kdbprintf
extern int		db_indent;
void			db_sched(void);

void
db_sched(void)
{
	iprintf("Scheduling Statistics:\n");
	db_indent += 2;
	iprintf("Thread invocations: csw %d same %d\n",
				c_thread_invoke_csw, c_thread_invoke_same);
#if	MACH_COUNTERS
	iprintf("Thread block: calls %d\n",
				c_thread_block_calls);
	iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
				c_idle_thread_handoff,
				c_idle_thread_block, no_dispatch_count);
	iprintf("Sched thread blocks: %d\n", c_sched_thread_block);
#endif	/* MACH_COUNTERS */
	db_indent -= 2;
}
#endif	/* MACH_KDB */