/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 */
#include <simple_clock.h>
#include <power_save.h>
#include <task_swapper.h>

#include <ddb/db_output.h>
#include <mach/machine.h>
#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/etap_macros.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <sys/kdebug.h>
#if	TASK_SWAPPER
#include <kern/task_swap.h>
extern int	task_swap_on;
#endif	/* TASK_SWAPPER */
#define		DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define		MAX_UNSAFE_QUANTA			800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define		MAX_POLL_QUANTA				2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define		SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

#define		NO_KERNEL_PREEMPT	0
#define		KERNEL_PREEMPT		1
int			kernel_preemption_mode = KERNEL_PREEMPT;

uint32_t	std_quantum_us;

#if	SIMPLE_CLOCK
int			sched_usec;
#endif	/* SIMPLE_CLOCK */
void		thread_continue(thread_t);

void		wait_queues_init(void);

thread_t	choose_pset_thread(
				processor_t		myprocessor,
				processor_set_t	pset);

thread_t	choose_thread(
				processor_t		myprocessor);

int			run_queue_enqueue(
				run_queue_t		runq,
				thread_t		thread,
				boolean_t		tail);

void		idle_thread_continue(void);
void		do_thread_scan(void);

void		clear_wait_internal(
				thread_t		thread,
				int				result);

void		dump_run_queues(
				run_queue_t		rq);
void		dump_run_queue_struct(
				run_queue_t		rq);
void		dump_processor_set(
				processor_set_t	ps);

boolean_t	thread_runnable(
				thread_t		thread);
/*
 * states are combinations of:
 *  R	running
 *  W	waiting (or on wait queue)
 *  N	non-interruptible
 *
 *	assert_wait thread_block    clear_wait 		swapout	swapin
 *
 * R	RW, RWN	    R;   setrun	    -	       		-
 * RN	RWN	    RN;  setrun	    -	       		-
 */

/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */

struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
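/*
 * Illustrative sketch (not part of the original file): how wait_hash()
 * folds an event "address" into a bucket index.  The sign check matters
 * because an event address cast to int may be negative on machines where
 * kernel addresses live in high memory, and C's % could then yield a
 * negative remainder; complementing first keeps the index in
 * [0, NUMQUEUES).  example_wait_hash() is a hypothetical helper that
 * mirrors the macro above.
 */
#if 0	/* example only -- never compiled */
static int
example_wait_hash(const void *event)
{
	int	x = (int)event;

	return (((x < 0)? ~x : x) % NUMQUEUES);
}
#endif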
	/*
	 *	Calculate the timeslicing quantum.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);
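	/*
	 * Example (illustrative, not in the original source): with the
	 * default preemption rate of 100 per second, the standard quantum
	 * works out to (1000 * 1000) / 100 = 10000 us, i.e. a 10 ms
	 * timeslice.
	 */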
	pset_sys_bootstrap();			/* initialize processor mgmt. */
#endif	/* SIMPLE_CLOCK */
void
wait_queues_init(void)
{
	register int	i;

	for (i = 0; i < NUMQUEUES; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
	timer_call_param_t		p0,
	timer_call_param_t		p1)
{
	thread_t		thread = p0;

	if (--thread->wait_timer_active == 1) {
		if (thread->wait_timer_is_set) {
			thread->wait_timer_is_set = FALSE;
			clear_wait_internal(thread, THREAD_TIMED_OUT);
			thread_unlock(thread);
		}
	}
	else
	if (thread->wait_timer_active == 0)
		thread_wakeup_one(&thread->wait_timer_active);
}
/*
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	thread_t		thread = current_thread();
	uint64_t		deadline;

	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		timer_call_enter(&thread->wait_timer, deadline);
		assert(!thread->wait_timer_is_set);
		thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
}
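/*
 * Usage sketch (illustrative, not part of the original file): the timer
 * is armed between the wait assertion and the block, and cancelled once
 * the thread resumes -- the same pattern assert_wait_timeout() uses
 * below.  "my_event" and "msecs" are hypothetical locals.
 */
#if 0	/* example only -- never compiled */
	assert_wait((event_t)&my_event, THREAD_INTERRUPTIBLE);
	thread_set_timer(msecs, 1000*NSEC_PER_USEC);
	thread_block((void (*)(void)) 0);
	thread_cancel_timer();
#endif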
void
thread_set_timer_deadline(
	uint64_t		deadline)
{
	thread_t		thread = current_thread();

	if ((thread->state & TH_WAIT) != 0) {
		timer_call_enter(&thread->wait_timer, deadline);
		assert(!thread->wait_timer_is_set);
		thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
}
void
thread_cancel_timer(void)
{
	thread_t		thread = current_thread();

	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
}
/*
 *	Set up thread timeout element when thread is created.
 */
void
thread_timer_setup(
	thread_t		thread)
{
	extern void	thread_depress_expire(
					timer_call_param_t	p0,
					timer_call_param_t	p1);

	timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
	thread->wait_timer_is_set = FALSE;
	thread->wait_timer_active = 1;

	timer_call_setup(&thread->depress_timer, thread_depress_expire, thread);
	thread->depress_timer_active = 1;
}
void
thread_timer_terminate(void)
{
	thread_t		thread = current_thread();

	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	thread->wait_timer_active--;

	while (thread->wait_timer_active > 0) {
		assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
		thread_block((void (*)(void)) 0);
	}

	thread->depress_timer_active--;

	while (thread->depress_timer_active > 0) {
		assert_wait((event_t)&thread->depress_timer_active, THREAD_UNINT);
		thread_block((void (*)(void)) 0);
	}

	thread_deallocate(thread);
}
/*
 *	Routine:	thread_go_locked
 *	Purpose:
 *		Start a thread running.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 */
void
thread_go_locked(
	thread_t		thread,
	int				result)
{
	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if (thread->state & TH_WAIT) {
		thread->state &= ~(TH_WAIT|TH_UNINT);
		if (!(thread->state & TH_RUN)) {
			thread->state |= TH_RUN;

			_mk_sp_thread_unblock(thread);
		}
		thread->wait_result = result;
	}
}
void
thread_mark_wait_locked(
	thread_t		thread,
	int				interruptible)
{
	assert(thread == current_thread());

	thread->wait_result = -1;		/* JMM - Needed for non-assert kernel */
	thread->state |= (interruptible && thread->interruptible) ?
							TH_WAIT : (TH_WAIT | TH_UNINT);
	thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible);
	thread->sleep_stamp = sched_tick;
}
/*
 *	Routine:	assert_wait_timeout
 *	Purpose:
 *		Assert that the thread intends to block,
 *		waiting for a timeout (no user known event).
 */
unsigned int assert_wait_timeout_event;

void
assert_wait_timeout(
	mach_msg_timeout_t		msecs,
	int						interruptible)
{
	assert_wait((event_t)&assert_wait_timeout_event, interruptible);
	thread_set_timer(msecs, 1000*NSEC_PER_USEC);
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * the problem).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t thread;
	extern unsigned int debug_mode;

	if(debug_mode) return TRUE;		/* Always succeed in debug mode */

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
void
assert_wait(
	event_t				event,
	int					interruptible)
{
	register int			index;
	register wait_queue_t	wq;

	assert(event != NO_EVENT);
	assert(assert_wait_possible());

	index = wait_hash(event);
	wq = &wait_queues[index];
	(void)wait_queue_assert_wait(wq, event, interruptible);
}
/*
 *	thread_[un]stop(thread)
 *	Once a thread has blocked interruptibly (via assert_wait) prevent
 *	it from running until thread_unstop.
 *
 *	If someone else has already stopped the thread, wait for the
 *	stop to be cleared, and then stop it again.
 *
 *	Return FALSE if interrupted.
 *
 * NOTE: thread_hold/thread_suspend should be called on the activation
 * before calling thread_stop.  TH_SUSP is only recognized when
 * a thread blocks and only prevents clear_wait/thread_wakeup
 * from restarting an interruptible wait.  The wake_active flag is
 * used to indicate that someone is waiting on the thread.
 */
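/*
 * Usage sketch (illustrative, not part of the original file): the
 * hold/stop/unstop pairing the NOTE above describes, mirroring what
 * thread_stop_wait() does further down.  "thr_act" and "thread" are
 * hypothetical locals, and thread_release() is assumed to be the
 * counterpart of thread_hold().
 */
#if 0	/* example only -- never compiled */
	thread_hold(thr_act);			/* suspend the activation first */
	if (thread_stop(thread)) {		/* then keep the thread from running */
		/* ... inspect or manipulate the stopped thread ... */
		thread_unstop(thread);		/* let it run again */
	}
	thread_release(thr_act);
#endif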
boolean_t
thread_stop(
	thread_t		thread)
{
	while (thread->state & TH_SUSP) {
		int		wait_result;

		thread->wake_active = TRUE;
		assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);

		wait_result = thread_block((void (*)(void)) 0);
		if (wait_result != THREAD_AWAKENED)
			return (FALSE);
	}

	thread->state |= TH_SUSP;
	thread_unlock(thread);

	return (TRUE);
}
604 * Clear TH_SUSP and if the thread has been stopped and is now runnable,
605 * put it back on the run queue.
617 if ((thread
->state
& (TH_RUN
|TH_WAIT
|TH_SUSP
/*|TH_UNINT*/)) == TH_SUSP
) {
618 thread
->state
&= ~TH_SUSP
;
619 thread
->state
|= TH_RUN
;
621 _mk_sp_thread_unblock(thread
);
624 if (thread
->state
& TH_SUSP
) {
625 thread
->state
&= ~TH_SUSP
;
627 if (thread
->wake_active
) {
628 thread
->wake_active
= FALSE
;
629 thread_unlock(thread
);
632 thread_wakeup((event_t
)&thread
->wake_active
);
638 thread_unlock(thread
);
644 * Wait for the thread's RUN bit to clear
655 while (thread
->state
& (TH_RUN
/*|TH_UNINT*/)) {
658 if (thread
->last_processor
!= PROCESSOR_NULL
)
659 cause_ast_check(thread
->last_processor
);
661 thread
->wake_active
= TRUE
;
662 assert_wait((event_t
)&thread
->wake_active
, THREAD_ABORTSAFE
);
666 wait_result
= thread_block((void (*)(void))0);
667 if (wait_result
!= THREAD_AWAKENED
)
682 * thread_stop_wait(thread)
683 * Stop the thread then wait for it to block interruptibly
689 if (thread_stop(thread
)) {
690 if (thread_wait(thread
))
693 thread_unstop(thread
);
701 * Routine: clear_wait_internal
703 * Clear the wait condition for the specified thread.
704 * Start the thread executing if that is appropriate.
706 * thread thread to awaken
707 * result Wakeup result the thread should see
710 * the thread is locked.
718 * If the thread isn't in a wait queue, just set it running. Otherwise,
719 * try to remove it from the queue and, if successful, then set it
720 * running. NEVER interrupt an uninterruptible thread.
722 if (!((result
== THREAD_INTERRUPTED
) && (thread
->state
& TH_UNINT
))) {
723 if (wait_queue_assert_possible(thread
) ||
724 (wait_queue_remove(thread
) == KERN_SUCCESS
)) {
725 thread_go_locked(thread
, result
);
734 * Clear the wait condition for the specified thread. Start the thread
735 * executing if that is appropriate.
738 * thread thread to awaken
739 * result Wakeup result the thread should see
750 clear_wait_internal(thread
, result
);
751 thread_unlock(thread
);
757 * thread_wakeup_prim:
759 * Common routine for thread_wakeup, thread_wakeup_with_result,
760 * and thread_wakeup_one.
766 boolean_t one_thread
,
769 register wait_queue_t wq
;
772 index
= wait_hash(event
);
773 wq
= &wait_queues
[index
];
775 wait_queue_wakeup_one(wq
, event
, result
);
777 wait_queue_wakeup_all(wq
, event
, result
);
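/*
 * Illustrative note (not part of the original file): the public wakeup
 * calls are thin wrappers over thread_wakeup_prim(), differing only in
 * whether one waiter or all waiters are awakened and in the wait result
 * delivered -- conceptually:
 *
 *	thread_wakeup(x)                == thread_wakeup_prim((x), FALSE, THREAD_AWAKENED)
 *	thread_wakeup_with_result(x, z) == thread_wakeup_prim((x), FALSE, (z))
 *	thread_wakeup_one(x)            == thread_wakeup_prim((x), TRUE,  THREAD_AWAKENED)
 */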
783 * Force a thread to execute on the specified processor.
784 * If the thread is currently executing, it may wait until its
785 * time slice is up before switching onto the specified processor.
787 * A processor of PROCESSOR_NULL causes the thread to be unbound.
788 * xxx - DO NOT export this to users.
792 register thread_t thread
,
793 processor_t processor
)
799 thread_bind_locked(thread
, processor
);
800 thread_unlock(thread
);
/*
 *	thread_select:
 *
 *	Select a thread for this processor (the current processor) to run.
 *	May select the current thread, which must already be locked.
 */
thread_t
thread_select(
	register processor_t	myprocessor)
{
	register thread_t		thread;
	processor_set_t			pset;
	register run_queue_t	runq = &myprocessor->runq;
	boolean_t				other_runnable;

	/*
	 *	Check for other non-idle runnable threads.
	 */
	pset = myprocessor->processor_set;
	thread = current_thread();

	/*
	 *	Update set_quanta for timesharing.
	 */
	pset->set_quanta = pset->machine_quanta[
						(pset->runq.count > pset->processor_count) ?
							  pset->processor_count : pset->runq.count];
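	/*
	 * Example (illustrative, not in the original source): the index into
	 * machine_quanta[] is min(pset->runq.count, pset->processor_count),
	 * so a 2-processor set with 5 timesharing threads queued uses
	 * machine_quanta[2], while the same set with 1 queued thread uses
	 * machine_quanta[1].
	 */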
	/* Update the thread's priority */
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);

	simple_lock(&runq->lock);
	simple_lock(&pset->runq.lock);

	other_runnable = runq->count > 0 || pset->runq.count > 0;

	if (	thread->state == TH_RUN							&&
			(!other_runnable							||
			 (runq->highq < thread->sched_pri			&&
			  pset->runq.highq < thread->sched_pri))	&&
			thread->processor_set == pset				&&
			(thread->bound_processor == PROCESSOR_NULL	||
			 thread->bound_processor == myprocessor)		) {

		/* I am the highest priority runnable (non-idle) thread */
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);

		myprocessor->slice_quanta =
				(thread->sched_mode & TH_MODE_TIMESHARE)? pset->set_quanta: 1;
	}
	else
	if (other_runnable) {
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);
		thread = choose_thread(myprocessor);
	}
	else {
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);

		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.  If it was in an assignment or shutdown,
		 *	leave it alone.  Return its idle thread.
		 */
		simple_lock(&pset->idle_lock);
		if (myprocessor->state == PROCESSOR_RUNNING) {
			myprocessor->state = PROCESSOR_IDLE;
			/*
			 *	XXX Until it goes away, put master on end of queue, others
			 *	XXX on front so master gets used last.
			 */
			if (myprocessor == master_processor)
				queue_enter(&(pset->idle_queue), myprocessor,
							processor_t, processor_queue);
			else
				queue_enter_first(&(pset->idle_queue), myprocessor,
							processor_t, processor_queue);
		}
		simple_unlock(&pset->idle_lock);

		thread = myprocessor->idle_thread;
	}

	return (thread);
}
895 * Stop running the current thread and start running the new thread.
896 * If continuation is non-zero, and the current thread is blocked,
897 * then it will resume by executing continuation on a new stack.
898 * Returns TRUE if the hand-off succeeds.
899 * The reason parameter contains | AST_QUANTUM if the thread blocked
900 * because its quantum expired.
905 __current_thread(void)
907 return (current_thread());
912 register thread_t old_thread
,
913 register thread_t new_thread
,
915 void (*continuation
)(void))
919 if (cpu_data
[cpu_number()].preemption_level
!= 0)
920 panic("thread_invoke: preemption_level %d\n",
921 cpu_data
[cpu_number()].preemption_level
);
924 * Mark thread interruptible.
926 thread_lock(new_thread
);
927 new_thread
->state
&= ~TH_UNINT
;
929 assert(thread_runnable(new_thread
));
931 assert(old_thread
->continuation
== (void (*)(void))0);
933 if ( (old_thread
->sched_mode
& TH_MODE_REALTIME
) &&
934 !old_thread
->stack_privilege
) {
935 old_thread
->stack_privilege
= old_thread
->kernel_stack
;
938 if (continuation
!= (void (*)()) 0) {
939 switch (new_thread
->state
& TH_STACK_STATE
) {
940 case TH_STACK_HANDOFF
:
943 * If the old thread has stack privilege, we can't give
944 * his stack away. So go and get him one and treat this
945 * as a traditional context switch.
947 if (old_thread
->stack_privilege
== current_stack())
951 * Make the whole handoff/dispatch atomic to match the
954 disable_preemption();
957 * Set up ast context of new thread and switch to its timer.
959 new_thread
->state
&= ~(TH_STACK_HANDOFF
|TH_UNINT
);
960 new_thread
->last_processor
= current_processor();
961 ast_context(new_thread
->top_act
, cpu_number());
962 timer_switch(&new_thread
->system_timer
);
963 thread_unlock(new_thread
);
965 current_task()->csw
++;
967 old_thread
->continuation
= continuation
;
968 stack_handoff(old_thread
, new_thread
);
970 wake_lock(old_thread
);
971 thread_lock(old_thread
);
972 act_machine_sv_free(old_thread
->top_act
);
974 _mk_sp_thread_done(old_thread
);
977 * inline thread_dispatch but don't free stack
980 switch (old_thread
->state
& (TH_RUN
|TH_WAIT
|TH_UNINT
|TH_IDLE
)) {
982 case TH_RUN
| TH_UNINT
:
985 * No reason to stop. Put back on a run queue.
987 old_thread
->state
|= TH_STACK_HANDOFF
;
988 _mk_sp_thread_dispatch(old_thread
);
991 case TH_RUN
| TH_WAIT
| TH_UNINT
:
992 case TH_RUN
| TH_WAIT
:
993 old_thread
->sleep_stamp
= sched_tick
;
996 case TH_WAIT
: /* this happens! */
1000 old_thread
->state
|= TH_STACK_HANDOFF
;
1001 old_thread
->state
&= ~TH_RUN
;
1002 if (old_thread
->state
& TH_TERMINATE
)
1003 thread_reaper_enqueue(old_thread
);
1005 if (old_thread
->wake_active
) {
1006 old_thread
->wake_active
= FALSE
;
1007 thread_unlock(old_thread
);
1008 wake_unlock(old_thread
);
1009 thread_wakeup((event_t
)&old_thread
->wake_active
);
1010 wake_lock(old_thread
);
1011 thread_lock(old_thread
);
1015 case TH_RUN
| TH_IDLE
:
1017 * Drop idle thread -- it is already in
1018 * idle_thread_array.
1020 old_thread
->state
|= TH_STACK_HANDOFF
;
1024 panic("State 0x%x \n",old_thread
->state
);
1027 thread_unlock(old_thread
);
1028 wake_unlock(old_thread
);
1030 thread_lock(new_thread
);
1031 assert(thread_runnable(new_thread
));
1032 _mk_sp_thread_begin(new_thread
);
1034 lcont
= new_thread
->continuation
;
1035 new_thread
->continuation
= (void(*)(void))0;
1037 thread_unlock(new_thread
);
1038 enable_preemption();
1040 counter_always(c_thread_invoke_hits
++);
1042 if (new_thread
->funnel_state
& TH_FN_REFUNNEL
) {
1043 kern_return_t save_wait_result
;
1044 new_thread
->funnel_state
= 0;
1045 save_wait_result
= new_thread
->wait_result
;
1046 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
, new_thread
->funnel_lock
, 2, 0, 0, 0);
1047 //mutex_lock(new_thread->funnel_lock);
1048 funnel_lock(new_thread
->funnel_lock
);
1049 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
, new_thread
->funnel_lock
, 2, 0, 0, 0);
1050 new_thread
->funnel_state
= TH_FN_OWNED
;
1051 new_thread
->wait_result
= save_wait_result
;
1056 call_continuation(lcont
);
1060 case TH_STACK_ALLOC
:
1062 * waiting for a stack
1064 thread_swapin(new_thread
);
1065 thread_unlock(new_thread
);
1066 counter_always(c_thread_invoke_misses
++);
1071 * already has a stack - can't handoff
1073 if (new_thread
== old_thread
) {
1075 /* same thread but with continuation */
1076 counter(++c_thread_invoke_same
);
1077 thread_unlock(new_thread
);
1079 if (old_thread
->funnel_state
& TH_FN_REFUNNEL
) {
1080 kern_return_t save_wait_result
;
1082 old_thread
->funnel_state
= 0;
1083 save_wait_result
= old_thread
->wait_result
;
1084 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
, old_thread
->funnel_lock
, 3, 0, 0, 0);
1085 funnel_lock(old_thread
->funnel_lock
);
1086 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
, old_thread
->funnel_lock
, 3, 0, 0, 0);
1087 old_thread
->funnel_state
= TH_FN_OWNED
;
1088 old_thread
->wait_result
= save_wait_result
;
1091 call_continuation(continuation
);
1098 * check that the new thread has a stack
1100 if (new_thread
->state
& TH_STACK_STATE
) {
1102 /* has no stack. if not already waiting for one try to get one */
1103 if ((new_thread
->state
& TH_STACK_ALLOC
) ||
1104 /* not already waiting. nonblocking try to get one */
1105 !stack_alloc_try(new_thread
, thread_continue
))
1107 /* couldn't get one. schedule new thread to get a stack and
1108 return failure so we can try another thread. */
1109 thread_swapin(new_thread
);
1110 thread_unlock(new_thread
);
1111 counter_always(c_thread_invoke_misses
++);
1114 } else if (old_thread
== new_thread
) {
1115 counter(++c_thread_invoke_same
);
1116 thread_unlock(new_thread
);
	/* new thread now has a stack. it has been set up to resume in
	   thread_continue so it can dispatch the old thread, deal with
	   funnelling and then go to its true continuation point */
1125 new_thread
->state
&= ~(TH_STACK_HANDOFF
| TH_UNINT
);
1128 * Set up ast context of new thread and switch to its timer.
1130 new_thread
->last_processor
= current_processor();
1131 ast_context(new_thread
->top_act
, cpu_number());
1132 timer_switch(&new_thread
->system_timer
);
1133 assert(thread_runnable(new_thread
));
1136 * N.B. On return from the call to switch_context, 'old_thread'
1137 * points at the thread that yielded to us. Unfortunately, at
1138 * this point, there are no simple_locks held, so if we are preempted
1139 * before the call to thread_dispatch blocks preemption, it is
1140 * possible for 'old_thread' to terminate, leaving us with a
1141 * stale thread pointer.
1143 disable_preemption();
1145 thread_unlock(new_thread
);
1147 counter_always(c_thread_invoke_csw
++);
1148 current_task()->csw
++;
1150 thread_lock(old_thread
);
1151 old_thread
->reason
= reason
;
1152 assert(old_thread
->runq
== RUN_QUEUE_NULL
);
1154 if (continuation
!= (void (*)(void))0)
1155 old_thread
->continuation
= continuation
;
1157 _mk_sp_thread_done(old_thread
);
1158 thread_unlock(old_thread
);
1161 * switch_context is machine-dependent. It does the
1162 * machine-dependent components of a context-switch, like
1163 * changing address spaces. It updates active_threads.
1165 old_thread
= switch_context(old_thread
, continuation
, new_thread
);
1167 /* Now on new thread's stack. Set a local variable to refer to it. */
1168 new_thread
= __current_thread();
1169 assert(old_thread
!= new_thread
);
1171 thread_lock(new_thread
);
1172 assert(thread_runnable(new_thread
));
1173 _mk_sp_thread_begin(new_thread
);
1174 thread_unlock(new_thread
);
1177 * We're back. Now old_thread is the thread that resumed
1178 * us, and we have to dispatch it.
1181 thread_dispatch(old_thread
);
1182 enable_preemption();
1184 /* if we get here and 'continuation' is set that means the
1185 * switch_context() path returned and did not call out
1186 * to the continuation. we will do it manually here */
1188 call_continuation(continuation
);
 *	Called when launching a new thread, at splsched();
1202 register thread_t old_thread
)
1204 register thread_t self
= current_thread();
1205 register void (*continuation
)();
1208 * We must dispatch the old thread and then
1209 * call the current thread's continuation.
1210 * There might not be an old thread, if we are
1211 * the first thread to run on this processor.
1213 if (old_thread
!= THREAD_NULL
)
1214 thread_dispatch(old_thread
);
1217 continuation
= self
->continuation
;
1218 self
->continuation
= (void (*)(void))0;
1220 _mk_sp_thread_begin(self
);
1221 thread_unlock(self
);
1224 * N.B. - the following is necessary, since thread_invoke()
1225 * inhibits preemption on entry and reenables before it
1226 * returns. Unfortunately, the first time a newly-created
1227 * thread executes, it magically appears here, and never
1228 * executes the enable_preemption() call in thread_invoke().
1230 enable_preemption();
1232 if (self
->funnel_state
& TH_FN_REFUNNEL
) {
1233 kern_return_t save_wait_result
;
1235 self
->funnel_state
= 0;
1236 save_wait_result
= self
->wait_result
;
1237 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
, self
->funnel_lock
, 4, 0, 0, 0);
1238 funnel_lock(self
->funnel_lock
);
1239 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
, self
->funnel_lock
, 4, 0, 0, 0);
1240 self
->wait_result
= save_wait_result
;
1241 self
->funnel_state
= TH_FN_OWNED
;
1246 assert(continuation
);
1251 #if MACH_LDEBUG || MACH_KDB
1253 #define THREAD_LOG_SIZE 300
1267 } thread_log
[THREAD_LOG_SIZE
];
1269 int thread_log_index
;
1271 void check_thread_time(long n
);
1274 int check_thread_time_crash
;
1278 check_thread_time(long us
)
1282 if (!check_thread_time_crash
)
1285 temp
= thread_log
[0].stamp
;
1286 cyctm05_diff (&thread_log
[1].stamp
, &thread_log
[0].stamp
, &temp
);
1288 if (temp
.l
>= us
&& thread_log
[1].info
!= 0x49) /* HACK!!! */
1289 panic ("check_thread_time");
1294 log_thread_action(char * action
, long info1
, long info2
, long info3
)
1298 static unsigned int tstamp
;
1302 for (i
= THREAD_LOG_SIZE
-1; i
> 0; i
--) {
1303 thread_log
[i
] = thread_log
[i
-1];
1306 thread_log
[0].stamp
.h
= 0;
1307 thread_log
[0].stamp
.l
= tstamp
++;
1308 thread_log
[0].thread
= current_thread();
1309 thread_log
[0].info1
= info1
;
1310 thread_log
[0].info2
= info2
;
1311 thread_log
[0].info3
= info3
;
1312 thread_log
[0].action
= action
;
1313 /* strcpy (&thread_log[0].action[0], action);*/
1317 #endif /* MACH_LDEBUG || MACH_KDB */
1320 #include <ddb/db_output.h>
1321 void db_show_thread_log(void);
1324 db_show_thread_log(void)
1328 db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
1329 " Info3 ", " Timestamp ", "Action");
1331 for (i
= 0; i
< THREAD_LOG_SIZE
; i
++) {
1332 db_printf ("%08x %08x %08x %08x %08x/%08x %s\n",
1333 thread_log
[i
].thread
,
1334 thread_log
[i
].info1
,
1335 thread_log
[i
].info2
,
1336 thread_log
[i
].info3
,
1337 thread_log
[i
].stamp
.h
,
1338 thread_log
[i
].stamp
.l
,
1339 thread_log
[i
].action
);
1342 #endif /* MACH_KDB */
1345 * thread_block_reason:
1347 * Block the current thread if a wait has been asserted,
1348 * otherwise unconditionally yield the remainder of the
1349 * current quantum unless reason contains AST_BLOCK.
1351 * If a continuation is specified, then thread_block will
1352 * attempt to discard the thread's kernel stack. When the
1353 * thread resumes, it will execute the continuation function
1354 * on a new kernel stack.
1356 counter(mach_counter_t c_thread_block_calls
= 0;)
1359 thread_block_reason(
1360 void (*continuation
)(void),
1363 register thread_t thread
= current_thread();
1364 register processor_t myprocessor
;
1365 register thread_t new_thread
;
1368 counter(++c_thread_block_calls
);
1370 check_simple_locks();
1372 machine_clock_assist();
1376 if ((thread
->funnel_state
& TH_FN_OWNED
) && !(reason
& AST_PREEMPT
)) {
1377 thread
->funnel_state
= TH_FN_REFUNNEL
;
1378 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE
, thread
->funnel_lock
, 2, 0, 0, 0);
1379 funnel_unlock(thread
->funnel_lock
);
1382 myprocessor
= current_processor();
1384 thread_lock(thread
);
1385 if (thread
->state
& TH_ABORT
)
1386 clear_wait_internal(thread
, THREAD_INTERRUPTED
);
1388 if (!(reason
& AST_BLOCK
))
1389 myprocessor
->slice_quanta
= 0;
1391 /* Unconditionally remove either | both */
1392 ast_off(AST_PREEMPT
);
1394 new_thread
= thread_select(myprocessor
);
1396 assert(thread_runnable(new_thread
));
1397 thread_unlock(thread
);
1398 while (!thread_invoke(thread
, new_thread
, reason
, continuation
)) {
1399 thread_lock(thread
);
1400 new_thread
= thread_select(myprocessor
);
1402 assert(thread_runnable(new_thread
));
1403 thread_unlock(thread
);
1406 if (thread
->funnel_state
& TH_FN_REFUNNEL
) {
1407 kern_return_t save_wait_result
;
1409 save_wait_result
= thread
->wait_result
;
1410 thread
->funnel_state
= 0;
1411 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE
, thread
->funnel_lock
, 5, 0, 0, 0);
1412 funnel_lock(thread
->funnel_lock
);
1413 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE
, thread
->funnel_lock
, 5, 0, 0, 0);
1414 thread
->funnel_state
= TH_FN_OWNED
;
1415 thread
->wait_result
= save_wait_result
;
1420 return thread
->wait_result
;
1426 * Block the current thread if a wait has been asserted,
1427 * otherwise yield the remainder of the current quantum.
1431 void (*continuation
)(void))
1433 return thread_block_reason(continuation
, AST_NONE
);
1439 * Switch directly from the current thread to a specified
1440 * thread. Both the current and new threads must be
1448 thread_t old_thread
,
1449 void (*continuation
)(void),
1450 thread_t new_thread
)
1452 while (!thread_invoke(old_thread
, new_thread
, 0, continuation
)) {
1453 register processor_t myprocessor
= current_processor();
1454 thread_lock(old_thread
);
1455 new_thread
= thread_select(myprocessor
);
1456 thread_unlock(old_thread
);
1458 return old_thread
->wait_result
;
1462 * Dispatches a running thread that is not on a runq.
1463 * Called at splsched.
1467 register thread_t thread
)
1470 * If we are discarding the thread's stack, we must do it
1471 * before the thread has a chance to run.
1474 thread_lock(thread
);
1477 /* no continuations on i386 for now */
1478 if (thread
->continuation
!= (void (*)())0) {
1479 assert((thread
->state
& TH_STACK_STATE
) == 0);
1480 thread
->state
|= TH_STACK_HANDOFF
;
1482 if (thread
->top_act
) {
1483 act_machine_sv_free(thread
->top_act
);
1488 switch (thread
->state
& (TH_RUN
|TH_WAIT
|TH_UNINT
|TH_IDLE
)) {
1490 case TH_RUN
| TH_UNINT
:
1493 * No reason to stop. Put back on a run queue.
1495 _mk_sp_thread_dispatch(thread
);
1498 case TH_RUN
| TH_WAIT
| TH_UNINT
:
1499 case TH_RUN
| TH_WAIT
:
1500 thread
->sleep_stamp
= sched_tick
;
1502 case TH_WAIT
: /* this happens! */
1507 thread
->state
&= ~TH_RUN
;
1508 if (thread
->state
& TH_TERMINATE
)
1509 thread_reaper_enqueue(thread
);
1511 if (thread
->wake_active
) {
1512 thread
->wake_active
= FALSE
;
1513 thread_unlock(thread
);
1514 wake_unlock(thread
);
1515 thread_wakeup((event_t
)&thread
->wake_active
);
1520 case TH_RUN
| TH_IDLE
:
1522 * Drop idle thread -- it is already in
1523 * idle_thread_array.
1528 panic("State 0x%x \n",thread
->state
);
1530 thread_unlock(thread
);
1531 wake_unlock(thread
);
1535 * Enqueue thread on run queue. Thread must be locked,
1536 * and not already be on a run queue.
1540 register run_queue_t rq
,
1541 register thread_t thread
,
1544 register int whichq
;
1547 whichq
= thread
->sched_pri
;
1548 assert(whichq
>= MINPRI
&& whichq
<= MAXPRI
);
1550 simple_lock(&rq
->lock
); /* lock the run queue */
1551 assert(thread
->runq
== RUN_QUEUE_NULL
);
1553 enqueue_tail(&rq
->queues
[whichq
], (queue_entry_t
)thread
);
1555 enqueue_head(&rq
->queues
[whichq
], (queue_entry_t
)thread
);
1557 setbit(MAXPRI
- whichq
, rq
->bitmap
);
1558 if (whichq
> rq
->highq
)
1561 oldrqcount
= rq
->count
++;
1563 thread
->whichq
= whichq
;
1565 thread_check(thread
, rq
);
1567 simple_unlock(&rq
->lock
);
1569 return (oldrqcount
);
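/*
 * Illustrative note (not part of the original file): the run queue keeps
 * one bit per priority, stored "upside down" so that ffsbit() finds the
 * highest runnable priority first.  For example, enqueueing a thread at
 * sched_pri 80 does setbit(MAXPRI - 80, rq->bitmap); the highest runnable
 * priority is then recovered as rq->highq = MAXPRI - ffsbit(rq->bitmap),
 * exactly as rem_runq() and choose_thread() do below.
 */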
 *	Make thread runnable; dispatch directly onto an idle processor
 *	if possible.  Else put on appropriate run queue (processor
 *	if bound, else processor set).  Caller must have lock on thread.
 *	This is always called at splsched.
 *	The tail parameter, if TRUE || TAIL_Q, indicates that the
 *	thread should be placed at the tail of the runq.  If
 *	FALSE || HEAD_Q the thread will be placed at the head of the runq.
1586 register thread_t new_thread
,
1587 boolean_t may_preempt
,
1590 register processor_t processor
;
1591 register run_queue_t runq
;
1592 register processor_set_t pset
;
1594 ast_t ast_flags
= AST_BLOCK
;
1596 assert(thread_runnable(new_thread
));
1599 * Update priority if needed.
1601 if (new_thread
->sched_stamp
!= sched_tick
)
1602 update_priority(new_thread
);
1604 if ( new_thread
->sched_pri
>= BASEPRI_PREEMPT
&&
1605 kernel_preemption_mode
== KERNEL_PREEMPT
)
1606 ast_flags
|= AST_URGENT
;
1608 assert(new_thread
->runq
== RUN_QUEUE_NULL
);
1611 * Try to dispatch the thread directly onto an idle processor.
1613 if ((processor
= new_thread
->bound_processor
) == PROCESSOR_NULL
) {
1615 * Not bound, any processor in the processor set is ok.
1617 pset
= new_thread
->processor_set
;
1618 if (pset
->idle_count
> 0) {
1619 simple_lock(&pset
->idle_lock
);
1620 if (pset
->idle_count
> 0) {
1621 processor
= (processor_t
) queue_first(&pset
->idle_queue
);
1622 queue_remove(&(pset
->idle_queue
), processor
, processor_t
,
1625 processor
->next_thread
= new_thread
;
1626 processor
->state
= PROCESSOR_DISPATCHING
;
1627 simple_unlock(&pset
->idle_lock
);
1628 if(processor
->slot_num
!= cpu_number())
1629 machine_signal_idle(processor
);
1632 simple_unlock(&pset
->idle_lock
);
1636 * Place thread on processor set run queue.
1639 run_queue_enqueue(runq
, new_thread
, tail
);
1644 thread
= current_thread();
1645 processor
= current_processor();
1647 pset
== processor
->processor_set
) {
1649 * XXX if we have a non-empty local runq or are
1650 * XXX running a bound thread, ought to check for
1651 * XXX another cpu running lower-pri thread to preempt.
1653 if (csw_needed(thread
, processor
))
1659 * Bound, can only run on bound processor. Have to lock
1660 * processor here because it may not be the current one.
1662 if (processor
->state
== PROCESSOR_IDLE
) {
1663 simple_lock(&processor
->lock
);
1664 pset
= processor
->processor_set
;
1665 simple_lock(&pset
->idle_lock
);
1666 if (processor
->state
== PROCESSOR_IDLE
) {
1667 queue_remove(&pset
->idle_queue
, processor
,
1668 processor_t
, processor_queue
);
1670 processor
->next_thread
= new_thread
;
1671 processor
->state
= PROCESSOR_DISPATCHING
;
1672 simple_unlock(&pset
->idle_lock
);
1673 simple_unlock(&processor
->lock
);
1674 if(processor
->slot_num
!= cpu_number())
1675 machine_signal_idle(processor
);
1678 simple_unlock(&pset
->idle_lock
);
1679 simple_unlock(&processor
->lock
);
1683 * Cause ast on processor if processor is on line, and the
1684 * currently executing thread is not bound to that processor
1685 * (bound threads have implicit priority over non-bound threads).
1686 * We also avoid sending the AST to the idle thread (if it got
1687 * scheduled in the window between the 'if' above and here),
1688 * since the idle_thread is bound.
1690 runq
= &processor
->runq
;
1691 if (processor
== current_processor()) {
1692 run_queue_enqueue(runq
, new_thread
, tail
);
1694 thread
= current_thread();
1695 if ( thread
->bound_processor
== PROCESSOR_NULL
||
1696 csw_needed(thread
, processor
))
1700 thread
= cpu_data
[processor
->slot_num
].active_thread
;
1701 if ( run_queue_enqueue(runq
, new_thread
, tail
) == 0 &&
1702 processor
->state
!= PROCESSOR_OFF_LINE
&&
1703 thread
&& thread
->bound_processor
!= processor
)
1704 cause_ast_check(processor
);
1712 * Set the priority of the specified thread to the specified
1713 * priority. This may cause the thread to change queues.
1715 * The thread *must* be locked by the caller.
1723 register struct run_queue
*rq
;
1725 rq
= rem_runq(thread
);
1726 assert(thread
->runq
== RUN_QUEUE_NULL
);
1727 thread
->sched_pri
= pri
;
1728 if (rq
!= RUN_QUEUE_NULL
) {
1730 thread_setrun(thread
, TRUE
, TAIL_Q
);
1732 run_queue_enqueue(rq
, thread
, TAIL_Q
);
1739 * Remove a thread from its run queue.
1740 * The run queue that the process was on is returned
1741 * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
1742 * before calling this routine. Unusual locking protocol on runq
1743 * field in thread structure makes this code interesting; see thread.h.
1749 register struct run_queue
*rq
;
1753 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
1754 * run_queues because the caller locked the thread. Otherwise
1755 * the thread is on a runq, but could leave.
1757 if (rq
!= RUN_QUEUE_NULL
) {
1758 simple_lock(&rq
->lock
);
1759 if (rq
== thread
->runq
) {
1761 * Thread is in a runq and we have a lock on
1765 thread_check(thread
, rq
);
1767 remqueue(&rq
->queues
[0], (queue_entry_t
)thread
);
1770 if (queue_empty(rq
->queues
+ thread
->sched_pri
)) {
1771 /* update run queue status */
1772 if (thread
->sched_pri
!= IDLEPRI
)
1773 clrbit(MAXPRI
- thread
->sched_pri
, rq
->bitmap
);
1774 rq
->highq
= MAXPRI
- ffsbit(rq
->bitmap
);
1776 thread
->runq
= RUN_QUEUE_NULL
;
1777 simple_unlock(&rq
->lock
);
1781 * The thread left the runq before we could
1782 * lock the runq. It is not on a runq now, and
1783 * can't move again because this routine's
1784 * caller locked the thread.
1786 assert(thread
->runq
== RUN_QUEUE_NULL
);
1787 simple_unlock(&rq
->lock
);
1788 rq
= RUN_QUEUE_NULL
;
1799 * Choose a thread to execute. The thread chosen is removed
1800 * from its run queue. Note that this requires only that the runq
1804 * Check processor runq first; if anything found, run it.
1805 * Else check pset runq; if nothing found, return idle thread.
1807 * Second line of strategy is implemented by choose_pset_thread.
1808 * This is only called on processor startup and when thread_block
1809 * thinks there's something in the processor runq.
1813 processor_t myprocessor
)
1817 register run_queue_t runq
;
1818 processor_set_t pset
;
1820 runq
= &myprocessor
->runq
;
1821 pset
= myprocessor
->processor_set
;
1823 simple_lock(&runq
->lock
);
1824 if (runq
->count
> 0 && runq
->highq
>= pset
->runq
.highq
) {
1825 q
= runq
->queues
+ runq
->highq
;
1827 if (!queue_empty(q
)) {
1828 #endif /*MACH_ASSERT*/
1829 thread
= (thread_t
)q
->next
;
1830 ((queue_entry_t
)thread
)->next
->prev
= q
;
1831 q
->next
= ((queue_entry_t
)thread
)->next
;
1832 thread
->runq
= RUN_QUEUE_NULL
;
1834 if (queue_empty(q
)) {
1835 if (runq
->highq
!= IDLEPRI
)
1836 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
1837 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
1839 simple_unlock(&runq
->lock
);
1843 panic("choose_thread");
1844 #endif /*MACH_ASSERT*/
1848 simple_unlock(&runq
->lock
);
1849 simple_lock(&pset
->runq
.lock
);
1850 return (choose_pset_thread(myprocessor
, pset
));
1855 * choose_pset_thread: choose a thread from processor_set runq or
1856 * set processor idle and choose its idle thread.
1858 * Caller must be at splsched and have a lock on the runq. This
1859 * lock is released by this routine. myprocessor is always the current
1860 * processor, and pset must be its processor set.
1861 * This routine chooses and removes a thread from the runq if there
1862 * is one (and returns it), else it sets the processor idle and
1863 * returns its idle thread.
1867 register processor_t myprocessor
,
1868 processor_set_t pset
)
1870 register run_queue_t runq
;
1871 register thread_t thread
;
1875 if (runq
->count
> 0) {
1876 q
= runq
->queues
+ runq
->highq
;
1878 if (!queue_empty(q
)) {
1879 #endif /*MACH_ASSERT*/
1880 thread
= (thread_t
)q
->next
;
1881 ((queue_entry_t
)thread
)->next
->prev
= q
;
1882 q
->next
= ((queue_entry_t
)thread
)->next
;
1883 thread
->runq
= RUN_QUEUE_NULL
;
1885 if (queue_empty(q
)) {
1886 if (runq
->highq
!= IDLEPRI
)
1887 clrbit(MAXPRI
- runq
->highq
, runq
->bitmap
);
1888 runq
->highq
= MAXPRI
- ffsbit(runq
->bitmap
);
1890 simple_unlock(&runq
->lock
);
1894 panic("choose_pset_thread");
1895 #endif /*MACH_ASSERT*/
1898 simple_unlock(&runq
->lock
);
1901 * Nothing is runnable, so set this processor idle if it
1902 * was running. If it was in an assignment or shutdown,
1903 * leave it alone. Return its idle thread.
1905 simple_lock(&pset
->idle_lock
);
1906 if (myprocessor
->state
== PROCESSOR_RUNNING
) {
1907 myprocessor
->state
= PROCESSOR_IDLE
;
1909 * XXX Until it goes away, put master on end of queue, others
1910 * XXX on front so master gets used last.
1912 if (myprocessor
== master_processor
)
1913 queue_enter(&(pset
->idle_queue
), myprocessor
,
1914 processor_t
, processor_queue
);
1916 queue_enter_first(&(pset
->idle_queue
), myprocessor
,
1917 processor_t
, processor_queue
);
1921 simple_unlock(&pset
->idle_lock
);
1923 return (myprocessor
->idle_thread
);
1927 * no_dispatch_count counts number of times processors go non-idle
1928 * without being dispatched. This should be very rare.
1930 int no_dispatch_count
= 0;
1933 * This is the idle thread, which just looks for other threads
1937 idle_thread_continue(void)
1939 register processor_t myprocessor
;
1940 register volatile thread_t
*threadp
;
1941 register volatile int *gcount
;
1942 register volatile int *lcount
;
1943 register thread_t new_thread
;
1945 register processor_set_t pset
;
1948 mycpu
= cpu_number();
1949 myprocessor
= current_processor();
1950 threadp
= (volatile thread_t
*) &myprocessor
->next_thread
;
1951 lcount
= (volatile int *) &myprocessor
->runq
.count
;
1954 #ifdef MARK_CPU_IDLE
1955 MARK_CPU_IDLE(mycpu
);
1956 #endif /* MARK_CPU_IDLE */
1958 gcount
= (volatile int *)&myprocessor
->processor_set
->runq
.count
;
1961 while ( (*threadp
== (volatile thread_t
)THREAD_NULL
) &&
1962 (*gcount
== 0) && (*lcount
== 0) ) {
1964 /* check for ASTs while we wait */
1965 if (need_ast
[mycpu
] &~ ( AST_SCHEDULING
| AST_PREEMPT
|
1966 AST_BSD
| AST_BSD_INIT
)) {
1967 /* don't allow scheduling ASTs */
1968 need_ast
[mycpu
] &= ~( AST_SCHEDULING
| AST_PREEMPT
|
1969 AST_BSD
| AST_BSD_INIT
);
1970 ast_taken(AST_ALL
, TRUE
); /* back at spllo */
1978 machine_clock_assist();
1983 #ifdef MARK_CPU_ACTIVE
1985 MARK_CPU_ACTIVE(mycpu
);
1987 #endif /* MARK_CPU_ACTIVE */
1990 * This is not a switch statement to avoid the
1991 * bounds checking code in the common case.
1993 pset
= myprocessor
->processor_set
;
1994 simple_lock(&pset
->idle_lock
);
1996 state
= myprocessor
->state
;
1997 if (state
== PROCESSOR_DISPATCHING
) {
			 *	Common case -- cpu dispatched.
2001 new_thread
= *threadp
;
2002 *threadp
= (volatile thread_t
) THREAD_NULL
;
2003 myprocessor
->state
= PROCESSOR_RUNNING
;
2004 simple_unlock(&pset
->idle_lock
);
2006 thread_lock(new_thread
);
2007 simple_lock(&myprocessor
->runq
.lock
);
2008 simple_lock(&pset
->runq
.lock
);
2009 if ( myprocessor
->runq
.highq
> new_thread
->sched_pri
||
2010 pset
->runq
.highq
> new_thread
->sched_pri
) {
2011 simple_unlock(&pset
->runq
.lock
);
2012 simple_unlock(&myprocessor
->runq
.lock
);
2014 if (new_thread
->bound_processor
!= PROCESSOR_NULL
)
2015 run_queue_enqueue(&myprocessor
->runq
, new_thread
, HEAD_Q
);
2017 run_queue_enqueue(&pset
->runq
, new_thread
, HEAD_Q
);
2018 thread_unlock(new_thread
);
2020 counter(c_idle_thread_block
++);
2021 thread_block(idle_thread_continue
);
2024 simple_unlock(&pset
->runq
.lock
);
2025 simple_unlock(&myprocessor
->runq
.lock
);
2026 thread_unlock(new_thread
);
2028 counter(c_idle_thread_handoff
++);
2029 thread_run(myprocessor
->idle_thread
,
2030 idle_thread_continue
, new_thread
);
2034 if (state
== PROCESSOR_IDLE
) {
2035 if (myprocessor
->state
!= PROCESSOR_IDLE
) {
2037 * Something happened, try again.
2042 * Processor was not dispatched (Rare).
2043 * Set it running again.
2045 no_dispatch_count
++;
2047 queue_remove(&pset
->idle_queue
, myprocessor
,
2048 processor_t
, processor_queue
);
2049 myprocessor
->state
= PROCESSOR_RUNNING
;
2050 simple_unlock(&pset
->idle_lock
);
2052 counter(c_idle_thread_block
++);
2053 thread_block(idle_thread_continue
);
2056 if ( state
== PROCESSOR_ASSIGN
||
2057 state
== PROCESSOR_SHUTDOWN
) {
2059 * Changing processor sets, or going off-line.
2060 * Release next_thread if there is one. Actual
2061 * thread to run is on a runq.
2063 if ((new_thread
= (thread_t
)*threadp
) != THREAD_NULL
) {
2064 *threadp
= (volatile thread_t
) THREAD_NULL
;
2065 simple_unlock(&pset
->idle_lock
);
2066 thread_lock(new_thread
);
2067 thread_setrun(new_thread
, FALSE
, TAIL_Q
);
2068 thread_unlock(new_thread
);
2070 simple_unlock(&pset
->idle_lock
);
2072 counter(c_idle_thread_block
++);
2073 thread_block(idle_thread_continue
);
2076 simple_unlock(&pset
->idle_lock
);
2077 printf("Bad processor state %d (Cpu %d)\n",
2078 cpu_state(mycpu
), mycpu
);
2079 panic("idle_thread");
2090 thread_t self
= current_thread();
2093 stack_privilege(self
);
2098 self
->priority
= IDLEPRI
;
2099 self
->sched_pri
= self
->priority
;
2101 thread_unlock(self
);
2104 counter(c_idle_thread_block
++);
2105 thread_block((void(*)(void))0);
2106 idle_thread_continue();
2110 static uint64_t sched_tick_interval
, sched_tick_deadline
;
2112 void sched_tick_thread(void);
2115 sched_tick_init(void)
2117 kernel_thread_with_priority(
2118 kernel_task
, MAXPRI_STANDARD
,
2119 sched_tick_thread
, TRUE
, TRUE
);
2125 * Update the priorities of all threads periodically.
2128 sched_tick_thread_continue(void)
2133 #endif /* SIMPLE_CLOCK */
2135 clock_get_uptime(&abstime
);
2137 sched_tick
++; /* age usage one more time */
2140 * Compensate for clock drift. sched_usec is an
2141 * exponential average of the number of microseconds in
2142 * a second. It decays in the same fashion as cpu_usage.
2144 new_usec
= sched_usec_elapsed();
2145 sched_usec
= (5*sched_usec
+ 3*new_usec
)/8;
2146 #endif /* SIMPLE_CLOCK */
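	/*
	 * Example (illustrative, not in the original source): the weighted
	 * average above converges toward the measured value at 3/8 per tick.
	 * Starting from sched_usec = 1000000 with a measured new_usec of
	 * 1040000, one update gives (5*1000000 + 3*1040000)/8 = 1015000.
	 */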
2149 * Compute the scheduler load factors.
2151 compute_mach_factor();
2154 * Scan the run queues for runnable threads that need to
2155 * have their priorities recalculated.
2159 clock_deadline_for_periodic_event(sched_tick_interval
, abstime
,
2160 &sched_tick_deadline
);
2162 assert_wait((event_t
)sched_tick_thread_continue
, THREAD_INTERRUPTIBLE
);
2163 thread_set_timer_deadline(sched_tick_deadline
);
2164 thread_block(sched_tick_thread_continue
);
2169 sched_tick_thread(void)
2171 thread_t self
= current_thread();
2175 stack_privilege(self
);
2177 rate
= (1000 >> SCHED_TICK_SHIFT
);
2178 clock_interval_to_absolutetime_interval(rate
, USEC_PER_SEC
,
2179 &sched_tick_interval
);
2180 clock_get_uptime(&sched_tick_deadline
);
2182 thread_block(sched_tick_thread_continue
);
2186 #define MAX_STUCK_THREADS 128
2189 * do_thread_scan: scan for stuck threads. A thread is stuck if
2190 * it is runnable but its priority is so low that it has not
2191 * run for several seconds. Its priority should be higher, but
2192 * won't be until it runs and calls update_priority. The scanner
2193 * finds these threads and does the updates.
2195 * Scanner runs in two passes. Pass one squirrels likely
2196 * thread ids away in an array (takes out references for them).
2197 * Pass two does the priority updates. This is necessary because
2198 * the run queue lock is required for the candidate scan, but
2199 * cannot be held during updates [set_pri will deadlock].
2201 * Array length should be enough so that restart isn't necessary,
2202 * but restart logic is included. Does not scan processor runqs.
2205 thread_t stuck_threads
[MAX_STUCK_THREADS
];
2206 int stuck_count
= 0;
2209 * do_runq_scan is the guts of pass 1. It scans a runq for
2210 * stuck threads. A boolean is returned indicating whether
2211 * a retry is needed.
2218 register thread_t thread
;
2221 boolean_t result
= FALSE
;
2224 simple_lock(&runq
->lock
);
2225 if ((count
= runq
->count
) > 0) {
2226 q
= runq
->queues
+ runq
->highq
;
2228 queue_iterate(q
, thread
, thread_t
, links
) {
2229 if ( !(thread
->state
& (TH_WAIT
|TH_SUSP
)) &&
2230 (thread
->sched_mode
& TH_MODE_TIMESHARE
) ) {
2231 if (thread
->sched_stamp
!= sched_tick
) {
2233 * Stuck, save its id for later.
2235 if (stuck_count
== MAX_STUCK_THREADS
) {
2237 * !@#$% No more room.
2239 simple_unlock(&runq
->lock
);
2246 * Inline version of thread_reference
2247 * XXX - lock ordering problem here:
2248 * thread locks should be taken before runq
2249 * locks: just try and get the thread's locks
2250 * and ignore this thread if we fail, we might
2251 * have better luck next time.
2253 if (simple_lock_try(&thread
->lock
)) {
2254 thread
->ref_count
++;
2255 thread_unlock(thread
);
2256 stuck_threads
[stuck_count
++] = thread
;
2269 simple_unlock(&runq
->lock
);
2275 boolean_t thread_scan_enabled
= TRUE
;
2278 do_thread_scan(void)
2280 register boolean_t restart_needed
= FALSE
;
2281 register thread_t thread
;
2282 register processor_set_t pset
= &default_pset
;
2283 register processor_t processor
;
2286 if (!thread_scan_enabled
)
2290 restart_needed
= do_runq_scan(&pset
->runq
);
2291 if (!restart_needed
) {
2292 simple_lock(&pset
->processors_lock
);
2293 processor
= (processor_t
)queue_first(&pset
->processors
);
2294 while (!queue_end(&pset
->processors
, (queue_entry_t
)processor
)) {
2295 if (restart_needed
= do_runq_scan(&processor
->runq
))
2298 thread
= processor
->idle_thread
;
2299 if (thread
->sched_stamp
!= sched_tick
) {
2300 if (stuck_count
== MAX_STUCK_THREADS
) {
2301 restart_needed
= TRUE
;
2305 stuck_threads
[stuck_count
++] = thread
;
2308 processor
= (processor_t
)queue_next(&processor
->processors
);
2310 simple_unlock(&pset
->processors_lock
);
2314 * Ok, we now have a collection of candidates -- fix them.
2316 while (stuck_count
> 0) {
2317 thread
= stuck_threads
[--stuck_count
];
2318 stuck_threads
[stuck_count
] = THREAD_NULL
;
2320 thread_lock(thread
);
2321 if ( (thread
->sched_mode
& TH_MODE_TIMESHARE
) ||
2322 (thread
->state
& TH_IDLE
) ) {
2323 if ( !(thread
->state
& (TH_WAIT
|TH_SUSP
)) &&
2324 thread
->sched_stamp
!= sched_tick
)
2325 update_priority(thread
);
2327 thread_unlock(thread
);
2329 if (!(thread
->state
& TH_IDLE
))
2330 thread_deallocate(thread
);
2333 } while (restart_needed
);
2337 * Just in case someone doesn't use the macro
2339 #undef thread_wakeup
2348 thread_wakeup_with_result(x
, THREAD_AWAKENED
);
2357 return ((thread
->state
& (TH_RUN
|TH_WAIT
)) == TH_RUN
);
2364 printf("processor_set: %08x\n",ps
);
2365 printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
2366 ps
->idle_queue
.next
,ps
->idle_queue
.prev
,ps
->idle_count
);
2367 printf("processors: %08x %08x, processor_count: 0x%x\n",
2368 ps
->processors
.next
,ps
->processors
.prev
,ps
->processor_count
);
2369 printf("tasks: %08x %08x, task_count: 0x%x\n",
2370 ps
->tasks
.next
,ps
->tasks
.prev
,ps
->task_count
);
2371 printf("threads: %08x %08x, thread_count: 0x%x\n",
2372 ps
->threads
.next
,ps
->threads
.prev
,ps
->thread_count
);
2373 printf("ref_count: 0x%x, active: %x\n",
2374 ps
->ref_count
,ps
->active
);
2375 printf("pset_self: %08x, pset_name_self: %08x\n",ps
->pset_self
, ps
->pset_name_self
);
2376 printf("set_quanta: 0x%x\n", ps
->set_quanta
);
2379 #define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])
2385 char *states
[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
2386 "ASSIGN","SHUTDOWN"};
2388 printf("processor: %08x\n",p
);
2389 printf("processor_queue: %08x %08x\n",
2390 p
->processor_queue
.next
,p
->processor_queue
.prev
);
2391 printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
2392 processor_state(p
->state
), p
->next_thread
, p
->idle_thread
);
2393 printf("slice_quanta: %x\n", p
->slice_quanta
);
2394 printf("processor_set: %08x, processor_set_next: %08x\n",
2395 p
->processor_set
, p
->processor_set_next
);
2396 printf("processors: %08x %08x\n", p
->processors
.next
,p
->processors
.prev
);
2397 printf("processor_self: %08x, slot_num: 0x%x\n", p
->processor_self
, p
->slot_num
);
2401 dump_run_queue_struct(
2407 for( i
=0; i
< NRQS
; ) {
2410 printf("%6s",(i
==0)?"runq:":"");
2411 for( j
=0; (j
<8) && (i
< NRQS
); j
++,i
++ ) {
2412 if( rq
->queues
[i
].next
== &rq
->queues
[i
] )
2413 printf( " --------");
2415 printf(" %08x",rq
->queues
[i
].next
);
2419 for( i
=0; i
< NRQBM
; ) {
2420 register unsigned int mask
;
2427 *d
++ = ((rq
->bitmap
[i
]&mask
)?'r':'e');
2431 printf("%8s%s\n",((i
==0)?"bitmap:":""),dump_buf
);
2434 printf("highq: 0x%x, count: %u\n", rq
->highq
, rq
->count
);
2441 register queue_t q1
;
2443 register queue_entry_t e
;
2446 for (i
= 0; i
< NRQS
; i
++) {
2447 if (q1
->next
!= q1
) {
2451 for (t_cnt
=0, e
= q1
->next
; e
!= q1
; e
= e
->next
) {
2452 printf("\t0x%08x",e
);
2453 if( (t_cnt
= ++t_cnt%4
) == 0 )
2460 printf("[%u]\t<empty>\n",i);
2471 register queue_t q1
;
2473 register queue_entry_t e
;
2479 for (i
= MAXPRI
; i
>= 0; i
--) {
2480 if (q1
->next
== q1
) {
2481 if (q1
->prev
!= q1
) {
2482 panic("checkrq: empty at %s", msg
);
2489 for (e
= q1
->next
; e
!= q1
; e
= e
->next
) {
2491 if (e
->next
->prev
!= e
)
2492 panic("checkrq-2 at %s", msg
);
2493 if (e
->prev
->next
!= e
)
2494 panic("checkrq-3 at %s", msg
);
2500 panic("checkrq: count wrong at %s", msg
);
2501 if (rq
->count
!= 0 && highq
> rq
->highq
)
2502 panic("checkrq: highq wrong at %s", msg
);
2507 register thread_t thread
,
2508 register run_queue_t rq
)
2510 register int whichq
= thread
->sched_pri
;
2511 register queue_entry_t queue
, entry
;
2513 if (whichq
< MINPRI
|| whichq
> MAXPRI
)
2514 panic("thread_check: bad pri");
2516 if (whichq
!= thread
->whichq
)
2517 panic("thread_check: whichq");
2519 queue
= &rq
->queues
[whichq
];
2520 entry
= queue_first(queue
);
2521 while (!queue_end(queue
, entry
)) {
2522 if (entry
== (queue_entry_t
)thread
)
2525 entry
= queue_next(entry
);
2528 panic("thread_check: not found");
2534 #include <ddb/db_output.h>
2535 #define printf kdbprintf
2536 extern int db_indent
;
2537 void db_sched(void);
2542 iprintf("Scheduling Statistics:\n");
2544 iprintf("Thread invocations: csw %d same %d\n",
2545 c_thread_invoke_csw
, c_thread_invoke_same
);
2547 iprintf("Thread block: calls %d\n",
2548 c_thread_block_calls
);
2549 iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
2550 c_idle_thread_handoff
,
2551 c_idle_thread_block
, no_dispatch_count
);
2552 iprintf("Sched thread blocks: %d\n", c_sched_thread_block
);
2553 #endif /* MACH_COUNTERS */
2556 #endif /* MACH_KDB */