/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 */
#include <simple_clock.h>
#include <power_save.h>
#include <task_swapper.h>

#include <ddb/db_output.h>
#include <mach/machine.h>
#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/etap_macros.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <sys/kdebug.h>
#if	TASK_SWAPPER
#include <kern/task_swap.h>
extern int	task_swap_on;
#endif	/* TASK_SWAPPER */
#define	DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int	default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define	NO_KERNEL_PREEMPT	0
#define	KERNEL_PREEMPT		1
int	kernel_preemption_mode = KERNEL_PREEMPT;

natural_t	min_quantum_ms;

#if	SIMPLE_CLOCK
int	sched_usec;
#endif	/* SIMPLE_CLOCK */
void	thread_continue(thread_t);

void	wait_queues_init(void);

thread_t	choose_pset_thread(
				processor_t		myprocessor,
				processor_set_t	pset);

thread_t	choose_thread(
				processor_t		myprocessor);

int		run_queue_enqueue(
				run_queue_t		runq,
				thread_t		thread,
				boolean_t		tail);

void	idle_thread_continue(void);
void	do_thread_scan(void);

void	clear_wait_internal(
				thread_t		thread,
				int				result);

void	dump_run_queues(
				run_queue_t		rq);
void	dump_run_queue_struct(
				run_queue_t		rq);
void	dump_processor_set(
				processor_set_t	ps);

boolean_t	thread_runnable(
				thread_t		thread);
/*
 *	State machine
 *
 *	states are combinations of:
 *	  R	running
 *	  W	waiting (or on wait queue)
 *	  N	non-interruptible
 *
 *	init	action
 *		assert_wait	thread_block	clear_wait	swapout	swapin
 *
 *	R	RW, RWN		R;  setrun	-		-
 *	RN	RWN		RN; setrun	-		-
 */

/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */

#define	NUMQUEUES	59

struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
	((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
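/*
 *	Example (illustrative sketch): the typical wait/wakeup pattern
 *	built on these primitives.  The object and its state_flag field
 *	are hypothetical names.
 *
 *	Waiter, at splsched() after dropping its own locks:
 *
 *		assert_wait((event_t)&object->state_flag, THREAD_UNINT);
 *		thread_block((void (*)(void)) 0);
 *
 *	Waker, after changing the state the waiter cares about:
 *
 *		object->state_flag = TRUE;
 *		thread_wakeup((event_t)&object->state_flag);
 *
 *	Both sides hash the same event address into wait_queues[]; with
 *	NUMQUEUES = 59, an event address of 0x1000 lands in bucket
 *	0x1000 % 59 = 25.  Because the waker identifies the target only by
 *	event, the hash bucket must be locked before any thread lock.
 */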
/*
 *	sched_init: initialize the scheduler module.
 */
void
sched_init(void)
{
	/*
	 *	Calculate the minimum quantum
	 *	in ticks.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	min_quantum = hz / default_preemption_rate;

	/*
	 *	Round up result (4/5) to an
	 *	integral number of ticks.
	 */
	if (((hz * 10) / default_preemption_rate) - (min_quantum * 10) >= 5)
		min_quantum++;

	min_quantum_ms = (1000 / hz) * min_quantum;

	printf("scheduling quantum is %d ms\n", min_quantum_ms);

	wait_queues_init();
	pset_sys_bootstrap();		/* initialize processor mgmt. */
#if	SIMPLE_CLOCK
	sched_usec = 0;
#endif	/* SIMPLE_CLOCK */
}
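/*
 *	Worked example (illustrative): with hz = 100 ticks/sec and
 *	default_preemption_rate = 100 preemptions/sec,
 *
 *		min_quantum     = 100 / 100                    = 1 tick
 *		round-up test   = (100*10)/100 - (1*10) = 0    (< 5, no adjustment)
 *		min_quantum_ms  = (1000 / 100) * 1             = 10 ms
 *
 *	so the boot message above would report a 10 ms scheduling quantum.
 */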
void
wait_queues_init(void)
{
	register int	i;

	for (i = 0; i < NUMQUEUES; i++) {
		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
	}
}
/*
 *	Thread timeout routine, called when timer expires.
 *	Called at splsched.
 */
void
thread_timer_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1)
{
	thread_t	thread = p0;
	spl_t		s;

	s = splsched();
	wake_lock(thread);
	if (	thread->wait_timer_is_set	&&
			!timer_call_is_delayed(&thread->wait_timer, NULL)	) {
		thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
		thread_lock(thread);
		clear_wait_internal(thread, THREAD_TIMED_OUT);
		thread_unlock(thread);
	}
	else
	if (--thread->wait_timer_active == 0)
		thread_wakeup_one(&thread->wait_timer_active);
	wake_unlock(thread);
	splx(s);
}
/*
 *	thread_set_timer:
 *
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	natural_t		interval,
	natural_t		scale_factor)
{
	thread_t		thread = current_thread();
	AbsoluteTime	deadline;
	spl_t			s;

	s = splsched();
	wake_lock(thread);
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		timer_call_enter(&thread->wait_timer, deadline);
		assert(!thread->wait_timer_is_set);
		thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
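/*
 *	Example (illustrative sketch): a bounded wait built from these
 *	primitives.  The event name is hypothetical; the interval is
 *	100 ms expressed in the same units used by assert_wait_timeout().
 *
 *		assert_wait((event_t)&some_object, THREAD_UNINT);
 *		thread_set_timer(100, 1000*NSEC_PER_USEC);
 *		wait_result = thread_block((void (*)(void)) 0);
 *		if (wait_result != THREAD_TIMED_OUT)
 *			thread_cancel_timer();
 *
 *	If the wakeup arrives before the deadline, the timer must be
 *	cancelled so thread_timer_expire() cannot fire against a later wait.
 */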
void
thread_set_timer_deadline(
	AbsoluteTime		deadline)
{
	thread_t	thread = current_thread();
	spl_t		s;

	s = splsched();
	wake_lock(thread);
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		timer_call_enter(&thread->wait_timer, deadline);
		assert(!thread->wait_timer_is_set);
		thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
void
thread_cancel_timer(void)
{
	thread_t	thread = current_thread();
	spl_t		s;

	s = splsched();
	wake_lock(thread);
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
	wake_unlock(thread);
	splx(s);
}
/*
 *	thread_depress_timeout:
 *
 *	Timeout routine for priority depression.
 */
void
thread_depress_timeout(
	thread_call_param_t		p0,
	thread_call_param_t		p1)
{
	thread_t		thread = p0;
	sched_policy_t	*policy;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	policy = policy_id_to_sched_policy(thread->policy);
	thread_unlock(thread);
	splx(s);

	if (policy != SCHED_POLICY_NULL)
		policy->sp_ops.sp_thread_depress_timeout(policy, thread);

	thread_deallocate(thread);
}
/*
 *	Set up thread timeout element when thread is created.
 */
void
thread_timer_setup(
	thread_t	thread)
{
	timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
	thread->wait_timer_is_set = FALSE;
	thread->wait_timer_active = 1;

	thread_call_setup(&thread->depress_timer, thread_depress_timeout, thread);
}
void
thread_timer_terminate(void)
{
	thread_t	thread = current_thread();
	spl_t		s;

	s = splsched();
	wake_lock(thread);
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	thread->wait_timer_active--;

	while (thread->wait_timer_active > 0) {
		assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
		wake_unlock(thread);
		splx(s);

		thread_block((void (*)(void)) 0);

		s = splsched();
		wake_lock(thread);
	}

	wake_unlock(thread);
	splx(s);

	thread_deallocate(thread);
}
/*
 *	Routine:	thread_go_locked
 *	Purpose:
 *		Start a thread running.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 */
void
thread_go_locked(
	thread_t	thread,
	int			result)
{
	sched_policy_t	*policy;
	sf_return_t		sfr;

	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if (thread->state & TH_WAIT) {

		thread->state &= ~(TH_WAIT|TH_UNINT);
		if (!(thread->state & TH_RUN)) {
			thread->state |= TH_RUN;
#if	THREAD_SWAPPER
			if (thread->state & TH_SWAPPED_OUT)
				thread_swapin(thread->top_act, FALSE);
			else
#endif	/* THREAD_SWAPPER */
			{
				policy = &sched_policy[thread->policy];
				sfr = policy->sp_ops.sp_thread_unblock(policy, thread);
				assert(sfr == SF_SUCCESS);
			}
		}
		thread->wait_result = result;
	}

	/*
	 * The next few lines are a major hack.  Hopefully this will get us
	 * around all of the scheduling framework hooha.  We can't call
	 * sp_thread_unblock yet because we could still be finishing up the
	 * durn two stage block on another processor and thread_setrun
	 * could be called by s_t_u and we'll really be messed up then.
	 */
	/* Don't mess with this if we are still swapped out */
	if (!(thread->state & TH_SWAPPED_OUT))
		thread->sp_state = MK_SP_RUNNABLE;
}
void
thread_mark_wait_locked(
	thread_t	thread,
	int			interruptible)
{
	assert(thread == current_thread());

	thread->wait_result = -1;	/* JMM - Needed for non-assert kernel */
	thread->state |= (interruptible && thread->interruptible) ?
							TH_WAIT : (TH_WAIT | TH_UNINT);
	thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible);
	thread->sleep_stamp = sched_tick;
}
/*
 *	Routine:	assert_wait_timeout
 *	Purpose:
 *		Assert that the thread intends to block,
 *		waiting for a timeout (no user known event).
 */
unsigned int assert_wait_timeout_event;

void
assert_wait_timeout(
	mach_msg_timeout_t	msecs,
	int					interruptible)
{
	assert_wait((event_t)&assert_wait_timeout_event, interruptible);
	thread_set_timer(msecs, 1000*NSEC_PER_USEC);
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t	thread;
	extern unsigned int	debug_mode;

	if (debug_mode)
		return TRUE;		/* Always succeed in debug mode */

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
void
assert_wait(
	event_t		event,
	int			interruptible)
{
	register wait_queue_t	wq;
	register int			index;

	assert(event != NO_EVENT);
	assert(assert_wait_possible());

	index = wait_hash(event);
	wq = &wait_queues[index];
	wait_queue_assert_wait(wq, event, interruptible);
}
/*
 *	thread_[un]stop(thread)
 *	Once a thread has blocked interruptibly (via assert_wait) prevent
 *	it from running until thread_unstop.
 *
 *	If someone else has already stopped the thread, wait for the
 *	stop to be cleared, and then stop it again.
 *
 *	Return FALSE if interrupted.
 *
 * NOTE: thread_hold/thread_suspend should be called on the activation
 *	before calling thread_stop.  TH_SUSP is only recognized when
 *	a thread blocks and only prevents clear_wait/thread_wakeup
 *	from restarting an interruptible wait.  The wake_active flag is
 *	used to indicate that someone is waiting on the thread.
 */
boolean_t
thread_stop(
	thread_t	thread)
{
	spl_t		s;

	s = splsched();
	wake_lock(thread);

	while (thread->state & TH_SUSP) {
		int	wait_result;

		thread->wake_active = TRUE;
		assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		wait_result = thread_block((void (*)(void)) 0);
		if (wait_result != THREAD_AWAKENED)
			return (FALSE);

		s = splsched();
		wake_lock(thread);
	}

	thread_lock(thread);
	thread->state |= TH_SUSP;
	thread_unlock(thread);

	wake_unlock(thread);
	splx(s);

	return (TRUE);
}
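/*
 *	Example (illustrative sketch): the calling sequence the NOTE above
 *	describes, used to park another thread temporarily.  thread_hold()
 *	and thread_release() act on the activation and are defined outside
 *	this file; error handling is omitted.
 *
 *		thread_hold(thread->top_act);
 *		if (thread_stop_wait(thread)) {
 *			... examine or manipulate the stopped thread ...
 *			thread_unstop(thread);
 *		}
 *		thread_release(thread->top_act);
 *
 *	thread_stop() may block waiting for a previous stop to clear, so it
 *	must not be called with simple locks held.
 */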
/*
 *	Clear TH_SUSP and if the thread has been stopped and is now runnable,
 *	put it back on the run queue.
 */
void
thread_unstop(
	thread_t	thread)
{
	sched_policy_t	*policy;
	sf_return_t		sfr;
	spl_t			s;

	s = splsched();
	wake_lock(thread);
	thread_lock(thread);

	if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP/*|TH_UNINT*/)) == TH_SUSP) {
		thread->state = (thread->state & ~TH_SUSP) | TH_RUN;
#if	THREAD_SWAPPER
		if (thread->state & TH_SWAPPED_OUT)
			thread_swapin(thread->top_act, FALSE);
		else
#endif	/* THREAD_SWAPPER */
		{
			policy = &sched_policy[thread->policy];
			sfr = policy->sp_ops.sp_thread_unblock(policy, thread);
			assert(sfr == SF_SUCCESS);
		}
	}
	else
	if (thread->state & TH_SUSP) {
		thread->state &= ~TH_SUSP;

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);
			wake_unlock(thread);
			splx(s);
			thread_wakeup((event_t)&thread->wake_active);

			return;
		}
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	Wait for the thread's RUN bit to clear
 */
boolean_t
thread_wait(
	thread_t	thread)
{
	spl_t		s;

	s = splsched();
	wake_lock(thread);

	while (thread->state & (TH_RUN/*|TH_UNINT*/)) {
		int	wait_result;

		if (thread->last_processor != PROCESSOR_NULL)
			cause_ast_check(thread->last_processor);

		thread->wake_active = TRUE;
		assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		wait_result = thread_block((void (*)(void))0);
		if (wait_result != THREAD_AWAKENED)
			return (FALSE);

		s = splsched();
		wake_lock(thread);
	}

	wake_unlock(thread);
	splx(s);

	return (TRUE);
}
/*
 *	thread_stop_wait(thread)
 *	Stop the thread then wait for it to block interruptibly
 */
boolean_t
thread_stop_wait(
	thread_t	thread)
{
	if (thread_stop(thread)) {
		if (thread_wait(thread))
			return (TRUE);

		thread_unstop(thread);
	}

	return (FALSE);
}
/*
 *	Routine:	clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 */
void
clear_wait_internal(
	thread_t	thread,
	int			result)
{
	/*
	 *	If the thread isn't in a wait queue, just set it running.  Otherwise,
	 *	try to remove it from the queue and, if successful, then set it
	 *	running.  NEVER interrupt an uninterruptible thread.
	 */
	if (!((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))) {
		if (wait_queue_assert_possible(thread) ||
			(wait_queue_remove(thread) == KERN_SUCCESS)) {
			thread_go_locked(thread, result);
		}
	}
}
/*
 *	clear_wait:
 *
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
void
clear_wait(
	thread_t	thread,
	int			result)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	clear_wait_internal(thread, result);
	thread_unlock(thread);
	splx(s);
}
/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 */
void
thread_wakeup_prim(
	event_t		event,
	boolean_t	one_thread,
	int			result)
{
	register wait_queue_t	wq;
	register int			index;

	index = wait_hash(event);
	wq = &wait_queues[index];
	if (one_thread)
		wait_queue_wakeup_one(wq, event, result);
	else
		wait_queue_wakeup_all(wq, event, result);
}
/*
 *	thread_bind:
 *
 *	Force a thread to execute on the specified processor.
 *	If the thread is currently executing, it may wait until its
 *	time slice is up before switching onto the specified processor.
 *
 *	A processor of PROCESSOR_NULL causes the thread to be unbound.
 *	xxx - DO NOT export this to users.
 */
void
thread_bind(
	register thread_t	thread,
	processor_t			processor)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	thread_bind_locked(thread, processor);
	thread_unlock(thread);
	splx(s);
}
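/*
 *	Example (illustrative): temporarily binding the current thread to
 *	the master processor and then unbinding it.
 *
 *		thread_bind(current_thread(), master_processor);
 *		thread_block((void (*)(void)) 0);	force a reschedule onto it
 *		... do work that must run on the master processor ...
 *		thread_bind(current_thread(), PROCESSOR_NULL);
 *
 *	As noted above, a running thread may not migrate until it gives up
 *	the processor, hence the thread_block() after binding.
 */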
/*
 *	Select a thread for this processor (the current processor) to run.
 *	May select the current thread, which must already be locked.
 */
thread_t
thread_select(
	register processor_t	myprocessor)
{
	register thread_t		thread;
	processor_set_t			pset;
	register run_queue_t	runq = &myprocessor->runq;
	boolean_t				other_runnable;
	sched_policy_t			*policy;

	/*
	 *	Check for other non-idle runnable threads.
	 */
	myprocessor->first_quantum = TRUE;
	pset = myprocessor->processor_set;
	thread = current_thread();

	thread->unconsumed_quantum = myprocessor->quantum;

	simple_lock(&runq->lock);
	simple_lock(&pset->runq.lock);

	other_runnable = runq->count > 0 || pset->runq.count > 0;

	if (	thread->state == TH_RUN						&&
			(!other_runnable							||
			 (runq->highq < thread->sched_pri			&&
			  pset->runq.highq < thread->sched_pri))	&&
			thread->processor_set == pset				&&
			(thread->bound_processor == PROCESSOR_NULL	||
			 thread->bound_processor == myprocessor)		) {

		/* I am the highest priority runnable (non-idle) thread */
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);

		/* Update the thread's meta-priority */
		policy = policy_id_to_sched_policy(thread->policy);
		assert(policy != SCHED_POLICY_NULL);
		(void)policy->sp_ops.sp_thread_update_mpri(policy, thread);
	}
	else
	if (other_runnable) {
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);
		thread = choose_thread(myprocessor);
	}
	else {
		simple_unlock(&pset->runq.lock);
		simple_unlock(&runq->lock);

		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.  If it was in an assignment or shutdown,
		 *	leave it alone.  Return its idle thread.
		 */
		simple_lock(&pset->idle_lock);
		if (myprocessor->state == PROCESSOR_RUNNING) {
			myprocessor->state = PROCESSOR_IDLE;
			/*
			 *	XXX Until it goes away, put master on end of queue, others
			 *	XXX on front so master gets used last.
			 */
			if (myprocessor == master_processor)
				queue_enter(&(pset->idle_queue), myprocessor,
								processor_t, processor_queue);
			else
				queue_enter_first(&(pset->idle_queue), myprocessor,
								processor_t, processor_queue);

			pset->idle_count++;
		}
		simple_unlock(&pset->idle_lock);

		thread = myprocessor->idle_thread;
	}

	return (thread);
}
/*
 *	Stop running the current thread and start running the new thread.
 *	If continuation is non-zero, and the current thread is blocked,
 *	then it will resume by executing continuation on a new stack.
 *	Returns TRUE if the hand-off succeeds.
 *	The reason parameter == AST_QUANTUM if the thread blocked
 *	because its quantum expired.
 */
static thread_t
__current_thread(void)
{
	return (current_thread());
}
boolean_t
thread_invoke(
	register thread_t	old_thread,
	register thread_t	new_thread,
	int					reason,
	void				(*continuation)(void))
{
	sched_policy_t	*policy;
	sf_return_t		sfr;
	void			(*lcont)(void);

	/*
	 *	Mark thread interruptible.
	 */
	thread_lock(new_thread);
	new_thread->state &= ~TH_UNINT;

	if (cpu_data[cpu_number()].preemption_level != 1)
		panic("thread_invoke: preemption_level %d\n",
				cpu_data[cpu_number()].preemption_level);

	assert(thread_runnable(new_thread));

	assert(old_thread->continuation == (void (*)(void))0);

	if ((old_thread->sched_mode & TH_MODE_REALTIME) && (!old_thread->stack_privilege)) {
		old_thread->stack_privilege = old_thread->kernel_stack;
	}

	if (continuation != (void (*)()) 0) {
		switch (new_thread->state & TH_STACK_STATE) {
		case TH_STACK_HANDOFF:

			/*
			 * If the old thread has stack privilege, we can't give
			 * his stack away.  So go and get him one and treat this
			 * as a traditional context switch.
			 */
			if (old_thread->stack_privilege == current_stack())
				goto get_new_stack;

			/*
			 * Make the whole handoff/dispatch atomic to match the
			 * non-handoff case.
			 */
			disable_preemption();

			/*
			 * Set up ast context of new thread and switch to its timer.
			 */
			new_thread->state &= ~(TH_STACK_HANDOFF|TH_UNINT);
			new_thread->last_processor = current_processor();
			ast_context(new_thread->top_act, cpu_number());
			timer_switch(&new_thread->system_timer);
			thread_unlock(new_thread);

			old_thread->continuation = continuation;
			stack_handoff(old_thread, new_thread);

			wake_lock(old_thread);
			thread_lock(old_thread);
			act_machine_sv_free(old_thread->top_act);

			/*
			 * inline thread_dispatch but don't free stack
			 */
			switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
				sched_policy_t	*policy;
				sf_return_t		sfr;

			case TH_RUN | TH_UNINT:
			case TH_RUN:
				/*
				 *	No reason to stop.  Put back on a run queue.
				 */
				old_thread->state |= TH_STACK_HANDOFF;

				/* Get pointer to scheduling policy "object" */
				policy = &sched_policy[old_thread->policy];

				/* Leave enqueueing thread up to scheduling policy */
				sfr = policy->sp_ops.sp_thread_dispatch(policy, old_thread);
				assert(sfr == SF_SUCCESS);
				break;

			case TH_RUN | TH_WAIT | TH_UNINT:
			case TH_RUN | TH_WAIT:
				old_thread->sleep_stamp = sched_tick;
				/* fallthrough */

			case TH_WAIT:			/* this happens! */
				/*
				 *	Waiting
				 */
				old_thread->state |= TH_STACK_HANDOFF;
				old_thread->state &= ~TH_RUN;
				if (old_thread->state & TH_TERMINATE)
					thread_reaper_enqueue(old_thread);

				if (old_thread->wake_active) {
					old_thread->wake_active = FALSE;
					thread_unlock(old_thread);
					wake_unlock(old_thread);
					thread_wakeup((event_t)&old_thread->wake_active);
					wake_lock(old_thread);
					thread_lock(old_thread);
				}
				break;

			case TH_RUN | TH_IDLE:
				/*
				 *	Drop idle thread -- it is already in
				 *	idle_thread_array.
				 */
				old_thread->state |= TH_STACK_HANDOFF;
				break;

			default:
				panic("State 0x%x \n", old_thread->state);
			}

			/* Get pointer to scheduling policy "object" */
			policy = &sched_policy[old_thread->policy];

			/* Indicate to sched policy that old thread has stopped execution */
			/*** ??? maybe use a macro -- rkc, 1/4/96 ***/
			sfr = policy->sp_ops.sp_thread_done(policy, old_thread);
			assert(sfr == SF_SUCCESS);
			thread_unlock(old_thread);
			wake_unlock(old_thread);

			thread_lock(new_thread);

			assert(thread_runnable(new_thread));

			/* Get pointer to scheduling policy "object" */
			policy = &sched_policy[new_thread->policy];

			/* Indicate to sched policy that new thread has started execution */
			/*** ??? maybe use a macro ***/
			sfr = policy->sp_ops.sp_thread_begin(policy, new_thread);
			assert(sfr == SF_SUCCESS);

			lcont = new_thread->continuation;
			new_thread->continuation = (void(*)(void))0;

			thread_unlock(new_thread);
			enable_preemption();

			counter_always(c_thread_invoke_hits++);

			if (new_thread->funnel_state & TH_FN_REFUNNEL) {
				kern_return_t	save_wait_result;

				new_thread->funnel_state = 0;
				save_wait_result = new_thread->wait_result;
				KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
				//mutex_lock(new_thread->funnel_lock);
				funnel_lock(new_thread->funnel_lock);
				KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
				new_thread->funnel_state = TH_FN_OWNED;
				new_thread->wait_result = save_wait_result;
			}

			assert(lcont);
			call_continuation(lcont);
			/*NOTREACHED*/
			return (TRUE);

		case TH_STACK_COMING_IN:
			/*
			 * waiting for a stack
			 */
			thread_swapin(new_thread);
			thread_unlock(new_thread);
			counter_always(c_thread_invoke_misses++);
			return (FALSE);

		default:
			/*
			 * already has a stack - can't handoff
			 */
			if (new_thread == old_thread) {

				/* same thread but with continuation */
				counter(++c_thread_invoke_same);
				thread_unlock(new_thread);

				if (old_thread->funnel_state & TH_FN_REFUNNEL) {
					kern_return_t	save_wait_result;

					old_thread->funnel_state = 0;
					save_wait_result = old_thread->wait_result;
					KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
					funnel_lock(old_thread->funnel_lock);
					KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
					old_thread->funnel_state = TH_FN_OWNED;
					old_thread->wait_result = save_wait_result;
				}
				call_continuation(continuation);
				/*NOTREACHED*/
			}
			break;
		}
	}
	else {
		/*
		 * check that the new thread has a stack
		 */
		if (new_thread->state & TH_STACK_STATE) {
		get_new_stack:
			/* has no stack. if not already waiting for one try to get one */
			if ((new_thread->state & TH_STACK_COMING_IN) ||
				/* not already waiting. nonblocking try to get one */
				!stack_alloc_try(new_thread, thread_continue)) {
				/* couldn't get one.  schedule new thread to get a stack and
				   return failure so we can try another thread. */
				thread_swapin(new_thread);
				thread_unlock(new_thread);
				counter_always(c_thread_invoke_misses++);
				return (FALSE);
			}
		}
		else
		if (old_thread == new_thread) {
			counter(++c_thread_invoke_same);
			thread_unlock(new_thread);
			return (TRUE);
		}

		/* new thread now has a stack. it has been setup to resume in
		   thread_continue so it can dispatch the old thread, deal with
		   funnelling and then go to its true continuation point */
	}

	new_thread->state &= ~(TH_STACK_HANDOFF | TH_UNINT);

	/*
	 * Set up ast context of new thread and switch to its timer.
	 */
	new_thread->last_processor = current_processor();
	ast_context(new_thread->top_act, cpu_number());
	timer_switch(&new_thread->system_timer);
	assert(thread_runnable(new_thread));

	/*
	 * N.B. On return from the call to switch_context, 'old_thread'
	 * points at the thread that yielded to us.  Unfortunately, at
	 * this point, there are no simple_locks held, so if we are preempted
	 * before the call to thread_dispatch blocks preemption, it is
	 * possible for 'old_thread' to terminate, leaving us with a
	 * stale thread pointer.
	 */
	disable_preemption();

	thread_unlock(new_thread);

	counter_always(c_thread_invoke_csw++);
	current_task()->csw++;

	thread_lock(old_thread);
	old_thread->reason = reason;
	assert(old_thread->runq == RUN_QUEUE_NULL);

	if (continuation != (void (*)(void))0)
		old_thread->continuation = continuation;

	/* Indicate to sched policy that old thread has stopped execution */
	policy = &sched_policy[old_thread->policy];
	/*** ??? maybe use a macro -- ***/
	sfr = policy->sp_ops.sp_thread_done(policy, old_thread);
	assert(sfr == SF_SUCCESS);
	thread_unlock(old_thread);

	/*
	 *	switch_context is machine-dependent.  It does the
	 *	machine-dependent components of a context-switch, like
	 *	changing address spaces.  It updates active_threads.
	 */
	old_thread = switch_context(old_thread, continuation, new_thread);

	/* Now on new thread's stack.  Set a local variable to refer to it. */
	new_thread = __current_thread();
	assert(old_thread != new_thread);

	assert(thread_runnable(new_thread));

	thread_lock(new_thread);
	assert(thread_runnable(new_thread));
	/* Indicate to sched policy that new thread has started execution */
	policy = &sched_policy[new_thread->policy];
	/*** ??? maybe use a macro -- rkc, 1/4/96 ***/
	sfr = policy->sp_ops.sp_thread_begin(policy, new_thread);
	assert(sfr == SF_SUCCESS);
	thread_unlock(new_thread);

	/*
	 *	We're back.  Now old_thread is the thread that resumed
	 *	us, and we have to dispatch it.
	 */
	// Code from OSF in Grenoble deleted the following fields. They were
	// used in HPPA and 386 code, but not in the PPC for other than
	// just setting and resetting.  They didn't delete these lines from
	// the MACH_RT builds, though, causing compile errors.  I'm going
	// to make a wild guess and assume we can just delete these.
	if (old_thread->preempt == TH_NOT_PREEMPTABLE) {
		/*
		 * Mark that we have been really preempted
		 */
		old_thread->preempt = TH_PREEMPTED;
	}

	thread_dispatch(old_thread);
	enable_preemption();

	/* if we get here and 'continuation' is set that means the
	 * switch_context() path returned and did not call out
	 * to the continuation. we will do it manually here */
	if (continuation) {
		call_continuation(continuation);
		/* NOTREACHED */
	}

	return (TRUE);
}
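/*
 *	Summary sketch (illustrative): ignoring the funnel and scheduling
 *	policy bookkeeping, the hand-off decision above reduces to roughly:
 *
 *		if (continuation != 0 && (new_thread->state & TH_STACK_HANDOFF)) {
 *			stack_handoff(old_thread, new_thread);	  give our stack away
 *			call_continuation(new_thread->continuation);
 *		} else if (new_thread->state & TH_STACK_STATE) {
 *			thread_swapin(new_thread);	  it needs a stack; report failure
 *			return FALSE;
 *		} else {
 *			old_thread = switch_context(old_thread, continuation, new_thread);
 *			thread_dispatch(old_thread);	  full context switch
 *		}
 *		return TRUE;
 */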
/*
 *	thread_continue:
 *
 *	Called when launching a new thread, at splsched().
 */
void
thread_continue(
	register thread_t	old_thread)
{
	register thread_t	self = current_thread();
	register void		(*continuation)();
	sched_policy_t		*policy;
	sf_return_t			sfr;

	/*
	 *	We must dispatch the old thread and then
	 *	call the current thread's continuation.
	 *	There might not be an old thread, if we are
	 *	the first thread to run on this processor.
	 */
	if (old_thread != THREAD_NULL)
		thread_dispatch(old_thread);

	thread_lock(self);

	/* Get pointer to scheduling policy "object" */
	policy = &sched_policy[self->policy];

	/* Indicate to sched policy that new thread has started execution */
	/*** ??? maybe use a macro -- rkc, 1/4/96 ***/
	sfr = policy->sp_ops.sp_thread_begin(policy, self);
	assert(sfr == SF_SUCCESS);

	continuation = self->continuation;
	self->continuation = (void (*)(void))0;
	thread_unlock(self);

	/*
	 *	N.B. - the following is necessary, since thread_invoke()
	 *	inhibits preemption on entry and reenables before it
	 *	returns.  Unfortunately, the first time a newly-created
	 *	thread executes, it magically appears here, and never
	 *	executes the enable_preemption() call in thread_invoke().
	 */
	enable_preemption();

	if (self->funnel_state & TH_FN_REFUNNEL) {
		kern_return_t	save_wait_result;

		self->funnel_state = 0;
		save_wait_result = self->wait_result;
		KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
		funnel_lock(self->funnel_lock);
		KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
		self->wait_result = save_wait_result;
		self->funnel_state = TH_FN_OWNED;
	}

	assert(continuation);
	(*continuation)();
	/*NOTREACHED*/
}
#if	MACH_LDEBUG || MACH_KDB

#define	THREAD_LOG_SIZE	300

struct t64 {
	unsigned int	h;
	unsigned int	l;
};

struct {
	struct t64	stamp;
	thread_t	thread;
	long		info1;
	long		info2;
	long		info3;
	char		*action;
} thread_log[THREAD_LOG_SIZE];

int		thread_log_index;

void	check_thread_time(long n);

int		check_thread_time_crash;

void
check_thread_time(long us)
{
	struct t64	temp;

	if (!check_thread_time_crash)
		return;

	temp = thread_log[0].stamp;
	cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp);

	if (temp.l >= us && thread_log[1].info1 != 0x49)	/* HACK!!! */
		panic ("check_thread_time");
}

void
log_thread_action(char * action, long info1, long info2, long info3)
{
	int		i;
	spl_t	x;
	static unsigned int	tstamp;

	x = splhigh();

	for (i = THREAD_LOG_SIZE-1; i > 0; i--) {
		thread_log[i] = thread_log[i-1];
	}

	thread_log[0].stamp.h = 0;
	thread_log[0].stamp.l = tstamp++;
	thread_log[0].thread = current_thread();
	thread_log[0].info1 = info1;
	thread_log[0].info2 = info2;
	thread_log[0].info3 = info3;
	thread_log[0].action = action;
/*	strcpy (&thread_log[0].action[0], action);*/

	splx(x);
}
#endif	/* MACH_LDEBUG || MACH_KDB */
#if	MACH_KDB
#include <ddb/db_output.h>
void	db_show_thread_log(void);

void
db_show_thread_log(void)
{
	int	i;

	db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
			" Info3 ", " Timestamp ", "Action");

	for (i = 0; i < THREAD_LOG_SIZE; i++) {
		db_printf ("%08x %08x %08x %08x %08x/%08x %s\n",
			thread_log[i].thread,
			thread_log[i].info1,
			thread_log[i].info2,
			thread_log[i].info3,
			thread_log[i].stamp.h,
			thread_log[i].stamp.l,
			thread_log[i].action);
	}
}
#endif	/* MACH_KDB */
/*
 *	thread_block_reason:
 *
 *	Block the current thread.  If the thread is runnable
 *	then someone must have woken it up between its request
 *	to sleep and now.  In this case, it goes back on a
 *	run queue.
 *
 *	If a continuation is specified, then thread_block will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)

int
thread_block_reason(
	void	(*continuation)(void),
	int		reason)
{
	register thread_t		thread = current_thread();
	register processor_t	myprocessor;
	register thread_t		new_thread;
	spl_t					s;

	counter(++c_thread_block_calls);

	check_simple_locks();

	machine_clock_assist();

	s = splsched();

	if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) {
		thread->funnel_state = TH_FN_REFUNNEL;
		KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0);
		funnel_unlock(thread->funnel_lock);
	}

	myprocessor = current_processor();

	thread_lock(thread);
	if (thread->state & TH_ABORT)
		clear_wait_internal(thread, THREAD_INTERRUPTED);

	/* Unconditionally remove either | both */
	ast_off(AST_QUANTUM|AST_BLOCK|AST_URGENT);

	new_thread = thread_select(myprocessor);
	assert(thread_runnable(new_thread));
	thread_unlock(thread);
	while (!thread_invoke(thread, new_thread, reason, continuation)) {
		thread_lock(thread);
		new_thread = thread_select(myprocessor);
		assert(thread_runnable(new_thread));
		thread_unlock(thread);
	}

	if (thread->funnel_state & TH_FN_REFUNNEL) {
		kern_return_t	save_wait_result;

		save_wait_result = thread->wait_result;
		thread->funnel_state = 0;
		KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
		funnel_lock(thread->funnel_lock);
		KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
		thread->funnel_state = TH_FN_OWNED;
		thread->wait_result = save_wait_result;
	}

	splx(s);

	return (thread->wait_result);
}

/*
 *	thread_block:
 *
 *	Now calls thread_block_reason() which forwards the
 *	reason parameter to thread_invoke() so it can
 *	do the right thing if the thread's quantum expired.
 */
int
thread_block(
	void	(*continuation)(void))
{
	return thread_block_reason(continuation, 0);
}
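/*
 *	Example (illustrative sketch): blocking with and without a
 *	continuation.  my_object and my_service_loop are hypothetical names.
 *
 *	Simple blocking wait (kernel stack is kept):
 *
 *		assert_wait((event_t)&my_object, THREAD_UNINT);
 *		wait_result = thread_block((void (*)(void)) 0);
 *
 *	Continuation-style wait (kernel stack may be discarded; the thread
 *	resumes in my_service_loop() on a fresh stack, so no locals survive):
 *
 *		assert_wait((event_t)&my_object, THREAD_UNINT);
 *		thread_block(my_service_loop);
 *		(not reached)
 */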
/*
 *	thread_run:
 *
 *	Switch directly from the current thread to a specified
 *	thread.  Both the current and new threads must be
 *	runnable.
 */
int
thread_run(
	thread_t	old_thread,
	void		(*continuation)(void),
	thread_t	new_thread)
{
	while (!thread_invoke(old_thread, new_thread, 0, continuation)) {
		register processor_t	myprocessor = current_processor();

		thread_lock(old_thread);
		new_thread = thread_select(myprocessor);
		thread_unlock(old_thread);
	}

	return (old_thread->wait_result);
}
/*
 *	Dispatches a running thread that is not on a runq.
 *	Called at splsched.
 */
void
thread_dispatch(
	register thread_t	thread)
{
	sched_policy_t	*policy;
	sf_return_t		sfr;

	/*
	 *	If we are discarding the thread's stack, we must do it
	 *	before the thread has a chance to run.
	 */
	wake_lock(thread);
	thread_lock(thread);

#ifndef i386
	/* no continuations on i386 for now */
	if (thread->continuation != (void (*)())0) {
		assert((thread->state & TH_STACK_STATE) == 0);
		thread->state |= TH_STACK_HANDOFF;
		stack_free(thread);
		if (thread->top_act) {
			act_machine_sv_free(thread->top_act);
		}
	}
#endif

	switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {

	case TH_RUN | TH_UNINT:
	case TH_RUN:
		/*
		 *	No reason to stop.  Put back on a run queue.
		 */
		/* Leave enqueueing thread up to scheduling policy */
		policy = &sched_policy[thread->policy];
		/*** ??? maybe use a macro ***/
		sfr = policy->sp_ops.sp_thread_dispatch(policy, thread);
		assert(sfr == SF_SUCCESS);
		break;

	case TH_RUN | TH_WAIT | TH_UNINT:
	case TH_RUN | TH_WAIT:
		thread->sleep_stamp = sched_tick;
		/* fallthrough */

	case TH_WAIT:			/* this happens! */
		/*
		 *	Waiting
		 */
		thread->state &= ~TH_RUN;
		if (thread->state & TH_TERMINATE)
			thread_reaper_enqueue(thread);

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);
			wake_unlock(thread);
			thread_wakeup((event_t)&thread->wake_active);
			return;
		}
		break;

	case TH_RUN | TH_IDLE:
		/*
		 *	Drop idle thread -- it is already in
		 *	idle_thread_array.
		 */
		break;

	default:
		panic("State 0x%x \n", thread->state);
	}

	thread_unlock(thread);
	wake_unlock(thread);
}
/*
 *	Enqueue thread on run queue.  Thread must be locked,
 *	and not already be on a run queue.
 */
int
run_queue_enqueue(
	register run_queue_t	rq,
	register thread_t		thread,
	boolean_t				tail)
{
	register int	whichq;
	int				oldrqcount;

	whichq = thread->sched_pri;
	assert(whichq >= MINPRI && whichq <= MAXPRI);

	simple_lock(&rq->lock);		/* lock the run queue */
	assert(thread->runq == RUN_QUEUE_NULL);
	if (tail)
		enqueue_tail(&rq->queues[whichq], (queue_entry_t)thread);
	else
		enqueue_head(&rq->queues[whichq], (queue_entry_t)thread);

	setbit(MAXPRI - whichq, rq->bitmap);
	if (whichq > rq->highq)
		rq->highq = whichq;

	oldrqcount = rq->count++;
	thread->runq = rq;
	thread->whichq = whichq;

	thread_check(thread, rq);

	simple_unlock(&rq->lock);

	return (oldrqcount);
}
/*
 *	thread_setrun:
 *
 *	Make thread runnable; dispatch directly onto an idle processor
 *	if possible.  Else put on the appropriate run queue (processor
 *	if bound, else processor set).  Caller must have lock on thread.
 *	This is always called at splsched.
 *	The tail parameter, if TRUE || TAIL_Q, indicates that the
 *	thread should be placed at the tail of the runq.  If
 *	FALSE || HEAD_Q the thread will be placed at the head of the
 *	runq.
 */
void
thread_setrun(
	register thread_t	new_thread,
	boolean_t			may_preempt,
	boolean_t			tail)
{
	register processor_t		processor;
	register run_queue_t		runq;
	register processor_set_t	pset;
	thread_t					thread;
	ast_t						ast_flags = AST_BLOCK;

	mp_disable_preemption();

	assert(!(new_thread->state & TH_SWAPPED_OUT));
	assert(thread_runnable(new_thread));

	/*
	 *	Update priority if needed.
	 */
	if (new_thread->sched_stamp != sched_tick)
		update_priority(new_thread);

	if (new_thread->policy & (POLICY_FIFO|POLICY_RR)) {
		if (	new_thread->sched_pri >= (MAXPRI_KERNBAND - 2)	&&
				kernel_preemption_mode == KERNEL_PREEMPT			)
			ast_flags |= AST_URGENT;
	}

	assert(new_thread->runq == RUN_QUEUE_NULL);

	/*
	 *	Try to dispatch the thread directly onto an idle processor.
	 */
	if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
		/*
		 *	Not bound, any processor in the processor set is ok.
		 */
		pset = new_thread->processor_set;
		if (pset->idle_count > 0) {
			simple_lock(&pset->idle_lock);
			if (pset->idle_count > 0) {
				processor = (processor_t) queue_first(&pset->idle_queue);
				queue_remove(&(pset->idle_queue), processor, processor_t,
														processor_queue);
				pset->idle_count--;
				processor->next_thread = new_thread;
				processor->state = PROCESSOR_DISPATCHING;
				simple_unlock(&pset->idle_lock);
				if (processor->slot_num != cpu_number())
					machine_signal_idle(processor);
				mp_enable_preemption();
				return;
			}
			simple_unlock(&pset->idle_lock);
		}

		/*
		 *	Preemption check.
		 */
		runq = &pset->runq;
		thread = current_thread();
		processor = current_processor();
		if (	may_preempt									&&
				pset == processor->processor_set			&&
				thread->sched_pri < new_thread->sched_pri		) {
			/*
			 * XXX if we have a non-empty local runq or are
			 * XXX running a bound thread, ought to check for
			 * XXX another cpu running lower-pri thread to preempt.
			 */
			/*
			 *	Turn off first_quantum to allow csw.
			 */
			processor->first_quantum = FALSE;
			ast_on(ast_flags);
		}

		/*
		 * Put us on the end of the runq, if we are not preempting
		 * or the guy we are preempting.
		 */
		run_queue_enqueue(runq, new_thread, tail);
	}
	else {
		/*
		 *	Bound, can only run on bound processor.  Have to lock
		 *	processor here because it may not be the current one.
		 */
		if (processor->state == PROCESSOR_IDLE) {
			simple_lock(&processor->lock);
			pset = processor->processor_set;
			simple_lock(&pset->idle_lock);
			if (processor->state == PROCESSOR_IDLE) {
				queue_remove(&pset->idle_queue, processor,
								processor_t, processor_queue);
				pset->idle_count--;
				processor->next_thread = new_thread;
				processor->state = PROCESSOR_DISPATCHING;
				simple_unlock(&pset->idle_lock);
				simple_unlock(&processor->lock);
				if (processor->slot_num != cpu_number())
					machine_signal_idle(processor);
				mp_enable_preemption();
				return;
			}
			simple_unlock(&pset->idle_lock);
			simple_unlock(&processor->lock);
		}

		/*
		 *	Cause ast on processor if processor is on line, and the
		 *	currently executing thread is not bound to that processor
		 *	(bound threads have implicit priority over non-bound threads).
		 *	We also avoid sending the AST to the idle thread (if it got
		 *	scheduled in the window between the 'if' above and here),
		 *	since the idle_thread is bound.
		 */
		runq = &processor->runq;
		thread = current_thread();
		if (processor == current_processor()) {
			if (	thread->bound_processor == PROCESSOR_NULL	||
					thread->sched_pri < new_thread->sched_pri		) {
				processor->first_quantum = FALSE;
				ast_on(ast_flags);
			}

			run_queue_enqueue(runq, new_thread, tail);
		}
		else {
			thread = cpu_data[processor->slot_num].active_thread;
			if (	run_queue_enqueue(runq, new_thread, tail) == 0	&&
					processor->state != PROCESSOR_OFF_LINE			&&
					thread && thread->bound_processor != processor		)
				cause_ast_check(processor);
		}
	}

	mp_enable_preemption();
}
/*
 *	set_pri:
 *
 *	Set the priority of the specified thread to the specified
 *	priority.  This may cause the thread to change queues.
 *
 *	The thread *must* be locked by the caller.
 */
void
set_pri(
	thread_t	thread,
	int			pri,
	boolean_t	resched)
{
	register struct run_queue	*rq;

	rq = rem_runq(thread);
	assert(thread->runq == RUN_QUEUE_NULL);
	thread->sched_pri = pri;
	if (rq != RUN_QUEUE_NULL) {
		if (resched)
			thread_setrun(thread, TRUE, TAIL_Q);
		else
			run_queue_enqueue(rq, thread, TAIL_Q);
	}
}
/*
 *	rem_runq:
 *
 *	Remove a thread from its run queue.
 *	The run queue that the process was on is returned
 *	(or RUN_QUEUE_NULL if not on a run queue).  Thread *must* be locked
 *	before calling this routine.  Unusual locking protocol on runq
 *	field in thread structure makes this code interesting; see thread.h.
 */
run_queue_t
rem_runq(
	thread_t	thread)
{
	register struct run_queue	*rq;

	rq = thread->runq;
	/*
	 *	If rq is RUN_QUEUE_NULL, the thread will stay out of the
	 *	run_queues because the caller locked the thread.  Otherwise
	 *	the thread is on a runq, but could leave.
	 */
	if (rq != RUN_QUEUE_NULL) {
		simple_lock(&rq->lock);
		if (rq == thread->runq) {
			/*
			 *	Thread is in a runq and we have a lock on
			 *	that runq.
			 */
			thread_check(thread, rq);

			remqueue(&rq->queues[0], (queue_entry_t)thread);
			rq->count--;

			if (queue_empty(rq->queues + thread->sched_pri)) {
				/* update run queue status */
				if (thread->sched_pri != IDLEPRI)
					clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
				rq->highq = MAXPRI - ffsbit(rq->bitmap);
			}
			thread->runq = RUN_QUEUE_NULL;
			simple_unlock(&rq->lock);
		}
		else {
			/*
			 *	The thread left the runq before we could
			 *	lock the runq.  It is not on a runq now, and
			 *	can't move again because this routine's
			 *	caller locked the thread.
			 */
			assert(thread->runq == RUN_QUEUE_NULL);
			simple_unlock(&rq->lock);
			rq = RUN_QUEUE_NULL;
		}
	}

	return (rq);
}
/*
 *	choose_thread:
 *
 *	Choose a thread to execute.  The thread chosen is removed
 *	from its run queue.  Note that this requires only that the runq
 *	lock be held.
 *
 *	Strategy:
 *		Check processor runq first; if anything found, run it.
 *		Else check pset runq; if nothing found, return idle thread.
 *
 *	Second line of strategy is implemented by choose_pset_thread.
 *	This is only called on processor startup and when thread_block
 *	thinks there's something in the processor runq.
 */
thread_t
choose_thread(
	processor_t		myprocessor)
{
	thread_t				thread;
	register queue_t		q;
	register run_queue_t	runq;
	processor_set_t			pset;

	runq = &myprocessor->runq;
	pset = myprocessor->processor_set;

	simple_lock(&runq->lock);
	if (runq->count > 0 && runq->highq >= pset->runq.highq) {
		q = runq->queues + runq->highq;
#if	MACH_ASSERT
		if (!queue_empty(q)) {
#endif	/*MACH_ASSERT*/
			thread = (thread_t)q->next;
			((queue_entry_t)thread)->next->prev = q;
			q->next = ((queue_entry_t)thread)->next;
			thread->runq = RUN_QUEUE_NULL;
			runq->count--;
			if (queue_empty(q)) {
				if (runq->highq != IDLEPRI)
					clrbit(MAXPRI - runq->highq, runq->bitmap);
				runq->highq = MAXPRI - ffsbit(runq->bitmap);
			}
			simple_unlock(&runq->lock);
			return (thread);
#if	MACH_ASSERT
		}
		panic("choose_thread");
#endif	/*MACH_ASSERT*/
		/*NOTREACHED*/
	}

	simple_unlock(&runq->lock);
	simple_lock(&pset->runq.lock);
	return (choose_pset_thread(myprocessor, pset));
}
/*
 *	choose_pset_thread:  choose a thread from processor_set runq or
 *	set processor idle and choose its idle thread.
 *
 *	Caller must be at splsched and have a lock on the runq.  This
 *	lock is released by this routine.  myprocessor is always the current
 *	processor, and pset must be its processor set.
 *	This routine chooses and removes a thread from the runq if there
 *	is one (and returns it), else it sets the processor idle and
 *	returns its idle thread.
 */
thread_t
choose_pset_thread(
	register processor_t	myprocessor,
	processor_set_t			pset)
{
	register run_queue_t	runq;
	register thread_t		thread;
	register queue_t		q;

	runq = &pset->runq;
	if (runq->count > 0) {
		q = runq->queues + runq->highq;
#if	MACH_ASSERT
		if (!queue_empty(q)) {
#endif	/*MACH_ASSERT*/
			thread = (thread_t)q->next;
			((queue_entry_t)thread)->next->prev = q;
			q->next = ((queue_entry_t)thread)->next;
			thread->runq = RUN_QUEUE_NULL;
			runq->count--;
			if (queue_empty(q)) {
				if (runq->highq != IDLEPRI)
					clrbit(MAXPRI - runq->highq, runq->bitmap);
				runq->highq = MAXPRI - ffsbit(runq->bitmap);
			}
			simple_unlock(&runq->lock);
			return (thread);
#if	MACH_ASSERT
		}
		panic("choose_pset_thread");
#endif	/*MACH_ASSERT*/
		/*NOTREACHED*/
	}
	simple_unlock(&runq->lock);

	/*
	 *	Nothing is runnable, so set this processor idle if it
	 *	was running.  If it was in an assignment or shutdown,
	 *	leave it alone.  Return its idle thread.
	 */
	simple_lock(&pset->idle_lock);
	if (myprocessor->state == PROCESSOR_RUNNING) {
		myprocessor->state = PROCESSOR_IDLE;
		/*
		 *	XXX Until it goes away, put master on end of queue, others
		 *	XXX on front so master gets used last.
		 */
		if (myprocessor == master_processor)
			queue_enter(&(pset->idle_queue), myprocessor,
							processor_t, processor_queue);
		else
			queue_enter_first(&(pset->idle_queue), myprocessor,
							processor_t, processor_queue);

		pset->idle_count++;
	}
	simple_unlock(&pset->idle_lock);

	return (myprocessor->idle_thread);
}
/*
 *	no_dispatch_count counts number of times processors go non-idle
 *	without being dispatched.  This should be very rare.
 */
int	no_dispatch_count = 0;
/*
 *	This is the idle thread, which just looks for other threads
 *	to execute.
 */
void
idle_thread_continue(void)
{
	register processor_t		myprocessor;
	register volatile thread_t	*threadp;
	register volatile int		*gcount;
	register volatile int		*lcount;
	register thread_t			new_thread;
	register int				state;
	register processor_set_t	pset;
	int							mycpu;

	mycpu = cpu_number();
	myprocessor = current_processor();
	threadp = (volatile thread_t *) &myprocessor->next_thread;
	lcount = (volatile int *) &myprocessor->runq.count;

	for (;;) {
#ifdef	MARK_CPU_IDLE
		MARK_CPU_IDLE(mycpu);
#endif	/* MARK_CPU_IDLE */

		gcount = (volatile int *)&myprocessor->processor_set->runq.count;

		(void)splsched();
		while (	(*threadp == (volatile thread_t)THREAD_NULL)	&&
				(*gcount == 0) && (*lcount == 0)				) {

			/* check for ASTs while we wait */

			if (need_ast[mycpu] &~ (AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT)) {
				/* don't allow scheduling ASTs */
				need_ast[mycpu] &= ~(AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT);
				ast_taken(FALSE, AST_ALL, TRUE);	/* back at spllo */
			}
			else
				(void)spllo();

			machine_clock_assist();

			(void)splsched();
		}

#ifdef	MARK_CPU_ACTIVE
		(void)spllo();
		MARK_CPU_ACTIVE(mycpu);
		(void)splsched();
#endif	/* MARK_CPU_ACTIVE */

		/*
		 *	This is not a switch statement to avoid the
		 *	bounds checking code in the common case.
		 */
		pset = myprocessor->processor_set;
		simple_lock(&pset->idle_lock);
retry:
		state = myprocessor->state;
		if (state == PROCESSOR_DISPATCHING) {
			/*
			 *	Commmon case -- cpu dispatched.
			 */
			new_thread = *threadp;
			*threadp = (volatile thread_t) THREAD_NULL;
			myprocessor->state = PROCESSOR_RUNNING;
			simple_unlock(&pset->idle_lock);

			thread_lock(new_thread);
			simple_lock(&myprocessor->runq.lock);
			simple_lock(&pset->runq.lock);
			if (	myprocessor->runq.highq > new_thread->sched_pri		||
					pset->runq.highq > new_thread->sched_pri				) {
				simple_unlock(&pset->runq.lock);
				simple_unlock(&myprocessor->runq.lock);

				if (new_thread->bound_processor != PROCESSOR_NULL)
					run_queue_enqueue(&myprocessor->runq, new_thread, HEAD_Q);
				else
					run_queue_enqueue(&pset->runq, new_thread, HEAD_Q);
				thread_unlock(new_thread);

				counter(c_idle_thread_block++);
				thread_block(idle_thread_continue);
			}
			else {
				simple_unlock(&pset->runq.lock);
				simple_unlock(&myprocessor->runq.lock);

				/*
				 *	set up quantum for new thread.
				 */
				if (new_thread->policy & (POLICY_RR|POLICY_FIFO))
					myprocessor->quantum = new_thread->unconsumed_quantum;
				else
					myprocessor->quantum = pset->set_quantum;
				thread_unlock(new_thread);

				myprocessor->first_quantum = TRUE;
				counter(c_idle_thread_handoff++);
				thread_run(myprocessor->idle_thread,
									idle_thread_continue, new_thread);
			}
		}
		else
		if (state == PROCESSOR_IDLE) {
			if (myprocessor->state != PROCESSOR_IDLE) {
				/*
				 *	Something happened, try again.
				 */
				goto retry;
			}
			/*
			 *	Processor was not dispatched (Rare).
			 *	Set it running again.
			 */
			no_dispatch_count++;
			pset->idle_count--;
			queue_remove(&pset->idle_queue, myprocessor,
							processor_t, processor_queue);
			myprocessor->state = PROCESSOR_RUNNING;
			simple_unlock(&pset->idle_lock);

			counter(c_idle_thread_block++);
			thread_block(idle_thread_continue);
		}
		else
		if (	state == PROCESSOR_ASSIGN		||
				state == PROCESSOR_SHUTDOWN			) {
			/*
			 *	Changing processor sets, or going off-line.
			 *	Release next_thread if there is one.  Actual
			 *	thread to run is on a runq.
			 */
			if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
				*threadp = (volatile thread_t) THREAD_NULL;
				simple_unlock(&pset->idle_lock);
				thread_lock(new_thread);
				thread_setrun(new_thread, FALSE, TAIL_Q);
				thread_unlock(new_thread);
			}
			else
				simple_unlock(&pset->idle_lock);

			counter(c_idle_thread_block++);
			thread_block(idle_thread_continue);
		}
		else {
			simple_unlock(&pset->idle_lock);
			printf("Bad processor state %d (Cpu %d)\n",
						cpu_state(mycpu), mycpu);
			panic("idle_thread");
		}

		(void)spllo();
	}
}
void
idle_thread(void)
{
	thread_t	self = current_thread();
	spl_t		s;

	stack_privilege(self);
	thread_swappable(current_act(), FALSE);

	s = splsched();
	thread_lock(self);

	self->priority = IDLEPRI;
	self->sched_pri = self->priority;

	thread_unlock(self);
	splx(s);

	counter(c_idle_thread_block++);
	thread_block((void(*)(void))0);
	idle_thread_continue();
	/*NOTREACHED*/
}
static AbsoluteTime		sched_tick_interval, sched_tick_deadline;
/*
 *	sched_tick_thread:
 *
 *	Update the priorities of all threads periodically.
 */
void
sched_tick_thread_continue(void)
{
	AbsoluteTime	abstime;
#if	SIMPLE_CLOCK
	int				new_usec;
#endif	/* SIMPLE_CLOCK */

	clock_get_uptime(&abstime);

	sched_tick++;		/* age usage one more time */
#if	SIMPLE_CLOCK
	/*
	 *	Compensate for clock drift.  sched_usec is an
	 *	exponential average of the number of microseconds in
	 *	a second.  It decays in the same fashion as cpu_usage.
	 */
	new_usec = sched_usec_elapsed();
	sched_usec = (5*sched_usec + 3*new_usec)/8;
#endif	/* SIMPLE_CLOCK */

	/*
	 *	Compute the scheduler load factors.
	 */
	compute_mach_factor();

	/*
	 *	Scan the run queues for runnable threads that need to
	 *	have their priorities recalculated.
	 */
	do_thread_scan();

	clock_deadline_for_periodic_event(sched_tick_interval, abstime,
												&sched_tick_deadline);

	assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
	thread_set_timer_deadline(sched_tick_deadline);
	thread_block(sched_tick_thread_continue);
	/*NOTREACHED*/
}
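/*
 *	Worked example (illustrative): the SIMPLE_CLOCK drift compensation
 *	above is a 5/8-3/8 exponential average.  If sched_usec is currently
 *	1000000 and a slow clock makes sched_usec_elapsed() report 992000,
 *
 *		sched_usec = (5*1000000 + 3*992000) / 8 = 997000
 *
 *	so the estimate moves 3/8 of the way toward the new measurement each
 *	time the sched_tick thread runs.
 */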
void
sched_tick_thread(void)
{
	thread_t	self = current_thread();
	natural_t	rate;
	spl_t		s;

	stack_privilege(self);
	thread_swappable(self->top_act, FALSE);

	s = splsched();
	thread_lock(self);

	self->priority = MAXPRI_STANDARD;
	self->sched_pri = self->priority;

	thread_unlock(self);
	splx(s);

	rate = (1000 >> SCHED_TICK_SHIFT);
	clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC,
													&sched_tick_interval);
	clock_get_uptime(&sched_tick_deadline);

	thread_block(sched_tick_thread_continue);
	/*NOTREACHED*/
}
#define	MAX_STUCK_THREADS	128

/*
 *	do_thread_scan: scan for stuck threads.  A thread is stuck if
 *	it is runnable but its priority is so low that it has not
 *	run for several seconds.  Its priority should be higher, but
 *	won't be until it runs and calls update_priority.  The scanner
 *	finds these threads and does the updates.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	thread ids away in an array (takes out references for them).
 *	Pass two does the priority updates.  This is necessary because
 *	the run queue lock is required for the candidate scan, but
 *	cannot be held during updates [set_pri will deadlock].
 *
 *	Array length should be enough so that restart isn't necessary,
 *	but restart logic is included.  Does not scan processor runqs.
 */
thread_t	stuck_threads[MAX_STUCK_THREADS];
int			stuck_count = 0;
/*
 *	do_runq_scan is the guts of pass 1.  It scans a runq for
 *	stuck threads.  A boolean is returned indicating whether
 *	a retry is needed.
 */
boolean_t
do_runq_scan(
	run_queue_t	runq)
{
	register queue_t	q;
	register thread_t	thread;
	register int		count;
	spl_t				s;
	boolean_t			result = FALSE;

	s = splsched();
	simple_lock(&runq->lock);
	if ((count = runq->count) > 0) {
		q = runq->queues + runq->highq;
		while (count > 0) {
			queue_iterate(q, thread, thread_t, links) {
				if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
						thread->policy == POLICY_TIMESHARE		) {
					if (thread->sched_stamp != sched_tick) {
						/*
						 *	Stuck, save its id for later.
						 */
						if (stuck_count == MAX_STUCK_THREADS) {
							/*
							 *	!@#$% No more room.
							 */
							simple_unlock(&runq->lock);
							splx(s);

							return (TRUE);
						}

						/*
						 * Inline version of thread_reference
						 * XXX - lock ordering problem here:
						 * thread locks should be taken before runq
						 * locks: just try and get the thread's locks
						 * and ignore this thread if we fail, we might
						 * have better luck next time.
						 */
						if (simple_lock_try(&thread->lock)) {
							thread->ref_count++;
							thread_unlock(thread);
							stuck_threads[stuck_count++] = thread;
						}
						else
							result = TRUE;
					}
				}

				count--;
			}

			q--;
		}
	}
	simple_unlock(&runq->lock);
	splx(s);

	return (result);
}
boolean_t	thread_scan_enabled = TRUE;

void
do_thread_scan(void)
{
	register boolean_t			restart_needed = FALSE;
	register thread_t			thread;
	register processor_set_t	pset = &default_pset;
	register processor_t		processor;
	spl_t						s;

	if (!thread_scan_enabled)
		return;

	do {
		restart_needed = do_runq_scan(&pset->runq);
		if (!restart_needed) {
			simple_lock(&pset->processors_lock);
			processor = (processor_t)queue_first(&pset->processors);
			while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
				if (restart_needed = do_runq_scan(&processor->runq))
					break;

				processor = (processor_t)queue_next(&processor->processors);
			}
			simple_unlock(&pset->processors_lock);
		}

		/*
		 *	Ok, we now have a collection of candidates -- fix them.
		 */
		while (stuck_count > 0) {
			thread = stuck_threads[--stuck_count];
			stuck_threads[stuck_count] = THREAD_NULL;
			s = splsched();
			thread_lock(thread);
			if (thread->policy == POLICY_TIMESHARE) {
				if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
						thread->sched_stamp != sched_tick		)
					update_priority(thread);
			}
			thread_unlock(thread);
			splx(s);
			thread_deallocate(thread);
		}
	} while (restart_needed);
}
/*
 *	Just in case someone doesn't use the macro
 */
#undef	thread_wakeup

void
thread_wakeup(
	event_t		x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}
boolean_t
thread_runnable(
	thread_t	thread)
{
	sched_policy_t	*policy;

	/* Ask sched policy if thread is runnable */
	policy = policy_id_to_sched_policy(thread->policy);

	return ((policy != SCHED_POLICY_NULL)?
			policy->sp_ops.sp_thread_runnable(policy, thread) : FALSE);
}
void
dump_processor_set(
	processor_set_t	ps)
{
	printf("processor_set: %08x\n",ps);
	printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
		ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count);
	printf("processors: %08x %08x, processor_count: 0x%x\n",
		ps->processors.next,ps->processors.prev,ps->processor_count);
	printf("tasks: %08x %08x, task_count: 0x%x\n",
		ps->tasks.next,ps->tasks.prev,ps->task_count);
	printf("threads: %08x %08x, thread_count: 0x%x\n",
		ps->threads.next,ps->threads.prev,ps->thread_count);
	printf("ref_count: 0x%x, active: %x\n",
		ps->ref_count,ps->active);
	printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self);
	printf("max_priority: 0x%x, policies: 0x%x, set_quantum: 0x%x\n",
		ps->max_priority, ps->policies, ps->set_quantum);
}
#define processor_state(s)	(((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])

void
dump_processor(
	processor_t	p)
{
	char	*states[] = {"OFF_LINE","RUNNING","IDLE","DISPATCHING",
						 "ASSIGN","SHUTDOWN"};

	printf("processor: %08x\n",p);
	printf("processor_queue: %08x %08x\n",
		p->processor_queue.next,p->processor_queue.prev);
	printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
		processor_state(p->state), p->next_thread, p->idle_thread);
	printf("quantum: %u, first_quantum: %x, last_quantum: %u\n",
		p->quantum, p->first_quantum, p->last_quantum);
	printf("processor_set: %08x, processor_set_next: %08x\n",
		p->processor_set, p->processor_set_next);
	printf("processors: %08x %08x\n", p->processors.next,p->processors.prev);
	printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num);
}
void
dump_run_queue_struct(
	run_queue_t	rq)
{
	char	dump_buf[80];
	int		i;

	for( i=0; i < NRQS; ) {
		int	j;

		printf("%6s",(i==0)?"runq:":"");
		for( j=0; (j<8) && (i < NRQS); j++,i++ ) {
			if( rq->queues[i].next == &rq->queues[i] )
				printf( " --------");
			else
				printf(" %08x",rq->queues[i].next);
		}
		printf("\n");
	}
	for( i=0; i < NRQBM; ) {
		register unsigned int	mask;
		char					*d = dump_buf;

		mask = ~0;
		mask &= ~(mask>>1);		/* leave only the high-order bit set */

		do {
			*d++ = ((rq->bitmap[i]&mask)?'r':'e');
			mask >>= 1;
		} while( mask );
		*d = '\0';
		printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf);
		i++;
	}
	printf("highq: 0x%x, count: %u\n", rq->highq, rq->count);
}
void
dump_run_queues(
	run_queue_t	rq)
{
	register queue_t		q1;
	register int			i;
	register queue_entry_t	e;
	int						t_cnt;

	q1 = rq->queues;
	for (i = 0; i < NRQS; i++) {
		if (q1->next != q1) {
			printf("[%u]:", i);
			for (t_cnt=0, e = q1->next; e != q1; e = e->next) {
				printf("\t0x%08x",e);
				if( (t_cnt = ++t_cnt%4) == 0 )
					printf("\n");
			}
			if (t_cnt)
				printf("\n");
		}
		else
			printf("[%u]\t<empty>\n",i);
		q1++;
	}
}
void
checkrq(
	run_queue_t	rq,
	char		*msg)
{
	register queue_t		q1;
	register int			i, j;
	register queue_entry_t	e;
	register int			highq;

	highq = IDLEPRI;
	j = 0;
	q1 = &rq->queues[MAXPRI];
	for (i = MAXPRI; i >= 0; i--) {
		if (q1->next == q1) {
			if (q1->prev != q1)
				panic("checkrq: empty at %s", msg);
		}
		else {
			if (highq == IDLEPRI)
				highq = i;

			for (e = q1->next; e != q1; e = e->next) {
				j++;
				if (e->next->prev != e)
					panic("checkrq-2 at %s", msg);
				if (e->prev->next != e)
					panic("checkrq-3 at %s", msg);
			}
		}
		q1--;
	}
	if (j != rq->count)
		panic("checkrq: count wrong at %s", msg);
	if (rq->count != 0 && highq > rq->highq)
		panic("checkrq: highq wrong at %s", msg);
}
void
thread_check(
	register thread_t		thread,
	register run_queue_t	rq)
{
	register int			whichq = thread->sched_pri;
	register queue_entry_t	queue, entry;

	if (whichq < MINPRI || whichq > MAXPRI)
		panic("thread_check: bad pri");

	if (whichq != thread->whichq)
		panic("thread_check: whichq");

	queue = &rq->queues[whichq];
	entry = queue_first(queue);
	while (!queue_end(queue, entry)) {
		if (entry == (queue_entry_t)thread)
			return;

		entry = queue_next(entry);
	}

	panic("thread_check: not found");
}
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf		kdbprintf
extern int		db_indent;
void			db_sched(void);

void
db_sched(void)
{
	iprintf("Scheduling Statistics:\n");
	db_indent += 2;

	iprintf("Thread invocations:  csw %d same %d\n",
		c_thread_invoke_csw, c_thread_invoke_same);
#if	MACH_COUNTERS
	iprintf("Thread block:  calls %d\n",
		c_thread_block_calls);
	iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
		c_idle_thread_handoff,
		c_idle_thread_block, no_dispatch_count);
	iprintf("Sched thread blocks:  %d\n", c_sched_thread_block);
#endif	/* MACH_COUNTERS */

	db_indent -= 2;
}
#endif	/* MACH_KDB */