1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_FREE_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: sched_prim.c
54 * Author: Avadis Tevanian, Jr.
55 * Date: 1986
56 *
57 * Scheduling primitives
58 *
59 */
60
61#include <debug.h>
62#include <cpus.h>
63#include <mach_kdb.h>
64#include <simple_clock.h>
65#include <power_save.h>
66#include <task_swapper.h>
67
68#include <ddb/db_output.h>
69#include <mach/machine.h>
70#include <machine/machine_routines.h>
71#include <machine/sched_param.h>
72#include <kern/ast.h>
73#include <kern/clock.h>
74#include <kern/counters.h>
75#include <kern/cpu_number.h>
76#include <kern/cpu_data.h>
77#include <kern/etap_macros.h>
78#include <kern/lock.h>
79#include <kern/macro_help.h>
80#include <kern/machine.h>
81#include <kern/misc_protos.h>
82#include <kern/processor.h>
83#include <kern/queue.h>
84#include <kern/sched.h>
85#include <kern/sched_prim.h>
86#include <kern/syscall_subr.h>
87#include <kern/task.h>
88#include <kern/thread.h>
89#include <kern/thread_swap.h>
90#include <vm/pmap.h>
91#include <vm/vm_kern.h>
92#include <vm/vm_map.h>
93#include <mach/policy.h>
94#include <mach/sync_policy.h>
95#include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
96#include <sys/kdebug.h>
97
98#if TASK_SWAPPER
99#include <kern/task_swap.h>
100extern int task_swap_on;
101#endif /* TASK_SWAPPER */
102
103extern int hz;
104
105#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
106int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
107
108#define MAX_UNSAFE_QUANTA 800
109int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
110
111#define MAX_POLL_QUANTA 2
112int max_poll_quanta = MAX_POLL_QUANTA;
113
114#define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
115int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
116
117#define NO_KERNEL_PREEMPT 0
118#define KERNEL_PREEMPT 1
119int kernel_preemption_mode = KERNEL_PREEMPT;
120
121uint32_t std_quantum_us;
122
123unsigned sched_tick;
124
125#if SIMPLE_CLOCK
126int sched_usec;
127#endif /* SIMPLE_CLOCK */
128
129/* Forwards */
130void thread_continue(thread_t);
131
132void wait_queues_init(void);
133
134void set_pri(
135 thread_t thread,
136 int pri,
137 int resched);
138
139thread_t choose_pset_thread(
140 processor_t myprocessor,
141 processor_set_t pset);
142
143thread_t choose_thread(
144 processor_t myprocessor);
145
146int run_queue_enqueue(
147 run_queue_t runq,
148 thread_t thread,
149 boolean_t tail);
150
151void idle_thread_continue(void);
152void do_thread_scan(void);
153
154void clear_wait_internal(
155 thread_t thread,
156 int result);
157
158#if DEBUG
159void dump_run_queues(
160 run_queue_t rq);
161void dump_run_queue_struct(
162 run_queue_t rq);
163void dump_processor(
164 processor_t p);
165void dump_processor_set(
166 processor_set_t ps);
167
168void checkrq(
169 run_queue_t rq,
170 char *msg);
171
172void thread_check(
173 thread_t thread,
174 run_queue_t runq);
175
176static
177boolean_t thread_runnable(
178 thread_t thread);
179
180#endif /*DEBUG*/
181
182
183/*
184 * State machine
185 *
186 * states are combinations of:
187 * R running
188 * W waiting (or on wait queue)
189 * N non-interruptible
190 * O swapped out
191 * I being swapped in
192 *
193 * init action
194 * assert_wait thread_block clear_wait swapout swapin
195 *
196 * R RW, RWN R; setrun - -
197 * RN RWN RN; setrun - -
198 *
199 * RW W R -
200 * RWN WN RN -
201 *
202 * W R; setrun WO
203 * WN RN; setrun -
204 *
205 * RO - - R
206 *
207 */
208
209/*
210 * Waiting protocols and implementation:
211 *
212 * Each thread may be waiting for exactly one event; this event
213 * is set using assert_wait(). That thread may be awakened either
214 * by performing a thread_wakeup_prim() on its event,
215 * or by directly waking that thread up with clear_wait().
216 *
217 * The implementation of wait events uses a hash table. Each
218 * bucket is a queue of threads having the same hash function
219 * value; the chain for the queue (linked list) is the run queue
220 * field. [It is not possible to be waiting and runnable at the
221 * same time.]
222 *
223 * Locks on both the thread and on the hash buckets govern the
224 * wait event field and the queue chain field. Because wakeup
225 * operations only have the event as an argument, the event hash
226 * bucket must be locked before any thread.
227 *
228 * Scheduling operations may also occur at interrupt level; therefore,
229 * interrupts below splsched() must be prevented when holding
230 * thread or hash bucket locks.
231 *
232 * The wait event hash table declarations are as follows:
233 */
234
235#define NUMQUEUES 59
236
237struct wait_queue wait_queues[NUMQUEUES];
238
239#define wait_hash(event) \
240 ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
241
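/*
 * Illustrative sketch, not part of the original source: how a waiter and a
 * waker meet through the event hash above.  Both sides hash the same event
 * pointer with wait_hash(), so they operate on the same wait_queues[] bucket.
 * The identifiers example_object and example_flag are hypothetical.
 */
#if 0
struct example_object {
	int	example_flag;
};

void
example_wait_for_flag(
	struct example_object	*obj)
{
	assert_wait((event_t)&obj->example_flag, THREAD_UNINT);
	thread_block((void (*)(void)) 0);	/* resumes after a wakeup on the event */
}

void
example_post_flag(
	struct example_object	*obj)
{
	obj->example_flag = TRUE;
	/* wakes all threads asserted on this event in the hashed bucket */
	thread_wakeup_prim((event_t)&obj->example_flag, FALSE, THREAD_AWAKENED);
}
#endif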
242void
243sched_init(void)
244{
245 /*
246 * Calculate the timeslicing quantum
247 * in us.
248 */
249 if (default_preemption_rate < 1)
250 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
251 std_quantum_us = (1000 * 1000) / default_preemption_rate;
252
253 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
254
255 wait_queues_init();
256 pset_sys_bootstrap(); /* initialize processor mgmt. */
257 processor_action();
258 sched_tick = 0;
259#if SIMPLE_CLOCK
260 sched_usec = 0;
261#endif /* SIMPLE_CLOCK */
262 ast_init();
263}
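/*
 * Worked example, added for illustration: with the default preemption rate
 * of 100 per second, the calculation above yields
 *	std_quantum_us = (1000 * 1000) / 100 = 10000 us,
 * i.e. a standard timeslicing quantum of 10 ms.
 */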
264
265void
266wait_queues_init(void)
267{
268 register int i;
269
270 for (i = 0; i < NUMQUEUES; i++) {
271 wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
272 }
273}
274
275/*
276 * Thread wait timer expiration.
277 */
278void
279thread_timer_expire(
280 timer_call_param_t p0,
281 timer_call_param_t p1)
282{
283 thread_t thread = p0;
284 spl_t s;
285
286 s = splsched();
287 wake_lock(thread);
288 if (--thread->wait_timer_active == 1) {
289 if (thread->wait_timer_is_set) {
290 thread->wait_timer_is_set = FALSE;
291 thread_lock(thread);
292 if (thread->active)
293 clear_wait_internal(thread, THREAD_TIMED_OUT);
294 thread_unlock(thread);
295 }
296 }
297 else
298 if (thread->wait_timer_active == 0)
299 thread_wakeup_one(&thread->wait_timer_active);
300 wake_unlock(thread);
301 splx(s);
302}
303
304/*
305 * thread_set_timer:
306 *
307 * Set a timer for the current thread, if the thread
308 * is ready to wait. Must be called between assert_wait()
309 * and thread_block().
310 */
311void
312thread_set_timer(
313 uint32_t interval,
314 uint32_t scale_factor)
315{
316 thread_t thread = current_thread();
317 uint64_t deadline;
318 spl_t s;
319
320 s = splsched();
321 wake_lock(thread);
322 thread_lock(thread);
323 if ((thread->state & TH_WAIT) != 0) {
324 clock_interval_to_deadline(interval, scale_factor, &deadline);
325 timer_call_enter(&thread->wait_timer, deadline);
326 assert(!thread->wait_timer_is_set);
327 thread->wait_timer_active++;
328 thread->wait_timer_is_set = TRUE;
329 }
330 thread_unlock(thread);
331 wake_unlock(thread);
332 splx(s);
333}
334
335void
336thread_set_timer_deadline(
337 uint64_t deadline)
338{
339 thread_t thread = current_thread();
340 spl_t s;
341
342 s = splsched();
343 wake_lock(thread);
344 thread_lock(thread);
345 if ((thread->state & TH_WAIT) != 0) {
346 timer_call_enter(&thread->wait_timer, deadline);
347 assert(!thread->wait_timer_is_set);
348 thread->wait_timer_active++;
349 thread->wait_timer_is_set = TRUE;
350 }
351 thread_unlock(thread);
352 wake_unlock(thread);
353 splx(s);
354}
355
356void
357thread_cancel_timer(void)
358{
359 thread_t thread = current_thread();
360 spl_t s;
361
362 s = splsched();
363 wake_lock(thread);
364 if (thread->wait_timer_is_set) {
365 if (timer_call_cancel(&thread->wait_timer))
366 thread->wait_timer_active--;
367 thread->wait_timer_is_set = FALSE;
368 }
369 wake_unlock(thread);
370 splx(s);
371}
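/*
 * Illustrative sketch, not part of the original source: the timed-wait
 * protocol described above.  thread_set_timer() must be called between
 * assert_wait() and thread_block(); the thread cancels the timer once it
 * resumes.  example_timer_event is hypothetical.
 */
#if 0
static int example_timer_event;

void
example_timed_wait(void)
{
	assert_wait((event_t)&example_timer_event, THREAD_UNINT);
	thread_set_timer(100, 1000*NSEC_PER_USEC);	/* fire after 100 ms */
	thread_block((void (*)(void)) 0);

	/* wait_result is THREAD_TIMED_OUT if the timer expired first */
	thread_cancel_timer();
}
#endif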
372
373/*
374 * Set up thread timeout element when thread is created.
375 */
376void
377thread_timer_setup(
378 thread_t thread)
379{
380 extern void thread_depress_expire(
381 timer_call_param_t p0,
382 timer_call_param_t p1);
383
384 timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
385 thread->wait_timer_is_set = FALSE;
386 thread->wait_timer_active = 1;
387
388 timer_call_setup(&thread->depress_timer, thread_depress_expire, thread);
389 thread->depress_timer_active = 1;
390
391 thread->ref_count++;
392}
393
394void
395thread_timer_terminate(void)
396{
397 thread_t thread = current_thread();
398 spl_t s;
399
400 s = splsched();
401 wake_lock(thread);
402 if (thread->wait_timer_is_set) {
403 if (timer_call_cancel(&thread->wait_timer))
404 thread->wait_timer_active--;
405 thread->wait_timer_is_set = FALSE;
406 }
407
408 thread->wait_timer_active--;
409
410 while (thread->wait_timer_active > 0) {
411 assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
412 wake_unlock(thread);
413 splx(s);
414
415 thread_block((void (*)(void)) 0);
416
417 s = splsched();
418 wake_lock(thread);
419 }
420
421 thread->depress_timer_active--;
422
423 while (thread->depress_timer_active > 0) {
424 assert_wait((event_t)&thread->depress_timer_active, THREAD_UNINT);
425 wake_unlock(thread);
426 splx(s);
427
428 thread_block((void (*)(void)) 0);
429
430 s = splsched();
431 wake_lock(thread);
432 }
433
434 wake_unlock(thread);
435 splx(s);
436
437 thread_deallocate(thread);
438}
439
440/*
441 * Routine: thread_go_locked
442 * Purpose:
443 * Start a thread running.
444 * Conditions:
445 * thread lock held, IPC locks may be held.
446 * thread must have been pulled from wait queue under same lock hold.
447 */
448void
449thread_go_locked(
450 thread_t thread,
451 int result)
452{
453 assert(thread->at_safe_point == FALSE);
454 assert(thread->wait_event == NO_EVENT);
455 assert(thread->wait_queue == WAIT_QUEUE_NULL);
456
457 if (thread->state & TH_WAIT) {
458 thread->state &= ~(TH_WAIT|TH_UNINT);
459 if (!(thread->state & TH_RUN)) {
460 thread->state |= TH_RUN;
461
462 _mk_sp_thread_unblock(thread);
463 }
464
465 thread->wait_result = result;
466 }
467}
468
469void
470thread_mark_wait_locked(
471 thread_t thread,
472 int interruptible)
473{
474
475 assert(thread == current_thread());
476
477 thread->wait_result = -1; /* JMM - Needed for non-assert kernel */
478 thread->state |= (interruptible && thread->interruptible) ?
479 TH_WAIT : (TH_WAIT | TH_UNINT);
480 thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible);
481 thread->sleep_stamp = sched_tick;
482}
483
484
485
486/*
487 * Routine: assert_wait_timeout
488 * Purpose:
489 * Assert that the thread intends to block,
490 * waiting for a timeout (no user known event).
491 */
492unsigned int assert_wait_timeout_event;
493
494void
495assert_wait_timeout(
496 mach_msg_timeout_t msecs,
497 int interruptible)
498{
499 spl_t s;
500
501 assert_wait((event_t)&assert_wait_timeout_event, interruptible);
502 thread_set_timer(msecs, 1000*NSEC_PER_USEC);
503}
504
505/*
506 * Check to see if an assert wait is possible, without actually doing one.
507 * This is used by debug code in locks and elsewhere to verify that it is
508 * always OK to block when trying to take a blocking lock (since waiting
509 * for the actual assert_wait to catch the case may make it hard to detect
510 * this case).
511 */
512boolean_t
513assert_wait_possible(void)
514{
515
516 thread_t thread;
517 extern unsigned int debug_mode;
518
519#if DEBUG
520 if(debug_mode) return TRUE; /* Always succeed in debug mode */
521#endif
522
523 thread = current_thread();
524
525 return (thread == NULL || wait_queue_assert_possible(thread));
526}
527
528/*
529 * assert_wait:
530 *
531 * Assert that the current thread is about to go to
532 * sleep until the specified event occurs.
533 */
534void
535assert_wait(
536 event_t event,
537 int interruptible)
538{
539 register wait_queue_t wq;
540 register int index;
541
542 assert(event != NO_EVENT);
543 assert(assert_wait_possible());
544
545 index = wait_hash(event);
546 wq = &wait_queues[index];
547 (void)wait_queue_assert_wait(wq,
548 event,
549 interruptible);
550}
551
552
553/*
554 * thread_[un]stop(thread)
555 * Once a thread has blocked interruptibly (via assert_wait) prevent
556 * it from running until thread_unstop.
557 *
558 * If someone else has already stopped the thread, wait for the
559 * stop to be cleared, and then stop it again.
560 *
561 * Return FALSE if interrupted.
562 *
563 * NOTE: thread_hold/thread_suspend should be called on the activation
564 * before calling thread_stop. TH_SUSP is only recognized when
565 * a thread blocks and only prevents clear_wait/thread_wakeup
566 * from restarting an interruptible wait. The wake_active flag is
567 * used to indicate that someone is waiting on the thread.
568 */
569boolean_t
570thread_stop(
571 thread_t thread)
572{
573 spl_t s;
574
575 s = splsched();
576 wake_lock(thread);
577
578 while (thread->state & TH_SUSP) {
579 int wait_result;
580
581 thread->wake_active = TRUE;
582 assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
583 wake_unlock(thread);
584 splx(s);
585
586 wait_result = thread_block((void (*)(void)) 0);
587 if (wait_result != THREAD_AWAKENED)
588 return (FALSE);
589
590 s = splsched();
591 wake_lock(thread);
592 }
593 thread_lock(thread);
594 thread->state |= TH_SUSP;
595 thread_unlock(thread);
596
597 wake_unlock(thread);
598 splx(s);
599
600 return (TRUE);
601}
602
603/*
604 * Clear TH_SUSP and if the thread has been stopped and is now runnable,
605 * put it back on the run queue.
606 */
607void
608thread_unstop(
609 thread_t thread)
610{
611 spl_t s;
612
613 s = splsched();
614 wake_lock(thread);
615 thread_lock(thread);
616
617 if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP/*|TH_UNINT*/)) == TH_SUSP) {
618 thread->state &= ~TH_SUSP;
619 thread->state |= TH_RUN;
620
621 _mk_sp_thread_unblock(thread);
622 }
623 else
624 if (thread->state & TH_SUSP) {
625 thread->state &= ~TH_SUSP;
626
627 if (thread->wake_active) {
628 thread->wake_active = FALSE;
629 thread_unlock(thread);
630 wake_unlock(thread);
631 splx(s);
632 thread_wakeup((event_t)&thread->wake_active);
633
634 return;
635 }
636 }
637
638 thread_unlock(thread);
639 wake_unlock(thread);
640 splx(s);
641}
642
643/*
644 * Wait for the thread's RUN bit to clear
645 */
646boolean_t
647thread_wait(
648 thread_t thread)
649{
650 spl_t s;
651
652 s = splsched();
653 wake_lock(thread);
654
655 while (thread->state & (TH_RUN/*|TH_UNINT*/)) {
656 int wait_result;
657
658 if (thread->last_processor != PROCESSOR_NULL)
659 cause_ast_check(thread->last_processor);
660
661 thread->wake_active = TRUE;
662 assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
663 wake_unlock(thread);
664 splx(s);
665
666 wait_result = thread_block((void (*)(void))0);
667 if (wait_result != THREAD_AWAKENED)
668 return FALSE;
669
670 s = splsched();
671 wake_lock(thread);
672 }
673
674 wake_unlock(thread);
675 splx(s);
676
677 return (TRUE);
678}
679
680
681/*
682 * thread_stop_wait(thread)
683 * Stop the thread then wait for it to block interruptibly
684 */
685boolean_t
686thread_stop_wait(
687 thread_t thread)
688{
689 if (thread_stop(thread)) {
690 if (thread_wait(thread))
691 return (TRUE);
692
693 thread_unstop(thread);
694 }
695
696 return (FALSE);
697}
698
699
700/*
701 * Routine: clear_wait_internal
702 *
703 * Clear the wait condition for the specified thread.
704 * Start the thread executing if that is appropriate.
705 * Arguments:
706 * thread thread to awaken
707 * result Wakeup result the thread should see
708 * Conditions:
709 * At splsched
710 * the thread is locked.
711 */
712void
713clear_wait_internal(
714 thread_t thread,
715 int result)
716{
717 /*
718 * If the thread isn't in a wait queue, just set it running. Otherwise,
719 * try to remove it from the queue and, if successful, then set it
720 * running. NEVER interrupt an uninterruptible thread.
721 */
722 if (!((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))) {
723 if (wait_queue_assert_possible(thread) ||
724 (wait_queue_remove(thread) == KERN_SUCCESS)) {
725 thread_go_locked(thread, result);
726 }
727 }
728}
729
730
731/*
732 * clear_wait:
733 *
734 * Clear the wait condition for the specified thread. Start the thread
735 * executing if that is appropriate.
736 *
737 * parameters:
738 * thread thread to awaken
739 * result Wakeup result the thread should see
740 */
741void
742clear_wait(
743 thread_t thread,
744 int result)
745{
746 spl_t s;
747
748 s = splsched();
749 thread_lock(thread);
750 clear_wait_internal(thread, result);
751 thread_unlock(thread);
752 splx(s);
753}
754
755
756/*
757 * thread_wakeup_prim:
758 *
759 * Common routine for thread_wakeup, thread_wakeup_with_result,
760 * and thread_wakeup_one.
761 *
762 */
763void
764thread_wakeup_prim(
765 event_t event,
766 boolean_t one_thread,
767 int result)
768{
769 register wait_queue_t wq;
770 register int index;
771
772 index = wait_hash(event);
773 wq = &wait_queues[index];
774 if (one_thread)
775 wait_queue_wakeup_one(wq, event, result);
776 else
777 wait_queue_wakeup_all(wq, event, result);
778}
779
780/*
781 * thread_bind:
782 *
783 * Force a thread to execute on the specified processor.
784 * If the thread is currently executing, it may wait until its
785 * time slice is up before switching onto the specified processor.
786 *
787 * A processor of PROCESSOR_NULL causes the thread to be unbound.
788 * xxx - DO NOT export this to users.
789 */
790void
791thread_bind(
792 register thread_t thread,
793 processor_t processor)
794{
795 spl_t s;
796
797 s = splsched();
798 thread_lock(thread);
799 thread_bind_locked(thread, processor);
800 thread_unlock(thread);
801 splx(s);
802}
803
804/*
805 * Select a thread for this processor (the current processor) to run.
806 * May select the current thread, which must already be locked.
807 */
808thread_t
809thread_select(
810 register processor_t myprocessor)
811{
812 register thread_t thread;
813 processor_set_t pset;
814 register run_queue_t runq = &myprocessor->runq;
815 boolean_t other_runnable;
816
817 /*
818 * Check for other non-idle runnable threads.
819 */
820 pset = myprocessor->processor_set;
821 thread = current_thread();
822
823 /*
824 * Update set_quanta for timesharing.
825 */
826 pset->set_quanta = pset->machine_quanta[
827 (pset->runq.count > pset->processor_count) ?
828 pset->processor_count : pset->runq.count];
829
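	/*
	 * Added note: the index is min(pset->runq.count, pset->processor_count);
	 * for example, a 2-processor set with 5 runnable threads uses
	 * machine_quanta[2].
	 */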
830 /* Update the thread's priority */
831 if (thread->sched_stamp != sched_tick)
832 update_priority(thread);
833
834 simple_lock(&runq->lock);
835 simple_lock(&pset->runq.lock);
836
837 other_runnable = runq->count > 0 || pset->runq.count > 0;
838
839 if ( thread->state == TH_RUN &&
840 (!other_runnable ||
841 (runq->highq < thread->sched_pri &&
842 pset->runq.highq < thread->sched_pri)) &&
843 thread->processor_set == pset &&
844 (thread->bound_processor == PROCESSOR_NULL ||
845 thread->bound_processor == myprocessor) ) {
846
847 /* I am the highest priority runnable (non-idle) thread */
848 simple_unlock(&pset->runq.lock);
849 simple_unlock(&runq->lock);
850
851 myprocessor->slice_quanta =
852 (thread->sched_mode & TH_MODE_TIMESHARE)? pset->set_quanta: 1;
853 }
854 else
855 if (other_runnable) {
856 simple_unlock(&pset->runq.lock);
857 simple_unlock(&runq->lock);
858 thread = choose_thread(myprocessor);
859 }
860 else {
861 simple_unlock(&pset->runq.lock);
862 simple_unlock(&runq->lock);
863
864 /*
865 * Nothing is runnable, so set this processor idle if it
866 * was running. If it was in an assignment or shutdown,
867 * leave it alone. Return its idle thread.
868 */
869 simple_lock(&pset->idle_lock);
870 if (myprocessor->state == PROCESSOR_RUNNING) {
871 myprocessor->state = PROCESSOR_IDLE;
872 /*
873 * XXX Until it goes away, put master on end of queue, others
874 * XXX on front so master gets used last.
875 */
876 if (myprocessor == master_processor)
877 queue_enter(&(pset->idle_queue), myprocessor,
878 processor_t, processor_queue);
879 else
880 queue_enter_first(&(pset->idle_queue), myprocessor,
881 processor_t, processor_queue);
882
883 pset->idle_count++;
884 }
885 simple_unlock(&pset->idle_lock);
886
887 thread = myprocessor->idle_thread;
888 }
889
890 return (thread);
891}
892
893
894/*
895 * Stop running the current thread and start running the new thread.
896 * If continuation is non-zero, and the current thread is blocked,
897 * then it will resume by executing continuation on a new stack.
898 * Returns TRUE if the hand-off succeeds.
899 * The reason parameter contains | AST_QUANTUM if the thread blocked
900 * because its quantum expired.
901 * Assumes splsched.
902 */
903
904static thread_t
905__current_thread(void)
906{
907 return (current_thread());
908}
909
910boolean_t
911thread_invoke(
912 register thread_t old_thread,
913 register thread_t new_thread,
914 int reason,
915 void (*continuation)(void))
916{
917 void (*lcont)(void);
918
919 if (cpu_data[cpu_number()].preemption_level != 0)
920 panic("thread_invoke: preemption_level %d\n",
921 cpu_data[cpu_number()].preemption_level);
922
923 /*
924 * Mark thread interruptible.
925 */
926 thread_lock(new_thread);
927 new_thread->state &= ~TH_UNINT;
928
929 assert(thread_runnable(new_thread));
930
931 assert(old_thread->continuation == (void (*)(void))0);
932
933 if ( (old_thread->sched_mode & TH_MODE_REALTIME) &&
934 !old_thread->stack_privilege ) {
935 old_thread->stack_privilege = old_thread->kernel_stack;
936 }
937
938 if (continuation != (void (*)()) 0) {
939 switch (new_thread->state & TH_STACK_STATE) {
940 case TH_STACK_HANDOFF:
941
942 /*
943 * If the old thread has stack privilege, we can't give
944 * his stack away. So go and get him one and treat this
945 * as a traditional context switch.
946 */
947 if (old_thread->stack_privilege == current_stack())
948 goto get_new_stack;
949
950 /*
951 * Make the whole handoff/dispatch atomic to match the
952 * non-handoff case.
953 */
954 disable_preemption();
955
956 /*
957 * Set up ast context of new thread and switch to its timer.
958 */
959 new_thread->state &= ~(TH_STACK_HANDOFF|TH_UNINT);
960 new_thread->last_processor = current_processor();
961 ast_context(new_thread->top_act, cpu_number());
962 timer_switch(&new_thread->system_timer);
963 thread_unlock(new_thread);
964
965 current_task()->csw++;
966
967 old_thread->continuation = continuation;
968 stack_handoff(old_thread, new_thread);
969
970 wake_lock(old_thread);
971 thread_lock(old_thread);
972 act_machine_sv_free(old_thread->top_act);
973
974 _mk_sp_thread_done(old_thread);
975
976 /*
977 * inline thread_dispatch but don't free stack
978 */
979
980 switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
981
982 case TH_RUN | TH_UNINT:
983 case TH_RUN:
984 /*
985 * No reason to stop. Put back on a run queue.
986 */
987 old_thread->state |= TH_STACK_HANDOFF;
988 _mk_sp_thread_dispatch(old_thread);
989 break;
990
991 case TH_RUN | TH_WAIT | TH_UNINT:
992 case TH_RUN | TH_WAIT:
993 old_thread->sleep_stamp = sched_tick;
994 /* fallthrough */
995
996 case TH_WAIT: /* this happens! */
997 /*
998 * Waiting
999 */
1000 old_thread->state |= TH_STACK_HANDOFF;
1001 old_thread->state &= ~TH_RUN;
1002 if (old_thread->state & TH_TERMINATE)
1003 thread_reaper_enqueue(old_thread);
1004
1005 if (old_thread->wake_active) {
1006 old_thread->wake_active = FALSE;
1007 thread_unlock(old_thread);
1008 wake_unlock(old_thread);
1009 thread_wakeup((event_t)&old_thread->wake_active);
1010 wake_lock(old_thread);
1011 thread_lock(old_thread);
1012 }
1013 break;
1014
1015 case TH_RUN | TH_IDLE:
1016 /*
1017 * Drop idle thread -- it is already in
1018 * idle_thread_array.
1019 */
1020 old_thread->state |= TH_STACK_HANDOFF;
1021 break;
1022
1023 default:
1024 panic("State 0x%x \n",old_thread->state);
1025 }
1026
1027 thread_unlock(old_thread);
1028 wake_unlock(old_thread);
1029
1030 thread_lock(new_thread);
1031 assert(thread_runnable(new_thread));
1032 _mk_sp_thread_begin(new_thread);
1033
1034 lcont = new_thread->continuation;
1035 new_thread->continuation = (void(*)(void))0;
1036
1037 thread_unlock(new_thread);
1038 enable_preemption();
1039
1040 counter_always(c_thread_invoke_hits++);
1041
1042 if (new_thread->funnel_state & TH_FN_REFUNNEL) {
1043 kern_return_t save_wait_result;
1044 new_thread->funnel_state = 0;
1045 save_wait_result = new_thread->wait_result;
1046 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
1047 //mutex_lock(new_thread->funnel_lock);
1048 funnel_lock(new_thread->funnel_lock);
1049 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
1050 new_thread->funnel_state = TH_FN_OWNED;
1051 new_thread->wait_result = save_wait_result;
1052 }
1053 (void) spllo();
1054
1055 assert(lcont);
1056 call_continuation(lcont);
1057 /*NOTREACHED*/
1058 return TRUE;
1059
1060 case TH_STACK_ALLOC:
1061 /*
1062 * waiting for a stack
1063 */
1064 thread_swapin(new_thread);
1065 thread_unlock(new_thread);
1066 counter_always(c_thread_invoke_misses++);
1067 return FALSE;
1068
1069 case 0:
1070 /*
1071 * already has a stack - can't handoff
1072 */
1073 if (new_thread == old_thread) {
1074
1075 /* same thread but with continuation */
1076 counter(++c_thread_invoke_same);
1077 thread_unlock(new_thread);
1078
1079 if (old_thread->funnel_state & TH_FN_REFUNNEL) {
1080 kern_return_t save_wait_result;
1081
1082 old_thread->funnel_state = 0;
1083 save_wait_result = old_thread->wait_result;
1084 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
1085 funnel_lock(old_thread->funnel_lock);
1086 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
1087 old_thread->funnel_state = TH_FN_OWNED;
1088 old_thread->wait_result = save_wait_result;
1089 }
1090 (void) spllo();
1091 call_continuation(continuation);
1092 /*NOTREACHED*/
1093 }
1094 break;
1095 }
1096 } else {
1097 /*
1098 * check that the new thread has a stack
1099 */
1100 if (new_thread->state & TH_STACK_STATE) {
1101 get_new_stack:
1102 /* has no stack. if not already waiting for one try to get one */
1103 if ((new_thread->state & TH_STACK_ALLOC) ||
1104 /* not already waiting. nonblocking try to get one */
1105 !stack_alloc_try(new_thread, thread_continue))
1106 {
1107 /* couldn't get one. schedule new thread to get a stack and
1108 return failure so we can try another thread. */
1109 thread_swapin(new_thread);
1110 thread_unlock(new_thread);
1111 counter_always(c_thread_invoke_misses++);
1112 return FALSE;
1113 }
1114 } else if (old_thread == new_thread) {
1115 counter(++c_thread_invoke_same);
1116 thread_unlock(new_thread);
1117 return TRUE;
1118 }
1119
1120 /* new thread now has a stack. it has been setup to resume in
1121 thread_continue so it can dispatch the old thread, deal with
1122 funnelling and then go to its true continuation point */
1123 }
1124
1125 new_thread->state &= ~(TH_STACK_HANDOFF | TH_UNINT);
1126
1127 /*
1128 * Set up ast context of new thread and switch to its timer.
1129 */
1130 new_thread->last_processor = current_processor();
1131 ast_context(new_thread->top_act, cpu_number());
1132 timer_switch(&new_thread->system_timer);
1133 assert(thread_runnable(new_thread));
1134
1135 /*
1136 * N.B. On return from the call to switch_context, 'old_thread'
1137 * points at the thread that yielded to us. Unfortunately, at
1138 * this point, there are no simple_locks held, so if we are preempted
1139 * before the call to thread_dispatch blocks preemption, it is
1140 * possible for 'old_thread' to terminate, leaving us with a
1141 * stale thread pointer.
1142 */
1143 disable_preemption();
1144
1145 thread_unlock(new_thread);
1146
1147 counter_always(c_thread_invoke_csw++);
1148 current_task()->csw++;
1149
1150 thread_lock(old_thread);
1151 old_thread->reason = reason;
1152 assert(old_thread->runq == RUN_QUEUE_NULL);
1153
1154 if (continuation != (void (*)(void))0)
1155 old_thread->continuation = continuation;
1156
1157 _mk_sp_thread_done(old_thread);
1158 thread_unlock(old_thread);
1159
1160 /*
1161 * switch_context is machine-dependent. It does the
1162 * machine-dependent components of a context-switch, like
1163 * changing address spaces. It updates active_threads.
1164 */
1165 old_thread = switch_context(old_thread, continuation, new_thread);
1166
1167 /* Now on new thread's stack. Set a local variable to refer to it. */
1168 new_thread = __current_thread();
1169 assert(old_thread != new_thread);
1170
1171 thread_lock(new_thread);
1172 assert(thread_runnable(new_thread));
1173 _mk_sp_thread_begin(new_thread);
1174 thread_unlock(new_thread);
1175
1176 /*
1177 * We're back. Now old_thread is the thread that resumed
1178 * us, and we have to dispatch it.
1179 */
1180
1181 thread_dispatch(old_thread);
1182 enable_preemption();
1183
1184 /* if we get here and 'continuation' is set that means the
1185 * switch_context() path returned and did not call out
1186 * to the continuation. we will do it manually here */
1187 if (continuation) {
1188 call_continuation(continuation);
1189 /* NOTREACHED */
1190 }
1191
1192 return TRUE;
1193}
1194
1195/*
1196 * thread_continue:
1197 *
1198 * Called when launching a new thread, at splsched().
1199 */
1200void
1201thread_continue(
1202 register thread_t old_thread)
1203{
1204 register thread_t self = current_thread();
1205 register void (*continuation)();
1206
1207 /*
1208 * We must dispatch the old thread and then
1209 * call the current thread's continuation.
1210 * There might not be an old thread, if we are
1211 * the first thread to run on this processor.
1212 */
1213 if (old_thread != THREAD_NULL)
1214 thread_dispatch(old_thread);
1215
1216 thread_lock(self);
1217 continuation = self->continuation;
1218 self->continuation = (void (*)(void))0;
1219
1220 _mk_sp_thread_begin(self);
1221 thread_unlock(self);
1222
1223 /*
1224 * N.B. - the following is necessary, since thread_invoke()
1225 * inhibits preemption on entry and reenables before it
1226 * returns. Unfortunately, the first time a newly-created
1227 * thread executes, it magically appears here, and never
1228 * executes the enable_preemption() call in thread_invoke().
1229 */
1230 enable_preemption();
1231
1232 if (self->funnel_state & TH_FN_REFUNNEL) {
1233 kern_return_t save_wait_result;
1234
1235 self->funnel_state = 0;
1236 save_wait_result = self->wait_result;
1237 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
1238 funnel_lock(self->funnel_lock);
1239 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
1240 self->wait_result = save_wait_result;
1241 self->funnel_state = TH_FN_OWNED;
1242 }
1243
1244 spllo();
1245
1246 assert(continuation);
1247 (*continuation)();
1248 /*NOTREACHED*/
1249}
1250
1251#if MACH_LDEBUG || MACH_KDB
1252
1253#define THREAD_LOG_SIZE 300
1254
1255struct t64 {
1256 unsigned long h;
1257 unsigned long l;
1258};
1259
1260struct {
1261 struct t64 stamp;
1262 thread_t thread;
1263 long info1;
1264 long info2;
1265 long info3;
1266 char * action;
1267} thread_log[THREAD_LOG_SIZE];
1268
1269int thread_log_index;
1270
1271void check_thread_time(long n);
1272
1273
1274int check_thread_time_crash;
1275
1276#if 0
1277void
1278check_thread_time(long us)
1279{
1280 struct t64 temp;
1281
1282 if (!check_thread_time_crash)
1283 return;
1284
1285 temp = thread_log[0].stamp;
1286 cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp);
1287
1288 if (temp.l >= us && thread_log[1].info != 0x49) /* HACK!!! */
1289 panic ("check_thread_time");
1290}
1291#endif
1292
1293void
1294log_thread_action(char * action, long info1, long info2, long info3)
1295{
1296 int i;
1297 spl_t x;
1298 static unsigned int tstamp;
1299
1300 x = splhigh();
1301
1302 for (i = THREAD_LOG_SIZE-1; i > 0; i--) {
1303 thread_log[i] = thread_log[i-1];
1304 }
1305
1306 thread_log[0].stamp.h = 0;
1307 thread_log[0].stamp.l = tstamp++;
1308 thread_log[0].thread = current_thread();
1309 thread_log[0].info1 = info1;
1310 thread_log[0].info2 = info2;
1311 thread_log[0].info3 = info3;
1312 thread_log[0].action = action;
1313/* strcpy (&thread_log[0].action[0], action);*/
1314
1315 splx(x);
1316}
1317#endif /* MACH_LDEBUG || MACH_KDB */
1318
1319#if MACH_KDB
1320#include <ddb/db_output.h>
1321void db_show_thread_log(void);
1322
1323void
1324db_show_thread_log(void)
1325{
1326 int i;
1327
1328 db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
1329 " Info3 ", " Timestamp ", "Action");
1330
1331 for (i = 0; i < THREAD_LOG_SIZE; i++) {
1332 db_printf ("%08x %08x %08x %08x %08x/%08x %s\n",
1333 thread_log[i].thread,
1334 thread_log[i].info1,
1335 thread_log[i].info2,
1336 thread_log[i].info3,
1337 thread_log[i].stamp.h,
1338 thread_log[i].stamp.l,
1339 thread_log[i].action);
1340 }
1341}
1342#endif /* MACH_KDB */
1343
1344/*
1345 * thread_block_reason:
1346 *
1347 * Block the current thread if a wait has been asserted,
1348 * otherwise unconditionally yield the remainder of the
1349 * current quantum unless reason contains AST_BLOCK.
1350 *
1351 * If a continuation is specified, then thread_block will
1352 * attempt to discard the thread's kernel stack. When the
1353 * thread resumes, it will execute the continuation function
1354 * on a new kernel stack.
1355 */
1356counter(mach_counter_t c_thread_block_calls = 0;)
1357
1358int
1359thread_block_reason(
1360 void (*continuation)(void),
1361 int reason)
1362{
1363 register thread_t thread = current_thread();
1364 register processor_t myprocessor;
1365 register thread_t new_thread;
1366 spl_t s;
1367
1368 counter(++c_thread_block_calls);
1369
1370 check_simple_locks();
1371
1372 machine_clock_assist();
1373
1374 s = splsched();
1375
1376 if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) {
1377 thread->funnel_state = TH_FN_REFUNNEL;
1378 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0);
1379 funnel_unlock(thread->funnel_lock);
1380 }
1381
1382 myprocessor = current_processor();
1383
1384 thread_lock(thread);
1385 if (thread->state & TH_ABORT)
1386 clear_wait_internal(thread, THREAD_INTERRUPTED);
1387
1388 if (!(reason & AST_BLOCK))
1389 myprocessor->slice_quanta = 0;
1390
1391 /* Unconditionally remove either | both */
1392 ast_off(AST_PREEMPT);
1393
1394 new_thread = thread_select(myprocessor);
1395 assert(new_thread);
1396 assert(thread_runnable(new_thread));
1397 thread_unlock(thread);
1398 while (!thread_invoke(thread, new_thread, reason, continuation)) {
1399 thread_lock(thread);
1400 new_thread = thread_select(myprocessor);
1401 assert(new_thread);
1402 assert(thread_runnable(new_thread));
1403 thread_unlock(thread);
1404 }
1405
1406 if (thread->funnel_state & TH_FN_REFUNNEL) {
1407 kern_return_t save_wait_result;
1408
1409 save_wait_result = thread->wait_result;
1410 thread->funnel_state = 0;
1411 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
1412 funnel_lock(thread->funnel_lock);
1413 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
1414 thread->funnel_state = TH_FN_OWNED;
1415 thread->wait_result = save_wait_result;
1416 }
1417
1418 splx(s);
1419
1420 return thread->wait_result;
1421}
1422
1423/*
1424 * thread_block:
1425 *
1426 * Block the current thread if a wait has been asserted,
1427 * otherwise yield the remainder of the current quantum.
1428 */
1429int
1430thread_block(
1431 void (*continuation)(void))
1432{
1433 return thread_block_reason(continuation, AST_NONE);
1434}
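/*
 * Illustrative sketch, not part of the original source: blocking with a
 * continuation, in the style of sched_tick_thread_continue() below.  Because
 * the kernel stack may be discarded, the continuation is entered from the
 * top on wakeup and must never return.  The example_* identifiers are
 * hypothetical.
 */
#if 0
static int example_block_event;

void
example_continuation(void)
{
	/* runs on a fresh kernel stack after the wakeup */
	/* ... post-wakeup work, then block again or terminate ... */
	/*NOTREACHED*/
}

void
example_block_with_continuation(void)
{
	assert_wait((event_t)&example_block_event, THREAD_INTERRUPTIBLE);
	thread_block(example_continuation);
	/*NOTREACHED*/
}
#endif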
1435
1436/*
1437 * thread_run:
1438 *
1439 * Switch directly from the current thread to a specified
1440 * thread. Both the current and new threads must be
1441 * runnable.
1442 *
1443 * Assumption:
1444 * at splsched.
1445 */
1446int
1447thread_run(
1448 thread_t old_thread,
1449 void (*continuation)(void),
1450 thread_t new_thread)
1451{
1452 while (!thread_invoke(old_thread, new_thread, 0, continuation)) {
1453 register processor_t myprocessor = current_processor();
1454 thread_lock(old_thread);
1455 new_thread = thread_select(myprocessor);
1456 thread_unlock(old_thread);
1457 }
1458 return old_thread->wait_result;
1459}
1460
1461/*
1462 * Dispatches a running thread that is not on a runq.
1463 * Called at splsched.
1464 */
1465void
1466thread_dispatch(
1467 register thread_t thread)
1468{
1469 /*
1470 * If we are discarding the thread's stack, we must do it
1471 * before the thread has a chance to run.
1472 */
1473 wake_lock(thread);
1474 thread_lock(thread);
1475
1476#ifndef i386
1477 /* no continuations on i386 for now */
1478 if (thread->continuation != (void (*)())0) {
1479 assert((thread->state & TH_STACK_STATE) == 0);
1480 thread->state |= TH_STACK_HANDOFF;
1481 stack_free(thread);
1482 if (thread->top_act) {
1483 act_machine_sv_free(thread->top_act);
1484 }
1485 }
1486#endif
1487
1488 switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
1489
1490 case TH_RUN | TH_UNINT:
1491 case TH_RUN:
1492 /*
1493 * No reason to stop. Put back on a run queue.
1494 */
1495 _mk_sp_thread_dispatch(thread);
1496 break;
1497
1498 case TH_RUN | TH_WAIT | TH_UNINT:
1499 case TH_RUN | TH_WAIT:
1500 thread->sleep_stamp = sched_tick;
1501 /* fallthrough */
1502 case TH_WAIT: /* this happens! */
1503
1504 /*
1505 * Waiting
1506 */
1507 thread->state &= ~TH_RUN;
1508 if (thread->state & TH_TERMINATE)
1509 thread_reaper_enqueue(thread);
1510
1511 if (thread->wake_active) {
1512 thread->wake_active = FALSE;
1513 thread_unlock(thread);
1514 wake_unlock(thread);
1515 thread_wakeup((event_t)&thread->wake_active);
1516 return;
1517 }
1518 break;
1519
1520 case TH_RUN | TH_IDLE:
1521 /*
1522 * Drop idle thread -- it is already in
1523 * idle_thread_array.
1524 */
1525 break;
1526
1527 default:
1528 panic("State 0x%x \n",thread->state);
1529 }
1530 thread_unlock(thread);
1531 wake_unlock(thread);
1532}
1533
1534/*
1535 * Enqueue thread on run queue. Thread must be locked,
1536 * and not already be on a run queue.
1537 */
1538int
1539run_queue_enqueue(
1540 register run_queue_t rq,
1541 register thread_t thread,
1542 boolean_t tail)
1543{
1544 register int whichq;
1545 int oldrqcount;
1546
1547 whichq = thread->sched_pri;
1548 assert(whichq >= MINPRI && whichq <= MAXPRI);
1549
1550 simple_lock(&rq->lock); /* lock the run queue */
1551 assert(thread->runq == RUN_QUEUE_NULL);
1552 if (tail)
1553 enqueue_tail(&rq->queues[whichq], (queue_entry_t)thread);
1554 else
1555 enqueue_head(&rq->queues[whichq], (queue_entry_t)thread);
1556
1557 setbit(MAXPRI - whichq, rq->bitmap);
1558 if (whichq > rq->highq)
1559 rq->highq = whichq;
1560
1561 oldrqcount = rq->count++;
1562 thread->runq = rq;
1563 thread->whichq = whichq;
1564#if DEBUG
1565 thread_check(thread, rq);
1566#endif /* DEBUG */
1567 simple_unlock(&rq->lock);
1568
1569 return (oldrqcount);
1570}
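/*
 * Worked example, added for illustration: enqueueing a thread at sched_pri 80
 * sets bit (MAXPRI - 80) in rq->bitmap and, if no higher level is occupied,
 * leaves rq->highq == 80.  When rem_runq() later empties that level it clears
 * the bit and recomputes
 *	rq->highq = MAXPRI - ffsbit(rq->bitmap),
 * i.e. the next-highest priority that still has a runnable thread.
 */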
1571
1572/*
1573 * thread_setrun:
1574 *
1575 * Make thread runnable; dispatch directly onto an idle processor
1576 * if possible. Else put on appropriate run queue (processor
1577 * if bound, else processor set). Caller must have lock on thread.
1578 * This is always called at splsched.
1579 * The tail parameter, if TRUE || TAIL_Q, indicates that the
1580 * thread should be placed at the tail of the runq. If
1581 * FALSE || HEAD_Q the thread will be placed at the head of the
1582 * appropriate runq.
1583 */
1584void
1585thread_setrun(
1586 register thread_t new_thread,
1587 boolean_t may_preempt,
1588 boolean_t tail)
1589{
1590 register processor_t processor;
1591 register run_queue_t runq;
1592 register processor_set_t pset;
1593 thread_t thread;
1594 ast_t ast_flags = AST_BLOCK;
1595
1596 assert(thread_runnable(new_thread));
1597
1598 /*
1599 * Update priority if needed.
1600 */
1601 if (new_thread->sched_stamp != sched_tick)
1602 update_priority(new_thread);
1603
1604 if ( new_thread->sched_pri >= BASEPRI_PREEMPT &&
1605 kernel_preemption_mode == KERNEL_PREEMPT )
1606 ast_flags |= AST_URGENT;
1607
1608 assert(new_thread->runq == RUN_QUEUE_NULL);
1609
1610 /*
1611 * Try to dispatch the thread directly onto an idle processor.
1612 */
1613 if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
1614 /*
1615 * Not bound, any processor in the processor set is ok.
1616 */
1617 pset = new_thread->processor_set;
1618 if (pset->idle_count > 0) {
1619 simple_lock(&pset->idle_lock);
1620 if (pset->idle_count > 0) {
1621 processor = (processor_t) queue_first(&pset->idle_queue);
1622 queue_remove(&(pset->idle_queue), processor, processor_t,
1623 processor_queue);
1624 pset->idle_count--;
1625 processor->next_thread = new_thread;
1626 processor->state = PROCESSOR_DISPATCHING;
1627 simple_unlock(&pset->idle_lock);
1628 if(processor->slot_num != cpu_number())
1629 machine_signal_idle(processor);
1630 return;
1631 }
1632 simple_unlock(&pset->idle_lock);
1633 }
1634
1635 /*
1636 * Place thread on processor set run queue.
1637 */
1638 runq = &pset->runq;
1639 run_queue_enqueue(runq, new_thread, tail);
1640
1641 /*
1642 * Preempt check
1643 */
1644 thread = current_thread();
1645 processor = current_processor();
1646 if ( may_preempt &&
1647 pset == processor->processor_set ) {
1648 /*
1649 * XXX if we have a non-empty local runq or are
1650 * XXX running a bound thread, ought to check for
1651 * XXX another cpu running lower-pri thread to preempt.
1652 */
1653 if (csw_needed(thread, processor))
1654 ast_on(ast_flags);
1655 }
1656 }
1657 else {
1658 /*
1659 * Bound, can only run on bound processor. Have to lock
1660 * processor here because it may not be the current one.
1661 */
1662 if (processor->state == PROCESSOR_IDLE) {
1663 simple_lock(&processor->lock);
1664 pset = processor->processor_set;
1665 simple_lock(&pset->idle_lock);
1666 if (processor->state == PROCESSOR_IDLE) {
1667 queue_remove(&pset->idle_queue, processor,
1668 processor_t, processor_queue);
1669 pset->idle_count--;
1670 processor->next_thread = new_thread;
1671 processor->state = PROCESSOR_DISPATCHING;
1672 simple_unlock(&pset->idle_lock);
1673 simple_unlock(&processor->lock);
1674 if(processor->slot_num != cpu_number())
1675 machine_signal_idle(processor);
1676 return;
1677 }
1678 simple_unlock(&pset->idle_lock);
1679 simple_unlock(&processor->lock);
1680 }
1681
1682 /*
1683 * Cause ast on processor if processor is on line, and the
1684 * currently executing thread is not bound to that processor
1685 * (bound threads have implicit priority over non-bound threads).
1686 * We also avoid sending the AST to the idle thread (if it got
1687 * scheduled in the window between the 'if' above and here),
1688 * since the idle_thread is bound.
1689 */
1690 runq = &processor->runq;
1691 if (processor == current_processor()) {
1692 run_queue_enqueue(runq, new_thread, tail);
1693
1694 thread = current_thread();
1695 if ( thread->bound_processor == PROCESSOR_NULL ||
1696 csw_needed(thread, processor))
1697 ast_on(ast_flags);
1698 }
1699 else {
1700 thread = cpu_data[processor->slot_num].active_thread;
1701 if ( run_queue_enqueue(runq, new_thread, tail) == 0 &&
1702 processor->state != PROCESSOR_OFF_LINE &&
1703 thread && thread->bound_processor != processor )
1704 cause_ast_check(processor);
1705 }
1706 }
1707}
1708
1709/*
1710 * set_pri:
1711 *
1712 * Set the priority of the specified thread to the specified
1713 * priority. This may cause the thread to change queues.
1714 *
1715 * The thread *must* be locked by the caller.
1716 */
1717void
1718set_pri(
1719 thread_t thread,
1720 int pri,
1721 boolean_t resched)
1722{
1723 register struct run_queue *rq;
1724
1725 rq = rem_runq(thread);
1726 assert(thread->runq == RUN_QUEUE_NULL);
1727 thread->sched_pri = pri;
1728 if (rq != RUN_QUEUE_NULL) {
1729 if (resched)
1730 thread_setrun(thread, TRUE, TAIL_Q);
1731 else
1732 run_queue_enqueue(rq, thread, TAIL_Q);
1733 }
1734}
1735
1736/*
1737 * rem_runq:
1738 *
1739 * Remove a thread from its run queue.
1740 * The run queue that the process was on is returned
1741 * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
1742 * before calling this routine. Unusual locking protocol on runq
1743 * field in thread structure makes this code interesting; see thread.h.
1744 */
1745run_queue_t
1746rem_runq(
1747 thread_t thread)
1748{
1749 register struct run_queue *rq;
1750
1751 rq = thread->runq;
1752 /*
1753 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
1754 * run_queues because the caller locked the thread. Otherwise
1755 * the thread is on a runq, but could leave.
1756 */
1757 if (rq != RUN_QUEUE_NULL) {
1758 simple_lock(&rq->lock);
1759 if (rq == thread->runq) {
1760 /*
1761 * Thread is in a runq and we have a lock on
1762 * that runq.
1763 */
1764#if DEBUG
1765 thread_check(thread, rq);
1766#endif /* DEBUG */
1767 remqueue(&rq->queues[0], (queue_entry_t)thread);
1768 rq->count--;
1769
1770 if (queue_empty(rq->queues + thread->sched_pri)) {
1771 /* update run queue status */
1772 if (thread->sched_pri != IDLEPRI)
1773 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
1774 rq->highq = MAXPRI - ffsbit(rq->bitmap);
1775 }
1776 thread->runq = RUN_QUEUE_NULL;
1777 simple_unlock(&rq->lock);
1778 }
1779 else {
1780 /*
1781 * The thread left the runq before we could
1782 * lock the runq. It is not on a runq now, and
1783 * can't move again because this routine's
1784 * caller locked the thread.
1785 */
1786 assert(thread->runq == RUN_QUEUE_NULL);
1787 simple_unlock(&rq->lock);
1788 rq = RUN_QUEUE_NULL;
1789 }
1790 }
1791
1792 return (rq);
1793}
1794
1795
1796/*
1797 * choose_thread:
1798 *
1799 * Choose a thread to execute. The thread chosen is removed
1800 * from its run queue. Note that this requires only that the runq
1801 * lock be held.
1802 *
1803 * Strategy:
1804 * Check processor runq first; if anything found, run it.
1805 * Else check pset runq; if nothing found, return idle thread.
1806 *
1807 * Second line of strategy is implemented by choose_pset_thread.
1808 * This is only called on processor startup and when thread_block
1809 * thinks there's something in the processor runq.
1810 */
1811thread_t
1812choose_thread(
1813 processor_t myprocessor)
1814{
1815 thread_t thread;
1816 register queue_t q;
1817 register run_queue_t runq;
1818 processor_set_t pset;
1819
1820 runq = &myprocessor->runq;
1821 pset = myprocessor->processor_set;
1822
1823 simple_lock(&runq->lock);
1824 if (runq->count > 0 && runq->highq >= pset->runq.highq) {
1825 q = runq->queues + runq->highq;
1826#if MACH_ASSERT
1827 if (!queue_empty(q)) {
1828#endif /*MACH_ASSERT*/
1829 thread = (thread_t)q->next;
1830 ((queue_entry_t)thread)->next->prev = q;
1831 q->next = ((queue_entry_t)thread)->next;
1832 thread->runq = RUN_QUEUE_NULL;
1833 runq->count--;
1834 if (queue_empty(q)) {
1835 if (runq->highq != IDLEPRI)
1836 clrbit(MAXPRI - runq->highq, runq->bitmap);
1837 runq->highq = MAXPRI - ffsbit(runq->bitmap);
1838 }
1839 simple_unlock(&runq->lock);
1840 return (thread);
1841#if MACH_ASSERT
1842 }
1843 panic("choose_thread");
1844#endif /*MACH_ASSERT*/
1845 /*NOTREACHED*/
1846 }
1847
1848 simple_unlock(&runq->lock);
1849 simple_lock(&pset->runq.lock);
1850 return (choose_pset_thread(myprocessor, pset));
1851}
1852
1853
1854/*
1855 * choose_pset_thread: choose a thread from processor_set runq or
1856 * set processor idle and choose its idle thread.
1857 *
1858 * Caller must be at splsched and have a lock on the runq. This
1859 * lock is released by this routine. myprocessor is always the current
1860 * processor, and pset must be its processor set.
1861 * This routine chooses and removes a thread from the runq if there
1862 * is one (and returns it), else it sets the processor idle and
1863 * returns its idle thread.
1864 */
1865thread_t
1866choose_pset_thread(
1867 register processor_t myprocessor,
1868 processor_set_t pset)
1869{
1870 register run_queue_t runq;
1871 register thread_t thread;
1872 register queue_t q;
1873
1874 runq = &pset->runq;
1875 if (runq->count > 0) {
1876 q = runq->queues + runq->highq;
1877#if MACH_ASSERT
1878 if (!queue_empty(q)) {
1879#endif /*MACH_ASSERT*/
1880 thread = (thread_t)q->next;
1881 ((queue_entry_t)thread)->next->prev = q;
1882 q->next = ((queue_entry_t)thread)->next;
1883 thread->runq = RUN_QUEUE_NULL;
1884 runq->count--;
1885 if (queue_empty(q)) {
1886 if (runq->highq != IDLEPRI)
1887 clrbit(MAXPRI - runq->highq, runq->bitmap);
1888 runq->highq = MAXPRI - ffsbit(runq->bitmap);
1889 }
1890 simple_unlock(&runq->lock);
1891 return (thread);
1892#if MACH_ASSERT
1893 }
1894 panic("choose_pset_thread");
1895#endif /*MACH_ASSERT*/
1896 /*NOTREACHED*/
1897 }
1898 simple_unlock(&runq->lock);
1899
1900 /*
1901 * Nothing is runnable, so set this processor idle if it
1902 * was running. If it was in an assignment or shutdown,
1903 * leave it alone. Return its idle thread.
1904 */
1905 simple_lock(&pset->idle_lock);
1906 if (myprocessor->state == PROCESSOR_RUNNING) {
1907 myprocessor->state = PROCESSOR_IDLE;
1908 /*
1909 * XXX Until it goes away, put master on end of queue, others
1910 * XXX on front so master gets used last.
1911 */
1912 if (myprocessor == master_processor)
1913 queue_enter(&(pset->idle_queue), myprocessor,
1914 processor_t, processor_queue);
1915 else
1916 queue_enter_first(&(pset->idle_queue), myprocessor,
1917 processor_t, processor_queue);
1918
1919 pset->idle_count++;
1920 }
1921 simple_unlock(&pset->idle_lock);
1922
1923 return (myprocessor->idle_thread);
1924}
1925
1926/*
1927 * no_dispatch_count counts number of times processors go non-idle
1928 * without being dispatched. This should be very rare.
1929 */
1930int no_dispatch_count = 0;
1931
1932/*
1933 * This is the idle thread, which just looks for other threads
1934 * to execute.
1935 */
1936void
1937idle_thread_continue(void)
1938{
1939 register processor_t myprocessor;
1940 register volatile thread_t *threadp;
1941 register volatile int *gcount;
1942 register volatile int *lcount;
1943 register thread_t new_thread;
1944 register int state;
1945 register processor_set_t pset;
1946 int mycpu;
1947
1948 mycpu = cpu_number();
1949 myprocessor = current_processor();
1950 threadp = (volatile thread_t *) &myprocessor->next_thread;
1951 lcount = (volatile int *) &myprocessor->runq.count;
1952
1953 for (;;) {
1954#ifdef MARK_CPU_IDLE
1955 MARK_CPU_IDLE(mycpu);
1956#endif /* MARK_CPU_IDLE */
1957
1958 gcount = (volatile int *)&myprocessor->processor_set->runq.count;
1959
1960 (void)splsched();
1961 while ( (*threadp == (volatile thread_t)THREAD_NULL) &&
1962 (*gcount == 0) && (*lcount == 0) ) {
1963
1964 /* check for ASTs while we wait */
1965 if (need_ast[mycpu] &~ ( AST_SCHEDULING | AST_PREEMPT |
1966 AST_BSD | AST_BSD_INIT )) {
1967 /* don't allow scheduling ASTs */
1968 need_ast[mycpu] &= ~( AST_SCHEDULING | AST_PREEMPT |
1969 AST_BSD | AST_BSD_INIT );
1970 ast_taken(AST_ALL, TRUE); /* back at spllo */
1971 }
1972 else
1973#ifdef __ppc__
1974 machine_idle();
1975#else
1976 (void)spllo();
1977#endif
1978 machine_clock_assist();
1979
1980 (void)splsched();
1981 }
1982
1983#ifdef MARK_CPU_ACTIVE
1984 (void)spllo();
1985 MARK_CPU_ACTIVE(mycpu);
1986 (void)splsched();
1987#endif /* MARK_CPU_ACTIVE */
1988
1989 /*
1990 * This is not a switch statement to avoid the
1991 * bounds checking code in the common case.
1992 */
1993 pset = myprocessor->processor_set;
1994 simple_lock(&pset->idle_lock);
1995retry:
1996 state = myprocessor->state;
1997 if (state == PROCESSOR_DISPATCHING) {
1998 /*
1999 * Common case -- cpu dispatched.
2000 */
2001 new_thread = *threadp;
2002 *threadp = (volatile thread_t) THREAD_NULL;
2003 myprocessor->state = PROCESSOR_RUNNING;
2004 simple_unlock(&pset->idle_lock);
2005
2006 thread_lock(new_thread);
2007 simple_lock(&myprocessor->runq.lock);
2008 simple_lock(&pset->runq.lock);
2009 if ( myprocessor->runq.highq > new_thread->sched_pri ||
2010 pset->runq.highq > new_thread->sched_pri ) {
2011 simple_unlock(&pset->runq.lock);
2012 simple_unlock(&myprocessor->runq.lock);
2013
2014 if (new_thread->bound_processor != PROCESSOR_NULL)
2015 run_queue_enqueue(&myprocessor->runq, new_thread, HEAD_Q);
2016 else
2017 run_queue_enqueue(&pset->runq, new_thread, HEAD_Q);
2018 thread_unlock(new_thread);
2019
2020 counter(c_idle_thread_block++);
2021 thread_block(idle_thread_continue);
2022 }
2023 else {
2024 simple_unlock(&pset->runq.lock);
2025 simple_unlock(&myprocessor->runq.lock);
2026 thread_unlock(new_thread);
2027
2028 counter(c_idle_thread_handoff++);
2029 thread_run(myprocessor->idle_thread,
2030 idle_thread_continue, new_thread);
2031 }
2032 }
2033 else
2034 if (state == PROCESSOR_IDLE) {
2035 if (myprocessor->state != PROCESSOR_IDLE) {
2036 /*
2037 * Something happened, try again.
2038 */
2039 goto retry;
2040 }
2041 /*
2042 * Processor was not dispatched (Rare).
2043 * Set it running again.
2044 */
2045 no_dispatch_count++;
2046 pset->idle_count--;
2047 queue_remove(&pset->idle_queue, myprocessor,
2048 processor_t, processor_queue);
2049 myprocessor->state = PROCESSOR_RUNNING;
2050 simple_unlock(&pset->idle_lock);
2051
2052 counter(c_idle_thread_block++);
2053 thread_block(idle_thread_continue);
2054 }
2055 else
2056 if ( state == PROCESSOR_ASSIGN ||
2057 state == PROCESSOR_SHUTDOWN ) {
2058 /*
2059 * Changing processor sets, or going off-line.
2060 * Release next_thread if there is one. Actual
2061 * thread to run is on a runq.
2062 */
2063 if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
2064 *threadp = (volatile thread_t) THREAD_NULL;
2065 simple_unlock(&pset->idle_lock);
2066 thread_lock(new_thread);
2067 thread_setrun(new_thread, FALSE, TAIL_Q);
2068 thread_unlock(new_thread);
2069 } else
2070 simple_unlock(&pset->idle_lock);
2071
2072 counter(c_idle_thread_block++);
2073 thread_block(idle_thread_continue);
2074 }
2075 else {
2076 simple_unlock(&pset->idle_lock);
2077 printf("Bad processor state %d (Cpu %d)\n",
2078 cpu_state(mycpu), mycpu);
2079 panic("idle_thread");
2080
2081 }
2082
2083 (void)spllo();
2084 }
2085}
2086
2087void
2088idle_thread(void)
2089{
2090 thread_t self = current_thread();
2091 spl_t s;
2092
2093 stack_privilege(self);
2094
2095 s = splsched();
2096 thread_lock(self);
2097
2098 self->priority = IDLEPRI;
2099 self->sched_pri = self->priority;
2100
2101 thread_unlock(self);
2102 splx(s);
2103
2104 counter(c_idle_thread_block++);
2105 thread_block((void(*)(void))0);
2106 idle_thread_continue();
2107 /*NOTREACHED*/
2108}
2109
2110static uint64_t sched_tick_interval, sched_tick_deadline;
2111
2112void sched_tick_thread(void);
2113
2114void
2115sched_tick_init(void)
2116{
2117 kernel_thread_with_priority(
2118 kernel_task, MAXPRI_STANDARD,
2119 sched_tick_thread, TRUE, TRUE);
2120}
2121
2122/*
2123 * sched_tick_thread
2124 *
2125 * Update the priorities of all threads periodically.
2126 */
2127void
2128sched_tick_thread_continue(void)
2129{
2130 uint64_t abstime;
2131#if SIMPLE_CLOCK
2132 int new_usec;
2133#endif /* SIMPLE_CLOCK */
2134
2135 clock_get_uptime(&abstime);
2136
2137 sched_tick++; /* age usage one more time */
2138#if SIMPLE_CLOCK
2139 /*
2140 * Compensate for clock drift. sched_usec is an
2141 * exponential average of the number of microseconds in
2142 * a second. It decays in the same fashion as cpu_usage.
2143 */
2144 new_usec = sched_usec_elapsed();
2145 sched_usec = (5*sched_usec + 3*new_usec)/8;
2146#endif /* SIMPLE_CLOCK */
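	/*
	 * A worked illustration of the SIMPLE_CLOCK average above (the
	 * numbers are hypothetical): if sched_usec is 1000000 and
	 * sched_usec_elapsed() reports 1000400 because the clock runs
	 * slightly fast, the new average is
	 * (5*1000000 + 3*1000400)/8 = 1000150, i.e. each tick absorbs
	 * 3/8 of the latest sample.
	 */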
2147
2148 /*
2149 * Compute the scheduler load factors.
2150 */
2151 compute_mach_factor();
2152
2153 /*
2154 * Scan the run queues for runnable threads that need to
2155 * have their priorities recalculated.
2156 */
2157 do_thread_scan();
2158
2159 clock_deadline_for_periodic_event(sched_tick_interval, abstime,
2160 &sched_tick_deadline);
2161
2162 assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
2163 thread_set_timer_deadline(sched_tick_deadline);
2164 thread_block(sched_tick_thread_continue);
2165 /*NOTREACHED*/
2166}
2167
2168void
2169sched_tick_thread(void)
2170{
2171 thread_t self = current_thread();
2172 natural_t rate;
2173 spl_t s;
2174
2175 stack_privilege(self);
2176
2177 rate = (1000 >> SCHED_TICK_SHIFT);
2178 clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC,
2179 &sched_tick_interval);
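	/*
	 * Rough illustration, assuming SCHED_TICK_SHIFT is 3 (the actual
	 * value is defined elsewhere in the kernel): rate is
	 * 1000 >> 3 = 125 units of USEC_PER_SEC nanoseconds (1 ms) each,
	 * so the scheduler tick fires roughly eight times per second.
	 */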
2180 clock_get_uptime(&sched_tick_deadline);
2181
2182 thread_block(sched_tick_thread_continue);
2183 /*NOTREACHED*/
2184}
2185
2186#define MAX_STUCK_THREADS 128
2187
2188/*
2189 * do_thread_scan: scan for stuck threads. A thread is stuck if
2190 * it is runnable but its priority is so low that it has not
2191 * run for several seconds. Its priority should be higher, but
2192 * won't be until it runs and calls update_priority. The scanner
2193 * finds these threads and does the updates.
2194 *
2195 * Scanner runs in two passes. Pass one squirrels likely
2196 * thread ids away in an array (takes out references for them).
2197 * Pass two does the priority updates. This is necessary because
2198 * the run queue lock is required for the candidate scan, but
2199 * cannot be held during updates [set_pri will deadlock].
2200 *
2201 * Array length should be enough so that restart isn't necessary,
2202 * but restart logic is included. Processor runqs and the idle
2202a * threads are scanned as well.
2203 *
2204 */
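/*
 * A minimal sketch of the control flow; the helper names below are
 * illustrative only, not real symbols:
 *
 *	do {
 *		restart = collect_candidates();	 pass 1: runq locks held,
 *						 takes thread references
 *		fix_candidates();		 pass 2: thread locks only,
 *						 calls update_priority()
 *	} while (restart);
 *
 * In the code that follows, pass 1 is do_runq_scan() and pass 2 is
 * the fix-up loop at the end of do_thread_scan().
 */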
2205thread_t stuck_threads[MAX_STUCK_THREADS];
2206int stuck_count = 0;
2207
2208/*
2209 * do_runq_scan is the guts of pass 1. It scans a runq for
2210 * stuck threads. A boolean is returned indicating whether
2211 * a retry is needed.
2212 */
2213boolean_t
2214do_runq_scan(
2215 run_queue_t runq)
2216{
2217 register queue_t q;
2218 register thread_t thread;
2219 register int count;
2220 spl_t s;
2221 boolean_t result = FALSE;
2222
2223 s = splsched();
2224 simple_lock(&runq->lock);
2225 if ((count = runq->count) > 0) {
2226 q = runq->queues + runq->highq;
2227 while (count > 0) {
2228 queue_iterate(q, thread, thread_t, links) {
2229 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
2230 (thread->sched_mode & TH_MODE_TIMESHARE) ) {
2231 if (thread->sched_stamp != sched_tick) {
2232 /*
2233 * Stuck, save its id for later.
2234 */
2235 if (stuck_count == MAX_STUCK_THREADS) {
2236 /*
2237 * !@#$% No more room.
2238 */
2239 simple_unlock(&runq->lock);
2240 splx(s);
2241
2242 return (TRUE);
2243 }
2244
2245 /*
2246 * Inline version of thread_reference
2247 * XXX - lock ordering problem here:
2248 * thread locks should be taken before runq
2249 * locks: just try and get the thread's locks
2250 * and ignore this thread if we fail, we might
2251 * have better luck next time.
2252 */
2253 if (simple_lock_try(&thread->lock)) {
2254 thread->ref_count++;
2255 thread_unlock(thread);
2256 stuck_threads[stuck_count++] = thread;
2257 }
2258 else
2259 result = TRUE;
2260 }
2261 }
2262
2263 count--;
2264 }
2265
2266 q--;
2267 }
2268 }
2269 simple_unlock(&runq->lock);
2270 splx(s);
2271
2272 return (result);
2273}
2274
2275boolean_t thread_scan_enabled = TRUE;
2276
2277void
2278do_thread_scan(void)
2279{
2280 register boolean_t restart_needed = FALSE;
2281 register thread_t thread;
2282 register processor_set_t pset = &default_pset;
2283 register processor_t processor;
2284 spl_t s;
2285
2286 if (!thread_scan_enabled)
2287 return;
2288
2289 do {
2290 restart_needed = do_runq_scan(&pset->runq);
2291 if (!restart_needed) {
2292 simple_lock(&pset->processors_lock);
2293 processor = (processor_t)queue_first(&pset->processors);
2294 while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
2295 if ((restart_needed = do_runq_scan(&processor->runq)))
2296 break;
2297
2298 thread = processor->idle_thread;
2299 if (thread->sched_stamp != sched_tick) {
2300 if (stuck_count == MAX_STUCK_THREADS) {
2301 restart_needed = TRUE;
2302 break;
2303 }
2304
2305 stuck_threads[stuck_count++] = thread;
2306 }
2307
2308 processor = (processor_t)queue_next(&processor->processors);
2309 }
2310 simple_unlock(&pset->processors_lock);
2311 }
2312
2313 /*
2314 * Ok, we now have a collection of candidates -- fix them.
2315 */
2316 while (stuck_count > 0) {
2317 thread = stuck_threads[--stuck_count];
2318 stuck_threads[stuck_count] = THREAD_NULL;
2319 s = splsched();
2320 thread_lock(thread);
2321 if ( (thread->sched_mode & TH_MODE_TIMESHARE) ||
2322 (thread->state & TH_IDLE) ) {
2323 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
2324 thread->sched_stamp != sched_tick )
2325 update_priority(thread);
2326 }
2327 thread_unlock(thread);
2328 splx(s);
2329 if (!(thread->state & TH_IDLE))
2330 thread_deallocate(thread);
2331 }
2332
2333 } while (restart_needed);
2334}
2335
2336/*
2337 * Just in case someone doesn't use the macro
2338 */
2339#undef thread_wakeup
2340void
2341thread_wakeup(
2342 event_t x);
2343
2344void
2345thread_wakeup(
2346 event_t x)
2347{
2348 thread_wakeup_with_result(x, THREAD_AWAKENED);
2349}
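/*
 * Minimal usage sketch (not taken from this file; `some_object' is a
 * hypothetical wait event): a thread that blocked via
 *
 *	assert_wait((event_t)&some_object, THREAD_UNINT);
 *	thread_block((void (*)(void)) 0);
 *
 * is made runnable again when another thread calls
 *
 *	thread_wakeup((event_t)&some_object);
 *
 * which, whether through the macro or the function above, ends up in
 * thread_wakeup_with_result(x, THREAD_AWAKENED).
 */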
2350
2351#if DEBUG
2352
2353static boolean_t
2354 thread_runnable(
2355 thread_t thread)
2356 {
2357 return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
2358}
2359
2360void
2361dump_processor_set(
2362 processor_set_t ps)
2363{
2364 printf("processor_set: %08x\n",ps);
2365 printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
2366 ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count);
2367 printf("processors: %08x %08x, processor_count: 0x%x\n",
2368 ps->processors.next,ps->processors.prev,ps->processor_count);
2369 printf("tasks: %08x %08x, task_count: 0x%x\n",
2370 ps->tasks.next,ps->tasks.prev,ps->task_count);
2371 printf("threads: %08x %08x, thread_count: 0x%x\n",
2372 ps->threads.next,ps->threads.prev,ps->thread_count);
2373 printf("ref_count: 0x%x, active: %x\n",
2374 ps->ref_count,ps->active);
2375 printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self);
2376 printf("set_quanta: 0x%x\n", ps->set_quanta);
2377}
2378
2379#define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])
2380
2381void
2382dump_processor(
2383 processor_t p)
2384{
2385 char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
2386 "ASSIGN","SHUTDOWN"};
2387
2388 printf("processor: %08x\n",p);
2389 printf("processor_queue: %08x %08x\n",
2390 p->processor_queue.next,p->processor_queue.prev);
2391 printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
2392 processor_state(p->state), p->next_thread, p->idle_thread);
2393 printf("slice_quanta: %x\n", p->slice_quanta);
2394 printf("processor_set: %08x, processor_set_next: %08x\n",
2395 p->processor_set, p->processor_set_next);
2396 printf("processors: %08x %08x\n", p->processors.next,p->processors.prev);
2397 printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num);
2398}
2399
2400void
2401dump_run_queue_struct(
2402 run_queue_t rq)
2403{
2404 char dump_buf[80];
2405 int i;
2406
2407 for( i=0; i < NRQS; ) {
2408 int j;
2409
2410 printf("%6s",(i==0)?"runq:":"");
2411 for( j=0; (j<8) && (i < NRQS); j++,i++ ) {
2412 if( rq->queues[i].next == &rq->queues[i] )
2413 printf( " --------");
2414 else
2415 printf(" %08x",rq->queues[i].next);
2416 }
2417 printf("\n");
2418 }
2419 for( i=0; i < NRQBM; ) {
2420 register unsigned int mask;
2421 char *d=dump_buf;
2422
2423 mask = ~0;
2424 mask ^= (mask>>1);
2425
2426 do {
2427 *d++ = ((rq->bitmap[i]&mask)?'r':'e');
2428 mask >>=1;
2429 } while( mask );
2430 *d = '\0';
2431 printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf);
2432 i++;
2433 }
2434 printf("highq: 0x%x, count: %u\n", rq->highq, rq->count);
2435}
2436
2437void
2438dump_run_queues(
2439 run_queue_t runq)
2440{
2441 register queue_t q1;
2442 register int i;
2443 register queue_entry_t e;
2444
2445 q1 = runq->queues;
2446 for (i = 0; i < NRQS; i++) {
2447 if (q1->next != q1) {
2448 int t_cnt;
2449
2450 printf("[%u]",i);
2451 for (t_cnt=0, e = q1->next; e != q1; e = e->next) {
2452 printf("\t0x%08x",e);
2453 if( (t_cnt = (t_cnt + 1) % 4) == 0 )
2454 printf("\n");
2455 }
2456 if( t_cnt )
2457 printf("\n");
2458 }
2459 /* else
2460 printf("[%u]\t<empty>\n",i);
2461 */
2462 q1++;
2463 }
2464}
2465
2466void
2467checkrq(
2468 run_queue_t rq,
2469 char *msg)
2470{
2471 register queue_t q1;
2472 register int i, j;
2473 register queue_entry_t e;
2474 register int highq;
2475
2476 highq = -1;
2477 j = 0;
2478 q1 = rq->queues;
2479 for (i = MAXPRI; i >= 0; i--) {
2480 if (q1->next == q1) {
2481 if (q1->prev != q1) {
2482 panic("checkrq: empty at %s", msg);
2483 }
2484 }
2485 else {
2486 if (highq == -1)
2487 highq = i;
2488
2489 for (e = q1->next; e != q1; e = e->next) {
2490 j++;
2491 if (e->next->prev != e)
2492 panic("checkrq-2 at %s", msg);
2493 if (e->prev->next != e)
2494 panic("checkrq-3 at %s", msg);
2495 }
2496 }
2497 q1++;
2498 }
2499 if (j != rq->count)
2500 panic("checkrq: count wrong at %s", msg);
2501 if (rq->count != 0 && highq > rq->highq)
2502 panic("checkrq: highq wrong at %s", msg);
2503}
2504
2505void
2506thread_check(
2507 register thread_t thread,
2508 register run_queue_t rq)
2509{
2510 register int whichq = thread->sched_pri;
2511 register queue_entry_t queue, entry;
2512
2513 if (whichq < MINPRI || whichq > MAXPRI)
2514 panic("thread_check: bad pri");
2515
2516 if (whichq != thread->whichq)
2517 panic("thread_check: whichq");
2518
2519 queue = &rq->queues[whichq];
2520 entry = queue_first(queue);
2521 while (!queue_end(queue, entry)) {
2522 if (entry == (queue_entry_t)thread)
2523 return;
2524
2525 entry = queue_next(entry);
2526 }
2527
2528 panic("thread_check: not found");
2529}
2530
2531#endif /* DEBUG */
2532
2533#if MACH_KDB
2534#include <ddb/db_output.h>
2535#define printf kdbprintf
2536extern int db_indent;
2537void db_sched(void);
2538
2539void
2540db_sched(void)
2541{
2542 iprintf("Scheduling Statistics:\n");
2543 db_indent += 2;
2544 iprintf("Thread invocations: csw %d same %d\n",
2545 c_thread_invoke_csw, c_thread_invoke_same);
2546#if MACH_COUNTERS
2547 iprintf("Thread block: calls %d\n",
2548 c_thread_block_calls);
2549 iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
2550 c_idle_thread_handoff,
2551 c_idle_thread_block, no_dispatch_count);
2552 iprintf("Sched thread blocks: %d\n", c_sched_thread_block);
2553#endif /* MACH_COUNTERS */
2554 db_indent -= 2;
2555}
2556#endif /* MACH_KDB */