1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: sched_prim.c
54 * Author: Avadis Tevanian, Jr.
55 * Date: 1986
56 *
57 * Scheduling primitives
58 *
59 */
60
61 #include <debug.h>
62 #include <cpus.h>
63 #include <mach_kdb.h>
64 #include <simple_clock.h>
65 #include <power_save.h>
66 #include <task_swapper.h>
67
68 #include <ddb/db_output.h>
69 #include <mach/machine.h>
70 #include <machine/machine_routines.h>
71 #include <machine/sched_param.h>
72 #include <kern/ast.h>
73 #include <kern/clock.h>
74 #include <kern/counters.h>
75 #include <kern/cpu_number.h>
76 #include <kern/cpu_data.h>
77 #include <kern/etap_macros.h>
78 #include <kern/lock.h>
79 #include <kern/macro_help.h>
80 #include <kern/machine.h>
81 #include <kern/misc_protos.h>
82 #include <kern/processor.h>
83 #include <kern/queue.h>
84 #include <kern/sched.h>
85 #include <kern/sched_prim.h>
86 #include <kern/syscall_subr.h>
87 #include <kern/task.h>
88 #include <kern/thread.h>
89 #include <kern/thread_swap.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_map.h>
93 #include <mach/policy.h>
94 #include <mach/sync_policy.h>
95 #include <kern/sf.h>
96 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
97 #include <sys/kdebug.h>
98
99 #if TASK_SWAPPER
100 #include <kern/task_swap.h>
101 extern int task_swap_on;
102 #endif /* TASK_SWAPPER */
103
104 extern int hz;
105
106 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
107 int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
108
109 #define NO_KERNEL_PREEMPT 0
110 #define KERNEL_PREEMPT 1
111 int kernel_preemption_mode = KERNEL_PREEMPT;
112
113 int min_quantum;
114 natural_t min_quantum_ms;
115
116 unsigned sched_tick;
117
118 #if SIMPLE_CLOCK
119 int sched_usec;
120 #endif /* SIMPLE_CLOCK */
121
122 /* Forwards */
123 void thread_continue(thread_t);
124
125 void wait_queues_init(void);
126
127 void set_pri(
128 thread_t thread,
129 int pri,
130 boolean_t resched);
131
132 thread_t choose_pset_thread(
133 processor_t myprocessor,
134 processor_set_t pset);
135
136 thread_t choose_thread(
137 processor_t myprocessor);
138
139 int run_queue_enqueue(
140 run_queue_t runq,
141 thread_t thread,
142 boolean_t tail);
143
144 void idle_thread_continue(void);
145 void do_thread_scan(void);
146
147 void clear_wait_internal(
148 thread_t thread,
149 int result);
150
151 #if DEBUG
152 void dump_run_queues(
153 run_queue_t rq);
154 void dump_run_queue_struct(
155 run_queue_t rq);
156 void dump_processor(
157 processor_t p);
158 void dump_processor_set(
159 processor_set_t ps);
160
161 void checkrq(
162 run_queue_t rq,
163 char *msg);
164
165 void thread_check(
166 thread_t thread,
167 run_queue_t runq);
168 #endif /*DEBUG*/
169
170 boolean_t thread_runnable(
171 thread_t thread);
172
173 /*
174 * State machine
175 *
176 * states are combinations of:
177 * R running
178 * W waiting (or on wait queue)
179 * N non-interruptible
180 * O swapped out
181 * I being swapped in
182 *
183 *  init     action
184 *           assert_wait    thread_block   clear_wait    swapout   swapin
185 *
186 *  R        RW, RWN        R; setrun      -             -
187 *  RN       RWN            RN; setrun     -             -
188 *
189 *  RW                      W              R             -
190 *  RWN                     WN             RN            -
191 *
192 *  W                                      R; setrun     WO
193 *  WN                                     RN; setrun    -
194 *
195 *  RO                                     -             -         R
196 *
197 */
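
/*
 * Editorial note (not part of the original source): reading the table
 * above, a running thread R that calls assert_wait() becomes RW; if it
 * then calls thread_block() it becomes W (actually blocked).  A later
 * clear_wait() -- typically via thread_wakeup() -- returns it to R, and
 * setrun puts it back on a run queue.  The N variants follow the same
 * path but cannot be interrupted out of the wait.
 */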
198
199 /*
200 * Waiting protocols and implementation:
201 *
202 * Each thread may be waiting for exactly one event; this event
203 * is set using assert_wait(). That thread may be awakened either
204 * by performing a thread_wakeup_prim() on its event,
205 * or by directly waking that thread up with clear_wait().
206 *
207 * The implementation of wait events uses a hash table. Each
208 * bucket is a queue of threads having the same hash function
209 * value; the chain for the queue (linked list) is the run queue
210 * field. [It is not possible to be waiting and runnable at the
211 * same time.]
212 *
213 * Locks on both the thread and on the hash buckets govern the
214 * wait event field and the queue chain field. Because wakeup
215 * operations only have the event as an argument, the event hash
216 * bucket must be locked before any thread.
217 *
218 * Scheduling operations may also occur at interrupt level; therefore,
219 * interrupts below splsched() must be prevented when holding
220 * thread or hash bucket locks.
221 *
222 * The wait event hash table declarations are as follows:
223 */
224
225 #define NUMQUEUES 59
226
227 struct wait_queue wait_queues[NUMQUEUES];
228
229 #define wait_hash(event) \
230 ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
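
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * typical use of the waiting protocol described above.  The waiter
 * hashes its event into one of the NUMQUEUES buckets via assert_wait()
 * and then blocks; the waker passes the same event to thread_wakeup(),
 * which hashes to the same bucket and scans it.  The event address
 * (&object->state) is hypothetical.
 *
 *	-- waiter --
 *	assert_wait((event_t)&object->state, THREAD_UNINT);
 *	thread_block((void (*)(void)) 0);
 *
 *	-- waker --
 *	thread_wakeup((event_t)&object->state);
 *
 * Because a wakeup carries only the event, the hash bucket is locked
 * before any thread lock, and both are taken at splsched().
 */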
231
232 void
233 sched_init(void)
234 {
235 /*
236 * Calculate the minimum quantum
237 * in ticks.
238 */
239 if (default_preemption_rate < 1)
240 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
241 min_quantum = hz / default_preemption_rate;
242
243 /*
244 * Round up result (4/5) to an
245 * integral number of ticks.
246 */
247 if (((hz * 10) / default_preemption_rate) - (min_quantum * 10) >= 5)
248 min_quantum++;
249 if (min_quantum < 1)
250 min_quantum = 1;
251
252 min_quantum_ms = (1000 / hz) * min_quantum;
253
254 printf("scheduling quantum is %d ms\n", min_quantum_ms);
255
256 wait_queues_init();
257 pset_sys_bootstrap(); /* initialize processor mgmt. */
258 processor_action();
259 sched_tick = 0;
260 #if SIMPLE_CLOCK
261 sched_usec = 0;
262 #endif /* SIMPLE_CLOCK */
263 ast_init();
264 sf_init();
265 }
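
/*
 * Worked example of the calculation above (editorial note): with hz = 100
 * and default_preemption_rate = 100, min_quantum = 100/100 = 1 tick; the
 * rounding test gives (1000/100) - 10 = 0, which is < 5, so there is no
 * round-up, and min_quantum_ms = (1000/100) * 1 = 10 ms.  With a rate of
 * 30/s, min_quantum = 100/30 = 3 ticks, (1000/30) - 30 = 3 is again < 5,
 * and the quantum comes out to 30 ms.
 */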
266
267 void
268 wait_queues_init(void)
269 {
270 register int i;
271
272 for (i = 0; i < NUMQUEUES; i++) {
273 wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
274 }
275 }
276
277 /*
278 * Thread timeout routine, called when timer expires.
279 */
280 void
281 thread_timer_expire(
282 timer_call_param_t p0,
283 timer_call_param_t p1)
284 {
285 thread_t thread = p0;
286 spl_t s;
287
288 s = splsched();
289 wake_lock(thread);
290 if ( thread->wait_timer_is_set &&
291 !timer_call_is_delayed(&thread->wait_timer, NULL) ) {
292 thread->wait_timer_active--;
293 thread->wait_timer_is_set = FALSE;
294 thread_lock(thread);
295 if (thread->active)
296 clear_wait_internal(thread, THREAD_TIMED_OUT);
297 thread_unlock(thread);
298 }
299 else
300 if (--thread->wait_timer_active == 0)
301 thread_wakeup_one(&thread->wait_timer_active);
302 wake_unlock(thread);
303 splx(s);
304 }
305
306 /*
307 * thread_set_timer:
308 *
309 * Set a timer for the current thread, if the thread
310 * is ready to wait. Must be called between assert_wait()
311 * and thread_block().
312 */
313 void
314 thread_set_timer(
315 natural_t interval,
316 natural_t scale_factor)
317 {
318 thread_t thread = current_thread();
319 AbsoluteTime deadline;
320 spl_t s;
321
322 s = splsched();
323 wake_lock(thread);
324 thread_lock(thread);
325 if ((thread->state & TH_WAIT) != 0) {
326 clock_interval_to_deadline(interval, scale_factor, &deadline);
327 timer_call_enter(&thread->wait_timer, deadline);
328 assert(!thread->wait_timer_is_set);
329 thread->wait_timer_active++;
330 thread->wait_timer_is_set = TRUE;
331 }
332 thread_unlock(thread);
333 wake_unlock(thread);
334 splx(s);
335 }
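
/*
 * Illustrative sketch (editorial, not part of the original source): a
 * timed wait uses the ordering required by the comment above --
 * assert_wait(), then thread_set_timer(), then thread_block().  If the
 * wakeup was not the timeout, the caller cancels the timer.  The event
 * and the 10 ms interval are hypothetical; the scale factor matches the
 * one used by assert_wait_timeout() below.
 *
 *	assert_wait((event_t)&object->state, THREAD_UNINT);
 *	thread_set_timer(10, 1000*NSEC_PER_USEC);
 *	thread_block((void (*)(void)) 0);
 *	if (current_thread()->wait_result != THREAD_TIMED_OUT)
 *		thread_cancel_timer();
 */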
336
337 void
338 thread_set_timer_deadline(
339 AbsoluteTime deadline)
340 {
341 thread_t thread = current_thread();
342 spl_t s;
343
344 s = splsched();
345 wake_lock(thread);
346 thread_lock(thread);
347 if ((thread->state & TH_WAIT) != 0) {
348 timer_call_enter(&thread->wait_timer, deadline);
349 assert(!thread->wait_timer_is_set);
350 thread->wait_timer_active++;
351 thread->wait_timer_is_set = TRUE;
352 }
353 thread_unlock(thread);
354 wake_unlock(thread);
355 splx(s);
356 }
357
358 void
359 thread_cancel_timer(void)
360 {
361 thread_t thread = current_thread();
362 spl_t s;
363
364 s = splsched();
365 wake_lock(thread);
366 if (thread->wait_timer_is_set) {
367 if (timer_call_cancel(&thread->wait_timer))
368 thread->wait_timer_active--;
369 thread->wait_timer_is_set = FALSE;
370 }
371 wake_unlock(thread);
372 splx(s);
373 }
374
375 /*
376 * thread_depress_timeout:
377 *
378 * Timeout routine for priority depression.
379 */
380 void
381 thread_depress_timeout(
382 thread_call_param_t p0,
383 thread_call_param_t p1)
384 {
385 thread_t thread = p0;
386 sched_policy_t *policy;
387 spl_t s;
388
389 s = splsched();
390 thread_lock(thread);
391 policy = policy_id_to_sched_policy(thread->policy);
392 thread_unlock(thread);
393 splx(s);
394
395 if (policy != SCHED_POLICY_NULL)
396 policy->sp_ops.sp_thread_depress_timeout(policy, thread);
397
398 thread_deallocate(thread);
399 }
400
401 /*
402 * Set up thread timeout element when thread is created.
403 */
404 void
405 thread_timer_setup(
406 thread_t thread)
407 {
408 timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
409 thread->wait_timer_is_set = FALSE;
410 thread->wait_timer_active = 1;
411 thread->ref_count++;
412
413 thread_call_setup(&thread->depress_timer, thread_depress_timeout, thread);
414 }
415
416 void
417 thread_timer_terminate(void)
418 {
419 thread_t thread = current_thread();
420 spl_t s;
421
422 s = splsched();
423 wake_lock(thread);
424 if (thread->wait_timer_is_set) {
425 if (timer_call_cancel(&thread->wait_timer))
426 thread->wait_timer_active--;
427 thread->wait_timer_is_set = FALSE;
428 }
429
430 thread->wait_timer_active--;
431
432 while (thread->wait_timer_active > 0) {
433 assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
434 wake_unlock(thread);
435 splx(s);
436
437 thread_block((void (*)(void)) 0);
438
439 s = splsched();
440 wake_lock(thread);
441 }
442
443 wake_unlock(thread);
444 splx(s);
445
446 thread_deallocate(thread);
447 }
448
449 /*
450 * Routine: thread_go_locked
451 * Purpose:
452 * Start a thread running.
453 * Conditions:
454 * thread lock held, IPC locks may be held.
455 * thread must have been pulled from wait queue under same lock hold.
456 */
457 void
458 thread_go_locked(
459 thread_t thread,
460 int result)
461 {
462 int state;
463 sched_policy_t *policy;
464 sf_return_t sfr;
465
466 assert(thread->at_safe_point == FALSE);
467 assert(thread->wait_event == NO_EVENT);
468 assert(thread->wait_queue == WAIT_QUEUE_NULL);
469
470 if (thread->state & TH_WAIT) {
471
472 thread->state &= ~(TH_WAIT|TH_UNINT);
473 if (!(thread->state & TH_RUN)) {
474 thread->state |= TH_RUN;
475 #if THREAD_SWAPPER
476 if (thread->state & TH_SWAPPED_OUT)
477 thread_swapin(thread->top_act, FALSE);
478 else
479 #endif /* THREAD_SWAPPER */
480 {
481 policy = &sched_policy[thread->policy];
482 sfr = policy->sp_ops.sp_thread_unblock(policy, thread);
483 assert(sfr == SF_SUCCESS);
484 }
485 }
486 thread->wait_result = result;
487 }
488
489
490 /*
491 * The next few lines are a major hack. Hopefully this will get us
492 * around all of the scheduling framework hooha. We can't call
493 * sp_thread_unblock yet because we could still be finishing up the
494 * durn two stage block on another processor and thread_setrun
495 * could be called by s_t_u and we'll really be messed up then.
496 */
497 /* Don't mess with this if we are still swapped out */
498 if (!(thread->state & TH_SWAPPED_OUT))
499 thread->sp_state = MK_SP_RUNNABLE;
500
501 }
502
503 void
504 thread_mark_wait_locked(
505 thread_t thread,
506 int interruptible)
507 {
508
509 assert(thread == current_thread());
510
511 thread->wait_result = -1; /* JMM - Needed for non-assert kernel */
512 thread->state |= (interruptible && thread->interruptible) ?
513 TH_WAIT : (TH_WAIT | TH_UNINT);
514 thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible);
515 thread->sleep_stamp = sched_tick;
516 }
517
518
519
520 /*
521 * Routine: assert_wait_timeout
522 * Purpose:
523 * Assert that the thread intends to block,
524 * waiting for a timeout (no user known event).
525 */
526 unsigned int assert_wait_timeout_event;
527
528 void
529 assert_wait_timeout(
530 mach_msg_timeout_t msecs,
531 int interruptible)
532 {
533 spl_t s;
534
535 assert_wait((event_t)&assert_wait_timeout_event, interruptible);
536 thread_set_timer(msecs, 1000*NSEC_PER_USEC);
537 }
538
539 /*
540 * Check to see if an assert wait is possible, without actually doing one.
541 * This is used by debug code in locks and elsewhere to verify that it is
542 * always OK to block when trying to take a blocking lock (since waiting
543 * for the actual assert_wait to catch the case may make it hard to detect
544 * this case).
545 */
546 boolean_t
547 assert_wait_possible(void)
548 {
549
550 thread_t thread;
551 extern unsigned int debug_mode;
552
553 #if DEBUG
554 if(debug_mode) return TRUE; /* Always succeed in debug mode */
555 #endif
556
557 thread = current_thread();
558
559 return (thread == NULL || wait_queue_assert_possible(thread));
560 }
561
562 /*
563 * assert_wait:
564 *
565 * Assert that the current thread is about to go to
566 * sleep until the specified event occurs.
567 */
568 void
569 assert_wait(
570 event_t event,
571 int interruptible)
572 {
573 register wait_queue_t wq;
574 register int index;
575
576 assert(event != NO_EVENT);
577 assert(assert_wait_possible());
578
579 index = wait_hash(event);
580 wq = &wait_queues[index];
581 wait_queue_assert_wait(wq,
582 event,
583 interruptible);
584 }
585
586
587 /*
588 * thread_[un]stop(thread)
589 * Once a thread has blocked interruptibly (via assert_wait) prevent
590 * it from running until thread_unstop.
591 *
592 * If someone else has already stopped the thread, wait for the
593 * stop to be cleared, and then stop it again.
594 *
595 * Return FALSE if interrupted.
596 *
597 * NOTE: thread_hold/thread_suspend should be called on the activation
598 * before calling thread_stop. TH_SUSP is only recognized when
599 * a thread blocks and only prevents clear_wait/thread_wakeup
600 * from restarting an interruptible wait. The wake_active flag is
601 * used to indicate that someone is waiting on the thread.
602 */
603 boolean_t
604 thread_stop(
605 thread_t thread)
606 {
607 spl_t s;
608
609 s = splsched();
610 wake_lock(thread);
611
612 while (thread->state & TH_SUSP) {
613 thread->wake_active = TRUE;
614 assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
615 wake_unlock(thread);
616 splx(s);
617
618 thread_block((void (*)(void)) 0);
619 if (current_thread()->wait_result != THREAD_AWAKENED)
620 return (FALSE);
621
622 s = splsched();
623 wake_lock(thread);
624 }
625 thread_lock(thread);
626 thread->state |= TH_SUSP;
627 thread_unlock(thread);
628
629 wake_unlock(thread);
630 splx(s);
631
632 return (TRUE);
633 }
634
635 /*
636 * Clear TH_SUSP and if the thread has been stopped and is now runnable,
637 * put it back on the run queue.
638 */
639 void
640 thread_unstop(
641 thread_t thread)
642 {
643 sched_policy_t *policy;
644 sf_return_t sfr;
645 spl_t s;
646
647 s = splsched();
648 wake_lock(thread);
649 thread_lock(thread);
650
651 if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP/*|TH_UNINT*/)) == TH_SUSP) {
652 thread->state = (thread->state & ~TH_SUSP) | TH_RUN;
653 #if THREAD_SWAPPER
654 if (thread->state & TH_SWAPPED_OUT)
655 thread_swapin(thread->top_act, FALSE);
656 else
657 #endif /* THREAD_SWAPPER */
658 {
659 policy = &sched_policy[thread->policy];
660 sfr = policy->sp_ops.sp_thread_unblock(policy, thread);
661 assert(sfr == SF_SUCCESS);
662 }
663 }
664 else
665 if (thread->state & TH_SUSP) {
666 thread->state &= ~TH_SUSP;
667
668 if (thread->wake_active) {
669 thread->wake_active = FALSE;
670 thread_unlock(thread);
671 wake_unlock(thread);
672 splx(s);
673 thread_wakeup((event_t)&thread->wake_active);
674
675 return;
676 }
677 }
678
679 thread_unlock(thread);
680 wake_unlock(thread);
681 splx(s);
682 }
683
684 /*
685 * Wait for the thread's RUN bit to clear
686 */
687 boolean_t
688 thread_wait(
689 thread_t thread)
690 {
691 spl_t s;
692
693 s = splsched();
694 wake_lock(thread);
695
696 while (thread->state & (TH_RUN/*|TH_UNINT*/)) {
697 if (thread->last_processor != PROCESSOR_NULL)
698 cause_ast_check(thread->last_processor);
699
700 thread->wake_active = TRUE;
701 assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE);
702 wake_unlock(thread);
703 splx(s);
704
705 thread_block((void (*)(void))0);
706 if (current_thread()->wait_result != THREAD_AWAKENED)
707 return (FALSE);
708
709 s = splsched();
710 wake_lock(thread);
711 }
712
713 wake_unlock(thread);
714 splx(s);
715
716 return (TRUE);
717 }
718
719
720 /*
721 * thread_stop_wait(thread)
722 * Stop the thread then wait for it to block interruptibly
723 */
724 boolean_t
725 thread_stop_wait(
726 thread_t thread)
727 {
728 if (thread_stop(thread)) {
729 if (thread_wait(thread))
730 return (TRUE);
731
732 thread_unstop(thread);
733 }
734
735 return (FALSE);
736 }
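
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * intended pairing of the stop/unstop routines, per the note above that
 * thread_hold/thread_suspend should be applied to the activation first.
 * The target `thread` is hypothetical; thread_hold() is the activation-
 * level hold primitive mentioned in that note, and thread_release() is
 * assumed to be its counterpart.
 *
 *	thread_hold(thread->top_act);
 *	if (thread_stop_wait(thread)) {
 *		... inspect or modify the stopped thread ...
 *		thread_unstop(thread);
 *	}
 *	thread_release(thread->top_act);
 *
 * A FALSE return from thread_stop_wait() means the stop was interrupted;
 * no TH_SUSP is left set by this call in that case.
 */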
737
738
739 /*
740 * Routine: clear_wait_internal
741 *
742 * Clear the wait condition for the specified thread.
743 * Start the thread executing if that is appropriate.
744 * Arguments:
745 * thread thread to awaken
746 * result Wakeup result the thread should see
747 * Conditions:
748 * At splsched
749 * the thread is locked.
750 */
751 void
752 clear_wait_internal(
753 thread_t thread,
754 int result)
755 {
756 /*
757 * If the thread isn't in a wait queue, just set it running. Otherwise,
758 * try to remove it from the queue and, if successful, then set it
759 * running. NEVER interrupt an uninterruptible thread.
760 */
761 if (!((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))) {
762 if (wait_queue_assert_possible(thread) ||
763 (wait_queue_remove(thread) == KERN_SUCCESS)) {
764 thread_go_locked(thread, result);
765 }
766 }
767 }
768
769
770 /*
771 * clear_wait:
772 *
773 * Clear the wait condition for the specified thread. Start the thread
774 * executing if that is appropriate.
775 *
776 * parameters:
777 * thread thread to awaken
778 * result Wakeup result the thread should see
779 */
780 void
781 clear_wait(
782 thread_t thread,
783 int result)
784 {
785 spl_t s;
786
787 s = splsched();
788 thread_lock(thread);
789 clear_wait_internal(thread, result);
790 thread_unlock(thread);
791 splx(s);
792 }
793
794
795 /*
796 * thread_wakeup_prim:
797 *
798 * Common routine for thread_wakeup, thread_wakeup_with_result,
799 * and thread_wakeup_one.
800 *
801 */
802 void
803 thread_wakeup_prim(
804 event_t event,
805 boolean_t one_thread,
806 int result)
807 {
808 register wait_queue_t wq;
809 register int index;
810
811 index = wait_hash(event);
812 wq = &wait_queues[index];
813 if (one_thread)
814 wait_queue_wakeup_one(wq, event, result);
815 else
816 wait_queue_wakeup_all(wq, event, result);
817 }
818
819 /*
820 * thread_bind:
821 *
822 * Force a thread to execute on the specified processor.
823 * If the thread is currently executing, it may wait until its
824 * time slice is up before switching onto the specified processor.
825 *
826 * A processor of PROCESSOR_NULL causes the thread to be unbound.
827 * xxx - DO NOT export this to users.
828 */
829 void
830 thread_bind(
831 register thread_t thread,
832 processor_t processor)
833 {
834 spl_t s;
835
836 s = splsched();
837 thread_lock(thread);
838 thread_bind_locked(thread, processor);
839 thread_unlock(thread);
840 splx(s);
841 }
842
843 /*
844 * Select a thread for this processor (the current processor) to run.
845 * May select the current thread, which must already be locked.
846 */
847 thread_t
848 thread_select(
849 register processor_t myprocessor)
850 {
851 register thread_t thread;
852 processor_set_t pset;
853 register run_queue_t runq = &myprocessor->runq;
854 boolean_t other_runnable;
855 sched_policy_t *policy;
856
857 /*
858 * Check for other non-idle runnable threads.
859 */
860 myprocessor->first_quantum = TRUE;
861 pset = myprocessor->processor_set;
862 thread = current_thread();
863
864 #if 0 /* CHECKME! */
865 thread->unconsumed_quantum = myprocessor->quantum;
866 #endif
867
868 simple_lock(&runq->lock);
869 simple_lock(&pset->runq.lock);
870
871 other_runnable = runq->count > 0 || pset->runq.count > 0;
872
873 if ( thread->state == TH_RUN &&
874 (!other_runnable ||
875 (runq->highq < thread->sched_pri &&
876 pset->runq.highq < thread->sched_pri)) &&
877 thread->processor_set == pset &&
878 (thread->bound_processor == PROCESSOR_NULL ||
879 thread->bound_processor == myprocessor) ) {
880
881 /* I am the highest priority runnable (non-idle) thread */
882 simple_unlock(&pset->runq.lock);
883 simple_unlock(&runq->lock);
884
885 /* Update the thread's meta-priority */
886 policy = policy_id_to_sched_policy(thread->policy);
887 assert(policy != SCHED_POLICY_NULL);
888 (void)policy->sp_ops.sp_thread_update_mpri(policy, thread);
889 }
890 else
891 if (other_runnable) {
892 simple_unlock(&pset->runq.lock);
893 simple_unlock(&runq->lock);
894 thread = choose_thread(myprocessor);
895 }
896 else {
897 simple_unlock(&pset->runq.lock);
898 simple_unlock(&runq->lock);
899
900 /*
901 * Nothing is runnable, so set this processor idle if it
902 * was running. If it was in an assignment or shutdown,
903 * leave it alone. Return its idle thread.
904 */
905 simple_lock(&pset->idle_lock);
906 if (myprocessor->state == PROCESSOR_RUNNING) {
907 myprocessor->state = PROCESSOR_IDLE;
908 /*
909 * XXX Until it goes away, put master on end of queue, others
910 * XXX on front so master gets used last.
911 */
912 if (myprocessor == master_processor)
913 queue_enter(&(pset->idle_queue), myprocessor,
914 processor_t, processor_queue);
915 else
916 queue_enter_first(&(pset->idle_queue), myprocessor,
917 processor_t, processor_queue);
918
919 pset->idle_count++;
920 }
921 simple_unlock(&pset->idle_lock);
922
923 thread = myprocessor->idle_thread;
924 }
925
926 return (thread);
927 }
928
929
930 /*
931 * Stop running the current thread and start running the new thread.
932 * If continuation is non-zero, and the current thread is blocked,
933 * then it will resume by executing continuation on a new stack.
934 * Returns TRUE if the hand-off succeeds.
935 * The reason parameter == AST_QUANTUM if the thread blocked
936 * because its quantum expired.
937 * Assumes splsched.
938 */
939
940
941 static thread_t
942 __current_thread(void)
943 {
944 return (current_thread());
945 }
946
947 boolean_t
948 thread_invoke(
949 register thread_t old_thread,
950 register thread_t new_thread,
951 int reason,
952 void (*continuation)(void))
953 {
954 sched_policy_t *policy;
955 sf_return_t sfr;
956 void (*lcont)(void);
957
958 /*
959 * Mark thread interruptible.
960 */
961 thread_lock(new_thread);
962 new_thread->state &= ~TH_UNINT;
963
964 if (cpu_data[cpu_number()].preemption_level != 1)
965 panic("thread_invoke: preemption_level %d\n",
966 cpu_data[cpu_number()].preemption_level);
967
968
969 assert(thread_runnable(new_thread));
970
971 assert(old_thread->continuation == (void (*)(void))0);
972
973 if ((old_thread->sched_mode & TH_MODE_REALTIME) && (!old_thread->stack_privilege)) {
974 old_thread->stack_privilege = old_thread->kernel_stack;
975 }
976
977 if (continuation != (void (*)()) 0) {
978 switch (new_thread->state & TH_STACK_STATE) {
979 case TH_STACK_HANDOFF:
980
981 /*
982 * If the old thread has stack privilege, we can't give
983 * his stack away. So go and get him one and treat this
984 * as a traditional context switch.
985 */
986 if (old_thread->stack_privilege == current_stack())
987 goto get_new_stack;
988
989 /*
990 * Make the whole handoff/dispatch atomic to match the
991 * non-handoff case.
992 */
993 disable_preemption();
994
995 /*
996 * Set up ast context of new thread and switch to its timer.
997 */
998 new_thread->state &= ~(TH_STACK_HANDOFF|TH_UNINT);
999 new_thread->last_processor = current_processor();
1000 ast_context(new_thread->top_act, cpu_number());
1001 timer_switch(&new_thread->system_timer);
1002 thread_unlock(new_thread);
1003
1004 old_thread->continuation = continuation;
1005 stack_handoff(old_thread, new_thread);
1006
1007 wake_lock(old_thread);
1008 thread_lock(old_thread);
1009 act_machine_sv_free(old_thread->top_act);
1010
1011 /*
1012 * inline thread_dispatch but don't free stack
1013 */
1014
1015 switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
1016 sched_policy_t *policy;
1017 sf_return_t sfr;
1018
1019 case TH_RUN | TH_UNINT:
1020 case TH_RUN:
1021 /*
1022 * No reason to stop. Put back on a run queue.
1023 */
1024 old_thread->state |= TH_STACK_HANDOFF;
1025
1026 /* Get pointer to scheduling policy "object" */
1027 policy = &sched_policy[old_thread->policy];
1028
1029 /* Leave enqueueing thread up to scheduling policy */
1030 sfr = policy->sp_ops.sp_thread_dispatch(policy, old_thread);
1031 assert(sfr == SF_SUCCESS);
1032 break;
1033
1034 case TH_RUN | TH_WAIT | TH_UNINT:
1035 case TH_RUN | TH_WAIT:
1036 old_thread->sleep_stamp = sched_tick;
1037 /* fallthrough */
1038
1039 case TH_WAIT: /* this happens! */
1040 /*
1041 * Waiting
1042 */
1043 old_thread->state |= TH_STACK_HANDOFF;
1044 old_thread->state &= ~TH_RUN;
1045 if (old_thread->state & TH_TERMINATE)
1046 thread_reaper_enqueue(old_thread);
1047
1048 if (old_thread->wake_active) {
1049 old_thread->wake_active = FALSE;
1050 thread_unlock(old_thread);
1051 wake_unlock(old_thread);
1052 thread_wakeup((event_t)&old_thread->wake_active);
1053 wake_lock(old_thread);
1054 thread_lock(old_thread);
1055 }
1056 break;
1057
1058 case TH_RUN | TH_IDLE:
1059 /*
1060 * Drop idle thread -- it is already in
1061 * idle_thread_array.
1062 */
1063 old_thread->state |= TH_STACK_HANDOFF;
1064 break;
1065
1066 default:
1067 panic("State 0x%x \n",old_thread->state);
1068 }
1069
1070 /* Get pointer to scheduling policy "object" */
1071 policy = &sched_policy[old_thread->policy];
1072
1073 /* Indicate to sched policy that old thread has stopped execution */
1074 /*** ??? maybe use a macro -- rkc, 1/4/96 ***/
1075 sfr = policy->sp_ops.sp_thread_done(policy, old_thread);
1076 assert(sfr == SF_SUCCESS);
1077 thread_unlock(old_thread);
1078 wake_unlock(old_thread);
1079 thread_lock(new_thread);
1080
1081 assert(thread_runnable(new_thread));
1082
1083 /* Get pointer to scheduling policy "object" */
1084 policy = &sched_policy[new_thread->policy];
1085
1086 /* Indicate to sched policy that new thread has started execution */
1087 /*** ??? maybe use a macro ***/
1088 sfr = policy->sp_ops.sp_thread_begin(policy, new_thread);
1089 assert(sfr == SF_SUCCESS);
1090
1091 lcont = new_thread->continuation;
1092 new_thread->continuation = (void(*)(void))0;
1093
1094 thread_unlock(new_thread);
1095 enable_preemption();
1096
1097 counter_always(c_thread_invoke_hits++);
1098
1099 if (new_thread->funnel_state & TH_FN_REFUNNEL) {
1100 kern_return_t save_wait_result;
1101 new_thread->funnel_state = 0;
1102 save_wait_result = new_thread->wait_result;
1103 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
1104 //mutex_lock(new_thread->funnel_lock);
1105 funnel_lock(new_thread->funnel_lock);
1106 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0);
1107 new_thread->funnel_state = TH_FN_OWNED;
1108 new_thread->wait_result = save_wait_result;
1109 }
1110 (void) spllo();
1111
1112 assert(lcont);
1113 call_continuation(lcont);
1114 /*NOTREACHED*/
1115 return TRUE;
1116
1117 case TH_STACK_COMING_IN:
1118 /*
1119 * waiting for a stack
1120 */
1121 thread_swapin(new_thread);
1122 thread_unlock(new_thread);
1123 counter_always(c_thread_invoke_misses++);
1124 return FALSE;
1125
1126 case 0:
1127 /*
1128 * already has a stack - can't handoff
1129 */
1130 if (new_thread == old_thread) {
1131
1132 /* same thread but with continuation */
1133 counter(++c_thread_invoke_same);
1134 thread_unlock(new_thread);
1135
1136 if (old_thread->funnel_state & TH_FN_REFUNNEL) {
1137 kern_return_t save_wait_result;
1138
1139 old_thread->funnel_state = 0;
1140 save_wait_result = old_thread->wait_result;
1141 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
1142 funnel_lock(old_thread->funnel_lock);
1143 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
1144 old_thread->funnel_state = TH_FN_OWNED;
1145 old_thread->wait_result = save_wait_result;
1146 }
1147 (void) spllo();
1148 call_continuation(continuation);
1149 /*NOTREACHED*/
1150 }
1151 break;
1152 }
1153 } else {
1154 /*
1155 * check that the new thread has a stack
1156 */
1157 if (new_thread->state & TH_STACK_STATE) {
1158 get_new_stack:
1159 /* has no stack. if not already waiting for one try to get one */
1160 if ((new_thread->state & TH_STACK_COMING_IN) ||
1161 /* not already waiting. nonblocking try to get one */
1162 !stack_alloc_try(new_thread, thread_continue))
1163 {
1164 /* couldn't get one. schedule new thread to get a stack and
1165 return failure so we can try another thread. */
1166 thread_swapin(new_thread);
1167 thread_unlock(new_thread);
1168 counter_always(c_thread_invoke_misses++);
1169 return FALSE;
1170 }
1171 } else if (old_thread == new_thread) {
1172 counter(++c_thread_invoke_same);
1173 thread_unlock(new_thread);
1174 return TRUE;
1175 }
1176
1177 /* new thread now has a stack. it has been set up to resume in
1178 thread_continue so it can dispatch the old thread, deal with
1179 funnelling and then go to its true continuation point */
1180 }
1181
1182 new_thread->state &= ~(TH_STACK_HANDOFF | TH_UNINT);
1183
1184 /*
1185 * Set up ast context of new thread and switch to its timer.
1186 */
1187 new_thread->last_processor = current_processor();
1188 ast_context(new_thread->top_act, cpu_number());
1189 timer_switch(&new_thread->system_timer);
1190 assert(thread_runnable(new_thread));
1191
1192 /*
1193 * N.B. On return from the call to switch_context, 'old_thread'
1194 * points at the thread that yielded to us. Unfortunately, at
1195 * this point, there are no simple_locks held, so if we are preempted
1196 * before the call to thread_dispatch blocks preemption, it is
1197 * possible for 'old_thread' to terminate, leaving us with a
1198 * stale thread pointer.
1199 */
1200 disable_preemption();
1201
1202 thread_unlock(new_thread);
1203
1204 counter_always(c_thread_invoke_csw++);
1205 current_task()->csw++;
1206
1207
1208 thread_lock(old_thread);
1209 old_thread->reason = reason;
1210 assert(old_thread->runq == RUN_QUEUE_NULL);
1211
1212 if (continuation != (void (*)(void))0)
1213 old_thread->continuation = continuation;
1214
1215 /* Indicate to sched policy that old thread has stopped execution */
1216 policy = &sched_policy[old_thread->policy];
1217 /*** ??? maybe use a macro -- ***/
1218 sfr = policy->sp_ops.sp_thread_done(policy, old_thread);
1219 assert(sfr == SF_SUCCESS);
1220 thread_unlock(old_thread);
1221
1222 /*
1223 * switch_context is machine-dependent. It does the
1224 * machine-dependent components of a context-switch, like
1225 * changing address spaces. It updates active_threads.
1226 */
1227 old_thread = switch_context(old_thread, continuation, new_thread);
1228
1229 /* Now on new thread's stack. Set a local variable to refer to it. */
1230 new_thread = __current_thread();
1231 assert(old_thread != new_thread);
1232
1233 assert(thread_runnable(new_thread));
1234
1235 thread_lock(new_thread);
1236 assert(thread_runnable(new_thread));
1237 /* Indicate to sched policy that new thread has started execution */
1238 policy = &sched_policy[new_thread->policy];
1239 /*** ??? maybe use a macro -- rkc, 1/4/96 ***/
1240 sfr = policy->sp_ops.sp_thread_begin(policy, new_thread);
1241 assert(sfr == SF_SUCCESS);
1242 thread_unlock(new_thread);
1243
1244 /*
1245 * We're back. Now old_thread is the thread that resumed
1246 * us, and we have to dispatch it.
1247 */
1248 /* CHECKME! */
1249 // Code from OSF in Grenoble deleted the following fields. They were
1250 // used in HPPA and 386 code, but not in the PPC for other than
1251 // just setting and resetting. They didn't delete these lines from
1252 // the MACH_RT builds, though, causing compile errors. I'm going
1253 // to make a wild guess and assume we can just delete these.
1254 #if 0
1255 if (old_thread->preempt == TH_NOT_PREEMPTABLE) {
1256 /*
1257 * Mark that we have been really preempted
1258 */
1259 old_thread->preempt = TH_PREEMPTED;
1260 }
1261 #endif
1262 thread_dispatch(old_thread);
1263 enable_preemption();
1264
1265 /* if we get here and 'continuation' is set that means the
1266 * switch_context() path returned and did not call out
1267 * to the continuation. we will do it manually here */
1268 if (continuation) {
1269 call_continuation(continuation);
1270 /* NOTREACHED */
1271 }
1272
1273 return TRUE;
1274 }
1275
1276 /*
1277 * thread_continue:
1278 *
1279 * Called when launching a new thread, at splsched().
1280 */
1281 void
1282 thread_continue(
1283 register thread_t old_thread)
1284 {
1285 register thread_t self;
1286 register void (*continuation)();
1287 sched_policy_t *policy;
1288 sf_return_t sfr;
1289
1290 self = current_thread();
1291
1292 /*
1293 * We must dispatch the old thread and then
1294 * call the current thread's continuation.
1295 * There might not be an old thread, if we are
1296 * the first thread to run on this processor.
1297 */
1298 if (old_thread != THREAD_NULL) {
1299 thread_dispatch(old_thread);
1300
1301 thread_lock(self);
1302
1303 /* Get pointer to scheduling policy "object" */
1304 policy = &sched_policy[self->policy];
1305
1306 /* Indicate to sched policy that new thread has started execution */
1307 /*** ??? maybe use a macro -- rkc, 1/4/96 ***/
1308 sfr = policy->sp_ops.sp_thread_begin(policy,self);
1309 assert(sfr == SF_SUCCESS);
1310 } else {
1311 thread_lock(self);
1312 }
1313
1314 continuation = self->continuation;
1315 self->continuation = (void (*)(void))0;
1316 thread_unlock(self);
1317
1318 /*
1319 * N.B. - the following is necessary, since thread_invoke()
1320 * inhibits preemption on entry and reenables before it
1321 * returns. Unfortunately, the first time a newly-created
1322 * thread executes, it magically appears here, and never
1323 * executes the enable_preemption() call in thread_invoke().
1324 */
1325 enable_preemption();
1326
1327 if (self->funnel_state & TH_FN_REFUNNEL) {
1328 kern_return_t save_wait_result;
1329 self->funnel_state = 0;
1330 save_wait_result = self->wait_result;
1331 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
1332 funnel_lock(self->funnel_lock);
1333 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
1334 self->wait_result = save_wait_result;
1335 self->funnel_state = TH_FN_OWNED;
1336 }
1337 spllo();
1338
1339 assert(continuation);
1340 (*continuation)();
1341 /*NOTREACHED*/
1342 }
1343
1344 #if MACH_LDEBUG || MACH_KDB
1345
1346 #define THREAD_LOG_SIZE 300
1347
1348 struct t64 {
1349 unsigned long h;
1350 unsigned long l;
1351 };
1352
1353 struct {
1354 struct t64 stamp;
1355 thread_t thread;
1356 long info1;
1357 long info2;
1358 long info3;
1359 char * action;
1360 } thread_log[THREAD_LOG_SIZE];
1361
1362 int thread_log_index;
1363
1364 void check_thread_time(long n);
1365
1366
1367 int check_thread_time_crash;
1368
1369 #if 0
1370 void
1371 check_thread_time(long us)
1372 {
1373 struct t64 temp;
1374
1375 if (!check_thread_time_crash)
1376 return;
1377
1378 temp = thread_log[0].stamp;
1379 cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp);
1380
1381 if (temp.l >= us && thread_log[1].info1 != 0x49) /* HACK!!! */
1382 panic ("check_thread_time");
1383 }
1384 #endif
1385
1386 void
1387 log_thread_action(char * action, long info1, long info2, long info3)
1388 {
1389 int i;
1390 spl_t x;
1391 static unsigned int tstamp;
1392
1393 x = splhigh();
1394
1395 for (i = THREAD_LOG_SIZE-1; i > 0; i--) {
1396 thread_log[i] = thread_log[i-1];
1397 }
1398
1399 thread_log[0].stamp.h = 0;
1400 thread_log[0].stamp.l = tstamp++;
1401 thread_log[0].thread = current_thread();
1402 thread_log[0].info1 = info1;
1403 thread_log[0].info2 = info2;
1404 thread_log[0].info3 = info3;
1405 thread_log[0].action = action;
1406 /* strcpy (&thread_log[0].action[0], action);*/
1407
1408 splx(x);
1409 }
1410 #endif /* MACH_LDEBUG || MACH_KDB */
1411
1412 #if MACH_KDB
1413 #include <ddb/db_output.h>
1414 void db_show_thread_log(void);
1415
1416 void
1417 db_show_thread_log(void)
1418 {
1419 int i;
1420
1421 db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
1422 " Info3 ", " Timestamp ", "Action");
1423
1424 for (i = 0; i < THREAD_LOG_SIZE; i++) {
1425 db_printf ("%08x %08x %08x %08x %08x/%08x %s\n",
1426 thread_log[i].thread,
1427 thread_log[i].info1,
1428 thread_log[i].info2,
1429 thread_log[i].info3,
1430 thread_log[i].stamp.h,
1431 thread_log[i].stamp.l,
1432 thread_log[i].action);
1433 }
1434 }
1435 #endif /* MACH_KDB */
1436
1437 /*
1438 * thread_block_reason:
1439 *
1440 * Block the current thread. If the thread is runnable
1441 * then someone must have woken it up between its request
1442 * to sleep and now. In this case, it goes back on a
1443 * run queue.
1444 *
1445 * If a continuation is specified, then thread_block will
1446 * attempt to discard the thread's kernel stack. When the
1447 * thread resumes, it will execute the continuation function
1448 * on a new kernel stack.
1449 */
1450 counter(mach_counter_t c_thread_block_calls = 0;)
1451
1452 int
1453 thread_block_reason(
1454 void (*continuation)(void),
1455 int reason)
1456 {
1457 register thread_t thread = current_thread();
1458 register processor_t myprocessor;
1459 register thread_t new_thread;
1460 spl_t s;
1461
1462 counter(++c_thread_block_calls);
1463
1464 check_simple_locks();
1465
1466 machine_clock_assist();
1467
1468 s = splsched();
1469
1470 if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) {
1471 thread->funnel_state = TH_FN_REFUNNEL;
1472 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0);
1473 funnel_unlock(thread->funnel_lock);
1474 }
1475
1476 myprocessor = current_processor();
1477
1478 thread_lock(thread);
1479 if (thread->state & TH_ABORT)
1480 clear_wait_internal(thread, THREAD_INTERRUPTED);
1481
1482 /* Unconditionally remove either | both */
1483 ast_off(AST_QUANTUM|AST_BLOCK|AST_URGENT);
1484
1485 new_thread = thread_select(myprocessor);
1486 assert(new_thread);
1487 assert(thread_runnable(new_thread));
1488 thread_unlock(thread);
1489 while (!thread_invoke(thread, new_thread, reason, continuation)) {
1490 thread_lock(thread);
1491 new_thread = thread_select(myprocessor);
1492 assert(new_thread);
1493 assert(thread_runnable(new_thread));
1494 thread_unlock(thread);
1495 }
1496
1497 if (thread->funnel_state & TH_FN_REFUNNEL) {
1498 kern_return_t save_wait_result;
1499
1500 save_wait_result = thread->wait_result;
1501 thread->funnel_state = 0;
1502 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
1503 funnel_lock(thread->funnel_lock);
1504 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
1505 thread->funnel_state = TH_FN_OWNED;
1506 thread->wait_result = save_wait_result;
1507 }
1508
1509 splx(s);
1510
1511 return thread->wait_result;
1512 }
1513
1514 /*
1515 * thread_block:
1516 *
1517 * Now calls thread_block_reason() which forwards the
1518 * the reason parameter to thread_invoke() so it can
1519 * do the right thing if the thread's quantum expired.
1520 */
1521 int
1522 thread_block(
1523 void (*continuation)(void))
1524 {
1525 return thread_block_reason(continuation, 0);
1526 }
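
/*
 * Illustrative sketch (editorial, not part of the original source): a
 * caller that can give up its kernel stack passes a continuation to
 * thread_block(); when the thread next runs it resumes in that function
 * on a fresh stack instead of returning from thread_block().  The event
 * and my_continuation (a hypothetical void (*)(void) function that
 * re-checks the condition and finishes the operation) are assumptions.
 *
 *	assert_wait((event_t)&object->state, THREAD_UNINT);
 *	thread_block(my_continuation);		(does not return here)
 */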
1527
1528 /*
1529 * thread_run:
1530 *
1531 * Switch directly from the current thread to a specified
1532 * thread. Both the current and new threads must be
1533 * runnable.
1534 *
1535 * Assumption:
1536 * at splsched.
1537 */
1538 int
1539 thread_run(
1540 thread_t old_thread,
1541 void (*continuation)(void),
1542 thread_t new_thread)
1543 {
1544 while (!thread_invoke(old_thread, new_thread, 0, continuation)) {
1545 register processor_t myprocessor = current_processor();
1546 thread_lock(old_thread);
1547 new_thread = thread_select(myprocessor);
1548 thread_unlock(old_thread);
1549 }
1550 return old_thread->wait_result;
1551 }
1552
1553 /*
1554 * Dispatches a running thread that is not on a runq.
1555 * Called at splsched.
1556 */
1557 void
1558 thread_dispatch(
1559 register thread_t thread)
1560 {
1561 sched_policy_t *policy;
1562 sf_return_t sfr;
1563
1564 /*
1565 * If we are discarding the thread's stack, we must do it
1566 * before the thread has a chance to run.
1567 */
1568 wake_lock(thread);
1569 thread_lock(thread);
1570
1571 #ifndef i386
1572 /* no continuations on i386 for now */
1573 if (thread->continuation != (void (*)())0) {
1574 assert((thread->state & TH_STACK_STATE) == 0);
1575 thread->state |= TH_STACK_HANDOFF;
1576 stack_free(thread);
1577 if (thread->top_act) {
1578 act_machine_sv_free(thread->top_act);
1579 }
1580 }
1581 #endif
1582
1583 switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {
1584
1585 case TH_RUN | TH_UNINT:
1586 case TH_RUN:
1587 /*
1588 * No reason to stop. Put back on a run queue.
1589 */
1590 /* Leave enqueueing thread up to scheduling policy */
1591 policy = &sched_policy[thread->policy];
1592 /*** ??? maybe use a macro ***/
1593 sfr = policy->sp_ops.sp_thread_dispatch(policy, thread);
1594 assert(sfr == SF_SUCCESS);
1595 break;
1596
1597 case TH_RUN | TH_WAIT | TH_UNINT:
1598 case TH_RUN | TH_WAIT:
1599 thread->sleep_stamp = sched_tick;
1600 /* fallthrough */
1601 case TH_WAIT: /* this happens! */
1602
1603 /*
1604 * Waiting
1605 */
1606 thread->state &= ~TH_RUN;
1607 if (thread->state & TH_TERMINATE)
1608 thread_reaper_enqueue(thread);
1609
1610 if (thread->wake_active) {
1611 thread->wake_active = FALSE;
1612 thread_unlock(thread);
1613 wake_unlock(thread);
1614 thread_wakeup((event_t)&thread->wake_active);
1615 return;
1616 }
1617 break;
1618
1619 case TH_RUN | TH_IDLE:
1620 /*
1621 * Drop idle thread -- it is already in
1622 * idle_thread_array.
1623 */
1624 break;
1625
1626 default:
1627 panic("State 0x%x \n",thread->state);
1628 }
1629 thread_unlock(thread);
1630 wake_unlock(thread);
1631 }
1632
1633 /*
1634 * Enqueue thread on run queue. Thread must be locked,
1635 * and not already be on a run queue.
1636 */
1637 int
1638 run_queue_enqueue(
1639 register run_queue_t rq,
1640 register thread_t thread,
1641 boolean_t tail)
1642 {
1643 register int whichq;
1644 int oldrqcount;
1645
1646 whichq = thread->sched_pri;
1647 assert(whichq >= MINPRI && whichq <= MAXPRI);
1648
1649 simple_lock(&rq->lock); /* lock the run queue */
1650 assert(thread->runq == RUN_QUEUE_NULL);
1651 if (tail)
1652 enqueue_tail(&rq->queues[whichq], (queue_entry_t)thread);
1653 else
1654 enqueue_head(&rq->queues[whichq], (queue_entry_t)thread);
1655
1656 setbit(MAXPRI - whichq, rq->bitmap);
1657 if (whichq > rq->highq)
1658 rq->highq = whichq;
1659
1660 oldrqcount = rq->count++;
1661 thread->runq = rq;
1662 thread->whichq = whichq;
1663 #if DEBUG
1664 thread_check(thread, rq);
1665 #endif /* DEBUG */
1666 simple_unlock(&rq->lock);
1667
1668 return (oldrqcount);
1669 }
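
/*
 * Editorial note on the priority bitmap maintained above and consumed by
 * rem_runq() and choose_thread(): bit (MAXPRI - pri) is set whenever
 * queues[pri] is non-empty (and the IDLEPRI bit is never cleared), so
 * the highest-priority occupied queue is found with one find-first-set:
 *
 *	rq->highq = MAXPRI - ffsbit(rq->bitmap);
 *
 * For example, if the highest occupied priority is MAXPRI - 2, bit 2 is
 * the lowest set bit and highq evaluates to MAXPRI - 2.
 */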
1670
1671 /*
1672 * thread_setrun:
1673 *
1674 * Make thread runnable; dispatch directly onto an idle processor
1675 * if possible. Else put on appropriate run queue (processor
1676 * if bound, else processor set). Caller must have lock on thread.
1677 * This is always called at splsched.
1678 * The tail parameter, if TRUE (TAIL_Q), indicates that the
1679 * thread should be placed at the tail of the runq; if
1680 * FALSE (HEAD_Q), the thread will be placed at the head of the
1681 * appropriate runq.
1682 */
1683 void
1684 thread_setrun(
1685 register thread_t new_thread,
1686 boolean_t may_preempt,
1687 boolean_t tail)
1688 {
1689 register processor_t processor;
1690 register run_queue_t runq;
1691 register processor_set_t pset;
1692 thread_t thread;
1693 ast_t ast_flags = AST_BLOCK;
1694
1695 mp_disable_preemption();
1696
1697 assert(!(new_thread->state & TH_SWAPPED_OUT));
1698 assert(thread_runnable(new_thread));
1699
1700 /*
1701 * Update priority if needed.
1702 */
1703 if (new_thread->sched_stamp != sched_tick)
1704 update_priority(new_thread);
1705
1706 if (new_thread->policy & (POLICY_FIFO|POLICY_RR)) {
1707 if ( new_thread->sched_pri >= (MAXPRI_KERNBAND - 2) &&
1708 kernel_preemption_mode == KERNEL_PREEMPT )
1709 ast_flags |= AST_URGENT;
1710 }
1711
1712 assert(new_thread->runq == RUN_QUEUE_NULL);
1713
1714 /*
1715 * Try to dispatch the thread directly onto an idle processor.
1716 */
1717 if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
1718 /*
1719 * Not bound, any processor in the processor set is ok.
1720 */
1721 pset = new_thread->processor_set;
1722 if (pset->idle_count > 0) {
1723 simple_lock(&pset->idle_lock);
1724 if (pset->idle_count > 0) {
1725 processor = (processor_t) queue_first(&pset->idle_queue);
1726 queue_remove(&(pset->idle_queue), processor, processor_t,
1727 processor_queue);
1728 pset->idle_count--;
1729 processor->next_thread = new_thread;
1730 processor->state = PROCESSOR_DISPATCHING;
1731 simple_unlock(&pset->idle_lock);
1732 if(processor->slot_num != cpu_number())
1733 machine_signal_idle(processor);
1734 mp_enable_preemption();
1735 return;
1736 }
1737 simple_unlock(&pset->idle_lock);
1738 }
1739
1740
1741 /*
1742 * Preempt check
1743 */
1744 runq = &pset->runq;
1745 thread = current_thread();
1746 processor = current_processor();
1747 if ( may_preempt &&
1748 pset == processor->processor_set &&
1749 thread->sched_pri < new_thread->sched_pri ) {
1750 /*
1751 * XXX if we have a non-empty local runq or are
1752 * XXX running a bound thread, ought to check for
1753 * XXX another cpu running lower-pri thread to preempt.
1754 */
1755 /*
1756 * Turn off first_quantum to allow csw.
1757 */
1758 processor->first_quantum = FALSE;
1759
1760 ast_on(ast_flags);
1761 }
1762
1763 /*
1764 * Put us on the end of the runq, if we are not preempting
1765 * or the guy we are preempting.
1766 */
1767 run_queue_enqueue(runq, new_thread, tail);
1768 }
1769 else {
1770 /*
1771 * Bound, can only run on bound processor. Have to lock
1772 * processor here because it may not be the current one.
1773 */
1774 if (processor->state == PROCESSOR_IDLE) {
1775 simple_lock(&processor->lock);
1776 pset = processor->processor_set;
1777 simple_lock(&pset->idle_lock);
1778 if (processor->state == PROCESSOR_IDLE) {
1779 queue_remove(&pset->idle_queue, processor,
1780 processor_t, processor_queue);
1781 pset->idle_count--;
1782 processor->next_thread = new_thread;
1783 processor->state = PROCESSOR_DISPATCHING;
1784 simple_unlock(&pset->idle_lock);
1785 simple_unlock(&processor->lock);
1786 if(processor->slot_num != cpu_number())
1787 machine_signal_idle(processor);
1788 mp_enable_preemption();
1789 return;
1790 }
1791 simple_unlock(&pset->idle_lock);
1792 simple_unlock(&processor->lock);
1793 }
1794
1795 /*
1796 * Cause ast on processor if processor is on line, and the
1797 * currently executing thread is not bound to that processor
1798 * (bound threads have implicit priority over non-bound threads).
1799 * We also avoid sending the AST to the idle thread (if it got
1800 * scheduled in the window between the 'if' above and here),
1801 * since the idle_thread is bound.
1802 */
1803 runq = &processor->runq;
1804 thread = current_thread();
1805 if (processor == current_processor()) {
1806 if ( thread->bound_processor == PROCESSOR_NULL ||
1807 thread->sched_pri < new_thread->sched_pri ) {
1808 processor->first_quantum = FALSE;
1809 ast_on(ast_flags);
1810 }
1811
1812 run_queue_enqueue(runq, new_thread, tail);
1813 }
1814 else {
1815 thread = cpu_data[processor->slot_num].active_thread;
1816 if ( run_queue_enqueue(runq, new_thread, tail) == 0 &&
1817 processor->state != PROCESSOR_OFF_LINE &&
1818 thread && thread->bound_processor != processor )
1819 cause_ast_check(processor);
1820 }
1821 }
1822
1823 mp_enable_preemption();
1824 }
1825
1826 /*
1827 * set_pri:
1828 *
1829 * Set the priority of the specified thread to the specified
1830 * priority. This may cause the thread to change queues.
1831 *
1832 * The thread *must* be locked by the caller.
1833 */
1834 void
1835 set_pri(
1836 thread_t thread,
1837 int pri,
1838 boolean_t resched)
1839 {
1840 register struct run_queue *rq;
1841
1842 rq = rem_runq(thread);
1843 assert(thread->runq == RUN_QUEUE_NULL);
1844 thread->sched_pri = pri;
1845 if (rq != RUN_QUEUE_NULL) {
1846 if (resched)
1847 thread_setrun(thread, TRUE, TAIL_Q);
1848 else
1849 run_queue_enqueue(rq, thread, TAIL_Q);
1850 }
1851 }
1852
1853 /*
1854 * rem_runq:
1855 *
1856 * Remove a thread from its run queue.
1857 * The run queue that the thread was on is returned
1858 * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
1859 * before calling this routine. Unusual locking protocol on runq
1860 * field in thread structure makes this code interesting; see thread.h.
1861 */
1862 run_queue_t
1863 rem_runq(
1864 thread_t thread)
1865 {
1866 register struct run_queue *rq;
1867
1868 rq = thread->runq;
1869 /*
1870 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
1871 * run_queues because the caller locked the thread. Otherwise
1872 * the thread is on a runq, but could leave.
1873 */
1874 if (rq != RUN_QUEUE_NULL) {
1875 simple_lock(&rq->lock);
1876 if (rq == thread->runq) {
1877 /*
1878 * Thread is in a runq and we have a lock on
1879 * that runq.
1880 */
1881 #if DEBUG
1882 thread_check(thread, rq);
1883 #endif /* DEBUG */
1884 remqueue(&rq->queues[0], (queue_entry_t)thread);
1885 rq->count--;
1886
1887 if (queue_empty(rq->queues + thread->sched_pri)) {
1888 /* update run queue status */
1889 if (thread->sched_pri != IDLEPRI)
1890 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
1891 rq->highq = MAXPRI - ffsbit(rq->bitmap);
1892 }
1893 thread->runq = RUN_QUEUE_NULL;
1894 simple_unlock(&rq->lock);
1895 }
1896 else {
1897 /*
1898 * The thread left the runq before we could
1899 * lock the runq. It is not on a runq now, and
1900 * can't move again because this routine's
1901 * caller locked the thread.
1902 */
1903 assert(thread->runq == RUN_QUEUE_NULL);
1904 simple_unlock(&rq->lock);
1905 rq = RUN_QUEUE_NULL;
1906 }
1907 }
1908
1909 return (rq);
1910 }
1911
1912
1913 /*
1914 * choose_thread:
1915 *
1916 * Choose a thread to execute. The thread chosen is removed
1917 * from its run queue. Note that this requires only that the runq
1918 * lock be held.
1919 *
1920 * Strategy:
1921 * Check processor runq first; if anything found, run it.
1922 * Else check pset runq; if nothing found, return idle thread.
1923 *
1924 * Second line of strategy is implemented by choose_pset_thread.
1925 * This is only called on processor startup and when thread_block
1926 * thinks there's something in the processor runq.
1927 */
1928 thread_t
1929 choose_thread(
1930 processor_t myprocessor)
1931 {
1932 thread_t thread;
1933 register queue_t q;
1934 register run_queue_t runq;
1935 processor_set_t pset;
1936
1937 runq = &myprocessor->runq;
1938 pset = myprocessor->processor_set;
1939
1940 simple_lock(&runq->lock);
1941 if (runq->count > 0 && runq->highq >= pset->runq.highq) {
1942 q = runq->queues + runq->highq;
1943 #if MACH_ASSERT
1944 if (!queue_empty(q)) {
1945 #endif /*MACH_ASSERT*/
1946 thread = (thread_t)q->next;
1947 ((queue_entry_t)thread)->next->prev = q;
1948 q->next = ((queue_entry_t)thread)->next;
1949 thread->runq = RUN_QUEUE_NULL;
1950 runq->count--;
1951 if (queue_empty(q)) {
1952 if (runq->highq != IDLEPRI)
1953 clrbit(MAXPRI - runq->highq, runq->bitmap);
1954 runq->highq = MAXPRI - ffsbit(runq->bitmap);
1955 }
1956 simple_unlock(&runq->lock);
1957 return (thread);
1958 #if MACH_ASSERT
1959 }
1960 panic("choose_thread");
1961 #endif /*MACH_ASSERT*/
1962 /*NOTREACHED*/
1963 }
1964
1965 simple_unlock(&runq->lock);
1966 simple_lock(&pset->runq.lock);
1967 return (choose_pset_thread(myprocessor, pset));
1968 }
1969
1970
1971 /*
1972 * choose_pset_thread: choose a thread from processor_set runq or
1973 * set processor idle and choose its idle thread.
1974 *
1975 * Caller must be at splsched and have a lock on the runq. This
1976 * lock is released by this routine. myprocessor is always the current
1977 * processor, and pset must be its processor set.
1978 * This routine chooses and removes a thread from the runq if there
1979 * is one (and returns it), else it sets the processor idle and
1980 * returns its idle thread.
1981 */
1982 thread_t
1983 choose_pset_thread(
1984 register processor_t myprocessor,
1985 processor_set_t pset)
1986 {
1987 register run_queue_t runq;
1988 register thread_t thread;
1989 register queue_t q;
1990
1991 runq = &pset->runq;
1992 if (runq->count > 0) {
1993 q = runq->queues + runq->highq;
1994 #if MACH_ASSERT
1995 if (!queue_empty(q)) {
1996 #endif /*MACH_ASSERT*/
1997 thread = (thread_t)q->next;
1998 ((queue_entry_t)thread)->next->prev = q;
1999 q->next = ((queue_entry_t)thread)->next;
2000 thread->runq = RUN_QUEUE_NULL;
2001 runq->count--;
2002 if (queue_empty(q)) {
2003 if (runq->highq != IDLEPRI)
2004 clrbit(MAXPRI - runq->highq, runq->bitmap);
2005 runq->highq = MAXPRI - ffsbit(runq->bitmap);
2006 }
2007 simple_unlock(&runq->lock);
2008 return (thread);
2009 #if MACH_ASSERT
2010 }
2011 panic("choose_pset_thread");
2012 #endif /*MACH_ASSERT*/
2013 /*NOTREACHED*/
2014 }
2015 simple_unlock(&runq->lock);
2016
2017 /*
2018 * Nothing is runnable, so set this processor idle if it
2019 * was running. If it was in an assignment or shutdown,
2020 * leave it alone. Return its idle thread.
2021 */
2022 simple_lock(&pset->idle_lock);
2023 if (myprocessor->state == PROCESSOR_RUNNING) {
2024 myprocessor->state = PROCESSOR_IDLE;
2025 /*
2026 * XXX Until it goes away, put master on end of queue, others
2027 * XXX on front so master gets used last.
2028 */
2029 if (myprocessor == master_processor)
2030 queue_enter(&(pset->idle_queue), myprocessor,
2031 processor_t, processor_queue);
2032 else
2033 queue_enter_first(&(pset->idle_queue), myprocessor,
2034 processor_t, processor_queue);
2035
2036 pset->idle_count++;
2037 }
2038 simple_unlock(&pset->idle_lock);
2039
2040 return (myprocessor->idle_thread);
2041 }
2042
2043 /*
2044 * no_dispatch_count counts number of times processors go non-idle
2045 * without being dispatched. This should be very rare.
2046 */
2047 int no_dispatch_count = 0;
2048
2049 /*
2050 * This is the idle thread, which just looks for other threads
2051 * to execute.
2052 */
2053 void
2054 idle_thread_continue(void)
2055 {
2056 register processor_t myprocessor;
2057 register volatile thread_t *threadp;
2058 register volatile int *gcount;
2059 register volatile int *lcount;
2060 register thread_t new_thread;
2061 register int state;
2062 register processor_set_t pset;
2063 int mycpu;
2064
2065 mycpu = cpu_number();
2066 myprocessor = current_processor();
2067 threadp = (volatile thread_t *) &myprocessor->next_thread;
2068 lcount = (volatile int *) &myprocessor->runq.count;
2069
2070 for (;;) {
2071 #ifdef MARK_CPU_IDLE
2072 MARK_CPU_IDLE(mycpu);
2073 #endif /* MARK_CPU_IDLE */
2074
2075 gcount = (volatile int *)&myprocessor->processor_set->runq.count;
2076
2077 (void)splsched();
2078 while ( (*threadp == (volatile thread_t)THREAD_NULL) &&
2079 (*gcount == 0) && (*lcount == 0) ) {
2080
2081 /* check for ASTs while we wait */
2082
2083 if (need_ast[mycpu] &~ (AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT)) {
2084 /* don't allow scheduling ASTs */
2085 need_ast[mycpu] &= ~(AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT);
2086 ast_taken(FALSE, AST_ALL, TRUE); /* back at spllo */
2087 }
2088 else
2089 #ifdef __ppc__
2090 machine_idle();
2091 #else
2092 (void)spllo();
2093 #endif
2094 machine_clock_assist();
2095
2096 (void)splsched();
2097 }
2098
2099 #ifdef MARK_CPU_ACTIVE
2100 (void)spllo();
2101 MARK_CPU_ACTIVE(mycpu);
2102 (void)splsched();
2103 #endif /* MARK_CPU_ACTIVE */
2104
2105 /*
2106 * This is an if/else chain rather than a switch statement,
2107 * to avoid the bounds-checking code in the common case.
2108 */
2109 pset = myprocessor->processor_set;
2110 simple_lock(&pset->idle_lock);
2111 retry:
2112 state = myprocessor->state;
2113 if (state == PROCESSOR_DISPATCHING) {
2114 /*
2115 * Common case -- cpu dispatched.
2116 */
2117 new_thread = *threadp;
2118 *threadp = (volatile thread_t) THREAD_NULL;
2119 myprocessor->state = PROCESSOR_RUNNING;
2120 simple_unlock(&pset->idle_lock);
2121
2122 thread_lock(new_thread);
2123 simple_lock(&myprocessor->runq.lock);
2124 simple_lock(&pset->runq.lock);
2125 if ( myprocessor->runq.highq > new_thread->sched_pri ||
2126 pset->runq.highq > new_thread->sched_pri ) {
2127 simple_unlock(&pset->runq.lock);
2128 simple_unlock(&myprocessor->runq.lock);
2129
2130 if (new_thread->bound_processor != PROCESSOR_NULL)
2131 run_queue_enqueue(&myprocessor->runq, new_thread, HEAD_Q);
2132 else
2133 run_queue_enqueue(&pset->runq, new_thread, HEAD_Q);
2134 thread_unlock(new_thread);
2135
2136 counter(c_idle_thread_block++);
2137 thread_block(idle_thread_continue);
2138 }
2139 else {
2140 simple_unlock(&pset->runq.lock);
2141 simple_unlock(&myprocessor->runq.lock);
2142
2143 /*
2144 * set up quantum for new thread.
2145 */
2146 if (new_thread->policy & (POLICY_RR|POLICY_FIFO))
2147 myprocessor->quantum = new_thread->unconsumed_quantum;
2148 else
2149 myprocessor->quantum = pset->set_quantum;
2150 thread_unlock(new_thread);
2151
2152 myprocessor->first_quantum = TRUE;
2153 counter(c_idle_thread_handoff++);
2154 thread_run(myprocessor->idle_thread,
2155 idle_thread_continue, new_thread);
2156 }
2157 }
2158 else
2159 if (state == PROCESSOR_IDLE) {
2160 if (myprocessor->state != PROCESSOR_IDLE) {
2161 /*
2162 * Something happened, try again.
2163 */
2164 goto retry;
2165 }
2166 /*
2167 * Processor was not dispatched (Rare).
2168 * Set it running again.
2169 */
2170 no_dispatch_count++;
2171 pset->idle_count--;
2172 queue_remove(&pset->idle_queue, myprocessor,
2173 processor_t, processor_queue);
2174 myprocessor->state = PROCESSOR_RUNNING;
2175 simple_unlock(&pset->idle_lock);
2176
2177 counter(c_idle_thread_block++);
2178 thread_block(idle_thread_continue);
2179 }
2180 else
2181 if ( state == PROCESSOR_ASSIGN ||
2182 state == PROCESSOR_SHUTDOWN ) {
2183 /*
2184 * Changing processor sets, or going off-line.
2185 * Release next_thread if there is one. Actual
2186 * thread to run is on a runq.
2187 */
2188 if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
2189 *threadp = (volatile thread_t) THREAD_NULL;
2190 simple_unlock(&pset->idle_lock);
2191 thread_lock(new_thread);
2192 thread_setrun(new_thread, FALSE, TAIL_Q);
2193 thread_unlock(new_thread);
2194 } else
2195 simple_unlock(&pset->idle_lock);
2196
2197 counter(c_idle_thread_block++);
2198 thread_block(idle_thread_continue);
2199 }
2200 else {
2201 simple_unlock(&pset->idle_lock);
2202 printf("Bad processor state %d (Cpu %d)\n",
2203 cpu_state(mycpu), mycpu);
2204 panic("idle_thread");
2205
2206 }
2207
2208 (void)spllo();
2209 }
2210 }
2211
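/*
 * idle_thread: startup entry for a processor's idle thread.  Marks its
 * stack privileged and the activation unswappable, drops its priority
 * to IDLEPRI, then blocks once and continues in idle_thread_continue().
 */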
2212 void
2213 idle_thread(void)
2214 {
2215 thread_t self = current_thread();
2216 spl_t s;
2217
2218 stack_privilege(self);
2219 thread_swappable(current_act(), FALSE);
2220
2221 s = splsched();
2222 thread_lock(self);
2223
2224 self->priority = IDLEPRI;
2225 self->sched_pri = self->priority;
2226
2227 thread_unlock(self);
2228 splx(s);
2229
2230 counter(c_idle_thread_block++);
2231 thread_block((void(*)(void))0);
2232 idle_thread_continue();
2233 /*NOTREACHED*/
2234 }
2235
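/*
 * Interval between scheduler ticks and the deadline of the next tick,
 * both in absolute time units; initialized in sched_tick_thread() and
 * advanced in sched_tick_thread_continue() below.
 */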
2236 static AbsoluteTime sched_tick_interval, sched_tick_deadline;
2237
2238 /*
2239 * sched_tick_thread
2240 *
2241 * Update the priorities of all threads periodically.
2242 */
2243 void
2244 sched_tick_thread_continue(void)
2245 {
2246 AbsoluteTime abstime;
2247 #if SIMPLE_CLOCK
2248 int new_usec;
2249 #endif /* SIMPLE_CLOCK */
2250
2251 clock_get_uptime(&abstime);
2252
2253 sched_tick++; /* age usage one more time */
2254 #if SIMPLE_CLOCK
2255 /*
2256 * Compensate for clock drift. sched_usec is an
2257 * exponential average of the number of microseconds in
2258 * a second. It decays in the same fashion as cpu_usage.
2259 */
2260 new_usec = sched_usec_elapsed();
2261 sched_usec = (5*sched_usec + 3*new_usec)/8;
2262 #endif /* SIMPLE_CLOCK */
2263
2264 /*
2265 * Compute the scheduler load factors.
2266 */
2267 compute_mach_factor();
2268
2269 /*
2270 * Scan the run queues for runnable threads that need to
2271 * have their priorities recalculated.
2272 */
2273 do_thread_scan();
2274
2275 clock_deadline_for_periodic_event(sched_tick_interval, abstime,
2276 &sched_tick_deadline);
2277
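/*
 * Sleep until the deadline computed above: declare an interruptible
 * wait with our continuation as the event, arm a timer for the
 * deadline, and block.  The timer expiry ends the wait and execution
 * resumes at sched_tick_thread_continue().
 */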
2278 assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
2279 thread_set_timer_deadline(sched_tick_deadline);
2280 thread_block(sched_tick_thread_continue);
2281 /*NOTREACHED*/
2282 }
2283
2284 void
2285 sched_tick_thread(void)
2286 {
2287 thread_t self = current_thread();
2288 natural_t rate;
2289 spl_t s;
2290
2291 stack_privilege(self);
2292 thread_swappable(self->top_act, FALSE);
2293
2294 s = splsched();
2295 thread_lock(self);
2296
2297 self->priority = MAXPRI_STANDARD;
2298 self->sched_pri = self->priority;
2299
2300 thread_unlock(self);
2301 splx(s);
2302
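/*
 * Worked example of the computation below, assuming SCHED_TICK_SHIFT
 * is 3 and that the scale factor argument is in nanoseconds per unit:
 * rate = 1000 >> 3 = 125 units of USEC_PER_SEC ns (1 ms) each,
 * i.e. a 125 ms tick, so sched_tick advances 8 times per second.
 */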
2303 rate = (1000 >> SCHED_TICK_SHIFT);
2304 clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC,
2305 &sched_tick_interval);
2306 clock_get_uptime(&sched_tick_deadline);
2307
2308 thread_block(sched_tick_thread_continue);
2309 /*NOTREACHED*/
2310 }
2311
2312 #define MAX_STUCK_THREADS 128
2313
2314 /*
2315 * do_thread_scan: scan for stuck threads. A thread is stuck if
2316 * it is runnable but its priority is so low that it has not
2317 * run for several seconds. Its priority should be higher, but
2318 * won't be until it runs and calls update_priority. The scanner
2319 * finds these threads and does the updates.
2320 *
2321 * The scanner runs in two passes. Pass one squirrels likely
2322 * threads away in an array (taking out references on them).
2323 * Pass two does the priority updates. This is necessary because
2324 * the run queue lock is required for the candidate scan, but
2325 * cannot be held during updates [set_pri will deadlock].
2326 *
2327 * Array length should be enough so that restart isn't necessary,
2328 * but restart logic is included. Does not scan processor runqs.
2329 *
2330 */
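/*
 * Pass-one results, shared between do_runq_scan() and do_thread_scan().
 * No lock is taken for them here; within this file they are only
 * driven from the sched_tick thread above.
 */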
2331 thread_t stuck_threads[MAX_STUCK_THREADS];
2332 int stuck_count = 0;
2333
2334 /*
2335 * do_runq_scan is the guts of pass 1. It scans a runq for
2336 * stuck threads. A boolean is returned indicating whether
2337 * a retry is needed.
2338 */
2339 boolean_t
2340 do_runq_scan(
2341 run_queue_t runq)
2342 {
2343 register queue_t q;
2344 register thread_t thread;
2345 register int count;
2346 spl_t s;
2347 boolean_t result = FALSE;
2348
2349 s = splsched();
2350 simple_lock(&runq->lock);
2351 if ((count = runq->count) > 0) {
2352 q = runq->queues + runq->highq;
2353 while (count > 0) {
2354 queue_iterate(q, thread, thread_t, links) {
2355 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
2356 thread->policy == POLICY_TIMESHARE ) {
2357 if (thread->sched_stamp != sched_tick) {
2358 /*
2359 * Stuck, save its id for later.
2360 */
2361 if (stuck_count == MAX_STUCK_THREADS) {
2362 /*
2363 * !@#$% No more room.
2364 */
2365 simple_unlock(&runq->lock);
2366 splx(s);
2367
2368 return (TRUE);
2369 }
2370
2371 /*
2372 * Inline version of thread_reference
2373 * XXX - lock ordering problem here:
2374 * thread locks should be taken before runq
2375 * locks: just try to get the thread's lock
2376 * and ignore this thread if we fail; we might
2377 * have better luck next time.
2378 */
2379 if (simple_lock_try(&thread->lock)) {
2380 thread->ref_count++;
2381 thread_unlock(thread);
2382 stuck_threads[stuck_count++] = thread;
2383 }
2384 else
2385 result = TRUE;
2386 }
2387 }
2388
2389 count--;
2390 }
2391
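/* Move down to the next lower-priority queue. */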
2392 q--;
2393 }
2394 }
2395 simple_unlock(&runq->lock);
2396 splx(s);
2397
2398 return (result);
2399 }
2400
2401 boolean_t thread_scan_enabled = TRUE;
2402
2403 void
2404 do_thread_scan(void)
2405 {
2406 register boolean_t restart_needed = FALSE;
2407 register thread_t thread;
2408 register processor_set_t pset = &default_pset;
2409 register processor_t processor;
2410 spl_t s;
2411
2412 if (!thread_scan_enabled)
2413 return;
2414
2415 do {
2416 restart_needed = do_runq_scan(&pset->runq);
2417 if (!restart_needed) {
2418 simple_lock(&pset->processors_lock);
2419 processor = (processor_t)queue_first(&pset->processors);
2420 while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
2421 if ((restart_needed = do_runq_scan(&processor->runq)))
2422 break;
2423
2424 processor = (processor_t)queue_next(&processor->processors);
2425 }
2426 simple_unlock(&pset->processors_lock);
2427 }
2428
2429 /*
2430 * Ok, we now have a collection of candidates -- fix them.
2431 */
2432 while (stuck_count > 0) {
2433 thread = stuck_threads[--stuck_count];
2434 stuck_threads[stuck_count] = THREAD_NULL;
2435 s = splsched();
2436 thread_lock(thread);
2437 if (thread->policy == POLICY_TIMESHARE) {
2438 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
2439 thread->sched_stamp != sched_tick )
2440 update_priority(thread);
2441 }
2442 thread_unlock(thread);
2443 splx(s);
2444 thread_deallocate(thread);
2445 }
2446
2447 } while (restart_needed);
2448 }
2449
2450 /*
2451 * Just in case someone doesn't use the macro
2452 */
2453 #undef thread_wakeup
2454 void
2455 thread_wakeup(
2456 event_t x);
2457
2458 void
2459 thread_wakeup(
2460 event_t x)
2461 {
2462 thread_wakeup_with_result(x, THREAD_AWAKENED);
2463 }
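/*
 * Illustrative sketch (kept under #if 0, not compiled) of the usual
 * pairing for this primitive: one thread asserts a wait on an event
 * and blocks, another wakes that event.  The event variable and
 * function names here are hypothetical.
 */
#if 0
static int example_event;

static void
example_sleep(void)
{
	assert_wait((event_t)&example_event, THREAD_INTERRUPTIBLE);
	thread_block((void (*)(void)) 0);
}

static void
example_wake(void)
{
	thread_wakeup((event_t)&example_event);
}
#endif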
2464
2465 boolean_t
2466 thread_runnable(
2467 thread_t thread)
2468 {
2469 sched_policy_t *policy;
2470
2471 /* Ask sched policy if thread is runnable */
2472 policy = policy_id_to_sched_policy(thread->policy);
2473
2474 return ((policy != SCHED_POLICY_NULL)?
2475 policy->sp_ops.sp_thread_runnable(policy, thread) : FALSE);
2476 }
2477
2478 #if DEBUG
2479
2480 void
2481 dump_processor_set(
2482 processor_set_t ps)
2483 {
2484 printf("processor_set: %08x\n",ps);
2485 printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
2486 ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count);
2487 printf("processors: %08x %08x, processor_count: 0x%x\n",
2488 ps->processors.next,ps->processors.prev,ps->processor_count);
2489 printf("tasks: %08x %08x, task_count: 0x%x\n",
2490 ps->tasks.next,ps->tasks.prev,ps->task_count);
2491 printf("threads: %08x %08x, thread_count: 0x%x\n",
2492 ps->threads.next,ps->threads.prev,ps->thread_count);
2493 printf("ref_count: 0x%x, active: %x\n",
2494 ps->ref_count,ps->active);
2495 printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self);
2496 printf("max_priority: 0x%x, policies: 0x%x, set_quantum: 0x%x\n",
2497 ps->max_priority, ps->policies, ps->set_quantum);
2498 }
2499
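/*
 * Map a processor state to a printable name; relies on a states[]
 * table being in scope (here, the one defined locally in
 * dump_processor() below).
 */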
2500 #define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])
2501
2502 void
2503 dump_processor(
2504 processor_t p)
2505 {
2506 char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
2507 "ASSIGN","SHUTDOWN"};
2508
2509 printf("processor: %08x\n",p);
2510 printf("processor_queue: %08x %08x\n",
2511 p->processor_queue.next,p->processor_queue.prev);
2512 printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
2513 processor_state(p->state), p->next_thread, p->idle_thread);
2514 printf("quantum: %u, first_quantum: %x, last_quantum: %u\n",
2515 p->quantum, p->first_quantum, p->last_quantum);
2516 printf("processor_set: %08x, processor_set_next: %08x\n",
2517 p->processor_set, p->processor_set_next);
2518 printf("processors: %08x %08x\n", p->processors.next,p->processors.prev);
2519 printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num);
2520 }
2521
2522 void
2523 dump_run_queue_struct(
2524 run_queue_t rq)
2525 {
2526 char dump_buf[80];
2527 int i;
2528
2529 for( i=0; i < NRQS; ) {
2530 int j;
2531
2532 printf("%6s",(i==0)?"runq:":"");
2533 for( j=0; (j<8) && (i < NRQS); j++,i++ ) {
2534 if( rq->queues[i].next == &rq->queues[i] )
2535 printf( " --------");
2536 else
2537 printf(" %08x",rq->queues[i].next);
2538 }
2539 printf("\n");
2540 }
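/*
 * Dump the run queue bitmap, one character per bit from the most
 * significant bit of each word down: 'r' for a set bit, 'e' for a
 * clear (empty) one.
 */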
2541 for( i=0; i < NRQBM; ) {
2542 register unsigned int mask;
2543 char *d=dump_buf;
2544
2545 mask = ~0;
2546 mask ^= (mask>>1);
2547
2548 do {
2549 *d++ = ((rq->bitmap[i]&mask)?'r':'e');
2550 mask >>=1;
2551 } while( mask );
2552 *d = '\0';
2553 printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf);
2554 i++;
2555 }
2556 printf("highq: 0x%x, count: %u\n", rq->highq, rq->count);
2557 }
2558
2559 void
2560 dump_run_queues(
2561 run_queue_t runq)
2562 {
2563 register queue_t q1;
2564 register int i;
2565 register queue_entry_t e;
2566
2567 q1 = runq->queues;
2568 for (i = 0; i < NRQS; i++) {
2569 if (q1->next != q1) {
2570 int t_cnt;
2571
2572 printf("[%u]",i);
2573 for (t_cnt=0, e = q1->next; e != q1; e = e->next) {
2574 printf("\t0x%08x",e);
2575 if( (t_cnt = (t_cnt + 1) % 4) == 0 )
2576 printf("\n");
2577 }
2578 if( t_cnt )
2579 printf("\n");
2580 }
2581 /* else
2582 printf("[%u]\t<empty>\n",i);
2583 */
2584 q1++;
2585 }
2586 }
2587
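/*
 * checkrq: consistency check on a run queue.  Verifies that the queue
 * links are well formed, that the number of queued threads matches
 * rq->count, and that rq->highq is not lower than the highest occupied
 * priority.
 */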
2588 void
2589 checkrq(
2590 run_queue_t rq,
2591 char *msg)
2592 {
2593 register queue_t q1;
2594 register int i, j;
2595 register queue_entry_t e;
2596 register int highq;
2597
2598 highq = -1;
2599 j = 0;
2600 q1 = rq->queues;
2601 for (i = 0; i <= MAXPRI; i++) {
2602 if (q1->next == q1) {
2603 if (q1->prev != q1) {
2604 panic("checkrq: empty at %s", msg);
2605 }
2606 }
2607 else {
2608 /* queues[i] holds priority i; remember the highest non-empty */
2609 highq = i;
2610
2611 for (e = q1->next; e != q1; e = e->next) {
2612 j++;
2613 if (e->next->prev != e)
2614 panic("checkrq-2 at %s", msg);
2615 if (e->prev->next != e)
2616 panic("checkrq-3 at %s", msg);
2617 }
2618 }
2619 q1++;
2620 }
2621 if (j != rq->count)
2622 panic("checkrq: count wrong at %s", msg);
2623 if (rq->count != 0 && highq > rq->highq)
2624 panic("checkrq: highq wrong at %s", msg);
2625 }
2626
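/*
 * thread_check: verify that the thread appears on the run queue
 * corresponding to its scheduled priority; panics if not.
 */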
2627 void
2628 thread_check(
2629 register thread_t thread,
2630 register run_queue_t rq)
2631 {
2632 register int whichq = thread->sched_pri;
2633 register queue_entry_t queue, entry;
2634
2635 if (whichq < MINPRI || whichq > MAXPRI)
2636 panic("thread_check: bad pri");
2637
2638 if (whichq != thread->whichq)
2639 panic("thread_check: whichq");
2640
2641 queue = &rq->queues[whichq];
2642 entry = queue_first(queue);
2643 while (!queue_end(queue, entry)) {
2644 if (entry == (queue_entry_t)thread)
2645 return;
2646
2647 entry = queue_next(entry);
2648 }
2649
2650 panic("thread_check: not found");
2651 }
2652
2653 #endif /* DEBUG */
2654
2655 #if MACH_KDB
2656 #include <ddb/db_output.h>
2657 #define printf kdbprintf
2658 extern int db_indent;
2659 void db_sched(void);
2660
2661 void
2662 db_sched(void)
2663 {
2664 iprintf("Scheduling Statistics:\n");
2665 db_indent += 2;
2666 iprintf("Thread invocations: csw %d same %d\n",
2667 c_thread_invoke_csw, c_thread_invoke_same);
2668 #if MACH_COUNTERS
2669 iprintf("Thread block: calls %d\n",
2670 c_thread_block_calls);
2671 iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
2672 c_idle_thread_handoff,
2673 c_idle_thread_block, no_dispatch_count);
2674 iprintf("Sched thread blocks: %d\n", c_sched_thread_block);
2675 #endif /* MACH_COUNTERS */
2676 db_indent -= 2;
2677 }
2678 #endif /* MACH_KDB */