1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68 #include <mach_kdb.h>
69
70 #include <ddb/db_output.h>
71
72 #include <mach/mach_types.h>
73 #include <mach/machine.h>
74 #include <mach/policy.h>
75 #include <mach/sync_policy.h>
76
77 #include <machine/machine_routines.h>
78 #include <machine/sched_param.h>
79 #include <machine/machine_cpu.h>
80
81 #include <kern/kern_types.h>
82 #include <kern/clock.h>
83 #include <kern/counters.h>
84 #include <kern/cpu_number.h>
85 #include <kern/cpu_data.h>
86 #include <kern/debug.h>
87 #include <kern/lock.h>
88 #include <kern/macro_help.h>
89 #include <kern/machine.h>
90 #include <kern/misc_protos.h>
91 #include <kern/processor.h>
92 #include <kern/queue.h>
93 #include <kern/sched.h>
94 #include <kern/sched_prim.h>
95 #include <kern/syscall_subr.h>
96 #include <kern/task.h>
97 #include <kern/thread.h>
98 #include <kern/wait_queue.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_kern.h>
102 #include <vm/vm_map.h>
103
104 #include <sys/kdebug.h>
105
106 #include <kern/pms.h>
107
108 struct run_queue rt_runq;
109 #define RT_RUNQ ((processor_t)-1)
110 decl_simple_lock_data(static,rt_lock);
111
112 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
113 int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
114
115 #define MAX_UNSAFE_QUANTA 800
116 int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
117
118 #define MAX_POLL_QUANTA 2
119 int max_poll_quanta = MAX_POLL_QUANTA;
120
121 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
122 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
123
124 uint64_t max_unsafe_computation;
125 uint32_t sched_safe_duration;
126 uint64_t max_poll_computation;
127
128 uint32_t std_quantum;
129 uint32_t min_std_quantum;
130
131 uint32_t std_quantum_us;
132
133 uint32_t max_rt_quantum;
134 uint32_t min_rt_quantum;
135
136 uint32_t sched_cswtime;
137
138 unsigned sched_tick;
139 uint32_t sched_tick_interval;
140
141 uint32_t sched_pri_shift = INT8_MAX;
142 uint32_t sched_fixed_shift;
143
144 uint32_t sched_run_count, sched_share_count;
145 uint32_t sched_load_average, sched_mach_factor;
146
147 void (*pm_tick_callout)(void) = NULL;
148
149 /* Forwards */
150 void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
151
152 static void load_shift_init(void) __attribute__((section("__TEXT, initcode")));
153 static void preempt_pri_init(void) __attribute__((section("__TEXT, initcode")));
154
155 static thread_t thread_select_idle(
156 thread_t thread,
157 processor_t processor);
158
159 static thread_t processor_idle(
160 thread_t thread,
161 processor_t processor);
162
163 static thread_t choose_thread(
164 processor_t processor);
165
166 static thread_t steal_thread(
167 processor_t processor);
168
169 static void thread_update_scan(void);
170
171 #if DEBUG
172 extern int debug_task;
173 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
174 #else
175 #define TLOG(a, fmt, args...) do {} while (0)
176 #endif
177
178 #if DEBUG
179 static
180 boolean_t thread_runnable(
181 thread_t thread);
182
183 #endif /*DEBUG*/
184
185 /*
186 * State machine
187 *
188 * states are combinations of:
189 * R running
190 * W waiting (or on wait queue)
191 * N non-interruptible
192 * O swapped out
193 * I being swapped in
194 *
195 *  init     action
196 *           assert_wait   thread_block   clear_wait    swapout  swapin
197 *
198 *  R        RW, RWN       R; setrun      -             -
199 *  RN       RWN           RN; setrun     -             -
200 *
201 *  RW                     W              R             -
202 *  RWN                    WN             RN            -
203 *
204 *  W                                     R; setrun     WO
205 *  WN                                    RN; setrun    -
206 *
207 *  RO                                    -             -        R
208 *
209 */
210
211 /*
212 * Waiting protocols and implementation:
213 *
214 * Each thread may be waiting for exactly one event; this event
215 * is set using assert_wait(). That thread may be awakened either
216 * by performing a thread_wakeup_prim() on its event,
217 * or by directly waking that thread up with clear_wait().
218 *
219 * The implementation of wait events uses a hash table. Each
220 * bucket is a queue of threads having the same hash function
221 * value; the chain for the queue (linked list) is the run queue
222 * field. [It is not possible to be waiting and runnable at the
223 * same time.]
224 *
225 * Locks on both the thread and on the hash buckets govern the
226 * wait event field and the queue chain field. Because wakeup
227 * operations only have the event as an argument, the event hash
228 * bucket must be locked before any thread.
229 *
230 * Scheduling operations may also occur at interrupt level; therefore,
231 * interrupts below splsched() must be prevented when holding
232 * thread or hash bucket locks.
233 *
234 * The wait event hash table declarations are as follows:
235 */
236
237 #define NUMQUEUES 59
238
239 struct wait_queue wait_queues[NUMQUEUES];
240
241 #define wait_hash(event) \
242 ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)
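
/*
 * Illustrative sketch (not part of the original file): a waiter pairs
 * assert_wait() on an event with thread_block(), and the waker calls
 * thread_wakeup() on the same event; both sides hash the event pointer
 * into wait_queues[] via wait_hash().  The object and field names are
 * hypothetical.
 *
 *	wait_result_t	wres;
 *
 *	wres = assert_wait((event_t)&obj->flag, THREAD_UNINT);
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *
 *	...and on the waker side, once the condition is satisfied:
 *
 *	obj->flag = TRUE;
 *	thread_wakeup((event_t)&obj->flag);
 */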
243
244 int8_t sched_load_shifts[NRQS];
245 int sched_preempt_pri[NRQBM];
246
247 void
248 sched_init(void)
249 {
250 /*
251 * Calculate the timeslicing quantum
252 * in us.
253 */
254 if (default_preemption_rate < 1)
255 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
256 std_quantum_us = (1000 * 1000) / default_preemption_rate;
257
258 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
259
260 sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) *
261 (1 << SCHED_TICK_SHIFT);
262
263 wait_queues_init();
264 load_shift_init();
265 preempt_pri_init();
266 simple_lock_init(&rt_lock, 0);
267 run_queue_init(&rt_runq);
268 sched_tick = 0;
269 ast_init();
270 }
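
/*
 * Worked example (not in the original source): with the default
 * preemption rate of 100 per second, std_quantum_us comes out to
 * (1000 * 1000) / 100 = 10000 us, i.e. a 10 ms standard timeslice.
 */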
271
272 void
273 sched_timebase_init(void)
274 {
275 uint64_t abstime;
276 uint32_t shift;
277
278 /* standard timeslicing quantum */
279 clock_interval_to_absolutetime_interval(
280 std_quantum_us, NSEC_PER_USEC, &abstime);
281 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
282 std_quantum = abstime;
283
284 /* smallest remaining quantum (250 us) */
285 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
286 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
287 min_std_quantum = abstime;
288
289 /* smallest rt computation (50 us) */
290 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
291 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
292 min_rt_quantum = abstime;
293
294 /* maximum rt computation (50 ms) */
295 clock_interval_to_absolutetime_interval(
296 50, 1000*NSEC_PER_USEC, &abstime);
297 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
298 max_rt_quantum = abstime;
299
300 /* scheduler tick interval */
301 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
302 NSEC_PER_USEC, &abstime);
303 sched_tick_interval = abstime;
304
305 #if DEBUG
306 printf("Quantum: %d. Smallest quantum: %d. Min Rt/Max Rt: %d/%d."
307 " Tick: %d.\n",
308 std_quantum, min_std_quantum, min_rt_quantum, max_rt_quantum,
309 sched_tick_interval);
310 #endif
311
312 /*
313 * Compute conversion factor from usage to
314 * timesharing priorities with 5/8 ** n aging.
315 */
316 abstime = (abstime * 5) / 3;
317 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
318 abstime >>= 1;
319 sched_fixed_shift = shift;
320
321 max_unsafe_computation = max_unsafe_quanta * std_quantum;
322 max_poll_computation = max_poll_quanta * std_quantum;
323 }
324
325 void
326 wait_queues_init(void)
327 {
328 register int i;
329
330 for (i = 0; i < NUMQUEUES; i++) {
331 wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
332 }
333 }
334
335 /*
336 * Set up values for timeshare
337 * loading factors.
338 */
339 static void
340 load_shift_init(void)
341 {
342 int8_t k, *p = sched_load_shifts;
343 uint32_t i, j;
344
345 *p++ = INT8_MIN; *p++ = 0;
346
347 for (i = j = 2, k = 1; i < NRQS; ++k) {
348 for (j <<= 1; i < j; ++i)
349 *p++ = k;
350 }
351 }
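
/*
 * Note (not in the original source): the loop above fills
 * sched_load_shifts[n] with roughly log2(n): indices 2-3 get 1,
 * 4-7 get 2, 8-15 get 3, and so on, while index 0 is pinned to
 * INT8_MIN and index 1 to 0.
 */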
352
353 static void
354 preempt_pri_init(void)
355 {
356 int i, *p = sched_preempt_pri;
357
358 for (i = BASEPRI_FOREGROUND + 1; i < MINPRI_KERNEL; ++i)
359 setbit(i, p);
360
361 for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
362 setbit(i, p);
363 }
364
365 /*
366 * Thread wait timer expiration.
367 */
368 void
369 thread_timer_expire(
370 void *p0,
371 __unused void *p1)
372 {
373 thread_t thread = p0;
374 spl_t s;
375
376 s = splsched();
377 thread_lock(thread);
378 if (--thread->wait_timer_active == 0) {
379 if (thread->wait_timer_is_set) {
380 thread->wait_timer_is_set = FALSE;
381 clear_wait_internal(thread, THREAD_TIMED_OUT);
382 }
383 }
384 thread_unlock(thread);
385 splx(s);
386 }
387
388 /*
389 * thread_set_timer:
390 *
391 * Set a timer for the current thread, if the thread
392 * is ready to wait. Must be called between assert_wait()
393 * and thread_block().
394 */
395 void
396 thread_set_timer(
397 uint32_t interval,
398 uint32_t scale_factor)
399 {
400 thread_t thread = current_thread();
401 uint64_t deadline;
402 spl_t s;
403
404 s = splsched();
405 thread_lock(thread);
406 if ((thread->state & TH_WAIT) != 0) {
407 clock_interval_to_deadline(interval, scale_factor, &deadline);
408 if (!timer_call_enter(&thread->wait_timer, deadline))
409 thread->wait_timer_active++;
410 thread->wait_timer_is_set = TRUE;
411 }
412 thread_unlock(thread);
413 splx(s);
414 }
415
416 void
417 thread_set_timer_deadline(
418 uint64_t deadline)
419 {
420 thread_t thread = current_thread();
421 spl_t s;
422
423 s = splsched();
424 thread_lock(thread);
425 if ((thread->state & TH_WAIT) != 0) {
426 if (!timer_call_enter(&thread->wait_timer, deadline))
427 thread->wait_timer_active++;
428 thread->wait_timer_is_set = TRUE;
429 }
430 thread_unlock(thread);
431 splx(s);
432 }
433
434 void
435 thread_cancel_timer(void)
436 {
437 thread_t thread = current_thread();
438 spl_t s;
439
440 s = splsched();
441 thread_lock(thread);
442 if (thread->wait_timer_is_set) {
443 if (timer_call_cancel(&thread->wait_timer))
444 thread->wait_timer_active--;
445 thread->wait_timer_is_set = FALSE;
446 }
447 thread_unlock(thread);
448 splx(s);
449 }
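
/*
 * Illustrative sketch (not part of the original file): a bounded wait
 * arms the timer between assert_wait() and thread_block(); if the timer
 * fires first, thread_timer_expire() ends the wait with THREAD_TIMED_OUT.
 * A normal wakeup cancels a still-pending timer in thread_unblock(), and
 * thread_cancel_timer() clears one explicitly.  The event name is
 * hypothetical; the arguments request roughly a 100 ms timeout.
 *
 *	if (assert_wait((event_t)&obj->done, THREAD_UNINT) == THREAD_WAITING) {
 *		thread_set_timer(100, 1000*NSEC_PER_USEC);
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *		if (wres != THREAD_TIMED_OUT)
 *			thread_cancel_timer();
 *	}
 */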
450
451 /*
452 * thread_unblock:
453 *
454 * Unblock thread on wake up.
455 *
456 * Returns TRUE if the thread is still running.
457 *
458 * Thread must be locked.
459 */
460 boolean_t
461 thread_unblock(
462 thread_t thread,
463 wait_result_t wresult)
464 {
465 boolean_t result = FALSE;
466
467 /*
468 * Set wait_result.
469 */
470 thread->wait_result = wresult;
471
472 /*
473 * Cancel pending wait timer.
474 */
475 if (thread->wait_timer_is_set) {
476 if (timer_call_cancel(&thread->wait_timer))
477 thread->wait_timer_active--;
478 thread->wait_timer_is_set = FALSE;
479 }
480
481 /*
482 * Update scheduling state: not waiting,
483 * set running.
484 */
485 thread->state &= ~(TH_WAIT|TH_UNINT);
486
487 if (!(thread->state & TH_RUN)) {
488 thread->state |= TH_RUN;
489
490 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
491
492 /*
493 * Update run counts.
494 */
495 sched_run_incr();
496 if (thread->sched_mode & TH_MODE_TIMESHARE)
497 sched_share_incr();
498 }
499 else {
500 /*
501 * Signal if idling on another processor.
502 */
503 if (thread->state & TH_IDLE) {
504 processor_t processor = thread->last_processor;
505
506 if (processor != current_processor())
507 machine_signal_idle(processor);
508 }
509
510 result = TRUE;
511 }
512
513 /*
514 * Calculate deadline for real-time threads.
515 */
516 if (thread->sched_mode & TH_MODE_REALTIME) {
517 thread->realtime.deadline = mach_absolute_time();
518 thread->realtime.deadline += thread->realtime.constraint;
519 }
520
521 /*
522 * Clear old quantum, fail-safe computation, etc.
523 */
524 thread->current_quantum = 0;
525 thread->computation_metered = 0;
526 thread->reason = AST_NONE;
527
528 KERNEL_DEBUG_CONSTANT(
529 MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
530 (int)thread, (int)thread->sched_pri, 0, 0, 0);
531
532 return (result);
533 }
534
535 /*
536 * Routine: thread_go
537 * Purpose:
538 * Unblock and dispatch thread.
539 * Conditions:
540 * thread lock held, IPC locks may be held.
541 * thread must have been pulled from wait queue under same lock hold.
542 * Returns:
543 * KERN_SUCCESS - Thread was set running
544 * KERN_NOT_WAITING - Thread was not waiting
545 */
546 kern_return_t
547 thread_go(
548 thread_t thread,
549 wait_result_t wresult)
550 {
551 assert(thread->at_safe_point == FALSE);
552 assert(thread->wait_event == NO_EVENT64);
553 assert(thread->wait_queue == WAIT_QUEUE_NULL);
554
555 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
556 if (!thread_unblock(thread, wresult))
557 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
558
559 return (KERN_SUCCESS);
560 }
561
562 return (KERN_NOT_WAITING);
563 }
564
565 /*
566 * Routine: thread_mark_wait_locked
567 * Purpose:
568 * Mark a thread as waiting. If, given the circumstances,
569 * it doesn't want to wait (i.e. already aborted), then
570 * indicate that in the return value.
571 * Conditions:
572 * at splsched() and thread is locked.
573 */
574 __private_extern__
575 wait_result_t
576 thread_mark_wait_locked(
577 thread_t thread,
578 wait_interrupt_t interruptible)
579 {
580 boolean_t at_safe_point;
581
582 /*
583 * The thread may have certain types of interrupts/aborts masked
584 * off. Even if the wait location says these types of interrupts
585 * are OK, we have to honor mask settings (outer-scoped code may
586 * not be able to handle aborts at the moment).
587 */
588 if (interruptible > (thread->options & TH_OPT_INTMASK))
589 interruptible = thread->options & TH_OPT_INTMASK;
590
591 at_safe_point = (interruptible == THREAD_ABORTSAFE);
592
593 if ( interruptible == THREAD_UNINT ||
594 !(thread->sched_mode & TH_MODE_ABORT) ||
595 (!at_safe_point &&
596 (thread->sched_mode & TH_MODE_ABORTSAFELY))) {
597 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
598 thread->at_safe_point = at_safe_point;
599 return (thread->wait_result = THREAD_WAITING);
600 }
601 else
602 if (thread->sched_mode & TH_MODE_ABORTSAFELY)
603 thread->sched_mode &= ~TH_MODE_ISABORTED;
604
605 return (thread->wait_result = THREAD_INTERRUPTED);
606 }
607
608 /*
609 * Routine: thread_interrupt_level
610 * Purpose:
611 * Set the maximum interruptible state for the
612 * current thread. The effective value of any
613 * interruptible flag passed into assert_wait
614 * will never exceed this.
615 *
616 * Useful for code that must not be interrupted,
617 * but which calls code that doesn't know that.
618 * Returns:
619 * The old interrupt level for the thread.
620 */
621 __private_extern__
622 wait_interrupt_t
623 thread_interrupt_level(
624 wait_interrupt_t new_level)
625 {
626 thread_t thread = current_thread();
627 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
628
629 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
630
631 return result;
632 }
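
/*
 * Illustrative sketch (not part of the original file): bracketing a
 * region so that nested waits cannot be aborted, then restoring the
 * caller's previous interrupt level.
 *
 *	wait_interrupt_t	saved;
 *
 *	saved = thread_interrupt_level(THREAD_UNINT);
 *	...call code that may assert_wait()/thread_block()...
 *	thread_interrupt_level(saved);
 */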
633
634 /*
635 * Check to see if an assert wait is possible, without actually doing one.
636 * This is used by debug code in locks and elsewhere to verify that it is
637 * always OK to block when trying to take a blocking lock (since waiting
638 * for the actual assert_wait to catch the case may make it hard to detect
639 * this case).
640 */
641 boolean_t
642 assert_wait_possible(void)
643 {
644
645 thread_t thread;
646
647 #if DEBUG
648 if(debug_mode) return TRUE; /* Always succeed in debug mode */
649 #endif
650
651 thread = current_thread();
652
653 return (thread == NULL || wait_queue_assert_possible(thread));
654 }
655
656 /*
657 * assert_wait:
658 *
659 * Assert that the current thread is about to go to
660 * sleep until the specified event occurs.
661 */
662 wait_result_t
663 assert_wait(
664 event_t event,
665 wait_interrupt_t interruptible)
666 {
667 register wait_queue_t wq;
668 register int index;
669
670 assert(event != NO_EVENT);
671
672 index = wait_hash(event);
673 wq = &wait_queues[index];
674 return wait_queue_assert_wait(wq, event, interruptible, 0);
675 }
676
677 wait_result_t
678 assert_wait_timeout(
679 event_t event,
680 wait_interrupt_t interruptible,
681 uint32_t interval,
682 uint32_t scale_factor)
683 {
684 thread_t thread = current_thread();
685 wait_result_t wresult;
686 wait_queue_t wqueue;
687 uint64_t deadline;
688 spl_t s;
689
690 assert(event != NO_EVENT);
691 wqueue = &wait_queues[wait_hash(event)];
692
693 s = splsched();
694 wait_queue_lock(wqueue);
695 thread_lock(thread);
696
697 clock_interval_to_deadline(interval, scale_factor, &deadline);
698 wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event,
699 interruptible, deadline, thread);
700
701 thread_unlock(thread);
702 wait_queue_unlock(wqueue);
703 splx(s);
704
705 return (wresult);
706 }
707
708 wait_result_t
709 assert_wait_deadline(
710 event_t event,
711 wait_interrupt_t interruptible,
712 uint64_t deadline)
713 {
714 thread_t thread = current_thread();
715 wait_result_t wresult;
716 wait_queue_t wqueue;
717 spl_t s;
718
719 assert(event != NO_EVENT);
720 wqueue = &wait_queues[wait_hash(event)];
721
722 s = splsched();
723 wait_queue_lock(wqueue);
724 thread_lock(thread);
725
726 wresult = wait_queue_assert_wait64_locked(wqueue, (uint32_t)event,
727 interruptible, deadline, thread);
728
729 thread_unlock(thread);
730 wait_queue_unlock(wqueue);
731 splx(s);
732
733 return (wresult);
734 }
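
/*
 * Illustrative sketch (not part of the original file): waiting against
 * an absolute deadline rather than a relative interval.  The event name
 * is hypothetical; the deadline here is about 50 ms out.
 *
 *	uint64_t	deadline;
 *
 *	clock_interval_to_deadline(50, 1000*NSEC_PER_USEC, &deadline);
 *	if (assert_wait_deadline((event_t)&obj->done, THREAD_INTERRUPTIBLE,
 *						deadline) == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 */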
735
736 /*
737 * thread_sleep_fast_usimple_lock:
738 *
739 * Cause the current thread to wait until the specified event
740 * occurs. The specified simple_lock is unlocked before releasing
741 * the cpu and re-acquired as part of waking up.
742 *
743 * This is the simple lock sleep interface for components that use a
744 * faster version of simple_lock() than is provided by usimple_lock().
745 */
746 __private_extern__ wait_result_t
747 thread_sleep_fast_usimple_lock(
748 event_t event,
749 simple_lock_t lock,
750 wait_interrupt_t interruptible)
751 {
752 wait_result_t res;
753
754 res = assert_wait(event, interruptible);
755 if (res == THREAD_WAITING) {
756 simple_unlock(lock);
757 res = thread_block(THREAD_CONTINUE_NULL);
758 simple_lock(lock);
759 }
760 return res;
761 }
762
763
764 /*
765 * thread_sleep_usimple_lock:
766 *
767 * Cause the current thread to wait until the specified event
768 * occurs. The specified usimple_lock is unlocked before releasing
769 * the cpu and re-acquired as part of waking up.
770 *
771 * This is the simple lock sleep interface for components where
772 * simple_lock() is defined in terms of usimple_lock().
773 */
774 wait_result_t
775 thread_sleep_usimple_lock(
776 event_t event,
777 usimple_lock_t lock,
778 wait_interrupt_t interruptible)
779 {
780 wait_result_t res;
781
782 res = assert_wait(event, interruptible);
783 if (res == THREAD_WAITING) {
784 usimple_unlock(lock);
785 res = thread_block(THREAD_CONTINUE_NULL);
786 usimple_lock(lock);
787 }
788 return res;
789 }
790
791 /*
792 * thread_sleep_mutex:
793 *
794 * Cause the current thread to wait until the specified event
795 * occurs. The specified mutex is unlocked before releasing
796 * the cpu. The mutex will be re-acquired before returning.
797 *
798 * JMM - Add hint to make sure mutex is available before rousting
799 */
800 wait_result_t
801 thread_sleep_mutex(
802 event_t event,
803 mutex_t *mutex,
804 wait_interrupt_t interruptible)
805 {
806 wait_result_t res;
807
808 res = assert_wait(event, interruptible);
809 if (res == THREAD_WAITING) {
810 mutex_unlock(mutex);
811 res = thread_block(THREAD_CONTINUE_NULL);
812 mutex_lock(mutex);
813 }
814 return res;
815 }
816
817 /*
818 * thread_sleep_mutex_deadline:
819 *
820 * Cause the current thread to wait until the specified event
821 * (or deadline) occurs. The specified mutex is unlocked before
822 * releasing the cpu. The mutex will be re-acquired before returning.
823 */
824 wait_result_t
825 thread_sleep_mutex_deadline(
826 event_t event,
827 mutex_t *mutex,
828 uint64_t deadline,
829 wait_interrupt_t interruptible)
830 {
831 wait_result_t res;
832
833 res = assert_wait_deadline(event, interruptible, deadline);
834 if (res == THREAD_WAITING) {
835 mutex_unlock(mutex);
836 res = thread_block(THREAD_CONTINUE_NULL);
837 mutex_lock(mutex);
838 }
839 return res;
840 }
841
842 /*
843 * thread_sleep_lock_write:
844 *
845 * Cause the current thread to wait until the specified event
846 * occurs. The specified (write) lock is unlocked before releasing
847 * the cpu. The (write) lock will be re-acquired before returning.
848 */
849 wait_result_t
850 thread_sleep_lock_write(
851 event_t event,
852 lock_t *lock,
853 wait_interrupt_t interruptible)
854 {
855 wait_result_t res;
856
857 res = assert_wait(event, interruptible);
858 if (res == THREAD_WAITING) {
859 lock_write_done(lock);
860 res = thread_block(THREAD_CONTINUE_NULL);
861 lock_write(lock);
862 }
863 return res;
864 }
865
866 /*
867 * thread_stop:
868 *
869 * Force a preemption point for a thread and wait
870 * for it to stop running. Arbitrates access among
871 * multiple stop requests. (released by unstop)
872 *
873 * The thread must enter a wait state and stop via a
874 * separate means.
875 *
876 * Returns FALSE if interrupted.
877 */
878 boolean_t
879 thread_stop(
880 thread_t thread)
881 {
882 wait_result_t wresult;
883 spl_t s = splsched();
884
885 wake_lock(thread);
886 thread_lock(thread);
887
888 while (thread->state & TH_SUSP) {
889 thread->wake_active = TRUE;
890 thread_unlock(thread);
891
892 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
893 wake_unlock(thread);
894 splx(s);
895
896 if (wresult == THREAD_WAITING)
897 wresult = thread_block(THREAD_CONTINUE_NULL);
898
899 if (wresult != THREAD_AWAKENED)
900 return (FALSE);
901
902 s = splsched();
903 wake_lock(thread);
904 thread_lock(thread);
905 }
906
907 thread->state |= TH_SUSP;
908
909 while (thread->state & TH_RUN) {
910 processor_t processor = thread->last_processor;
911
912 if (processor != PROCESSOR_NULL && processor->active_thread == thread)
913 cause_ast_check(processor);
914
915 thread->wake_active = TRUE;
916 thread_unlock(thread);
917
918 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
919 wake_unlock(thread);
920 splx(s);
921
922 if (wresult == THREAD_WAITING)
923 wresult = thread_block(THREAD_CONTINUE_NULL);
924
925 if (wresult != THREAD_AWAKENED) {
926 thread_unstop(thread);
927 return (FALSE);
928 }
929
930 s = splsched();
931 wake_lock(thread);
932 thread_lock(thread);
933 }
934
935 thread_unlock(thread);
936 wake_unlock(thread);
937 splx(s);
938
939 return (TRUE);
940 }
941
942 /*
943 * thread_unstop:
944 *
945 * Release a previous stop request and set
946 * the thread running if appropriate.
947 *
948 * Use only after a successful stop operation.
949 */
950 void
951 thread_unstop(
952 thread_t thread)
953 {
954 spl_t s = splsched();
955
956 wake_lock(thread);
957 thread_lock(thread);
958
959 if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
960 thread->state &= ~TH_SUSP;
961 thread_unblock(thread, THREAD_AWAKENED);
962
963 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
964 }
965 else
966 if (thread->state & TH_SUSP) {
967 thread->state &= ~TH_SUSP;
968
969 if (thread->wake_active) {
970 thread->wake_active = FALSE;
971 thread_unlock(thread);
972
973 thread_wakeup(&thread->wake_active);
974 wake_unlock(thread);
975 splx(s);
976
977 return;
978 }
979 }
980
981 thread_unlock(thread);
982 wake_unlock(thread);
983 splx(s);
984 }
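
/*
 * Illustrative sketch (not part of the original file): a caller of
 * thread_stop() must check the result and balance a successful stop
 * with thread_unstop(); the examination step is hypothetical.
 *
 *	if (thread_stop(thread)) {
 *		...examine the stopped thread (TH_RUN is clear)...
 *		thread_unstop(thread);
 *	}
 *	else {
 *		...wait was interrupted; the stop was not granted...
 *	}
 */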
985
986 /*
987 * thread_wait:
988 *
989 * Wait for a thread to stop running. (non-interruptible)
990 *
991 */
992 void
993 thread_wait(
994 thread_t thread)
995 {
996 wait_result_t wresult;
997 spl_t s = splsched();
998
999 wake_lock(thread);
1000 thread_lock(thread);
1001
1002 while (thread->state & TH_RUN) {
1003 processor_t processor = thread->last_processor;
1004
1005 if (processor != PROCESSOR_NULL && processor->active_thread == thread)
1006 cause_ast_check(processor);
1007
1008 thread->wake_active = TRUE;
1009 thread_unlock(thread);
1010
1011 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1012 wake_unlock(thread);
1013 splx(s);
1014
1015 if (wresult == THREAD_WAITING)
1016 thread_block(THREAD_CONTINUE_NULL);
1017
1018 s = splsched();
1019 wake_lock(thread);
1020 thread_lock(thread);
1021 }
1022
1023 thread_unlock(thread);
1024 wake_unlock(thread);
1025 splx(s);
1026 }
1027
1028 /*
1029 * Routine: clear_wait_internal
1030 *
1031 * Clear the wait condition for the specified thread.
1032 * Start the thread executing if that is appropriate.
1033 * Arguments:
1034 * thread thread to awaken
1035 * result Wakeup result the thread should see
1036 * Conditions:
1037 * At splsched
1038 * the thread is locked.
1039 * Returns:
1040 * KERN_SUCCESS thread was rousted out of a wait
1041 * KERN_FAILURE thread was waiting but could not be rousted
1042 * KERN_NOT_WAITING thread was not waiting
1043 */
1044 __private_extern__ kern_return_t
1045 clear_wait_internal(
1046 thread_t thread,
1047 wait_result_t wresult)
1048 {
1049 wait_queue_t wq = thread->wait_queue;
1050 int i = LockTimeOut;
1051
1052 do {
1053 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1054 return (KERN_FAILURE);
1055
1056 if (wq != WAIT_QUEUE_NULL) {
1057 if (wait_queue_lock_try(wq)) {
1058 wait_queue_pull_thread_locked(wq, thread, TRUE);
1059 /* wait queue unlocked, thread still locked */
1060 }
1061 else {
1062 thread_unlock(thread);
1063 delay(1);
1064
1065 thread_lock(thread);
1066 if (wq != thread->wait_queue)
1067 return (KERN_NOT_WAITING);
1068
1069 continue;
1070 }
1071 }
1072
1073 return (thread_go(thread, wresult));
1074 } while (--i > 0);
1075
1076 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1077 thread, wq, cpu_number());
1078
1079 return (KERN_FAILURE);
1080 }
1081
1082
1083 /*
1084 * clear_wait:
1085 *
1086 * Clear the wait condition for the specified thread. Start the thread
1087 * executing if that is appropriate.
1088 *
1089 * parameters:
1090 * thread thread to awaken
1091 * result Wakeup result the thread should see
1092 */
1093 kern_return_t
1094 clear_wait(
1095 thread_t thread,
1096 wait_result_t result)
1097 {
1098 kern_return_t ret;
1099 spl_t s;
1100
1101 s = splsched();
1102 thread_lock(thread);
1103 ret = clear_wait_internal(thread, result);
1104 thread_unlock(thread);
1105 splx(s);
1106 return ret;
1107 }
1108
1109
1110 /*
1111 * thread_wakeup_prim:
1112 *
1113 * Common routine for thread_wakeup, thread_wakeup_with_result,
1114 * and thread_wakeup_one.
1115 *
1116 */
1117 kern_return_t
1118 thread_wakeup_prim(
1119 event_t event,
1120 boolean_t one_thread,
1121 wait_result_t result)
1122 {
1123 register wait_queue_t wq;
1124 register int index;
1125
1126 index = wait_hash(event);
1127 wq = &wait_queues[index];
1128 if (one_thread)
1129 return (wait_queue_wakeup_one(wq, event, result));
1130 else
1131 return (wait_queue_wakeup_all(wq, event, result));
1132 }
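
/*
 * Note (not part of the original file): callers normally reach
 * thread_wakeup_prim() through the wrapper macros declared in
 * kern/sched_prim.h, for example (hypothetical event):
 *
 *	thread_wakeup((event_t)&obj->flag);		wake all waiters
 *	thread_wakeup_one((event_t)&obj->flag);		wake a single waiter
 *	thread_wakeup_with_result((event_t)&obj->flag, THREAD_RESTART);
 */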
1133
1134 /*
1135 * thread_bind:
1136 *
1137 * Force the current thread to execute on the specified processor.
1138 *
1139 * Returns the previous binding. PROCESSOR_NULL means
1140 * not bound.
1141 *
1142 * XXX - DO NOT export this to users - XXX
1143 */
1144 processor_t
1145 thread_bind(
1146 processor_t processor)
1147 {
1148 thread_t self = current_thread();
1149 processor_t prev;
1150 spl_t s;
1151
1152 s = splsched();
1153 thread_lock(self);
1154
1155 prev = self->bound_processor;
1156 self->bound_processor = processor;
1157
1158 thread_unlock(self);
1159 splx(s);
1160
1161 return (prev);
1162 }
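
/*
 * Illustrative sketch (not part of the original file): the binding takes
 * effect at the next dispatch, so callers typically block right after
 * binding to migrate, then restore the previous binding when finished.
 *
 *	processor_t	prev;
 *
 *	prev = thread_bind(processor);
 *	thread_block(THREAD_CONTINUE_NULL);
 *	...work that must run on the bound processor...
 *	thread_bind(prev);
 */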
1163
1164 /*
1165 * thread_select:
1166 *
1167 * Select a new thread for the current processor to execute.
1168 *
1169 * May select the current thread, which must be locked.
1170 */
1171 static thread_t
1172 thread_select(
1173 thread_t thread,
1174 processor_t processor)
1175 {
1176 processor_set_t pset = processor->processor_set;
1177 thread_t new_thread;
1178 boolean_t other_runnable;
1179
1180 do {
1181 /*
1182 * Update the priority.
1183 */
1184 if (thread->sched_stamp != sched_tick)
1185 update_priority(thread);
1186
1187 processor->current_pri = thread->sched_pri;
1188
1189 pset_lock(pset);
1190
1191 simple_lock(&rt_lock);
1192
1193 /*
1194 * Check for other runnable threads.
1195 */
1196 other_runnable = processor->runq.count > 0 || rt_runq.count > 0;
1197
1198 /*
1199 * Test to see if the current thread should continue
1200 * to run on this processor. Must be runnable, and not
1201 * bound to a different processor, nor be in the wrong
1202 * processor set.
1203 */
1204 if ( thread->state == TH_RUN &&
1205 (thread->bound_processor == PROCESSOR_NULL ||
1206 thread->bound_processor == processor) &&
1207 (thread->affinity_set == AFFINITY_SET_NULL ||
1208 thread->affinity_set->aset_pset == pset) ) {
1209 if ( thread->sched_pri >= BASEPRI_RTQUEUES &&
1210 first_timeslice(processor) ) {
1211 if (rt_runq.highq >= BASEPRI_RTQUEUES) {
1212 register run_queue_t runq = &rt_runq;
1213 register queue_t q;
1214
1215 q = runq->queues + runq->highq;
1216 if (((thread_t)q->next)->realtime.deadline <
1217 processor->deadline) {
1218 thread = (thread_t)q->next;
1219 ((queue_entry_t)thread)->next->prev = q;
1220 q->next = ((queue_entry_t)thread)->next;
1221 thread->runq = PROCESSOR_NULL;
1222 runq->count--; runq->urgency--;
1223 assert(runq->urgency >= 0);
1224 if (queue_empty(q)) {
1225 if (runq->highq != IDLEPRI)
1226 clrbit(MAXPRI - runq->highq, runq->bitmap);
1227 runq->highq = MAXPRI - ffsbit(runq->bitmap);
1228 }
1229 }
1230 }
1231
1232 simple_unlock(&rt_lock);
1233
1234 processor->deadline = thread->realtime.deadline;
1235
1236 pset_unlock(pset);
1237
1238 return (thread);
1239 }
1240
1241 if ( (!other_runnable ||
1242 (processor->runq.highq < thread->sched_pri &&
1243 rt_runq.highq < thread->sched_pri)) ) {
1244
1245 simple_unlock(&rt_lock);
1246
1247 /* I am the highest priority runnable (non-idle) thread */
1248
1249 pset_hint_low(pset, processor);
1250 pset_hint_high(pset, processor);
1251
1252 processor->deadline = UINT64_MAX;
1253
1254 pset_unlock(pset);
1255
1256 return (thread);
1257 }
1258 }
1259
1260 if (other_runnable)
1261 return choose_thread(processor);
1262
1263 simple_unlock(&rt_lock);
1264
1265 /*
1266 * No runnable threads, attempt to steal
1267 * from other processors.
1268 */
1269 if (pset->high_hint != PROCESSOR_NULL && pset->high_hint->runq.count > 0) {
1270 new_thread = steal_thread(pset->high_hint);
1271 if (new_thread != THREAD_NULL) {
1272 pset_unlock(pset);
1273
1274 return (new_thread);
1275 }
1276 }
1277
1278 /*
1279 * Nothing is runnable, so set this processor idle if it
1280 * was running.
1281 */
1282 if (processor->state == PROCESSOR_RUNNING) {
1283 remqueue(&pset->active_queue, (queue_entry_t)processor);
1284 processor->state = PROCESSOR_IDLE;
1285
1286 enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
1287 pset->low_hint = processor;
1288 pset->idle_count++;
1289 }
1290
1291 processor->deadline = UINT64_MAX;
1292
1293 pset_unlock(pset);
1294
1295 /*
1296 * Choose idle thread if fast idle is not possible.
1297 */
1298 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active)
1299 return (processor->idle_thread);
1300
1301 /*
1302 * Perform idling activities directly without a
1303 * context switch. Return dispatched thread,
1304 * else check again for a runnable thread.
1305 */
1306 new_thread = thread_select_idle(thread, processor);
1307
1308 } while (new_thread == THREAD_NULL);
1309
1310 return (new_thread);
1311 }
1312
1313 /*
1314 * thread_select_idle:
1315 *
1316 * Idle the processor using the current thread context.
1317 *
1318 * Called with thread locked, then dropped and relocked.
1319 */
1320 static thread_t
1321 thread_select_idle(
1322 thread_t thread,
1323 processor_t processor)
1324 {
1325 thread_t new_thread;
1326
1327 if (thread->sched_mode & TH_MODE_TIMESHARE)
1328 sched_share_decr();
1329 sched_run_decr();
1330
1331 thread->state |= TH_IDLE;
1332 processor->current_pri = IDLEPRI;
1333
1334 thread_unlock(thread);
1335
1336 /*
1337 * Switch execution timing to processor idle thread.
1338 */
1339 processor->last_dispatch = mach_absolute_time();
1340 thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
1341 PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
1342
1343 /*
1344 * Cancel the quantum timer while idling.
1345 */
1346 timer_call_cancel(&processor->quantum_timer);
1347 processor->timeslice = 0;
1348
1349 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
1350
1351 /*
1352 * Enable interrupts and perform idling activities. No
1353 * preemption due to TH_IDLE being set.
1354 */
1355 spllo(); new_thread = processor_idle(thread, processor);
1356
1357 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
1358
1359 thread_lock(thread);
1360
1361 /*
1362 * If awakened, switch to thread timer and start a new quantum.
1363 * Otherwise skip; we will context switch to another thread or return here.
1364 */
1365 if (!(thread->state & TH_WAIT)) {
1366 processor->last_dispatch = mach_absolute_time();
1367 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1368 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1369
1370 thread_quantum_init(thread);
1371
1372 processor->quantum_end = processor->last_dispatch + thread->current_quantum;
1373 timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end);
1374 processor->timeslice = 1;
1375
1376 thread->computation_epoch = processor->last_dispatch;
1377 }
1378
1379 thread->state &= ~TH_IDLE;
1380
1381 sched_run_incr();
1382 if (thread->sched_mode & TH_MODE_TIMESHARE)
1383 sched_share_incr();
1384
1385 return (new_thread);
1386 }
1387
1388 /*
1389 * Perform a context switch and start executing the new thread.
1390 *
1391 * Returns FALSE on failure, and the thread is re-dispatched.
1392 *
1393 * Called at splsched.
1394 */
1395
1396 #define funnel_release_check(thread, debug) \
1397 MACRO_BEGIN \
1398 if ((thread)->funnel_state & TH_FN_OWNED) { \
1399 (thread)->funnel_state = TH_FN_REFUNNEL; \
1400 KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, \
1401 (thread)->funnel_lock, (debug), 0, 0, 0); \
1402 funnel_unlock((thread)->funnel_lock); \
1403 } \
1404 MACRO_END
1405
1406 #define funnel_refunnel_check(thread, debug) \
1407 MACRO_BEGIN \
1408 if ((thread)->funnel_state & TH_FN_REFUNNEL) { \
1409 kern_return_t result = (thread)->wait_result; \
1410 \
1411 (thread)->funnel_state = 0; \
1412 KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, \
1413 (thread)->funnel_lock, (debug), 0, 0, 0); \
1414 funnel_lock((thread)->funnel_lock); \
1415 KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, \
1416 (thread)->funnel_lock, (debug), 0, 0, 0); \
1417 (thread)->funnel_state = TH_FN_OWNED; \
1418 (thread)->wait_result = result; \
1419 } \
1420 MACRO_END
1421
1422 static boolean_t
1423 thread_invoke(
1424 register thread_t self,
1425 register thread_t thread,
1426 ast_t reason)
1427 {
1428 thread_continue_t continuation = self->continuation;
1429 void *parameter = self->parameter;
1430 processor_t processor;
1431
1432 if (get_preemption_level() != 0)
1433 panic("thread_invoke: preemption_level %d\n",
1434 get_preemption_level());
1435
1436 assert(self == current_thread());
1437
1438 /*
1439 * Mark thread interruptible.
1440 */
1441 thread_lock(thread);
1442 thread->state &= ~TH_UNINT;
1443
1444 #if DEBUG
1445 assert(thread_runnable(thread));
1446 #endif
1447
1448 /*
1449 * Allow time constraint threads to hang onto
1450 * a stack.
1451 */
1452 if ((self->sched_mode & TH_MODE_REALTIME) && !self->reserved_stack)
1453 self->reserved_stack = self->kernel_stack;
1454
1455 if (continuation != NULL) {
1456 if (!thread->kernel_stack) {
1457 /*
1458 * If we are using a privileged stack,
1459 * check to see whether we can exchange it with
1460 * that of the other thread.
1461 */
1462 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
1463 goto need_stack;
1464
1465 /*
1466 * Context switch by performing a stack handoff.
1467 */
1468 continuation = thread->continuation;
1469 parameter = thread->parameter;
1470
1471 processor = current_processor();
1472 processor->active_thread = thread;
1473 processor->current_pri = thread->sched_pri;
1474 if (thread->last_processor != processor && thread->last_processor != NULL) {
1475 if (thread->last_processor->processor_set != processor->processor_set)
1476 thread->ps_switch++;
1477 thread->p_switch++;
1478 }
1479 thread->last_processor = processor;
1480 thread->c_switch++;
1481 ast_context(thread);
1482 thread_unlock(thread);
1483
1484 self->reason = reason;
1485
1486 processor->last_dispatch = mach_absolute_time();
1487 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1488 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1489
1490 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
1491 self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0);
1492
1493 TLOG(1, "thread_invoke: calling machine_stack_handoff\n");
1494 machine_stack_handoff(self, thread);
1495
1496 thread_dispatch(self, thread);
1497
1498 thread->continuation = thread->parameter = NULL;
1499
1500 counter(c_thread_invoke_hits++);
1501
1502 funnel_refunnel_check(thread, 2);
1503 (void) spllo();
1504
1505 assert(continuation);
1506 call_continuation(continuation, parameter, thread->wait_result);
1507 /*NOTREACHED*/
1508 }
1509 else if (thread == self) {
1510 /* same thread but with continuation */
1511 ast_context(self);
1512 counter(++c_thread_invoke_same);
1513 thread_unlock(self);
1514
1515 self->continuation = self->parameter = NULL;
1516
1517 funnel_refunnel_check(self, 3);
1518 (void) spllo();
1519
1520 call_continuation(continuation, parameter, self->wait_result);
1521 /*NOTREACHED*/
1522 }
1523 }
1524 else {
1525 /*
1526 * Check that the other thread has a stack
1527 */
1528 if (!thread->kernel_stack) {
1529 need_stack:
1530 if (!stack_alloc_try(thread)) {
1531 counter(c_thread_invoke_misses++);
1532 thread_unlock(thread);
1533 thread_stack_enqueue(thread);
1534 return (FALSE);
1535 }
1536 }
1537 else if (thread == self) {
1538 ast_context(self);
1539 counter(++c_thread_invoke_same);
1540 thread_unlock(self);
1541 return (TRUE);
1542 }
1543 }
1544
1545 /*
1546 * Context switch by full context save.
1547 */
1548 processor = current_processor();
1549 processor->active_thread = thread;
1550 processor->current_pri = thread->sched_pri;
1551 if (thread->last_processor != processor && thread->last_processor != NULL) {
1552 if (thread->last_processor->processor_set != processor->processor_set)
1553 thread->ps_switch++;
1554 thread->p_switch++;
1555 }
1556 thread->last_processor = processor;
1557 thread->c_switch++;
1558 ast_context(thread);
1559 thread_unlock(thread);
1560
1561 counter(c_thread_invoke_csw++);
1562
1563 assert(self->runq == PROCESSOR_NULL);
1564 self->reason = reason;
1565
1566 processor->last_dispatch = mach_absolute_time();
1567 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1568 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1569
1570 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
1571 (int)self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0);
1572
1573 /*
1574 * This is where we actually switch register context,
1575 * and address space if required. We will next run
1576 * as a result of a subsequent context switch.
1577 */
1578 thread = machine_switch_context(self, continuation, thread);
1579 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
1580
1581 /*
1582 * We have been resumed and are set to run.
1583 */
1584 thread_dispatch(thread, self);
1585
1586 if (continuation) {
1587 self->continuation = self->parameter = NULL;
1588
1589 funnel_refunnel_check(self, 3);
1590 (void) spllo();
1591
1592 call_continuation(continuation, parameter, self->wait_result);
1593 /*NOTREACHED*/
1594 }
1595
1596 return (TRUE);
1597 }
1598
1599 /*
1600 * thread_dispatch:
1601 *
1602 * Handle threads at context switch. Re-dispatch the other thread
1603 * if it is still running, otherwise update its run state and perform
1604 * special actions. Update the quantum for the other thread and begin
1605 * the quantum for ourselves.
1606 *
1607 * Called at splsched.
1608 */
1609 void
1610 thread_dispatch(
1611 thread_t thread,
1612 thread_t self)
1613 {
1614 processor_t processor = self->last_processor;
1615
1616 if (thread != THREAD_NULL) {
1617 /*
1618 * If blocked at a continuation, discard
1619 * the stack.
1620 */
1621 if (thread->continuation != NULL && thread->kernel_stack != 0)
1622 stack_free(thread);
1623
1624 if (!(thread->state & TH_IDLE)) {
1625 wake_lock(thread);
1626 thread_lock(thread);
1627
1628 /*
1629 * Compute remainder of current quantum.
1630 */
1631 if ( first_timeslice(processor) &&
1632 processor->quantum_end > processor->last_dispatch )
1633 thread->current_quantum = (processor->quantum_end - processor->last_dispatch);
1634 else
1635 thread->current_quantum = 0;
1636
1637 if (thread->sched_mode & TH_MODE_REALTIME) {
1638 /*
1639 * Cancel the deadline if the thread has
1640 * consumed the entire quantum.
1641 */
1642 if (thread->current_quantum == 0) {
1643 thread->realtime.deadline = UINT64_MAX;
1644 thread->reason |= AST_QUANTUM;
1645 }
1646 }
1647 else {
1648 /*
1649 * For non-realtime threads treat a tiny
1650 * remaining quantum as an expired quantum
1651 * but include what's left next time.
1652 */
1653 if (thread->current_quantum < min_std_quantum) {
1654 thread->reason |= AST_QUANTUM;
1655 thread->current_quantum += std_quantum;
1656 }
1657 }
1658
1659 /*
1660 * If we are doing a direct handoff then
1661 * take the remainder of the quantum.
1662 */
1663 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
1664 self->current_quantum = thread->current_quantum;
1665 thread->reason |= AST_QUANTUM;
1666 thread->current_quantum = 0;
1667 }
1668
1669 thread->last_switch = processor->last_dispatch;
1670
1671 thread->computation_metered += (thread->last_switch - thread->computation_epoch);
1672
1673 if (!(thread->state & TH_WAIT)) {
1674 /*
1675 * Still running.
1676 */
1677 if (thread->reason & AST_QUANTUM)
1678 thread_setrun(thread, SCHED_TAILQ);
1679 else
1680 if (thread->reason & AST_PREEMPT)
1681 thread_setrun(thread, SCHED_HEADQ);
1682 else
1683 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1684
1685 thread->reason = AST_NONE;
1686
1687 thread_unlock(thread);
1688 wake_unlock(thread);
1689 }
1690 else {
1691 /*
1692 * Waiting.
1693 */
1694 thread->state &= ~TH_RUN;
1695
1696 if (thread->sched_mode & TH_MODE_TIMESHARE)
1697 sched_share_decr();
1698 sched_run_decr();
1699
1700 if (thread->wake_active) {
1701 thread->wake_active = FALSE;
1702 thread_unlock(thread);
1703
1704 thread_wakeup(&thread->wake_active);
1705 }
1706 else
1707 thread_unlock(thread);
1708
1709 wake_unlock(thread);
1710
1711 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
1712
1713 if (thread->state & TH_TERMINATE)
1714 thread_terminate_enqueue(thread);
1715 }
1716 }
1717 }
1718
1719 if (!(self->state & TH_IDLE)) {
1720 /*
1721 * Get a new quantum if none remaining.
1722 */
1723 if (self->current_quantum == 0)
1724 thread_quantum_init(self);
1725
1726 /*
1727 * Set up quantum timer and timeslice.
1728 */
1729 processor->quantum_end = (processor->last_dispatch + self->current_quantum);
1730 timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end);
1731
1732 processor->timeslice = 1;
1733
1734 self->last_switch = processor->last_dispatch;
1735
1736 self->computation_epoch = self->last_switch;
1737 }
1738 else {
1739 timer_call_cancel(&processor->quantum_timer);
1740 processor->timeslice = 0;
1741 }
1742 }
1743
1744 /*
1745 * thread_block_reason:
1746 *
1747 * Forces a reschedule, blocking the caller if a wait
1748 * has been asserted.
1749 *
1750 * If a continuation is specified, then thread_invoke will
1751 * attempt to discard the thread's kernel stack. When the
1752 * thread resumes, it will execute the continuation function
1753 * on a new kernel stack.
1754 */
1755 counter(mach_counter_t c_thread_block_calls = 0;)
1756
1757 wait_result_t
1758 thread_block_reason(
1759 thread_continue_t continuation,
1760 void *parameter,
1761 ast_t reason)
1762 {
1763 register thread_t self = current_thread();
1764 register processor_t processor;
1765 register thread_t new_thread;
1766 spl_t s;
1767
1768 counter(++c_thread_block_calls);
1769
1770 s = splsched();
1771
1772 if (!(reason & AST_PREEMPT))
1773 funnel_release_check(self, 2);
1774
1775 processor = current_processor();
1776
1777 /* If we're explicitly yielding, force a subsequent quantum */
1778 if (reason & AST_YIELD)
1779 processor->timeslice = 0;
1780
1781 /* We're handling all scheduling AST's */
1782 ast_off(AST_SCHEDULING);
1783
1784 self->continuation = continuation;
1785 self->parameter = parameter;
1786
1787 do {
1788 thread_lock(self);
1789 new_thread = thread_select(self, processor);
1790 thread_unlock(self);
1791 } while (!thread_invoke(self, new_thread, reason));
1792
1793 funnel_refunnel_check(self, 5);
1794 splx(s);
1795
1796 return (self->wait_result);
1797 }
1798
1799 /*
1800 * thread_block:
1801 *
1802 * Block the current thread if a wait has been asserted.
1803 */
1804 wait_result_t
1805 thread_block(
1806 thread_continue_t continuation)
1807 {
1808 return thread_block_reason(continuation, NULL, AST_NONE);
1809 }
1810
1811 wait_result_t
1812 thread_block_parameter(
1813 thread_continue_t continuation,
1814 void *parameter)
1815 {
1816 return thread_block_reason(continuation, parameter, AST_NONE);
1817 }
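
/*
 * Illustrative sketch (not part of the original file): blocking with a
 * continuation allows the kernel stack to be discarded while waiting.
 * When the thread resumes, the continuation runs on a fresh stack with
 * the saved parameter and the wait result, and it must not return.
 * The function and object names below are hypothetical.
 *
 *	static void
 *	example_continue(void *param, wait_result_t wresult)
 *	{
 *		struct example_obj *obj = param;
 *
 *		...handle wresult, finish the operation...
 *		...end in another thread_block() or equivalent; never return...
 *	}
 *
 *	assert_wait((event_t)&obj->done, THREAD_UNINT);
 *	thread_block_parameter(example_continue, obj);
 */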
1818
1819 /*
1820 * thread_run:
1821 *
1822 * Switch directly from the current thread to the
1823 * new thread, handing off our quantum if appropriate.
1824 *
1825 * New thread must be runnable, and not on a run queue.
1826 *
1827 * Called at splsched.
1828 */
1829 int
1830 thread_run(
1831 thread_t self,
1832 thread_continue_t continuation,
1833 void *parameter,
1834 thread_t new_thread)
1835 {
1836 ast_t handoff = AST_HANDOFF;
1837
1838 funnel_release_check(self, 3);
1839
1840 self->continuation = continuation;
1841 self->parameter = parameter;
1842
1843 while (!thread_invoke(self, new_thread, handoff)) {
1844 processor_t processor = current_processor();
1845
1846 thread_lock(self);
1847 new_thread = thread_select(self, processor);
1848 thread_unlock(self);
1849 handoff = AST_NONE;
1850 }
1851
1852 funnel_refunnel_check(self, 6);
1853
1854 return (self->wait_result);
1855 }
1856
1857 /*
1858 * thread_continue:
1859 *
1860 * Called at splsched when a thread first receives
1861 * a new stack after a continuation.
1862 */
1863 void
1864 thread_continue(
1865 register thread_t thread)
1866 {
1867 register thread_t self = current_thread();
1868 register thread_continue_t continuation;
1869 register void *parameter;
1870
1871 continuation = self->continuation;
1872 parameter = self->parameter;
1873
1874 thread_dispatch(thread, self);
1875
1876 self->continuation = self->parameter = NULL;
1877
1878 funnel_refunnel_check(self, 4);
1879
1880 if (thread != THREAD_NULL)
1881 (void)spllo();
1882
1883 TLOG(1, "thread_continue: calling call_continuation \n");
1884 call_continuation(continuation, parameter, self->wait_result);
1885 /*NOTREACHED*/
1886 }
1887
1888 /*
1889 * run_queue_init:
1890 *
1891 * Initialize a run queue before first use.
1892 */
1893 void
1894 run_queue_init(
1895 run_queue_t rq)
1896 {
1897 int i;
1898
1899 rq->highq = IDLEPRI;
1900 for (i = 0; i < NRQBM; i++)
1901 rq->bitmap[i] = 0;
1902 setbit(MAXPRI - IDLEPRI, rq->bitmap);
1903 rq->urgency = rq->count = 0;
1904 for (i = 0; i < NRQS; i++)
1905 queue_init(&rq->queues[i]);
1906 }
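
/*
 * Note (not part of the original file): the bitmap is indexed by
 * (MAXPRI - pri), so ffsbit() locates the highest occupied priority;
 * e.g. enqueueing a single thread at priority 80 sets bit (MAXPRI - 80)
 * and leaves rq->highq at 80.
 */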
1907
1908 /*
1909 * run_queue_dequeue:
1910 *
1911 * Perform a dequeue operation on a run queue,
1912 * and return the resulting thread.
1913 *
1914 * The run queue must be locked (see run_queue_remove()
1915 * for more info), and not empty.
1916 */
1917 static thread_t
1918 run_queue_dequeue(
1919 run_queue_t rq,
1920 integer_t options)
1921 {
1922 thread_t thread;
1923 queue_t queue = rq->queues + rq->highq;
1924
1925 if (options & SCHED_HEADQ) {
1926 thread = (thread_t)queue->next;
1927 ((queue_entry_t)thread)->next->prev = queue;
1928 queue->next = ((queue_entry_t)thread)->next;
1929 }
1930 else {
1931 thread = (thread_t)queue->prev;
1932 ((queue_entry_t)thread)->prev->next = queue;
1933 queue->prev = ((queue_entry_t)thread)->prev;
1934 }
1935
1936 thread->runq = PROCESSOR_NULL;
1937 rq->count--;
1938 if (testbit(rq->highq, sched_preempt_pri)) {
1939 rq->urgency--; assert(rq->urgency >= 0);
1940 }
1941 if (queue_empty(queue)) {
1942 if (rq->highq != IDLEPRI)
1943 clrbit(MAXPRI - rq->highq, rq->bitmap);
1944 rq->highq = MAXPRI - ffsbit(rq->bitmap);
1945 }
1946
1947 return (thread);
1948 }
1949
1950 /*
1951 * realtime_queue_insert:
1952 *
1953 * Enqueue a thread for realtime execution.
1954 */
1955 static boolean_t
1956 realtime_queue_insert(
1957 thread_t thread)
1958 {
1959 run_queue_t rq = &rt_runq;
1960 queue_t queue = rq->queues + thread->sched_pri;
1961 uint64_t deadline = thread->realtime.deadline;
1962 boolean_t preempt = FALSE;
1963
1964 simple_lock(&rt_lock);
1965
1966 if (queue_empty(queue)) {
1967 enqueue_tail(queue, (queue_entry_t)thread);
1968
1969 setbit(MAXPRI - thread->sched_pri, rq->bitmap);
1970 if (thread->sched_pri > rq->highq)
1971 rq->highq = thread->sched_pri;
1972 preempt = TRUE;
1973 }
1974 else {
1975 register thread_t entry = (thread_t)queue_first(queue);
1976
1977 while (TRUE) {
1978 if ( queue_end(queue, (queue_entry_t)entry) ||
1979 deadline < entry->realtime.deadline ) {
1980 entry = (thread_t)queue_prev((queue_entry_t)entry);
1981 break;
1982 }
1983
1984 entry = (thread_t)queue_next((queue_entry_t)entry);
1985 }
1986
1987 if ((queue_entry_t)entry == queue)
1988 preempt = TRUE;
1989
1990 insque((queue_entry_t)thread, (queue_entry_t)entry);
1991 }
1992
1993 thread->runq = RT_RUNQ;
1994 rq->count++; rq->urgency++;
1995
1996 simple_unlock(&rt_lock);
1997
1998 return (preempt);
1999 }
2000
2001 /*
2002 * realtime_setrun:
2003 *
2004 * Dispatch a thread for realtime execution.
2005 *
2006 * Thread must be locked. Associated pset must
2007 * be locked, and is returned unlocked.
2008 */
2009 static void
2010 realtime_setrun(
2011 processor_t processor,
2012 thread_t thread)
2013 {
2014 processor_set_t pset = processor->processor_set;
2015
2016 /*
2017 * Dispatch directly onto idle processor.
2018 */
2019 if (processor->state == PROCESSOR_IDLE) {
2020 remqueue(&pset->idle_queue, (queue_entry_t)processor);
2021 pset->idle_count--;
2022 enqueue_head(&pset->active_queue, (queue_entry_t)processor);
2023
2024 processor->next_thread = thread;
2025 processor->deadline = thread->realtime.deadline;
2026 processor->state = PROCESSOR_DISPATCHING;
2027 pset_unlock(pset);
2028
2029 if (processor != current_processor())
2030 machine_signal_idle(processor);
2031 return;
2032 }
2033
2034 if (realtime_queue_insert(thread)) {
2035 if (processor == current_processor())
2036 ast_on(AST_PREEMPT | AST_URGENT);
2037 else
2038 cause_ast_check(processor);
2039 }
2040
2041 pset_unlock(pset);
2042 }
2043
2044 /*
2045 * processor_enqueue:
2046 *
2047 * Enqueue thread on a processor run queue. Thread must be locked,
2048 * and not already be on a run queue.
2049 *
2050 * Returns TRUE if a preemption is indicated based on the state
2051 * of the run queue.
2052 *
2053 * The run queue must be locked (see run_queue_remove()
2054 * for more info).
2055 */
2056 static boolean_t
2057 processor_enqueue(
2058 processor_t processor,
2059 thread_t thread,
2060 integer_t options)
2061 {
2062 run_queue_t rq = &processor->runq;
2063 queue_t queue = rq->queues + thread->sched_pri;
2064 boolean_t result = FALSE;
2065
2066 if (queue_empty(queue)) {
2067 enqueue_tail(queue, (queue_entry_t)thread);
2068
2069 setbit(MAXPRI - thread->sched_pri, rq->bitmap);
2070 if (thread->sched_pri > rq->highq) {
2071 rq->highq = thread->sched_pri;
2072 result = TRUE;
2073 }
2074 }
2075 else
2076 if (options & SCHED_TAILQ)
2077 enqueue_tail(queue, (queue_entry_t)thread);
2078 else
2079 enqueue_head(queue, (queue_entry_t)thread);
2080
2081 thread->runq = processor;
2082 if (testbit(thread->sched_pri, sched_preempt_pri))
2083 rq->urgency++;
2084 rq->count++;
2085
2086 return (result);
2087 }
2088
2089 /*
2090 * processor_setrun:
2091 *
2092 * Dispatch a thread for execution on a
2093 * processor.
2094 *
2095 * Thread must be locked. Associated pset must
2096 * be locked, and is returned unlocked.
2097 */
2098 static void
2099 processor_setrun(
2100 processor_t processor,
2101 thread_t thread,
2102 integer_t options)
2103 {
2104 processor_set_t pset = processor->processor_set;
2105 ast_t preempt;
2106
2107 /*
2108 * Dispatch directly onto idle processor.
2109 */
2110 if (processor->state == PROCESSOR_IDLE) {
2111 remqueue(&pset->idle_queue, (queue_entry_t)processor);
2112 pset->idle_count--;
2113 enqueue_head(&pset->active_queue, (queue_entry_t)processor);
2114
2115 processor->next_thread = thread;
2116 processor->deadline = UINT64_MAX;
2117 processor->state = PROCESSOR_DISPATCHING;
2118 pset_unlock(pset);
2119
2120 if (processor != current_processor())
2121 machine_signal_idle(processor);
2122 return;
2123 }
2124
2125 /*
2126 * Set preemption mode.
2127 */
2128 if (testbit(thread->sched_pri, sched_preempt_pri))
2129 preempt = (AST_PREEMPT | AST_URGENT);
2130 else
2131 if (thread->sched_mode & TH_MODE_TIMESHARE && thread->priority < BASEPRI_BACKGROUND)
2132 preempt = AST_NONE;
2133 else
2134 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
2135
2136 if (!processor_enqueue(processor, thread, options))
2137 preempt = AST_NONE;
2138
2139 pset_hint_high(pset, processor);
2140
2141 if (preempt != AST_NONE) {
2142 if (processor == current_processor()) {
2143 thread_t self = processor->active_thread;
2144
2145 if (csw_needed(self, processor))
2146 ast_on(preempt);
2147 }
2148 else
2149 if ( (processor->state == PROCESSOR_RUNNING ||
2150 processor->state == PROCESSOR_SHUTDOWN) &&
2151 thread->sched_pri >= processor->current_pri ) {
2152 cause_ast_check(processor);
2153 }
2154 }
2155 else
2156 if ( processor->state == PROCESSOR_SHUTDOWN &&
2157 thread->sched_pri >= processor->current_pri ) {
2158 cause_ast_check(processor);
2159 }
2160
2161 pset_unlock(pset);
2162 }
2163
2164 #define next_pset(p) (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)
2165
2166 /*
2167 * choose_next_pset:
2168 *
2169 * Return the next sibling pset containing
2170 * available processors.
2171 *
2172 * Returns the original pset if none other is
2173 * suitable.
2174 */
2175 static processor_set_t
2176 choose_next_pset(
2177 processor_set_t pset)
2178 {
2179 processor_set_t nset = pset;
2180
2181 do {
2182 nset = next_pset(nset);
2183 } while (nset->processor_count < 1 && nset != pset);
2184
2185 return ((nset != pset)? nset: pset);
2186 }
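/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * next_pset() treats a node's processor sets as a ring; choose_next_pset()
 * walks that ring until it finds a set with processors or arrives back at
 * the starting set.  A minimal model of the traversal ("sketch_" names are
 * invented):
 */
struct sketch_pset {
	struct sketch_pset	*next;		/* circular sibling list */
	int			processor_count;
};

static struct sketch_pset *
sketch_choose_next_pset(struct sketch_pset *pset)
{
	struct sketch_pset *nset = pset;

	do {
		nset = nset->next;
	} while (nset->processor_count < 1 && nset != pset);

	return (nset);		/* the original pset when no sibling qualifies */
}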
2187
2188 /*
2189 * choose_processor:
2190 *
2191 * Choose a processor for the thread, beginning at
2192 * the pset.
2193 *
2194 * Returns a processor, possibly from a different pset.
2195 *
2196 * The thread must be locked. The pset must be locked,
2197 * and the resulting pset is locked on return.
2198 */
2199 static processor_t
2200 choose_processor(
2201 processor_set_t pset,
2202 thread_t thread)
2203 {
2204 processor_set_t nset, cset = pset;
2205 processor_t processor;
2206
2207 /*
2208 * Iterate through the processor sets to locate
2209 * an appropriate processor.
2210 */
2211 do {
2212 /*
2213 * Choose an idle processor.
2214 */
2215 if (!queue_empty(&cset->idle_queue))
2216 return ((processor_t)queue_first(&cset->idle_queue));
2217
2218 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2219 /*
2220 * For an RT thread, iterate through active processors, first fit.
2221 */
2222 processor = (processor_t)queue_first(&cset->active_queue);
2223 while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
2224 if (thread->sched_pri > processor->current_pri ||
2225 thread->realtime.deadline < processor->deadline)
2226 return (processor);
2227
2228 processor = (processor_t)queue_next((queue_entry_t)processor);
2229 }
2230 }
2231 else {
2232 /*
2233 * Choose the low hint processor in the processor set if available.
2234 */
2235 processor = cset->low_hint;
2236 if (processor != PROCESSOR_NULL &&
2237 processor->state != PROCESSOR_SHUTDOWN && processor->state != PROCESSOR_OFF_LINE)
2238 return (processor);
2239
2240 /*
2241 * Choose any active processor if the hint was invalid.
2242 */
2243 processor = (processor_t)dequeue_head(&cset->active_queue);
2244 if (processor != PROCESSOR_NULL) {
2245 enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
2246 return (processor);
2247 }
2248 }
2249
2250 /*
2251 * Move onto the next processor set.
2252 */
2253 nset = next_pset(cset);
2254
2255 if (nset != pset) {
2256 pset_unlock(cset);
2257
2258 cset = nset;
2259 pset_lock(cset);
2260 }
2261 } while (nset != pset);
2262
2263 /*
2264 * If all else fails, choose the current processor;
2265 * this routine must return a running processor.
2266 */
2267 processor = current_processor();
2268 if (cset != processor->processor_set) {
2269 pset_unlock(cset);
2270
2271 cset = processor->processor_set;
2272 pset_lock(cset);
2273 }
2274
2275 return (processor);
2276 }
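/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * For a real-time thread the loop above takes the first active processor
 * that is either running lower-priority work or holding a later deadline.
 * The predicate in isolation (uint64_t deadlines, smaller value = more
 * urgent, matching processor->deadline above; "sketch_" names invented):
 */
#include <stdint.h>
#include <stdbool.h>

static bool
sketch_rt_first_fit(int thread_pri, uint64_t thread_deadline,
    int cpu_current_pri, uint64_t cpu_deadline)
{
	return (thread_pri > cpu_current_pri ||		/* outranks what the cpu runs now */
	    thread_deadline < cpu_deadline);		/* or is due sooner */
}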
2277
2278 /*
2279 * thread_setrun:
2280 *
2281 * Dispatch thread for execution, onto an idle
2282 * processor or run queue, and signal a preemption
2283 * as appropriate.
2284 *
2285 * Thread must be locked.
2286 */
2287 void
2288 thread_setrun(
2289 thread_t thread,
2290 integer_t options)
2291 {
2292 processor_t processor;
2293 processor_set_t pset;
2294
2295 #if DEBUG
2296 assert(thread_runnable(thread));
2297 #endif
2298
2299 /*
2300 * Update priority if needed.
2301 */
2302 if (thread->sched_stamp != sched_tick)
2303 update_priority(thread);
2304
2305 assert(thread->runq == PROCESSOR_NULL);
2306
2307 if (thread->bound_processor == PROCESSOR_NULL) {
2308 /*
2309 * Unbound case.
2310 */
2311 if (thread->affinity_set != AFFINITY_SET_NULL) {
2312 /*
2313 * Use affinity set policy hint.
2314 */
2315 pset = thread->affinity_set->aset_pset;
2316 pset_lock(pset);
2317
2318 processor = choose_processor(pset, thread);
2319 }
2320 else
2321 if (thread->last_processor != PROCESSOR_NULL) {
2322 /*
2323 * Simple (last processor) affinity case.
2324 */
2325 processor = thread->last_processor;
2326 pset = processor->processor_set;
2327 pset_lock(pset);
2328
2329 /*
2330 * Choose a different processor in certain cases.
2331 */
2332 if (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
2333 processor = choose_processor(pset, thread);
2334 else
2335 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2336 /*
2337 * If the processor is executing an RT thread with
2338 * an earlier deadline, choose another.
2339 */
2340 if (thread->sched_pri <= processor->current_pri ||
2341 thread->realtime.deadline >= processor->deadline)
2342 processor = choose_processor(pset, thread);
2343 }
2344 else
2345 if (processor->state != PROCESSOR_IDLE && pset->idle_count > 0) {
2346 processor = choose_processor(pset, thread);
2347 }
2348 else {
2349 processor_set_t nset = choose_next_pset(pset);
2350
2351 /*
2352 * Move onto a less loaded processor set if appropriate.
2353 */
2354 if (pset != nset && (nset->low_hint == PROCESSOR_NULL ||
2355 (pset->idle_count == 0 && nset->idle_count > 0) ||
2356 processor->runq.count > nset->low_hint->runq.count)) {
2357 pset_unlock(pset);
2358
2359 pset = nset;
2360 pset_lock(pset);
2361
2362 processor = choose_processor(pset, thread);
2363 }
2364 }
2365 }
2366 else {
2367 /*
2368 * No affinity case:
2369 *
2370 * Choose a processor from the current processor set.
2371 */
2372 processor = current_processor();
2373 pset = processor->processor_set;
2374 pset_lock(pset);
2375
2376 processor = choose_processor(pset, thread);
2377 }
2378 }
2379 else {
2380 /*
2381 * Bound case:
2382 *
2383 * Unconditionally dispatch on the processor.
2384 */
2385 processor = thread->bound_processor;
2386 pset = processor->processor_set;
2387 pset_lock(pset);
2388 }
2389
2390 /*
2391 * Dispatch the thread on the chosen processor.
2392 */
2393 if (thread->sched_pri >= BASEPRI_RTQUEUES)
2394 realtime_setrun(processor, thread);
2395 else
2396 processor_setrun(processor, thread, options);
2397 }
2398
2399 /*
2400 * processor_queue_shutdown:
2401 *
2402 * Shutdown a processor run queue by moving
2403 * non-bound threads to the current processor.
2404 *
2405 * Associated pset must be locked, and is
2406 * returned unlocked.
2407 */
2408 void
2409 processor_queue_shutdown(
2410 processor_t processor)
2411 {
2412 processor_set_t pset = processor->processor_set;
2413 run_queue_t rq = &processor->runq;
2414 queue_t queue = rq->queues + rq->highq;
2415 int pri = rq->highq, count = rq->count;
2416 thread_t next, thread;
2417 queue_head_t tqueue;
2418
2419 queue_init(&tqueue);
2420
2421 while (count > 0) {
2422 thread = (thread_t)queue_first(queue);
2423 while (!queue_end(queue, (queue_entry_t)thread)) {
2424 next = (thread_t)queue_next((queue_entry_t)thread);
2425
2426 if (thread->bound_processor != processor) {
2427 remqueue(queue, (queue_entry_t)thread);
2428
2429 thread->runq = PROCESSOR_NULL;
2430 rq->count--;
2431 if (testbit(pri, sched_preempt_pri)) {
2432 rq->urgency--; assert(rq->urgency >= 0);
2433 }
2434 if (queue_empty(queue)) {
2435 if (pri != IDLEPRI)
2436 clrbit(MAXPRI - pri, rq->bitmap);
2437 rq->highq = MAXPRI - ffsbit(rq->bitmap);
2438 }
2439
2440 enqueue_tail(&tqueue, (queue_entry_t)thread);
2441 }
2442 count--;
2443
2444 thread = next;
2445 }
2446
2447 queue--; pri--;
2448 }
2449
2450 pset_unlock(pset);
2451
2452 processor = current_processor();
2453 pset = processor->processor_set;
2454
2455 while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
2456 thread_lock(thread);
2457 thread->last_processor = PROCESSOR_NULL;
2458
2459 pset_lock(pset);
2460
2461 processor_enqueue(processor, thread, SCHED_TAILQ);
2462
2463 pset_unlock(pset);
2464
2465 thread_unlock(thread);
2466 }
2467 }
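/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * processor_queue_shutdown() drains the dying processor's run queue in two
 * phases: unbound threads are first staged on a local queue while the pset
 * lock is held, then re-enqueued on the current processor after that lock
 * is dropped, so no two run-queue locks are held at once.  A minimal model
 * of the staging phase ("sketch_" names invented):
 */
struct sketch_item {
	struct sketch_item	*next;
	int			bound;		/* nonzero: must stay on its queue */
};

/* phase one: unlink every unbound item from 'src' onto a caller-local list */
static struct sketch_item *
sketch_drain_unbound(struct sketch_item **src)
{
	struct sketch_item *staged = 0, **tail = src;

	while (*tail != 0) {
		struct sketch_item *it = *tail;

		if (!it->bound) {
			*tail = it->next;	/* unlink from the source queue */
			it->next = staged;	/* push onto the staging list */
			staged = it;
		} else {
			tail = &it->next;
		}
	}
	return (staged);	/* phase two re-enqueues these under the new lock */
}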
2468
2469 /*
2470 * Check for a possible preemption point in
2471 * the (current) thread.
2472 *
2473 * Called at splsched.
2474 */
2475 ast_t
2476 csw_check(
2477 thread_t thread,
2478 processor_t processor)
2479 {
2480 int current_pri = thread->sched_pri;
2481 ast_t result = AST_NONE;
2482 run_queue_t runq;
2483
2484 if (first_timeslice(processor)) {
2485 runq = &rt_runq;
2486 if (runq->highq >= BASEPRI_RTQUEUES)
2487 return (AST_PREEMPT | AST_URGENT);
2488
2489 if (runq->highq > current_pri) {
2490 if (runq->urgency > 0)
2491 return (AST_PREEMPT | AST_URGENT);
2492
2493 result |= AST_PREEMPT;
2494 }
2495
2496 runq = &processor->runq;
2497 if (runq->highq > current_pri) {
2498 if (runq->urgency > 0)
2499 return (AST_PREEMPT | AST_URGENT);
2500
2501 result |= AST_PREEMPT;
2502 }
2503 }
2504 else {
2505 runq = &rt_runq;
2506 if (runq->highq >= current_pri) {
2507 if (runq->urgency > 0)
2508 return (AST_PREEMPT | AST_URGENT);
2509
2510 result |= AST_PREEMPT;
2511 }
2512
2513 runq = &processor->runq;
2514 if (runq->highq >= current_pri) {
2515 if (runq->urgency > 0)
2516 return (AST_PREEMPT | AST_URGENT);
2517
2518 result |= AST_PREEMPT;
2519 }
2520 }
2521
2522 if (result != AST_NONE)
2523 return (result);
2524
2525 if (thread->state & TH_SUSP)
2526 result |= AST_PREEMPT;
2527
2528 return (result);
2529 }
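/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * Aside from the real-time special case, the checks above hinge on the
 * comparison: within the first timeslice a runnable thread must be strictly
 * higher priority to preempt, afterwards an equal priority suffices
 * (round-robin at the same level), and pending urgent work upgrades the
 * AST.  The per-run-queue decision in isolation ("sketch_" names invented,
 * integer flags standing in for ast_t):
 */
#define SKETCH_AST_NONE		0x0
#define SKETCH_AST_PREEMPT	0x1
#define SKETCH_AST_URGENT	0x2

static int
sketch_csw_check_runq(int first_timeslice, int runq_highq, int runq_urgency,
    int current_pri)
{
	int higher = first_timeslice ? (runq_highq > current_pri)
				     : (runq_highq >= current_pri);

	if (!higher)
		return (SKETCH_AST_NONE);
	if (runq_urgency > 0)
		return (SKETCH_AST_PREEMPT | SKETCH_AST_URGENT);
	return (SKETCH_AST_PREEMPT);
}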
2530
2531 /*
2532 * set_sched_pri:
2533 *
2534 * Set the scheduled priority of the specified thread.
2535 *
2536 * This may cause the thread to change queues.
2537 *
2538 * Thread must be locked.
2539 */
2540 void
2541 set_sched_pri(
2542 thread_t thread,
2543 int priority)
2544 {
2545 boolean_t removed = run_queue_remove(thread);
2546
2547 thread->sched_pri = priority;
2548 if (removed)
2549 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
2550 else
2551 if (thread->state & TH_RUN) {
2552 processor_t processor = thread->last_processor;
2553
2554 if (thread == current_thread()) {
2555 ast_t preempt = csw_check(thread, processor);
2556
2557 if (preempt != AST_NONE)
2558 ast_on(preempt);
2559 processor->current_pri = priority;
2560 }
2561 else
2562 if ( processor != PROCESSOR_NULL &&
2563 processor->active_thread == thread )
2564 cause_ast_check(processor);
2565 }
2566 }
2567
2568 #if 0
2569
2570 static void
2571 run_queue_check(
2572 run_queue_t rq,
2573 thread_t thread)
2574 {
2575 queue_t q;
2576 queue_entry_t qe;
2577
2578 if (rq != thread->runq)
2579 panic("run_queue_check: thread runq");
2580
2581 if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
2582 panic("run_queue_check: thread sched_pri");
2583
2584 q = &rq->queues[thread->sched_pri];
2585 qe = queue_first(q);
2586 while (!queue_end(q, qe)) {
2587 if (qe == (queue_entry_t)thread)
2588 return;
2589
2590 qe = queue_next(qe);
2591 }
2592
2593 panic("run_queue_check: end");
2594 }
2595
2596 #endif /* 0 */
2597
2598 /*
2599 * run_queue_remove:
2600 *
2601 * Remove a thread from its current run queue and
2602 * return TRUE if successful.
2603 *
2604 * Thread must be locked.
2605 */
2606 boolean_t
2607 run_queue_remove(
2608 thread_t thread)
2609 {
2610 processor_t processor = thread->runq;
2611
2612 /*
2613 * If processor is PROCESSOR_NULL, the thread will stay out of the
2614 * run queues because the caller locked the thread. Otherwise
2615 * the thread is on a run queue, but could be chosen for dispatch
2616 * and removed.
2617 */
2618 if (processor != PROCESSOR_NULL) {
2619 void * rqlock;
2620 run_queue_t rq;
2621
2622 /*
2623 * The processor run queues are locked by the
2624 * processor set. Real-time priorities use a
2625 * global queue with a dedicated lock.
2626 */
2627 if (thread->sched_pri < BASEPRI_RTQUEUES) {
2628 rqlock = &processor->processor_set->sched_lock;
2629 rq = &processor->runq;
2630 }
2631 else {
2632 rqlock = &rt_lock; rq = &rt_runq;
2633 }
2634
2635 simple_lock(rqlock);
2636
2637 if (processor == thread->runq) {
2638 /*
2639 * Thread is on a run queue and we have a lock on
2640 * that run queue.
2641 */
2642 remqueue(&rq->queues[0], (queue_entry_t)thread);
2643 rq->count--;
2644 if (testbit(thread->sched_pri, sched_preempt_pri)) {
2645 rq->urgency--; assert(rq->urgency >= 0);
2646 }
2647
2648 if (queue_empty(rq->queues + thread->sched_pri)) {
2649 /* update run queue status */
2650 if (thread->sched_pri != IDLEPRI)
2651 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
2652 rq->highq = MAXPRI - ffsbit(rq->bitmap);
2653 }
2654
2655 thread->runq = PROCESSOR_NULL;
2656 }
2657 else {
2658 /*
2659 * The thread left the run queue before we could
2660 * lock the run queue.
2661 */
2662 assert(thread->runq == PROCESSOR_NULL);
2663 processor = PROCESSOR_NULL;
2664 }
2665
2666 simple_unlock(rqlock);
2667 }
2668
2669 return (processor != PROCESSOR_NULL);
2670 }
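/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * The re-check of thread->runq after taking the run-queue lock is the
 * classic "optimistic read, lock, verify" pattern: the thread may have
 * been dequeued for dispatch between the unlocked read and the lock
 * acquisition.  A userspace model using pthreads ("sketch_" names are
 * invented; the kernel uses simple_lock(), not pthread mutexes):
 */
#include <pthread.h>
#include <stdbool.h>

struct sketch_rq_thread {
	struct sketch_rq	*runq;		/* NULL when not queued; written under the queue lock */
};

struct sketch_rq {
	pthread_mutex_t		lock;
};

static bool
sketch_run_queue_remove(struct sketch_rq_thread *th)
{
	struct sketch_rq *q = th->runq;		/* optimistic, unlocked read */

	if (q == 0)
		return (false);			/* not on any run queue */

	pthread_mutex_lock(&q->lock);
	if (th->runq == q) {
		/* still queued: safe to unlink while holding q->lock */
		th->runq = 0;
		pthread_mutex_unlock(&q->lock);
		return (true);
	}
	pthread_mutex_unlock(&q->lock);
	return (false);				/* lost the race: dispatched first */
}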
2671
2672 /*
2673 * choose_thread:
2674 *
2675 * Choose a thread to execute from the run queues
2676 * and return it. May steal a thread from another
2677 * processor.
2678 *
2679 * Called with pset scheduling lock and rt lock held,
2680 * released on return.
2681 */
2682 static thread_t
2683 choose_thread(
2684 processor_t processor)
2685 {
2686 processor_set_t pset = processor->processor_set;
2687 thread_t thread;
2688
2689 if (processor->runq.count > 0 && processor->runq.highq >= rt_runq.highq) {
2690 simple_unlock(&rt_lock);
2691
2692 pset_hint_low(pset, processor);
2693
2694 if (pset->high_hint != PROCESSOR_NULL) {
2695 if (processor != pset->high_hint) {
2696 if (processor->runq.count >= pset->high_hint->runq.count)
2697 pset->high_hint = processor;
2698 else
2699 if (pset->high_hint->runq.highq > processor->runq.highq) {
2700 thread = steal_thread(pset->high_hint);
2701 if (thread != THREAD_NULL) {
2702 processor->deadline = UINT64_MAX;
2703 pset_unlock(pset);
2704
2705 return (thread);
2706 }
2707 }
2708 }
2709 }
2710 else
2711 pset->high_hint = processor;
2712
2713 thread = run_queue_dequeue(&processor->runq, SCHED_HEADQ);
2714
2715 processor->deadline = UINT64_MAX;
2716 pset_unlock(pset);
2717
2718 return (thread);
2719 }
2720
2721 thread = run_queue_dequeue(&rt_runq, SCHED_HEADQ);
2722 simple_unlock(&rt_lock);
2723
2724 processor->deadline = thread->realtime.deadline;
2725 pset_unlock(pset);
2726
2727 return (thread);
2728 }
2729
2730 /*
2731 * steal_thread:
2732 *
2733 * Steal a thread from a processor and return it.
2734 *
2735 * Associated pset must be locked. Returns THREAD_NULL
2736 * on failure.
2737 */
2738 static thread_t
2739 steal_thread(
2740 processor_t processor)
2741 {
2742 run_queue_t rq = &processor->runq;
2743 queue_t queue = rq->queues + rq->highq;
2744 int pri = rq->highq, count = rq->count;
2745 thread_t thread = THREAD_NULL;
2746
2747 while (count > 0) {
2748 thread = (thread_t)queue_first(queue);
2749 while (!queue_end(queue, (queue_entry_t)thread)) {
2750 if (thread->bound_processor != processor) {
2751 remqueue(queue, (queue_entry_t)thread);
2752
2753 thread->runq = PROCESSOR_NULL;
2754 rq->count--;
2755 if (testbit(pri, sched_preempt_pri)) {
2756 rq->urgency--; assert(rq->urgency >= 0);
2757 }
2758 if (queue_empty(queue)) {
2759 if (pri != IDLEPRI)
2760 clrbit(MAXPRI - pri, rq->bitmap);
2761 rq->highq = MAXPRI - ffsbit(rq->bitmap);
2762 }
2763
2764 return (thread);
2765 }
2766 count--;
2767
2768 thread = (thread_t)queue_next((queue_entry_t)thread);
2769 }
2770
2771 queue--; pri--;
2772 }
2773
2774 return (THREAD_NULL);
2775 }
2776
2777 /*
2778 * This is the processor idle loop, which just looks for other threads
2779 * to execute. Processor idle threads invoke this without supplying a
2780 * current thread, so the processor idles without an asserted wait state.
2781 *
2782 * Returns the next thread to execute if dispatched directly.
2783 */
2784 static thread_t
2785 processor_idle(
2786 thread_t thread,
2787 processor_t processor)
2788 {
2789 processor_set_t pset = processor->processor_set;
2790 thread_t new_thread;
2791 int state;
2792
2793 (void)splsched();
2794
2795 #ifdef __ppc__
2796 pmsDown(); /* Step power down */
2797 #endif
2798
2799 KERNEL_DEBUG_CONSTANT(
2800 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (int)thread, 0, 0, 0, 0);
2801
2802 timer_switch(&PROCESSOR_DATA(processor, system_state),
2803 mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
2804 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
2805
2806 while (processor->next_thread == THREAD_NULL && processor->runq.count == 0 &&
2807 (thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
2808 volatile processor_t hint;
2809
2810 machine_idle();
2811
2812 (void)splsched();
2813
2814 if (pset->low_hint == PROCESSOR_NULL)
2815 break;
2816
2817 hint = pset->high_hint;
2818 if (hint != PROCESSOR_NULL && hint->runq.count > 0)
2819 break;
2820 }
2821
2822 timer_switch(&PROCESSOR_DATA(processor, idle_state),
2823 mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
2824 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
2825
2826 pset_lock(pset);
2827
2828 #ifdef __ppc__
2829 pmsStep(0); /* Step up out of idle power */
2830 #endif
2831
2832 state = processor->state;
2833 if (state == PROCESSOR_DISPATCHING) {
2834 /*
2835 * Common case -- cpu dispatched.
2836 */
2837 new_thread = processor->next_thread;
2838 processor->next_thread = THREAD_NULL;
2839 processor->state = PROCESSOR_RUNNING;
2840
2841 if ( processor->runq.highq > new_thread->sched_pri ||
2842 (rt_runq.highq > 0 && rt_runq.highq >= new_thread->sched_pri) ) {
2843 processor->deadline = UINT64_MAX;
2844
2845 pset_unlock(pset);
2846
2847 thread_lock(new_thread);
2848 thread_setrun(new_thread, SCHED_HEADQ);
2849 thread_unlock(new_thread);
2850
2851 KERNEL_DEBUG_CONSTANT(
2852 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);
2853
2854 return (THREAD_NULL);
2855 }
2856
2857 pset_unlock(pset);
2858
2859 KERNEL_DEBUG_CONSTANT(
2860 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, (int)new_thread, 0, 0);
2861
2862 return (new_thread);
2863 }
2864 else
2865 if (state == PROCESSOR_IDLE) {
2866 remqueue(&pset->idle_queue, (queue_entry_t)processor);
2867 pset->idle_count--;
2868
2869 processor->state = PROCESSOR_RUNNING;
2870 enqueue_head(&pset->active_queue, (queue_entry_t)processor);
2871 }
2872 else
2873 if (state == PROCESSOR_SHUTDOWN) {
2874 /*
2875 * Going off-line. Force a
2876 * reschedule.
2877 */
2878 if ((new_thread = processor->next_thread) != THREAD_NULL) {
2879 processor->next_thread = THREAD_NULL;
2880 processor->deadline = UINT64_MAX;
2881
2882 pset_unlock(pset);
2883
2884 thread_lock(new_thread);
2885 thread_setrun(new_thread, SCHED_HEADQ);
2886 thread_unlock(new_thread);
2887
2888 KERNEL_DEBUG_CONSTANT(
2889 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);
2890
2891 return (THREAD_NULL);
2892 }
2893 }
2894
2895 pset_unlock(pset);
2896
2897 KERNEL_DEBUG_CONSTANT(
2898 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);
2899
2900 return (THREAD_NULL);
2901 }
2902
2903 void
2904 idle_thread(void)
2905 {
2906 processor_t processor = current_processor();
2907 thread_t new_thread;
2908
2909 new_thread = processor_idle(THREAD_NULL, processor);
2910 if (new_thread != THREAD_NULL) {
2911 thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
2912 /*NOTREACHED*/
2913 }
2914
2915 thread_block((thread_continue_t)idle_thread);
2916 /*NOTREACHED*/
2917 }
2918
2919 kern_return_t
2920 idle_thread_create(
2921 processor_t processor)
2922 {
2923 kern_return_t result;
2924 thread_t thread;
2925 spl_t s;
2926
2927 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
2928 if (result != KERN_SUCCESS)
2929 return (result);
2930
2931 s = splsched();
2932 thread_lock(thread);
2933 thread->bound_processor = processor;
2934 processor->idle_thread = thread;
2935 thread->sched_pri = thread->priority = IDLEPRI;
2936 thread->state = (TH_RUN | TH_IDLE);
2937 thread_unlock(thread);
2938 splx(s);
2939
2940 thread_deallocate(thread);
2941
2942 return (KERN_SUCCESS);
2943 }
2944
2945 static uint64_t sched_tick_deadline;
2946
2947 /*
2948 * sched_startup:
2949 *
2950 * Kicks off scheduler services.
2951 *
2952 * Called at splsched.
2953 */
2954 void
2955 sched_startup(void)
2956 {
2957 kern_return_t result;
2958 thread_t thread;
2959
2960 result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
2961 if (result != KERN_SUCCESS)
2962 panic("sched_startup");
2963
2964 thread_deallocate(thread);
2965
2966 /*
2967 * Yield to the sched_tick_thread while it times
2968 * a series of context switches back. It stores
2969 * the baseline value in sched_cswtime.
2970 *
2971 * The current thread is the only other thread
2972 * active at this point.
2973 */
2974 while (sched_cswtime == 0)
2975 thread_block(THREAD_CONTINUE_NULL);
2976
2977 thread_daemon_init();
2978
2979 thread_call_initialize();
2980 }
2981
2982 /*
2983 * sched_tick_thread:
2984 *
2985 * Perform periodic bookkeeping functions about ten
2986 * times per second.
2987 */
2988 static void
2989 sched_tick_continue(void)
2990 {
2991 uint64_t abstime = mach_absolute_time();
2992
2993 sched_tick++;
2994
2995 /*
2996 * Compute various averages.
2997 */
2998 compute_averages();
2999
3000 /*
3001 * Scan the run queues for threads which
3002 * may need to be updated.
3003 */
3004 thread_update_scan();
3005
3006 if (pm_tick_callout != NULL)
3007 (*pm_tick_callout)();
3008
3009 clock_deadline_for_periodic_event(sched_tick_interval, abstime,
3010 &sched_tick_deadline);
3011
3012 assert_wait_deadline((event_t)sched_tick_thread, THREAD_UNINT, sched_tick_deadline);
3013 thread_block((thread_continue_t)sched_tick_continue);
3014 /*NOTREACHED*/
3015 }
3016
3017 /*
3018 * Time a series of context switches to determine
3019 * a baseline. Toss the high and low and return
3020 * the one-way value.
3021 */
3022 static uint32_t
3023 time_cswitch(void)
3024 {
3025 uint32_t new, hi, low, accum;
3026 uint64_t abstime;
3027 int i, tries = 7;
3028
3029 accum = hi = low = 0;
3030 for (i = 0; i < tries; ++i) {
3031 abstime = mach_absolute_time();
3032 thread_block(THREAD_CONTINUE_NULL);
3033
3034 new = mach_absolute_time() - abstime;
3035
3036 if (i == 0)
3037 accum = hi = low = new;
3038 else {
3039 if (new < low)
3040 low = new;
3041 else
3042 if (new > hi)
3043 hi = new;
3044 accum += new;
3045 }
3046 }
3047
3048 return ((accum - hi - low) / (2 * (tries - 2)));
3049 }
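/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * The formula above sums all 'tries' round-trip block times, discards the
 * single highest and lowest, and divides by 2 * (tries - 2): the mean of
 * the remaining round trips, halved to approximate one context switch.
 * Worked example with invented sample values (abstime units): round trips
 * {10, 12, 11, 30, 9, 12, 11} give accum = 95, hi = 30, low = 9, so
 * (95 - 30 - 9) / (2 * 5) = 56 / 10 = 5 after integer division.
 */
#include <stdint.h>

static uint32_t
sketch_one_way_baseline(const uint32_t *samples, int tries)
{
	uint32_t accum = 0, hi = samples[0], low = samples[0];

	for (int i = 0; i < tries; i++) {
		if (samples[i] > hi)
			hi = samples[i];
		if (samples[i] < low)
			low = samples[i];
		accum += samples[i];
	}
	return ((accum - hi - low) / (2 * (tries - 2)));
}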
3050
3051 void
3052 sched_tick_thread(void)
3053 {
3054 sched_cswtime = time_cswitch();
3055
3056 sched_tick_deadline = mach_absolute_time();
3057
3058 sched_tick_continue();
3059 /*NOTREACHED*/
3060 }
3061
3062 /*
3063 * thread_update_scan / runq_scan:
3064 *
3065 * Scan the run queues to account for timesharing threads
3066 * which need to be updated.
3067 *
3068 * Scanner runs in two passes. Pass one squirrels likely
3069 * threads away in an array, pass two does the update.
3070 *
3071 * This is necessary because the run queue is locked for
3072 * the candidate scan, but the thread is locked for the update.
3073 *
3074 * Array should be sized to make forward progress, without
3075 * disabling preemption for long periods.
3076 */
3077
3078 #define THREAD_UPDATE_SIZE 128
3079
3080 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
3081 static int thread_update_count = 0;
3082
3083 /*
3084 * Scan a runq for candidate threads.
3085 *
3086 * Returns TRUE if retry is needed.
3087 */
3088 static boolean_t
3089 runq_scan(
3090 run_queue_t runq)
3091 {
3092 register int count;
3093 register queue_t q;
3094 register thread_t thread;
3095
3096 if ((count = runq->count) > 0) {
3097 q = runq->queues + runq->highq;
3098 while (count > 0) {
3099 queue_iterate(q, thread, thread_t, links) {
3100 if ( thread->sched_stamp != sched_tick &&
3101 (thread->sched_mode & TH_MODE_TIMESHARE) ) {
3102 if (thread_update_count == THREAD_UPDATE_SIZE)
3103 return (TRUE);
3104
3105 thread_update_array[thread_update_count++] = thread;
3106 thread_reference_internal(thread);
3107 }
3108
3109 count--;
3110 }
3111
3112 q--;
3113 }
3114 }
3115
3116 return (FALSE);
3117 }
3118
3119 static void
3120 thread_update_scan(void)
3121 {
3122 boolean_t restart_needed = FALSE;
3123 processor_t processor = processor_list;
3124 processor_set_t pset;
3125 thread_t thread;
3126 spl_t s;
3127
3128 do {
3129 do {
3130 pset = processor->processor_set;
3131
3132 s = splsched();
3133 pset_lock(pset);
3134
3135 restart_needed = runq_scan(&processor->runq);
3136
3137 pset_unlock(pset);
3138 splx(s);
3139
3140 if (restart_needed)
3141 break;
3142
3143 thread = processor->idle_thread;
3144 if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
3145 if (thread_update_count == THREAD_UPDATE_SIZE) {
3146 restart_needed = TRUE;
3147 break;
3148 }
3149
3150 thread_update_array[thread_update_count++] = thread;
3151 thread_reference_internal(thread);
3152 }
3153 } while ((processor = processor->processor_list) != NULL);
3154
3155 /*
3156 * Ok, we now have a collection of candidates -- fix them.
3157 */
3158 while (thread_update_count > 0) {
3159 thread = thread_update_array[--thread_update_count];
3160 thread_update_array[thread_update_count] = THREAD_NULL;
3161
3162 s = splsched();
3163 thread_lock(thread);
3164 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
3165 thread->sched_stamp != sched_tick )
3166 update_priority(thread);
3167 thread_unlock(thread);
3168 splx(s);
3169
3170 thread_deallocate(thread);
3171 }
3172 } while (restart_needed);
3173 }
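/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * thread_update_scan() is the bounded two-pass pattern described in the
 * block comment above: pass one collects candidates into a fixed-size
 * array under the run-queue lock, pass two processes them under each
 * item's own lock, and the whole scan restarts if the array fills.  A
 * minimal model ("sketch_" names invented; the callbacks stand in for
 * runq_scan() and update_priority()):
 */
#define SKETCH_BATCH	128

static void
sketch_two_pass_scan(int (*collect)(void **batch, int capacity, int *overflow),
    void (*process)(void *item))
{
	void *batch[SKETCH_BATCH];
	int overflow;

	do {
		/* pass one: gather candidates, bounded by the batch size */
		int n = collect(batch, SKETCH_BATCH, &overflow);

		/* pass two: process what was gathered */
		for (int i = 0; i < n; i++)
			process(batch[i]);
	} while (overflow);	/* array filled: rescan for the remainder */
}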
3174
3175 /*
3176 * Just in case someone doesn't use the macro
3177 */
3178 #undef thread_wakeup
3179 void
3180 thread_wakeup(
3181 event_t x);
3182
3183 void
3184 thread_wakeup(
3185 event_t x)
3186 {
3187 thread_wakeup_with_result(x, THREAD_AWAKENED);
3188 }
3189
3190 boolean_t
3191 preemption_enabled(void)
3192 {
3193 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
3194 }
3195
3196 #if DEBUG
3197 static boolean_t
3198 thread_runnable(
3199 thread_t thread)
3200 {
3201 return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
3202 }
3203 #endif /* DEBUG */
3204
3205 #if MACH_KDB
3206 #include <ddb/db_output.h>
3207 #define printf kdbprintf
3208 void db_sched(void);
3209
3210 void
3211 db_sched(void)
3212 {
3213 iprintf("Scheduling Statistics:\n");
3214 db_indent += 2;
3215 iprintf("Thread invocations: csw %d same %d\n",
3216 c_thread_invoke_csw, c_thread_invoke_same);
3217 #if MACH_COUNTERS
3218 iprintf("Thread block: calls %d\n",
3219 c_thread_block_calls);
3220 iprintf("Idle thread:\n\thandoff %d block %d\n",
3221 c_idle_thread_handoff,
3222 c_idle_thread_block);
3223 iprintf("Sched thread blocks: %d\n", c_sched_thread_block);
3224 #endif /* MACH_COUNTERS */
3225 db_indent -= 2;
3226 }
3227
3228 #include <ddb/db_output.h>
3229 void db_show_thread_log(void);
3230
3231 void
3232 db_show_thread_log(void)
3233 {
3234 }
3235 #endif /* MACH_KDB */