/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Scheduling primitives
 *
 */

#include <debug.h>
#include <cpus.h>
#include <mach_kdb.h>
#include <simple_clock.h>
#include <power_save.h>
#include <task_swapper.h>

#include <ddb/db_output.h>
#include <mach/machine.h>
#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <kern/ast.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/etap_macros.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <kern/mk_sp.h>	/*** ??? fix so this can be removed ***/
#include <sys/kdebug.h>

#if TASK_SWAPPER
#include <kern/task_swap.h>
extern int task_swap_on;
#endif /* TASK_SWAPPER */

extern int hz;

#define DEFAULT_PREEMPTION_RATE 100    /* (1/s) */
int default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define MAX_UNSAFE_QUANTA 800
int max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define MAX_POLL_QUANTA 2
int max_poll_quanta = MAX_POLL_QUANTA;

#define SCHED_POLL_YIELD_SHIFT 4       /* 1/16 */
int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint32_t std_quantum_us;

unsigned sched_tick;

#if SIMPLE_CLOCK
int sched_usec;
#endif /* SIMPLE_CLOCK */

/* Forwards */
void wait_queues_init(void);

thread_t choose_pset_thread(
    processor_t     myprocessor,
    processor_set_t pset);

thread_t choose_thread(
    processor_t     myprocessor);

boolean_t run_queue_enqueue(
    run_queue_t     runq,
    thread_t        thread,
    boolean_t       tail);

void do_thread_scan(void);

#if DEBUG
void dump_run_queues(
    run_queue_t     rq);
void dump_run_queue_struct(
    run_queue_t     rq);
void dump_processor(
    processor_t     p);
void dump_processor_set(
    processor_set_t ps);

void checkrq(
    run_queue_t     rq,
    char            *msg);

void thread_check(
    thread_t        thread,
    run_queue_t     runq);

static
boolean_t thread_runnable(
    thread_t        thread);

#endif /*DEBUG*/


/*
 *	State machine
 *
 * states are combinations of:
 *  R	running
 *  W	waiting (or on wait queue)
 *  N	non-interruptible
 *  O	swapped out
 *  I	being swapped in
 *
 * init	action
 *	assert_wait	thread_block	clear_wait	swapout	swapin
 *
 * R	RW, RWN		R;   setrun	-		-
 * RN	RWN		RN;  setrun	-		-
 *
 * RW			W		R		-
 * RWN			WN		RN		-
 *
 * W				R;   setrun	WO
 * WN				RN;  setrun	-
 *
 * RO				-		-	R
 *
 */

/*
 *	Waiting protocols and implementation:
 *
 *	Each thread may be waiting for exactly one event; this event
 *	is set using assert_wait().  That thread may be awakened either
 *	by performing a thread_wakeup_prim() on its event,
 *	or by directly waking that thread up with clear_wait().
 *
 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
 *	value; the chain for the queue (linked list) is the run queue
 *	field.  [It is not possible to be waiting and runnable at the
 *	same time.]
 *
 *	Locks on both the thread and on the hash buckets govern the
 *	wait event field and the queue chain field.  Because wakeup
 *	operations only have the event as an argument, the event hash
 *	bucket must be locked before any thread.
 *
 *	Scheduling operations may also occur at interrupt level; therefore,
 *	interrupts below splsched() must be prevented when holding
 *	thread or hash bucket locks.
 *
 *	The wait event hash table declarations are as follows:
 */

#define NUMQUEUES 59

struct wait_queue wait_queues[NUMQUEUES];

#define wait_hash(event) \
    ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES)

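/*
 * Illustrative sketch (not part of the original code): the typical
 * wait/wakeup protocol built on the primitives declared in this file.
 * "example_object" is a hypothetical wait channel, not a kernel symbol.
 *
 *	wait_result_t wr;
 *
 *	wr = assert_wait((event_t)&example_object, THREAD_UNINT);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * and on the wakeup side, once the condition is satisfied:
 *
 *	thread_wakeup((event_t)&example_object);
 */
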
void
sched_init(void)
{
    /*
     * Calculate the timeslicing quantum
     * in us.
     */
    if (default_preemption_rate < 1)
        default_preemption_rate = DEFAULT_PREEMPTION_RATE;
    std_quantum_us = (1000 * 1000) / default_preemption_rate;

    printf("standard timeslicing quantum is %d us\n", std_quantum_us);

    wait_queues_init();
    pset_sys_bootstrap();       /* initialize processor mgmt. */
    processor_action();
    sched_tick = 0;
#if SIMPLE_CLOCK
    sched_usec = 0;
#endif /* SIMPLE_CLOCK */
    ast_init();
}

void
wait_queues_init(void)
{
    register int i;

    for (i = 0; i < NUMQUEUES; i++) {
        wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
    }
}

/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
    timer_call_param_t p0,
    timer_call_param_t p1)
{
    thread_t thread = p0;
    spl_t    s;

    s = splsched();
    wake_lock(thread);
    if (--thread->wait_timer_active == 1) {
        if (thread->wait_timer_is_set) {
            thread->wait_timer_is_set = FALSE;
            thread_lock(thread);
            if (thread->active)
                clear_wait_internal(thread, THREAD_TIMED_OUT);
            thread_unlock(thread);
        }
    }
    else
    if (thread->wait_timer_active == 0)
        thread_wakeup_one(&thread->wait_timer_active);
    wake_unlock(thread);
    splx(s);
}

/*
 *	thread_set_timer:
 *
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
    uint32_t interval,
    uint32_t scale_factor)
{
    thread_t thread = current_thread();
    uint64_t deadline;
    spl_t    s;

    s = splsched();
    wake_lock(thread);
    thread_lock(thread);
    if ((thread->state & TH_WAIT) != 0) {
        clock_interval_to_deadline(interval, scale_factor, &deadline);
        timer_call_enter(&thread->wait_timer, deadline);
        assert(!thread->wait_timer_is_set);
        thread->wait_timer_active++;
        thread->wait_timer_is_set = TRUE;
    }
    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}

void
thread_set_timer_deadline(
    uint64_t deadline)
{
    thread_t thread = current_thread();
    spl_t    s;

    s = splsched();
    wake_lock(thread);
    thread_lock(thread);
    if ((thread->state & TH_WAIT) != 0) {
        timer_call_enter(&thread->wait_timer, deadline);
        assert(!thread->wait_timer_is_set);
        thread->wait_timer_active++;
        thread->wait_timer_is_set = TRUE;
    }
    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}

void
thread_cancel_timer(void)
{
    thread_t thread = current_thread();
    spl_t    s;

    s = splsched();
    wake_lock(thread);
    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
        thread->wait_timer_is_set = FALSE;
    }
    wake_unlock(thread);
    splx(s);
}
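
/*
 * Illustrative sketch (not original code): a timed wait using the
 * primitives above.  The timer must be armed between assert_wait()
 * and thread_block(), and cancelled when the wakeup was not a timeout,
 * mirroring assert_wait_timeout() below.  "example_event" is a
 * hypothetical wait channel.
 *
 *	wait_result_t wr;
 *
 *	wr = assert_wait((event_t)&example_event, THREAD_UNINT);
 *	if (wr == THREAD_WAITING) {
 *		thread_set_timer(10, 1000*NSEC_PER_USEC);	(a 10 ms timeout)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *		if (wr != THREAD_TIMED_OUT)
 *			thread_cancel_timer();
 *	}
 */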

/*
 * Set up thread timeout element when thread is created.
 */
void
thread_timer_setup(
    thread_t thread)
{
    extern void thread_depress_expire(
                    timer_call_param_t p0,
                    timer_call_param_t p1);

    timer_call_setup(&thread->wait_timer, thread_timer_expire, thread);
    thread->wait_timer_is_set = FALSE;
    thread->wait_timer_active = 1;

    timer_call_setup(&thread->depress_timer, thread_depress_expire, thread);
    thread->depress_timer_active = 1;

    thread->ref_count++;
}

void
thread_timer_terminate(void)
{
    thread_t      thread = current_thread();
    wait_result_t res;
    spl_t         s;

    s = splsched();
    wake_lock(thread);
    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
        thread->wait_timer_is_set = FALSE;
    }

    thread->wait_timer_active--;

    while (thread->wait_timer_active > 0) {
        res = assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT);
        assert(res == THREAD_WAITING);
        wake_unlock(thread);
        splx(s);

        res = thread_block(THREAD_CONTINUE_NULL);
        assert(res == THREAD_AWAKENED);

        s = splsched();
        wake_lock(thread);
    }

    thread->depress_timer_active--;

    while (thread->depress_timer_active > 0) {
        res = assert_wait((event_t)&thread->depress_timer_active, THREAD_UNINT);
        assert(res == THREAD_WAITING);
        wake_unlock(thread);
        splx(s);

        res = thread_block(THREAD_CONTINUE_NULL);
        assert(res == THREAD_AWAKENED);

        s = splsched();
        wake_lock(thread);
    }

    wake_unlock(thread);
    splx(s);

    thread_deallocate(thread);
}

/*
 *	Routine:	thread_go_locked
 *	Purpose:
 *		Start a thread running.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *	Returns:
 *		KERN_SUCCESS - Thread was set running
 *		KERN_NOT_WAITING - Thread was not waiting
 */
kern_return_t
thread_go_locked(
    thread_t      thread,
    wait_result_t result)
{
    assert(thread->at_safe_point == FALSE);
    assert(thread->wait_event == NO_EVENT64);
    assert(thread->wait_queue == WAIT_QUEUE_NULL);

    if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
        thread->state &= ~(TH_WAIT|TH_UNINT);
        if (!(thread->state & TH_RUN)) {
            thread->state |= TH_RUN;

            if (thread->active_callout)
                call_thread_unblock();

            if (!(thread->state & TH_IDLE)) {
                _mk_sp_thread_unblock(thread);
                hw_atomic_add(&thread->processor_set->run_count, 1);
            }
        }

        thread->wait_result = result;
        return KERN_SUCCESS;
    }
    return KERN_NOT_WAITING;
}

/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
    thread_t         thread,
    wait_interrupt_t interruptible)
{
    wait_result_t wait_result;
    boolean_t     at_safe_point;

    assert(thread == current_thread());

    /*
     *	The thread may have certain types of interrupts/aborts masked
     *	off.  Even if the wait location says these types of interrupts
     *	are OK, we have to honor mask settings (outer-scoped code may
     *	not be able to handle aborts at the moment).
     */
    if (interruptible > thread->interrupt_level)
        interruptible = thread->interrupt_level;

    at_safe_point = (interruptible == THREAD_ABORTSAFE);

    if ((interruptible == THREAD_UNINT) ||
        !(thread->state & TH_ABORT) ||
        (!at_safe_point && (thread->state & TH_ABORT_SAFELY))) {
        thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
        thread->at_safe_point = at_safe_point;
        thread->sleep_stamp = sched_tick;
        return (thread->wait_result = THREAD_WAITING);
    } else if (thread->state & TH_ABORT_SAFELY) {
        thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
    }
    return (thread->wait_result = THREAD_INTERRUPTED);
}

/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
    wait_interrupt_t new_level)
{
    thread_t thread = current_thread();
    wait_interrupt_t result = thread->interrupt_level;

    thread->interrupt_level = new_level;
    return result;
}
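
/*
 * Illustrative sketch (not original code): bracketing a section so that
 * nested waits cannot be aborted, then restoring the caller's level.
 * "example_call_out()" is a hypothetical callee that may block internally.
 *
 *	wait_interrupt_t saved;
 *
 *	saved = thread_interrupt_level(THREAD_UNINT);
 *	example_call_out();		(may assert_wait/thread_block inside)
 *	thread_interrupt_level(saved);
 */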

/*
 *	Routine:	assert_wait_timeout
 *	Purpose:
 *		Assert that the thread intends to block,
 *		waiting for a timeout (no user known event).
 */
unsigned int assert_wait_timeout_event;

wait_result_t
assert_wait_timeout(
    mach_msg_timeout_t msecs,
    wait_interrupt_t   interruptible)
{
    wait_result_t res;

    res = assert_wait((event_t)&assert_wait_timeout_event, interruptible);
    if (res == THREAD_WAITING)
        thread_set_timer(msecs, 1000*NSEC_PER_USEC);
    return res;
}

/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{

    thread_t thread;
    extern unsigned int debug_mode;

#if DEBUG
    if (debug_mode) return TRUE;    /* Always succeed in debug mode */
#endif

    thread = current_thread();

    return (thread == NULL || wait_queue_assert_possible(thread));
}

/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
    event_t          event,
    wait_interrupt_t interruptible)
{
    register wait_queue_t wq;
    register int          index;

    assert(event != NO_EVENT);
    assert(assert_wait_possible());

    index = wait_hash(event);
    wq = &wait_queues[index];
    return wait_queue_assert_wait(wq, event, interruptible);
}


/*
 * thread_sleep_fast_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified simple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components that use a
 *	faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
    event_t          event,
    simple_lock_t    lock,
    wait_interrupt_t interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        simple_unlock(lock);
        res = thread_block(THREAD_CONTINUE_NULL);
        simple_lock(lock);
    }
    return res;
}


/*
 * thread_sleep_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified usimple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components where
 *	simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
    event_t          event,
    usimple_lock_t   lock,
    wait_interrupt_t interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        usimple_unlock(lock);
        res = thread_block(THREAD_CONTINUE_NULL);
        usimple_lock(lock);
    }
    return res;
}

/*
 * thread_sleep_mutex:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified mutex is unlocked before releasing
 *	the cpu.  The mutex will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex(
    event_t          event,
    mutex_t          *mutex,
    wait_interrupt_t interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        mutex_unlock(mutex);
        res = thread_block(THREAD_CONTINUE_NULL);
        mutex_lock(mutex);
    }
    return res;
}
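
/*
 * Illustrative sketch (not original code): a condition-style wait built
 * on thread_sleep_mutex().  "example_lock" and "example_flag" are
 * hypothetical; because the mutex is dropped while blocked and re-held
 * when the call returns, the condition is rechecked in a loop.
 *
 *	mutex_lock(&example_lock);
 *	while (!example_flag)
 *		thread_sleep_mutex((event_t)&example_flag,
 *				   &example_lock, THREAD_UNINT);
 *	mutex_unlock(&example_lock);
 *
 * and the waker, after setting example_flag under the lock:
 *
 *	thread_wakeup((event_t)&example_flag);
 */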

/*
 * thread_sleep_mutex_deadline:
 *
 *	Cause the current thread to wait until the specified event
 *	(or deadline) occurs.  The specified mutex is unlocked before
 *	releasing the cpu.  The mutex will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_mutex_deadline(
    event_t          event,
    mutex_t          *mutex,
    uint64_t         deadline,
    wait_interrupt_t interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        mutex_unlock(mutex);
        thread_set_timer_deadline(deadline);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (res != THREAD_TIMED_OUT)
            thread_cancel_timer();
        mutex_lock(mutex);
    }
    return res;
}

/*
 * thread_sleep_lock_write:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified (write) lock is unlocked before releasing
 *	the cpu.  The (write) lock will be re-acquired before returning.
 *
 *	JMM - Add hint to make sure mutex is available before rousting
 */
wait_result_t
thread_sleep_lock_write(
    event_t          event,
    lock_t           *lock,
    wait_interrupt_t interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lock_write_done(lock);
        res = thread_block(THREAD_CONTINUE_NULL);
        lock_write(lock);
    }
    return res;
}


/*
 * thread_sleep_funnel:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  If the thread is funnelled, the funnel will be released
 *	before giving up the cpu.  The funnel will be re-acquired before returning.
 *
 *	JMM - Right now the funnel is dropped and re-acquired inside
 *	thread_block().  At some point, this may give thread_block() a hint.
 */
wait_result_t
thread_sleep_funnel(
    event_t          event,
    wait_interrupt_t interruptible)
{
    wait_result_t res;

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        res = thread_block(THREAD_CONTINUE_NULL);
    }
    return res;
}

/*
 * thread_[un]stop(thread)
 *	Once a thread has blocked interruptibly (via assert_wait), prevent
 *	it from running until thread_unstop.
 *
 *	If someone else has already stopped the thread, wait for the
 *	stop to be cleared, and then stop it again.
 *
 *	Return FALSE if interrupted.
 *
 * NOTE: thread_hold/thread_suspend should be called on the activation
 *	before calling thread_stop.  TH_SUSP is only recognized when
 *	a thread blocks and only prevents clear_wait/thread_wakeup
 *	from restarting an interruptible wait.  The wake_active flag is
 *	used to indicate that someone is waiting on the thread.
 */
boolean_t
thread_stop(
    thread_t thread)
{
    spl_t s = splsched();

    wake_lock(thread);

    while (thread->state & TH_SUSP) {
        wait_result_t result;

        thread->wake_active = TRUE;
        result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (result == THREAD_WAITING)
            result = thread_block(THREAD_CONTINUE_NULL);

        if (result != THREAD_AWAKENED)
            return (FALSE);

        s = splsched();
        wake_lock(thread);
    }

    thread_lock(thread);
    thread->state |= TH_SUSP;

    while (thread->state & TH_RUN) {
        wait_result_t result;
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->cpu_data->active_thread == thread)
            cause_ast_check(processor);
        thread_unlock(thread);

        thread->wake_active = TRUE;
        result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (result == THREAD_WAITING)
            result = thread_block(THREAD_CONTINUE_NULL);

        if (result != THREAD_AWAKENED) {
            thread_unstop(thread);
            return (FALSE);
        }

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);

    return (TRUE);
}
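
/*
 * Illustrative sketch (not original code): the stop/unstop protocol
 * described above.  The caller is assumed to have already suspended the
 * activation (thread_hold/thread_suspend), as the NOTE requires;
 * "target_thread" is a hypothetical thread reference.
 *
 *	if (thread_stop(target_thread)) {
 *		... target_thread will not resume an interruptible
 *		    wait until released ...
 *		thread_unstop(target_thread);
 *	} else {
 *		... the wait for the stop was interrupted; back out ...
 *	}
 */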

/*
 *	Clear TH_SUSP and if the thread has been stopped and is now runnable,
 *	put it back on the run queue.
 */
void
thread_unstop(
    thread_t thread)
{
    spl_t s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
        thread->state &= ~TH_SUSP;
        thread->state |= TH_RUN;

        assert(!(thread->state & TH_IDLE));
        _mk_sp_thread_unblock(thread);
        hw_atomic_add(&thread->processor_set->run_count, 1);
    }
    else
    if (thread->state & TH_SUSP) {
        thread->state &= ~TH_SUSP;

        if (thread->wake_active) {
            thread->wake_active = FALSE;
            thread_unlock(thread);
            wake_unlock(thread);
            splx(s);

            thread_wakeup(&thread->wake_active);
            return;
        }
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);
}

/*
 *	Wait for the thread's RUN bit to clear
 */
boolean_t
thread_wait(
    thread_t thread)
{
    spl_t s = splsched();

    wake_lock(thread);
    thread_lock(thread);

    while (thread->state & TH_RUN) {
        wait_result_t result;
        processor_t processor = thread->last_processor;

        if (processor != PROCESSOR_NULL &&
            processor->state == PROCESSOR_RUNNING &&
            processor->cpu_data->active_thread == thread)
            cause_ast_check(processor);
        thread_unlock(thread);

        thread->wake_active = TRUE;
        result = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
        wake_unlock(thread);
        splx(s);

        if (result == THREAD_WAITING)
            result = thread_block(THREAD_CONTINUE_NULL);

        if (result != THREAD_AWAKENED)
            return (FALSE);

        s = splsched();
        wake_lock(thread);
        thread_lock(thread);
    }

    thread_unlock(thread);
    wake_unlock(thread);
    splx(s);

    return (TRUE);
}

/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out of a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
    thread_t      thread,
    wait_result_t result)
{
    wait_queue_t  wq = thread->wait_queue;
    kern_return_t ret;
    int           loop_count;

    loop_count = 0;
    do {
        if ((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))
            return KERN_FAILURE;

        if (wq != WAIT_QUEUE_NULL) {
            if (wait_queue_lock_try(wq)) {
                wait_queue_pull_thread_locked(wq, thread, TRUE);
                /* wait queue unlocked, thread still locked */
            } else {
                thread_unlock(thread);
                delay(1);
                thread_lock(thread);

                if (wq != thread->wait_queue) {
                    return KERN_NOT_WAITING;    /* we know it moved */
                }
                continue;
            }
        }
        ret = thread_go_locked(thread, result);
        return ret;
    } while (++loop_count < LockTimeOut);
    panic("clear_wait_internal: deadlock: thread=0x%x, wq=0x%x, cpu=%d\n",
          thread, wq, cpu_number());
    return KERN_FAILURE;
}


/*
 *	clear_wait:
 *
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
    thread_t      thread,
    wait_result_t result)
{
    kern_return_t ret;
    spl_t         s;

    s = splsched();
    thread_lock(thread);
    ret = clear_wait_internal(thread, result);
    thread_unlock(thread);
    splx(s);
    return ret;
}
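
/*
 * Illustrative sketch (not original code): aborting a specific thread's
 * wait directly, rather than waking every waiter on the event.
 * "target_thread" is a hypothetical thread reference.
 *
 *	clear_wait(target_thread, THREAD_INTERRUPTED);
 *
 * Per clear_wait_internal() above, the call returns KERN_FAILURE if the
 * wait is uninterruptible and KERN_NOT_WAITING if the thread was not
 * waiting.
 */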


/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 *
 */
kern_return_t
thread_wakeup_prim(
    event_t       event,
    boolean_t     one_thread,
    wait_result_t result)
{
    register wait_queue_t wq;
    register int          index;

    index = wait_hash(event);
    wq = &wait_queues[index];
    if (one_thread)
        return (wait_queue_wakeup_one(wq, event, result));
    else
        return (wait_queue_wakeup_all(wq, event, result));
}

/*
 *	thread_bind:
 *
 *	Force a thread to execute on the specified processor.
 *	If the thread is currently executing, it may wait until its
 *	time slice is up before switching onto the specified processor.
 *
 *	A processor of PROCESSOR_NULL causes the thread to be unbound.
 *	xxx - DO NOT export this to users.
 */
void
thread_bind(
    register thread_t thread,
    processor_t       processor)
{
    spl_t s;

    s = splsched();
    thread_lock(thread);
    thread_bind_locked(thread, processor);
    thread_unlock(thread);
    splx(s);
}

/*
 *	Select a thread for this processor (the current processor) to run.
 *	May select the current thread, which must already be locked.
 */
thread_t
thread_select(
    register processor_t myprocessor)
{
    register thread_t    thread;
    processor_set_t      pset;
    register run_queue_t runq = &myprocessor->runq;
    boolean_t            other_runnable;

    /*
     *	Check for other non-idle runnable threads.
     */
    pset = myprocessor->processor_set;
    thread = myprocessor->cpu_data->active_thread;

    /* Update the thread's priority */
    if (thread->sched_stamp != sched_tick)
        update_priority(thread);

    myprocessor->current_pri = thread->sched_pri;

    simple_lock(&runq->lock);
    simple_lock(&pset->runq.lock);

    other_runnable = runq->count > 0 || pset->runq.count > 0;

    if (thread->state == TH_RUN &&
        (!other_runnable ||
         (runq->highq < thread->sched_pri &&
          pset->runq.highq < thread->sched_pri)) &&
        thread->processor_set == pset &&
        (thread->bound_processor == PROCESSOR_NULL ||
         thread->bound_processor == myprocessor)) {

        /* I am the highest priority runnable (non-idle) thread */
        simple_unlock(&pset->runq.lock);
        simple_unlock(&runq->lock);

        myprocessor->slice_quanta =
            (thread->sched_mode & TH_MODE_TIMESHARE)? pset->set_quanta: 1;
    }
    else
    if (other_runnable)
        thread = choose_thread(myprocessor);
    else {
        simple_unlock(&pset->runq.lock);
        simple_unlock(&runq->lock);

        /*
         *	Nothing is runnable, so set this processor idle if it
         *	was running.  If it was in an assignment or shutdown,
         *	leave it alone.  Return its idle thread.
         */
        simple_lock(&pset->sched_lock);
        if (myprocessor->state == PROCESSOR_RUNNING) {
            remqueue(&pset->active_queue, (queue_entry_t)myprocessor);
            myprocessor->state = PROCESSOR_IDLE;

            if (myprocessor == master_processor)
                enqueue_tail(&pset->idle_queue, (queue_entry_t)myprocessor);
            else
                enqueue_head(&pset->idle_queue, (queue_entry_t)myprocessor);

            pset->idle_count++;
        }
        simple_unlock(&pset->sched_lock);

        thread = myprocessor->idle_thread;
    }

    return (thread);
}


/*
 *	Stop running the current thread and start running the new thread.
 *	If continuation is non-zero, and the current thread is blocked,
 *	then it will resume by executing continuation on a new stack.
 *	Returns TRUE if the hand-off succeeds.
 *
 *	Assumes splsched.
 */

static thread_t
__current_thread(void)
{
    return (current_thread());
}

boolean_t
thread_invoke(
    register thread_t old_thread,
    register thread_t new_thread,
    int               reason,
    thread_continue_t old_cont)
{
    thread_continue_t new_cont;
    processor_t       processor;

    if (get_preemption_level() != 0)
        panic("thread_invoke: preemption_level %d\n",
              get_preemption_level());

    /*
     * Mark thread interruptible.
     */
    thread_lock(new_thread);
    new_thread->state &= ~TH_UNINT;

    assert(thread_runnable(new_thread));

    assert(old_thread->continuation == NULL);

    /*
     * Allow time constraint threads to hang onto
     * a stack.
     */
    if ((old_thread->sched_mode & TH_MODE_REALTIME) &&
        !old_thread->stack_privilege) {
        old_thread->stack_privilege = old_thread->kernel_stack;
    }

    if (old_cont != NULL) {
        if (new_thread->state & TH_STACK_HANDOFF) {
            /*
             * If the old thread is using a privileged stack,
             * check to see whether we can exchange it with
             * that of the new thread.
             */
            if (old_thread->kernel_stack == old_thread->stack_privilege &&
                !new_thread->stack_privilege)
                goto need_stack;

            new_thread->state &= ~TH_STACK_HANDOFF;
            new_cont = new_thread->continuation;
            new_thread->continuation = NULL;

            /*
             * Set up ast context of new thread and switch
             * to its timer.
             */
            processor = current_processor();
            new_thread->last_processor = processor;
            processor->current_pri = new_thread->sched_pri;
            ast_context(new_thread->top_act, processor->slot_num);
            timer_switch(&new_thread->system_timer);
            thread_unlock(new_thread);

            current_task()->csw++;

            old_thread->reason = reason;
            old_thread->continuation = old_cont;

            _mk_sp_thread_done(old_thread, new_thread, processor);

            stack_handoff(old_thread, new_thread);

            _mk_sp_thread_begin(new_thread, processor);

            wake_lock(old_thread);
            thread_lock(old_thread);

            /*
             * Inline thread_dispatch but
             * don't free stack.
             */

            switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {

            case TH_RUN | TH_UNINT:
            case TH_RUN:
                /*
                 * Still running, put back
                 * onto a run queue.
                 */
                old_thread->state |= TH_STACK_HANDOFF;
                _mk_sp_thread_dispatch(old_thread);

                thread_unlock(old_thread);
                wake_unlock(old_thread);
                break;

            case TH_RUN | TH_WAIT | TH_UNINT:
            case TH_RUN | TH_WAIT:
            {
                boolean_t reap, wake, callblock;

                /*
                 * Waiting.
                 */
                old_thread->sleep_stamp = sched_tick;
                old_thread->state |= TH_STACK_HANDOFF;
                old_thread->state &= ~TH_RUN;
                hw_atomic_sub(&old_thread->processor_set->run_count, 1);
                callblock = old_thread->active_callout;
                wake = old_thread->wake_active;
                old_thread->wake_active = FALSE;
                reap = (old_thread->state & TH_TERMINATE)? TRUE: FALSE;

                thread_unlock(old_thread);
                wake_unlock(old_thread);

                if (callblock)
                    call_thread_block();

                if (wake)
                    thread_wakeup((event_t)&old_thread->wake_active);

                if (reap)
                    thread_reaper_enqueue(old_thread);
                break;
            }

            case TH_RUN | TH_IDLE:
                /*
                 * The idle threads don't go
                 * onto a run queue.
                 */
                old_thread->state |= TH_STACK_HANDOFF;
                thread_unlock(old_thread);
                wake_unlock(old_thread);
                break;

            default:
                panic("thread_invoke: state 0x%x\n", old_thread->state);
            }

            counter_always(c_thread_invoke_hits++);

            if (new_thread->funnel_state & TH_FN_REFUNNEL) {
                kern_return_t wait_result = new_thread->wait_result;

                new_thread->funnel_state = 0;
                KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
                             new_thread->funnel_lock, 2, 0, 0, 0);
                funnel_lock(new_thread->funnel_lock);
                KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,
                             new_thread->funnel_lock, 2, 0, 0, 0);
                new_thread->funnel_state = TH_FN_OWNED;
                new_thread->wait_result = wait_result;
            }
            (void) spllo();

            assert(new_cont);
            call_continuation(new_cont);
            /*NOTREACHED*/
            return (TRUE);
        }
        else
        if (new_thread->state & TH_STACK_ALLOC) {
            /*
             * Waiting for a stack
             */
            counter_always(c_thread_invoke_misses++);
            thread_unlock(new_thread);
            return (FALSE);
        }
        else
        if (new_thread == old_thread) {
            /* same thread but with continuation */
            counter(++c_thread_invoke_same);
            thread_unlock(new_thread);

            if (new_thread->funnel_state & TH_FN_REFUNNEL) {
                kern_return_t wait_result = new_thread->wait_result;

                new_thread->funnel_state = 0;
                KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
                             new_thread->funnel_lock, 3, 0, 0, 0);
                funnel_lock(new_thread->funnel_lock);
                KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,
                             new_thread->funnel_lock, 3, 0, 0, 0);
                new_thread->funnel_state = TH_FN_OWNED;
                new_thread->wait_result = wait_result;
            }
            (void) spllo();
            call_continuation(old_cont);
            /*NOTREACHED*/
        }
    }
    else {
        /*
         * Check that the new thread has a stack
         */
        if (new_thread->state & TH_STACK_HANDOFF) {
need_stack:
            if (!stack_alloc_try(new_thread, thread_continue)) {
                counter_always(c_thread_invoke_misses++);
                thread_swapin(new_thread);
                return (FALSE);
            }

            new_thread->state &= ~TH_STACK_HANDOFF;
        }
        else
        if (new_thread->state & TH_STACK_ALLOC) {
            /*
             * Waiting for a stack
             */
            counter_always(c_thread_invoke_misses++);
            thread_unlock(new_thread);
            return (FALSE);
        }
        else
        if (old_thread == new_thread) {
            counter(++c_thread_invoke_same);
            thread_unlock(new_thread);
            return (TRUE);
        }
    }

    /*
     * Set up ast context of new thread and switch to its timer.
     */
    processor = current_processor();
    new_thread->last_processor = processor;
    processor->current_pri = new_thread->sched_pri;
    ast_context(new_thread->top_act, processor->slot_num);
    timer_switch(&new_thread->system_timer);
    assert(thread_runnable(new_thread));
    thread_unlock(new_thread);

    counter_always(c_thread_invoke_csw++);
    current_task()->csw++;

    assert(old_thread->runq == RUN_QUEUE_NULL);
    old_thread->reason = reason;
    old_thread->continuation = old_cont;

    _mk_sp_thread_done(old_thread, new_thread, processor);

    /*
     * switch_context is machine-dependent.  It does the
     * machine-dependent components of a context-switch, like
     * changing address spaces.  It updates active_threads.
     */
    old_thread = switch_context(old_thread, old_cont, new_thread);

    /* Now on new thread's stack.  Set a local variable to refer to it. */
    new_thread = __current_thread();
    assert(old_thread != new_thread);

    assert(thread_runnable(new_thread));
    _mk_sp_thread_begin(new_thread, new_thread->last_processor);

    /*
     *	We're back.  Now old_thread is the thread that resumed
     *	us, and we have to dispatch it.
     */
    thread_dispatch(old_thread);

    if (old_cont) {
        if (new_thread->funnel_state & TH_FN_REFUNNEL) {
            kern_return_t wait_result = new_thread->wait_result;

            new_thread->funnel_state = 0;
            KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,
                         new_thread->funnel_lock, 3, 0, 0, 0);
            funnel_lock(new_thread->funnel_lock);
            KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,
                         new_thread->funnel_lock, 3, 0, 0, 0);
            new_thread->funnel_state = TH_FN_OWNED;
            new_thread->wait_result = wait_result;
        }
        (void) spllo();
        call_continuation(old_cont);
        /*NOTREACHED*/
    }

    return (TRUE);
}

/*
 * thread_continue:
 *
 *	Called when a thread gets a new stack, at splsched();
 */
void
thread_continue(
    register thread_t old_thread)
{
    register thread_t          self = current_thread();
    register thread_continue_t continuation;

    continuation = self->continuation;
    self->continuation = NULL;

    _mk_sp_thread_begin(self, self->last_processor);

    /*
     *	We must dispatch the old thread and then
     *	call the current thread's continuation.
     *	There might not be an old thread, if we are
     *	the first thread to run on this processor.
     */
    if (old_thread != THREAD_NULL)
        thread_dispatch(old_thread);

    if (self->funnel_state & TH_FN_REFUNNEL) {
        kern_return_t wait_result = self->wait_result;

        self->funnel_state = 0;
        KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
        funnel_lock(self->funnel_lock);
        KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0);
        self->funnel_state = TH_FN_OWNED;
        self->wait_result = wait_result;
    }
    (void)spllo();
    assert(continuation);
    call_continuation(continuation);
    /*NOTREACHED*/
}

#if MACH_LDEBUG || MACH_KDB

#define THREAD_LOG_SIZE 300

struct t64 {
    unsigned long h;
    unsigned long l;
};

struct {
    struct t64 stamp;
    thread_t   thread;
    long       info1;
    long       info2;
    long       info3;
    char       *action;
} thread_log[THREAD_LOG_SIZE];

int thread_log_index;

void check_thread_time(long n);


int check_thread_time_crash;

#if 0
void
check_thread_time(long us)
{
    struct t64 temp;

    if (!check_thread_time_crash)
        return;

    temp = thread_log[0].stamp;
    cyctm05_diff(&thread_log[1].stamp, &thread_log[0].stamp, &temp);

    if (temp.l >= us && thread_log[1].info != 0x49) /* HACK!!! */
        panic("check_thread_time");
}
#endif

void
log_thread_action(char * action, long info1, long info2, long info3)
{
    int   i;
    spl_t x;
    static unsigned int tstamp;

    x = splhigh();

    for (i = THREAD_LOG_SIZE-1; i > 0; i--) {
        thread_log[i] = thread_log[i-1];
    }

    thread_log[0].stamp.h = 0;
    thread_log[0].stamp.l = tstamp++;
    thread_log[0].thread = current_thread();
    thread_log[0].info1 = info1;
    thread_log[0].info2 = info2;
    thread_log[0].info3 = info3;
    thread_log[0].action = action;
/*  strcpy (&thread_log[0].action[0], action);*/

    splx(x);
}
#endif /* MACH_LDEBUG || MACH_KDB */

#if MACH_KDB
#include <ddb/db_output.h>
void db_show_thread_log(void);

void
db_show_thread_log(void)
{
    int i;

    db_printf("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ",
              " Info3 ", " Timestamp ", "Action");

    for (i = 0; i < THREAD_LOG_SIZE; i++) {
        db_printf("%08x %08x %08x %08x %08x/%08x %s\n",
                  thread_log[i].thread,
                  thread_log[i].info1,
                  thread_log[i].info2,
                  thread_log[i].info3,
                  thread_log[i].stamp.h,
                  thread_log[i].stamp.l,
                  thread_log[i].action);
    }
}
#endif /* MACH_KDB */

/*
 *	thread_block_reason:
 *
 *	Block the current thread if a wait has been asserted,
 *	otherwise unconditionally yield the remainder of the
 *	current quantum unless reason contains AST_BLOCK.
 *
 *	If a continuation is specified, then thread_block will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
counter(mach_counter_t c_thread_block_calls = 0;)

int
thread_block_reason(
    thread_continue_t continuation,
    ast_t             reason)
{
    register thread_t    thread = current_thread();
    register processor_t myprocessor;
    register thread_t    new_thread;
    spl_t                s;

    counter(++c_thread_block_calls);

    check_simple_locks();

    s = splsched();

    if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) {
        thread->funnel_state = TH_FN_REFUNNEL;
        KERNEL_DEBUG(
            0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0);
        funnel_unlock(thread->funnel_lock);
    }

    myprocessor = current_processor();

    /* If we're explicitly yielding, force a subsequent quantum */
    if (reason & AST_YIELD)
        myprocessor->slice_quanta = 0;

    /* We're handling all scheduling AST's */
    ast_off(AST_SCHEDULING);

    thread_lock(thread);
    new_thread = thread_select(myprocessor);
    assert(new_thread && thread_runnable(new_thread));
    thread_unlock(thread);
    while (!thread_invoke(thread, new_thread, reason, continuation)) {
        thread_lock(thread);
        new_thread = thread_select(myprocessor);
        assert(new_thread && thread_runnable(new_thread));
        thread_unlock(thread);
    }

    if (thread->funnel_state & TH_FN_REFUNNEL) {
        kern_return_t wait_result = thread->wait_result;

        thread->funnel_state = 0;
        KERNEL_DEBUG(
            0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
        funnel_lock(thread->funnel_lock);
        KERNEL_DEBUG(
            0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0);
        thread->funnel_state = TH_FN_OWNED;
        thread->wait_result = wait_result;
    }

    splx(s);

    return (thread->wait_result);
}

/*
 *	thread_block:
 *
 *	Block the current thread if a wait has been asserted.
 */
int
thread_block(
    thread_continue_t continuation)
{
    return thread_block_reason(continuation, AST_NONE);
}
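
/*
 * Illustrative sketch (not original code): blocking with a continuation
 * so the kernel stack can be discarded while waiting, as described for
 * thread_block_reason() above.  The continuation runs on a fresh stack
 * when the thread is awakened and never returns into the blocking frame.
 * "example_continue" and "example_event" are hypothetical, and the
 * continuation's exact signature follows thread_continue_t for this
 * release.
 *
 *	assert_wait((event_t)&example_event, THREAD_UNINT);
 *	thread_block(example_continue);
 *	(not reached -- execution resumes in example_continue())
 */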

/*
 *	thread_run:
 *
 *	Switch directly from the current (old) thread to the
 *	specified thread, handing off our quantum if possible.
 *
 *	New thread must be runnable, and not on a run queue.
 *
 *	Assumption:
 *	at splsched.
 */
int
thread_run(
    thread_t          old_thread,
    thread_continue_t continuation,
    thread_t          new_thread)
{
    ast_t handoff = AST_HANDOFF;

    assert(old_thread == current_thread());

    if (old_thread->funnel_state & TH_FN_OWNED) {
        old_thread->funnel_state = TH_FN_REFUNNEL;
        KERNEL_DEBUG(
            0x603242c | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0);
        funnel_unlock(old_thread->funnel_lock);
    }

    while (!thread_invoke(old_thread, new_thread, handoff, continuation)) {
        register processor_t myprocessor = current_processor();

        thread_lock(old_thread);
        new_thread = thread_select(myprocessor);
        thread_unlock(old_thread);
        handoff = AST_NONE;
    }

    /* if we fell thru */
    if (old_thread->funnel_state & TH_FN_REFUNNEL) {
        kern_return_t wait_result = old_thread->wait_result;

        old_thread->funnel_state = 0;
        KERNEL_DEBUG(
            0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 6, 0, 0, 0);
        funnel_lock(old_thread->funnel_lock);
        KERNEL_DEBUG(
            0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 6, 0, 0, 0);
        old_thread->funnel_state = TH_FN_OWNED;
        old_thread->wait_result = wait_result;
    }

    return (old_thread->wait_result);
}

/*
 *	Dispatches a running thread that is not on a runq.
 *	Called at splsched.
 */
void
thread_dispatch(
    register thread_t thread)
{
    wake_lock(thread);
    thread_lock(thread);

    /*
     *	If we are discarding the thread's stack, we must do it
     *	before the thread has a chance to run.
     */
#ifndef i386
    if (thread->continuation != NULL) {
        assert((thread->state & TH_STACK_STATE) == 0);
        thread->state |= TH_STACK_HANDOFF;
        stack_free(thread);
    }
#endif

    switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) {

    case TH_RUN | TH_UNINT:
    case TH_RUN:
        /*
         *	No reason to stop.  Put back on a run queue.
         */
        _mk_sp_thread_dispatch(thread);
        break;

    case TH_RUN | TH_WAIT | TH_UNINT:
    case TH_RUN | TH_WAIT:
    {
        boolean_t reap, wake, callblock;

        /*
         *	Waiting
         */
        thread->sleep_stamp = sched_tick;
        thread->state &= ~TH_RUN;
        hw_atomic_sub(&thread->processor_set->run_count, 1);
        callblock = thread->active_callout;
        wake = thread->wake_active;
        thread->wake_active = FALSE;
        reap = (thread->state & TH_TERMINATE)? TRUE: FALSE;

        thread_unlock(thread);
        wake_unlock(thread);

        if (callblock)
            call_thread_block();

        if (wake)
            thread_wakeup((event_t)&thread->wake_active);

        if (reap)
            thread_reaper_enqueue(thread);

        return;
    }

    case TH_RUN | TH_IDLE:
        /*
         *	The idle threads don't go
         *	onto a run queue.
         */
        break;

    default:
        panic("thread_dispatch: bad thread state 0x%x\n", thread->state);
    }

    thread_unlock(thread);
    wake_unlock(thread);
}

/*
 *	Enqueue thread on run queue.  Thread must be locked,
 *	and not already be on a run queue.  Returns TRUE iff
 *	the particular queue level was empty beforehand.
 */
boolean_t
run_queue_enqueue(
    register run_queue_t rq,
    register thread_t    thread,
    boolean_t            tail)
{
    register int     whichq = thread->sched_pri;
    register queue_t queue = &rq->queues[whichq];
    boolean_t        result = FALSE;

    assert(whichq >= MINPRI && whichq <= MAXPRI);

    simple_lock(&rq->lock);
    assert(thread->runq == RUN_QUEUE_NULL);
    if (queue_empty(queue)) {
        enqueue_tail(queue, (queue_entry_t)thread);

        setbit(MAXPRI - whichq, rq->bitmap);
        if (whichq > rq->highq)
            rq->highq = whichq;
        result = TRUE;
    }
    else
    if (tail)
        enqueue_tail(queue, (queue_entry_t)thread);
    else
        enqueue_head(queue, (queue_entry_t)thread);

    thread->runq = rq;
    if (thread->sched_mode & TH_MODE_PREEMPT)
        rq->urgency++;
    rq->count++;
#if DEBUG
    thread_check(thread, rq);
#endif /* DEBUG */
    simple_unlock(&rq->lock);

    return (result);
}

struct {
    uint32_t pset_idle_last,
             pset_idle_any,
             pset_self,
             pset_last,
             pset_other,
             bound_idle,
             bound_self,
             bound_other;
} dispatch_counts;
1851
1c79356b
A
1852/*
1853 * thread_setrun:
1854 *
9bccf70c
A
1855 * Dispatch thread for execution, directly onto an idle
1856 * processor if possible. Else put on appropriate run
1857 * queue. (local if bound, else processor set)
1858 *
1859 * Thread must be locked.
1860 *
1861 * The tail parameter indicates the proper placement of
1862 * the thread on a run queue.
1c79356b
A
1863 */
1864void
1865thread_setrun(
1866 register thread_t new_thread,
1c79356b
A
1867 boolean_t tail)
1868{
1869 register processor_t processor;
1c79356b 1870 register processor_set_t pset;
9bccf70c
A
1871 register thread_t thread;
1872 boolean_t try_preempt = FALSE;
1873 ast_t preempt = AST_BLOCK;
1c79356b 1874
1c79356b
A
1875 assert(thread_runnable(new_thread));
1876
1877 /*
1878 * Update priority if needed.
1879 */
1880 if (new_thread->sched_stamp != sched_tick)
1881 update_priority(new_thread);
1882
1c79356b 1883 /*
9bccf70c 1884 * Check for urgent preemption.
1c79356b 1885 */
9bccf70c
A
1886 if (new_thread->sched_mode & TH_MODE_PREEMPT)
1887 preempt |= AST_URGENT;
1888
1889 assert(new_thread->runq == RUN_QUEUE_NULL);
1890
1c79356b
A
1891 if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
1892 /*
9bccf70c
A
1893 * First try to dispatch on
1894 * the last processor.
1c79356b
A
1895 */
1896 pset = new_thread->processor_set;
9bccf70c
A
1897 processor = new_thread->last_processor;
1898 if ( pset->processor_count > 1 &&
1899 processor != PROCESSOR_NULL &&
1900 processor->state == PROCESSOR_IDLE ) {
1901 simple_lock(&processor->lock);
1902 simple_lock(&pset->sched_lock);
1903 if ( processor->processor_set == pset &&
1904 processor->state == PROCESSOR_IDLE ) {
1905 remqueue(&pset->idle_queue, (queue_entry_t)processor);
1c79356b
A
1906 pset->idle_count--;
1907 processor->next_thread = new_thread;
1908 processor->state = PROCESSOR_DISPATCHING;
9bccf70c
A
1909 simple_unlock(&pset->sched_lock);
1910 simple_unlock(&processor->lock);
1911 if (processor != current_processor())
1c79356b 1912 machine_signal_idle(processor);
9bccf70c 1913 dispatch_counts.pset_idle_last++;
1c79356b
A
1914 return;
1915 }
9bccf70c
A
1916 simple_unlock(&processor->lock);
1917 }
1918 else
1919 simple_lock(&pset->sched_lock);
1920
1921 /*
1922 * Next pick any idle processor
1923 * in the processor set.
1924 */
1925 if (pset->idle_count > 0) {
1926 processor = (processor_t)dequeue_head(&pset->idle_queue);
1927 pset->idle_count--;
1928 processor->next_thread = new_thread;
1929 processor->state = PROCESSOR_DISPATCHING;
1930 simple_unlock(&pset->sched_lock);
1931 if (processor != current_processor())
1932 machine_signal_idle(processor);
1933 dispatch_counts.pset_idle_any++;
1934 return;
1935 }
1c79356b 1936
0b4e3aa0 1937 /*
9bccf70c 1938 * Place thread on run queue.
0b4e3aa0 1939 */
9bccf70c
A
1940 if (run_queue_enqueue(&pset->runq, new_thread, tail))
1941 try_preempt = TRUE;
1942
1943 /*
1944 * Update the timesharing quanta.
1945 */
1946 pset_quanta_update(pset);
0b4e3aa0 1947
1c79356b 1948 /*
9bccf70c 1949 * Preempt check.
1c79356b 1950 */
1c79356b 1951 processor = current_processor();
9bccf70c
A
1952 thread = processor->cpu_data->active_thread;
1953 if (try_preempt) {
1954 /*
1955 * First try the current processor
1956 * if it is a member of the correct
1957 * processor set.
1c79356b 1958 */
9bccf70c
A
1959 if ( pset == processor->processor_set &&
1960 csw_needed(thread, processor) ) {
1961 simple_unlock(&pset->sched_lock);
1962
1963 ast_on(preempt);
1964 dispatch_counts.pset_self++;
1965 return;
1966 }
1967
1968 /*
1969 * If that failed and we have other
1970 * processors available keep trying.
1971 */
1972 if ( pset->processor_count > 1 ||
1973 pset != processor->processor_set ) {
1974 queue_t active = &pset->active_queue;
1975 processor_t myprocessor, lastprocessor;
1976 queue_entry_t next;
1977
1978 /*
1979 * Next try the last processor
1980 * dispatched on.
1981 */
1982 myprocessor = processor;
1983 processor = new_thread->last_processor;
1984 if ( processor != myprocessor &&
1985 processor != PROCESSOR_NULL &&
1986 processor->processor_set == pset &&
1987 processor->state == PROCESSOR_RUNNING &&
1988 new_thread->sched_pri > processor->current_pri ) {
1989 cause_ast_check(processor);
1990 simple_unlock(&pset->sched_lock);
1991 dispatch_counts.pset_last++;
1992 return;
1993 }
1994
1995 /*
1996 * Lastly, pick any other
1997 * available processor.
1998 */
1999 lastprocessor = processor;
2000 processor = (processor_t)queue_first(active);
2001 while (!queue_end(active, (queue_entry_t)processor)) {
2002 next = queue_next((queue_entry_t)processor);
2003
2004 if ( processor != myprocessor &&
2005 processor != lastprocessor &&
2006 new_thread->sched_pri > processor->current_pri ) {
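/*
 * Rotate the chosen processor to the tail of the active queue
 * (unless it is already last) so repeated dispatches spread
 * preemptions across the set.
 */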
2007 if (!queue_end(active, next)) {
2008 remqueue(active, (queue_entry_t)processor);
2009 enqueue_tail(active, (queue_entry_t)processor);
2010 }
2011 cause_ast_check(processor);
2012 simple_unlock(&pset->sched_lock);
2013 dispatch_counts.pset_other++;
2014 return;
2015 }
2016
2017 processor = (processor_t)next;
2018 }
2019 }
1c79356b 2020 }
9bccf70c
A
2021
2022 simple_unlock(&pset->sched_lock);
1c79356b
A
2023 }
2024 else {
2025 /*
2026 * Bound, can only run on bound processor. Have to lock
2027 * processor here because it may not be the current one.
2028 */
9bccf70c 2029 if (processor->state == PROCESSOR_IDLE) {
1c79356b
A
2030 simple_lock(&processor->lock);
2031 pset = processor->processor_set;
9bccf70c 2032 simple_lock(&pset->sched_lock);
1c79356b 2033 if (processor->state == PROCESSOR_IDLE) {
9bccf70c 2034 remqueue(&pset->idle_queue, (queue_entry_t)processor);
1c79356b
A
2035 pset->idle_count--;
2036 processor->next_thread = new_thread;
2037 processor->state = PROCESSOR_DISPATCHING;
9bccf70c 2038 simple_unlock(&pset->sched_lock);
1c79356b 2039 simple_unlock(&processor->lock);
9bccf70c 2040 if (processor != current_processor())
1c79356b 2041 machine_signal_idle(processor);
9bccf70c 2042 dispatch_counts.bound_idle++;
1c79356b
A
2043 return;
2044 }
9bccf70c 2045 simple_unlock(&pset->sched_lock);
1c79356b
A
2046 simple_unlock(&processor->lock);
2047 }
2048
9bccf70c
A
2049 if (run_queue_enqueue(&processor->runq, new_thread, tail))
2050 try_preempt = TRUE;
2051
2052 if (processor == current_processor()) {
2053 if (try_preempt) {
2054 thread = processor->cpu_data->active_thread;
2055 if (csw_needed(thread, processor)) {
2056 ast_on(preempt);
2057 dispatch_counts.bound_self++;
2058 }
2059 }
2060 }
1c79356b 2061 else {
9bccf70c
A
2062 if (try_preempt) {
2063 if ( processor->state == PROCESSOR_RUNNING &&
2064 new_thread->sched_pri > processor->current_pri ) {
2065 cause_ast_check(processor);
2066 dispatch_counts.bound_other++;
2067 return;
2068 }
2069 }
2070
2071 if (processor->state == PROCESSOR_IDLE) {
2072 machine_signal_idle(processor);
2073 dispatch_counts.bound_idle++;
2074 }
2075 }
2076 }
2077}
2078
2079/*
2080 * Called at splsched by a thread on itself.
2081 */
2082ast_t
2083csw_check(
2084 thread_t thread,
2085 processor_t processor)
2086{
2087 int current_pri = thread->sched_pri;
2088 ast_t result = AST_NONE;
2089 run_queue_t runq;
2090
2091 if (first_quantum(processor)) {
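/* Still within the initial quantum: preempt only for a strictly higher priority thread. */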
2092 runq = &processor->processor_set->runq;
2093 if (runq->highq > current_pri) {
2094 if (runq->urgency > 0)
2095 return (AST_BLOCK | AST_URGENT);
2096
2097 result |= AST_BLOCK;
2098 }
2099
2100 runq = &processor->runq;
2101 if (runq->highq > current_pri) {
2102 if (runq->urgency > 0)
2103 return (AST_BLOCK | AST_URGENT);
2104
2105 result |= AST_BLOCK;
2106 }
2107 }
2108 else {
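/* Initial quantum expired: an equal-priority thread on a run queue also forces a reschedule. */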
2109 runq = &processor->processor_set->runq;
2110 if (runq->highq >= current_pri) {
2111 if (runq->urgency > 0)
2112 return (AST_BLOCK | AST_URGENT);
2113
2114 result |= AST_BLOCK;
2115 }
2116
2117 runq = &processor->runq;
2118 if (runq->highq >= current_pri) {
2119 if (runq->urgency > 0)
2120 return (AST_BLOCK | AST_URGENT);
2121
2122 result |= AST_BLOCK;
2123 }
1c79356b 2124 }
9bccf70c
A
2125
2126 if (result != AST_NONE)
2127 return (result);
2128
2129 if (thread->state & TH_SUSP)
2130 result |= AST_BLOCK;
2131
2132 return (result);
1c79356b
A
2133}
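/*
 * Illustrative example of csw_check() results: a thread running at
 * priority 31 past its initial quantum is asked to block (AST_BLOCK)
 * as soon as another priority-31 thread appears on either run queue,
 * and urgently (AST_BLOCK | AST_URGENT) if that thread was counted
 * in runq->urgency (TH_MODE_PREEMPT).
 */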
2134
2135/*
9bccf70c 2136 * set_sched_pri:
1c79356b 2137 *
9bccf70c
A
2138 * Set the current scheduled priority of the specified thread.
2139 * This may cause the thread to change queues.
1c79356b
A
2140 *
2141 * The thread *must* be locked by the caller.
2142 */
2143void
9bccf70c 2144set_sched_pri(
1c79356b 2145 thread_t thread,
9bccf70c 2146 int priority)
1c79356b 2147{
9bccf70c
A
2148 register struct run_queue *rq = rem_runq(thread);
2149
2150 if ( !(thread->sched_mode & TH_MODE_TIMESHARE) &&
2151 (priority >= BASEPRI_PREEMPT ||
2152 (thread->task_priority < MINPRI_KERNEL &&
2153 thread->task_priority >= BASEPRI_BACKGROUND &&
2154 priority > thread->task_priority) ||
2155 (thread->sched_mode & TH_MODE_FORCEDPREEMPT) ) )
2156 thread->sched_mode |= TH_MODE_PREEMPT;
2157 else
2158 thread->sched_mode &= ~TH_MODE_PREEMPT;
1c79356b 2159
9bccf70c
A
2160 thread->sched_pri = priority;
2161 if (rq != RUN_QUEUE_NULL)
2162 thread_setrun(thread, TAIL_Q);
2163 else
2164 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
2165 processor_t processor = thread->last_processor;
2166
2167 if (thread == current_thread()) {
2168 ast_t preempt = csw_check(thread, processor);
2169
2170 if (preempt != AST_NONE)
2171 ast_on(preempt);
2172 processor->current_pri = priority;
2173 }
2174 else
2175 if ( processor != PROCESSOR_NULL &&
2176 processor->cpu_data->active_thread == thread )
2177 cause_ast_check(processor);
1c79356b
A
2178 }
2179}
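/*
 * Illustrative use of set_sched_pri() (compare idle_thread() below):
 * callers raise to splsched and lock the thread before changing its
 * scheduled priority.
 *
 *	s = splsched();
 *	thread_lock(thread);
 *	set_sched_pri(thread, priority);
 *	thread_unlock(thread);
 *	splx(s);
 */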
2180
2181/*
2182 * rem_runq:
2183 *
2184 * Remove a thread from its run queue.
2185 * The run queue that the thread was on is returned
2186 * (or RUN_QUEUE_NULL if not on a run queue). Thread *must* be locked
2187 * before calling this routine. Unusual locking protocol on runq
2188 * field in thread structure makes this code interesting; see thread.h.
2189 */
2190run_queue_t
2191rem_runq(
2192 thread_t thread)
2193{
2194 register struct run_queue *rq;
2195
2196 rq = thread->runq;
2197 /*
2198 * If rq is RUN_QUEUE_NULL, the thread will stay out of the
2199 * run_queues because the caller locked the thread. Otherwise
2200 * the thread is on a runq, but could leave.
2201 */
2202 if (rq != RUN_QUEUE_NULL) {
2203 simple_lock(&rq->lock);
2204 if (rq == thread->runq) {
2205 /*
2206 * Thread is in a runq and we have a lock on
2207 * that runq.
2208 */
2209#if DEBUG
2210 thread_check(thread, rq);
2211#endif /* DEBUG */
2212 remqueue(&rq->queues[0], (queue_entry_t)thread);
2213 rq->count--;
9bccf70c
A
2214 if (thread->sched_mode & TH_MODE_PREEMPT)
2215 rq->urgency--;
2216 assert(rq->urgency >= 0);
1c79356b
A
2217
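/*
 * The priority bitmap is indexed from the top: bit (MAXPRI - pri)
 * is set while the queue at priority pri is occupied, so
 * MAXPRI - ffsbit(bitmap) recovers the highest occupied priority.
 */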
2218 if (queue_empty(rq->queues + thread->sched_pri)) {
2219 /* update run queue status */
2220 if (thread->sched_pri != IDLEPRI)
2221 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
2222 rq->highq = MAXPRI - ffsbit(rq->bitmap);
2223 }
2224 thread->runq = RUN_QUEUE_NULL;
2225 simple_unlock(&rq->lock);
2226 }
2227 else {
2228 /*
2229 * The thread left the runq before we could
2230 * lock the runq. It is not on a runq now, and
2231 * can't move again because this routine's
2232 * caller locked the thread.
2233 */
2234 assert(thread->runq == RUN_QUEUE_NULL);
2235 simple_unlock(&rq->lock);
2236 rq = RUN_QUEUE_NULL;
2237 }
2238 }
2239
2240 return (rq);
2241}
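/*
 * For reference, run_queue_enqueue() (defined earlier in this file) is
 * essentially the inverse of rem_runq(): the thread is queued at its
 * sched_pri, the corresponding bitmap bit is set, count (and urgency,
 * for TH_MODE_PREEMPT threads) is incremented, and highq is raised if
 * needed; its boolean result is what tells thread_setrun() above
 * whether a preemption check is worthwhile.
 */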
2242
1c79356b
A
2243/*
2244 * choose_thread:
2245 *
2246 * Choose a thread to execute. The thread chosen is removed
2247 * from its run queue. Note that this requires only that the runq
2248 * lock be held.
2249 *
2250 * Strategy:
2251 * Check processor runq first; if anything found, run it.
2252 * Else check pset runq; if nothing found, return idle thread.
2253 *
2254 * Second line of strategy is implemented by choose_pset_thread.
9bccf70c
A
2255 *
2256 * Called with both the local & pset run queues locked, returned
2257 * unlocked.
1c79356b
A
2258 */
2259thread_t
2260choose_thread(
2261 processor_t myprocessor)
2262{
2263 thread_t thread;
2264 register queue_t q;
2265 register run_queue_t runq;
2266 processor_set_t pset;
2267
2268 runq = &myprocessor->runq;
2269 pset = myprocessor->processor_set;
2270
1c79356b 2271 if (runq->count > 0 && runq->highq >= pset->runq.highq) {
9bccf70c 2272 simple_unlock(&pset->runq.lock);
1c79356b
A
2273 q = runq->queues + runq->highq;
2274#if MACH_ASSERT
2275 if (!queue_empty(q)) {
2276#endif /*MACH_ASSERT*/
2277 thread = (thread_t)q->next;
2278 ((queue_entry_t)thread)->next->prev = q;
2279 q->next = ((queue_entry_t)thread)->next;
2280 thread->runq = RUN_QUEUE_NULL;
2281 runq->count--;
9bccf70c
A
2282 if (thread->sched_mode & TH_MODE_PREEMPT)
2283 runq->urgency--;
2284 assert(runq->urgency >= 0);
1c79356b
A
2285 if (queue_empty(q)) {
2286 if (runq->highq != IDLEPRI)
2287 clrbit(MAXPRI - runq->highq, runq->bitmap);
2288 runq->highq = MAXPRI - ffsbit(runq->bitmap);
2289 }
2290 simple_unlock(&runq->lock);
2291 return (thread);
2292#if MACH_ASSERT
2293 }
2294 panic("choose_thread");
2295#endif /*MACH_ASSERT*/
2296 /*NOTREACHED*/
2297 }
9bccf70c 2298 simple_unlock(&myprocessor->runq.lock);
1c79356b 2299
1c79356b
A
2300 return (choose_pset_thread(myprocessor, pset));
2301}
2302
1c79356b
A
2303/*
2304 * choose_pset_thread: choose a thread from processor_set runq or
2305 * set processor idle and choose its idle thread.
2306 *
1c79356b
A
2307 * This routine chooses and removes a thread from the runq if there
2308 * is one (and returns it), else it sets the processor idle and
2309 * returns its idle thread.
9bccf70c
A
2310 *
2311 * Called with both local & pset run queues locked, returned
2312 * unlocked.
1c79356b
A
2313 */
2314thread_t
2315choose_pset_thread(
2316 register processor_t myprocessor,
2317 processor_set_t pset)
2318{
2319 register run_queue_t runq;
2320 register thread_t thread;
2321 register queue_t q;
2322
2323 runq = &pset->runq;
2324 if (runq->count > 0) {
2325 q = runq->queues + runq->highq;
2326#if MACH_ASSERT
2327 if (!queue_empty(q)) {
2328#endif /*MACH_ASSERT*/
2329 thread = (thread_t)q->next;
2330 ((queue_entry_t)thread)->next->prev = q;
2331 q->next = ((queue_entry_t)thread)->next;
2332 thread->runq = RUN_QUEUE_NULL;
2333 runq->count--;
9bccf70c
A
2334 if (thread->sched_mode & TH_MODE_PREEMPT)
2335 runq->urgency--;
2336 assert(runq->urgency >= 0);
1c79356b
A
2337 if (queue_empty(q)) {
2338 if (runq->highq != IDLEPRI)
2339 clrbit(MAXPRI - runq->highq, runq->bitmap);
2340 runq->highq = MAXPRI - ffsbit(runq->bitmap);
2341 }
9bccf70c 2342 pset_quanta_update(pset);
1c79356b
A
2343 simple_unlock(&runq->lock);
2344 return (thread);
2345#if MACH_ASSERT
2346 }
2347 panic("choose_pset_thread");
2348#endif /*MACH_ASSERT*/
2349 /*NOTREACHED*/
2350 }
2351 simple_unlock(&runq->lock);
2352
2353 /*
2354 * Nothing is runnable, so set this processor idle if it
2355 * was running. If it was in an assignment or shutdown,
2356 * leave it alone. Return its idle thread.
2357 */
9bccf70c 2358 simple_lock(&pset->sched_lock);
1c79356b 2359 if (myprocessor->state == PROCESSOR_RUNNING) {
9bccf70c 2360 remqueue(&pset->active_queue, (queue_entry_t)myprocessor);
1c79356b 2361 myprocessor->state = PROCESSOR_IDLE;
9bccf70c 2362
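/*
 * Keep the master processor at the tail of the idle queue so that
 * thread_setrun(), which dispatches from the head, prefers the
 * other processors.
 */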
1c79356b 2363 if (myprocessor == master_processor)
9bccf70c 2364 enqueue_tail(&pset->idle_queue, (queue_entry_t)myprocessor);
1c79356b 2365 else
9bccf70c 2366 enqueue_head(&pset->idle_queue, (queue_entry_t)myprocessor);
1c79356b
A
2367
2368 pset->idle_count++;
2369 }
9bccf70c 2370 simple_unlock(&pset->sched_lock);
1c79356b
A
2371
2372 return (myprocessor->idle_thread);
2373}
2374
2375/*
2376 * no_dispatch_count counts number of times processors go non-idle
2377 * without being dispatched. This should be very rare.
2378 */
2379int no_dispatch_count = 0;
2380
2381/*
2382 * This is the idle thread, which just looks for other threads
2383 * to execute.
2384 */
2385void
2386idle_thread_continue(void)
2387{
2388 register processor_t myprocessor;
2389 register volatile thread_t *threadp;
2390 register volatile int *gcount;
2391 register volatile int *lcount;
2392 register thread_t new_thread;
2393 register int state;
2394 register processor_set_t pset;
2395 int mycpu;
2396
2397 mycpu = cpu_number();
9bccf70c 2398 myprocessor = cpu_to_processor(mycpu);
1c79356b
A
2399 threadp = (volatile thread_t *) &myprocessor->next_thread;
2400 lcount = (volatile int *) &myprocessor->runq.count;
2401
2402 for (;;) {
1c79356b
A
2403 gcount = (volatile int *)&myprocessor->processor_set->runq.count;
2404
2405 (void)splsched();
2406 while ( (*threadp == (volatile thread_t)THREAD_NULL) &&
2407 (*gcount == 0) && (*lcount == 0) ) {
2408
2409 /* check for ASTs while we wait */
9bccf70c 2410 if (need_ast[mycpu] &~ ( AST_SCHEDULING | AST_BSD )) {
1c79356b 2411 /* don't allow scheduling ASTs */
9bccf70c 2412 need_ast[mycpu] &= ~( AST_SCHEDULING | AST_BSD );
0b4e3aa0 2413 ast_taken(AST_ALL, TRUE); /* back at spllo */
1c79356b
A
2414 }
2415 else
2416#ifdef __ppc__
2417 machine_idle();
2418#else
2419 (void)spllo();
2420#endif
1c79356b
A
2421 (void)splsched();
2422 }
2423
1c79356b
A
2424 /*
2425 * This is not a switch statement to avoid the
2426 * bounds checking code in the common case.
2427 */
2428 pset = myprocessor->processor_set;
9bccf70c 2429 simple_lock(&pset->sched_lock);
1c79356b
A
2430retry:
2431 state = myprocessor->state;
2432 if (state == PROCESSOR_DISPATCHING) {
2433 /*
2434 * Common case -- cpu dispatched.
2435 */
2436 new_thread = *threadp;
2437 *threadp = (volatile thread_t) THREAD_NULL;
2438 myprocessor->state = PROCESSOR_RUNNING;
9bccf70c
A
2439 enqueue_tail(&pset->active_queue, (queue_entry_t)myprocessor);
2440 simple_unlock(&pset->sched_lock);
1c79356b 2441
1c79356b
A
2442 if ( myprocessor->runq.highq > new_thread->sched_pri ||
2443 pset->runq.highq > new_thread->sched_pri ) {
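/*
 * A higher-priority thread reached a run queue after the handoff;
 * requeue the dispatched thread at the head and pick again.
 */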
9bccf70c
A
2444 thread_lock(new_thread);
2445 thread_setrun(new_thread, HEAD_Q);
1c79356b
A
2446 thread_unlock(new_thread);
2447
2448 counter(c_idle_thread_block++);
2449 thread_block(idle_thread_continue);
9bccf70c 2450 /* NOTREACHED */
1c79356b
A
2451 }
2452 else {
1c79356b
A
2453 counter(c_idle_thread_handoff++);
2454 thread_run(myprocessor->idle_thread,
2455 idle_thread_continue, new_thread);
9bccf70c 2456 /* NOTREACHED */
1c79356b
A
2457 }
2458 }
2459 else
2460 if (state == PROCESSOR_IDLE) {
2461 if (myprocessor->state != PROCESSOR_IDLE) {
2462 /*
2463 * Something happened, try again.
2464 */
2465 goto retry;
2466 }
2467 /*
2468 * Processor was not dispatched (Rare).
2469 * Set it running again.
2470 */
2471 no_dispatch_count++;
2472 pset->idle_count--;
9bccf70c 2473 remqueue(&pset->idle_queue, (queue_entry_t)myprocessor);
1c79356b 2474 myprocessor->state = PROCESSOR_RUNNING;
9bccf70c
A
2475 enqueue_tail(&pset->active_queue, (queue_entry_t)myprocessor);
2476 simple_unlock(&pset->sched_lock);
1c79356b
A
2477
2478 counter(c_idle_thread_block++);
2479 thread_block(idle_thread_continue);
9bccf70c 2480 /* NOTREACHED */
1c79356b
A
2481 }
2482 else
2483 if ( state == PROCESSOR_ASSIGN ||
2484 state == PROCESSOR_SHUTDOWN ) {
2485 /*
2486 * Changing processor sets, or going off-line.
2487 * Release next_thread if there is one. Actual
2488 * thread to run is on a runq.
2489 */
2490 if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
2491 *threadp = (volatile thread_t) THREAD_NULL;
9bccf70c
A
2492 simple_unlock(&pset->sched_lock);
2493
1c79356b 2494 thread_lock(new_thread);
9bccf70c 2495 thread_setrun(new_thread, TAIL_Q);
1c79356b 2496 thread_unlock(new_thread);
9bccf70c
A
2497 }
2498 else
2499 simple_unlock(&pset->sched_lock);
1c79356b
A
2500
2501 counter(c_idle_thread_block++);
2502 thread_block(idle_thread_continue);
9bccf70c 2503 /* NOTREACHED */
1c79356b
A
2504 }
2505 else {
9bccf70c 2506 simple_unlock(&pset->sched_lock);
1c79356b 2507
9bccf70c 2508 panic("idle_thread: bad processor state %d\n", cpu_state(mycpu));
1c79356b
A
2509 }
2510
2511 (void)spllo();
2512 }
2513}
2514
2515void
2516idle_thread(void)
2517{
2518 thread_t self = current_thread();
2519 spl_t s;
2520
2521 stack_privilege(self);
1c79356b
A
2522
2523 s = splsched();
2524 thread_lock(self);
1c79356b 2525 self->priority = IDLEPRI;
9bccf70c 2526 set_sched_pri(self, self->priority);
1c79356b
A
2527 thread_unlock(self);
2528 splx(s);
2529
2530 counter(c_idle_thread_block++);
9bccf70c 2531 thread_block(idle_thread_continue);
1c79356b
A
2532 /*NOTREACHED*/
2533}
2534
0b4e3aa0
A
2535static uint64_t sched_tick_interval, sched_tick_deadline;
2536
2537void sched_tick_thread(void);
2538
2539void
2540sched_tick_init(void)
2541{
2542 kernel_thread_with_priority(
2543 kernel_task, MAXPRI_STANDARD,
2544 sched_tick_thread, TRUE, TRUE);
2545}
1c79356b
A
2546
2547/*
2548 * sched_tick_thread
2549 *
2550 * Update the priorities of all threads periodically.
2551 */
2552void
2553sched_tick_thread_continue(void)
2554{
0b4e3aa0 2555 uint64_t abstime;
1c79356b
A
2556#if SIMPLE_CLOCK
2557 int new_usec;
2558#endif /* SIMPLE_CLOCK */
2559
2560 clock_get_uptime(&abstime);
2561
2562 sched_tick++; /* age usage one more time */
2563#if SIMPLE_CLOCK
2564 /*
2565 * Compensate for clock drift. sched_usec is an
2566 * exponential average of the number of microseconds in
2567 * a second. It decays in the same fashion as cpu_usage.
2568 */
2569 new_usec = sched_usec_elapsed();
2570 sched_usec = (5*sched_usec + 3*new_usec)/8;
2571#endif /* SIMPLE_CLOCK */
2572
2573 /*
2574 * Compute the scheduler load factors.
2575 */
2576 compute_mach_factor();
2577
2578 /*
2579 * Scan the run queues for runnable threads that need to
2580 * have their priorities recalculated.
2581 */
2582 do_thread_scan();
2583
2584 clock_deadline_for_periodic_event(sched_tick_interval, abstime,
2585 &sched_tick_deadline);
2586
2587 assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
2588 thread_set_timer_deadline(sched_tick_deadline);
2589 thread_block(sched_tick_thread_continue);
2590 /*NOTREACHED*/
2591}
2592
2593void
2594sched_tick_thread(void)
2595{
2596 thread_t self = current_thread();
2597 natural_t rate;
2598 spl_t s;
2599
2600 stack_privilege(self);
1c79356b
A
2601
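/*
 * The tick period is (1000 >> SCHED_TICK_SHIFT) milliseconds,
 * i.e. 2^SCHED_TICK_SHIFT priority recomputations per second.
 */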
2602 rate = (1000 >> SCHED_TICK_SHIFT);
2603 clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC,
2604 &sched_tick_interval);
2605 clock_get_uptime(&sched_tick_deadline);
2606
2607 thread_block(sched_tick_thread_continue);
2608 /*NOTREACHED*/
2609}
2610
2611#define MAX_STUCK_THREADS 128
2612
2613/*
2614 * do_thread_scan: scan for stuck threads. A thread is stuck if
2615 * it is runnable but its priority is so low that it has not
2616 * run for several seconds. Its priority should be higher, but
2617 * won't be until it runs and calls update_priority. The scanner
2618 * finds these threads and does the updates.
2619 *
2620 * Scanner runs in two passes. Pass one squirrels likely
2621 * thread ids away in an array (takes out references for them).
2622 * Pass two does the priority updates. This is necessary because
2623 * the run queue lock is required for the candidate scan, but
9bccf70c 2624 * cannot be held during updates.
1c79356b
A
2625 *
2626 * Array length should be enough so that restart isn't necessary,
9bccf70c 2627 * but restart logic is included.
1c79356b
A
2628 *
2629 */
2630thread_t stuck_threads[MAX_STUCK_THREADS];
2631int stuck_count = 0;
2632
2633/*
2634 * do_runq_scan is the guts of pass 1. It scans a runq for
2635 * stuck threads. A boolean is returned indicating whether
2636 * a retry is needed.
2637 */
2638boolean_t
2639do_runq_scan(
2640 run_queue_t runq)
2641{
2642 register queue_t q;
2643 register thread_t thread;
2644 register int count;
2645 spl_t s;
2646 boolean_t result = FALSE;
2647
2648 s = splsched();
2649 simple_lock(&runq->lock);
2650 if ((count = runq->count) > 0) {
2651 q = runq->queues + runq->highq;
2652 while (count > 0) {
2653 queue_iterate(q, thread, thread_t, links) {
0b4e3aa0
A
2654 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
2655 (thread->sched_mode & TH_MODE_TIMESHARE) ) {
1c79356b
A
2656 if (thread->sched_stamp != sched_tick) {
2657 /*
2658 * Stuck, save its id for later.
2659 */
2660 if (stuck_count == MAX_STUCK_THREADS) {
2661 /*
2662 * !@#$% No more room.
2663 */
2664 simple_unlock(&runq->lock);
2665 splx(s);
2666
2667 return (TRUE);
2668 }
2669
2670 /*
2671 * Inline version of thread_reference
2672 * XXX - lock ordering problem here:
2673 * thread locks should be taken before runq
2674 * locks: just try to get the thread's lock
2675 * and ignore this thread if we fail; we might
2676 * have better luck next time.
2677 */
9bccf70c 2678 if (thread_lock_try(thread)) {
1c79356b
A
2679 thread->ref_count++;
2680 thread_unlock(thread);
2681 stuck_threads[stuck_count++] = thread;
2682 }
2683 else
2684 result = TRUE;
2685 }
2686 }
2687
2688 count--;
2689 }
2690
2691 q--;
2692 }
2693 }
2694 simple_unlock(&runq->lock);
2695 splx(s);
2696
2697 return (result);
2698}
2699
2700boolean_t thread_scan_enabled = TRUE;
2701
2702void
2703do_thread_scan(void)
2704{
2705 register boolean_t restart_needed = FALSE;
2706 register thread_t thread;
2707 register processor_set_t pset = &default_pset;
2708 register processor_t processor;
2709 spl_t s;
2710
2711 if (!thread_scan_enabled)
2712 return;
2713
2714 do {
2715 restart_needed = do_runq_scan(&pset->runq);
2716 if (!restart_needed) {
2717 simple_lock(&pset->processors_lock);
2718 processor = (processor_t)queue_first(&pset->processors);
2719 while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
2720 if ((restart_needed = do_runq_scan(&processor->runq)))
2721 break;
2722
0b4e3aa0
A
2723 thread = processor->idle_thread;
2724 if (thread->sched_stamp != sched_tick) {
2725 if (stuck_count == MAX_STUCK_THREADS) {
2726 restart_needed = TRUE;
2727 break;
2728 }
2729
2730 stuck_threads[stuck_count++] = thread;
2731 }
2732
1c79356b
A
2733 processor = (processor_t)queue_next(&processor->processors);
2734 }
2735 simple_unlock(&pset->processors_lock);
2736 }
2737
2738 /*
2739 * Ok, we now have a collection of candidates -- fix them.
2740 */
2741 while (stuck_count > 0) {
2742 thread = stuck_threads[--stuck_count];
2743 stuck_threads[stuck_count] = THREAD_NULL;
2744 s = splsched();
2745 thread_lock(thread);
0b4e3aa0
A
2746 if ( (thread->sched_mode & TH_MODE_TIMESHARE) ||
2747 (thread->state & TH_IDLE) ) {
1c79356b 2748 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
0b4e3aa0 2749 thread->sched_stamp != sched_tick )
1c79356b
A
2750 update_priority(thread);
2751 }
2752 thread_unlock(thread);
2753 splx(s);
0b4e3aa0
A
2754 if (!(thread->state & TH_IDLE))
2755 thread_deallocate(thread);
1c79356b 2756 }
9bccf70c
A
2757
2758 if (restart_needed)
2759 delay(1); /* XXX */
1c79356b
A
2760
2761 } while (restart_needed);
2762}
2763
2764/*
2765 * Just in case someone doesn't use the macro
2766 */
2767#undef thread_wakeup
2768void
2769thread_wakeup(
2770 event_t x);
2771
2772void
2773thread_wakeup(
2774 event_t x)
2775{
2776 thread_wakeup_with_result(x, THREAD_AWAKENED);
2777}
2778
9bccf70c 2779
0b4e3aa0
A
2780#if DEBUG
2781
2782static boolean_t
1c79356b 2783thread_runnable(
0b4e3aa0 2784 thread_t thread)
1c79356b 2785{
0b4e3aa0 2786 return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
1c79356b
A
2787}
2788
1c79356b
A
2789void
2790dump_processor_set(
2791 processor_set_t ps)
2792{
2793 printf("processor_set: %08x\n",ps);
2794 printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
2795 ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count);
2796 printf("processors: %08x %08x, processor_count: 0x%x\n",
2797 ps->processors.next,ps->processors.prev,ps->processor_count);
2798 printf("tasks: %08x %08x, task_count: 0x%x\n",
2799 ps->tasks.next,ps->tasks.prev,ps->task_count);
2800 printf("threads: %08x %08x, thread_count: 0x%x\n",
2801 ps->threads.next,ps->threads.prev,ps->thread_count);
2802 printf("ref_count: 0x%x, active: %x\n",
2803 ps->ref_count,ps->active);
2804 printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self);
0b4e3aa0 2805 printf("set_quanta: 0x%x\n", ps->set_quanta);
1c79356b
A
2806}
2807
2808#define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])
2809
2810void
2811dump_processor(
2812 processor_t p)
2813{
2814 char *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
2815 "ASSIGN","SHUTDOWN"};
2816
2817 printf("processor: %08x\n",p);
2818 printf("processor_queue: %08x %08x\n",
2819 p->processor_queue.next,p->processor_queue.prev);
2820 printf("state: %8s, next_thread: %08x, idle_thread: %08x\n",
2821 processor_state(p->state), p->next_thread, p->idle_thread);
0b4e3aa0 2822 printf("slice_quanta: %x\n", p->slice_quanta);
1c79356b
A
2823 printf("processor_set: %08x, processor_set_next: %08x\n",
2824 p->processor_set, p->processor_set_next);
2825 printf("processors: %08x %08x\n", p->processors.next,p->processors.prev);
2826 printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num);
2827}
2828
2829void
2830dump_run_queue_struct(
2831 run_queue_t rq)
2832{
2833 char dump_buf[80];
2834 int i;
2835
2836 for( i=0; i < NRQS; ) {
2837 int j;
2838
2839 printf("%6s",(i==0)?"runq:":"");
2840 for( j=0; (j<8) && (i < NRQS); j++,i++ ) {
2841 if( rq->queues[i].next == &rq->queues[i] )
2842 printf( " --------");
2843 else
2844 printf(" %08x",rq->queues[i].next);
2845 }
2846 printf("\n");
2847 }
2848 for( i=0; i < NRQBM; ) {
2849 register unsigned int mask;
2850 char *d=dump_buf;
2851
2852 mask = ~0;
2853 mask ^= (mask>>1);
2854
2855 do {
2856 *d++ = ((rq->bitmap[i]&mask)?'r':'e');
2857 mask >>=1;
2858 } while( mask );
2859 *d = '\0';
2860 printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf);
2861 i++;
2862 }
2863 printf("highq: 0x%x, count: %u\n", rq->highq, rq->count);
2864}
2865
2866void
2867dump_run_queues(
2868 run_queue_t runq)
2869{
2870 register queue_t q1;
2871 register int i;
2872 register queue_entry_t e;
2873
2874 q1 = runq->queues;
2875 for (i = 0; i < NRQS; i++) {
2876 if (q1->next != q1) {
2877 int t_cnt;
2878
2879 printf("[%u]",i);
2880 for (t_cnt=0, e = q1->next; e != q1; e = e->next) {
2881 printf("\t0x%08x",e);
2882 if( (t_cnt = (t_cnt + 1) % 4) == 0 )
2883 printf("\n");
2884 }
2885 if( t_cnt )
2886 printf("\n");
2887 }
2888 /* else
2889 printf("[%u]\t<empty>\n",i);
2890 */
2891 q1++;
2892 }
2893}
2894
2895void
2896checkrq(
2897 run_queue_t rq,
2898 char *msg)
2899{
2900 register queue_t q1;
2901 register int i, j;
2902 register queue_entry_t e;
2903 register int highq;
2904
2905 highq = NRQS;
2906 j = 0;
2907 q1 = rq->queues;
2908 for (i = MAXPRI; i >= 0; i--) {
2909 if (q1->next == q1) {
2910 if (q1->prev != q1) {
2911 panic("checkrq: empty at %s", msg);
2912 }
2913 }
2914 else {
2915 if (highq == -1)
2916 highq = i;
2917
2918 for (e = q1->next; e != q1; e = e->next) {
2919 j++;
2920 if (e->next->prev != e)
2921 panic("checkrq-2 at %s", msg);
2922 if (e->prev->next != e)
2923 panic("checkrq-3 at %s", msg);
2924 }
2925 }
2926 q1++;
2927 }
2928 if (j != rq->count)
2929 panic("checkrq: count wrong at %s", msg);
2930 if (rq->count != 0 && highq > rq->highq)
2931 panic("checkrq: highq wrong at %s", msg);
2932}
2933
2934void
2935thread_check(
2936 register thread_t thread,
2937 register run_queue_t rq)
2938{
2939 register int whichq = thread->sched_pri;
2940 register queue_entry_t queue, entry;
2941
2942 if (whichq < MINPRI || whichq > MAXPRI)
2943 panic("thread_check: bad pri");
2944
1c79356b
A
2945 queue = &rq->queues[whichq];
2946 entry = queue_first(queue);
2947 while (!queue_end(queue, entry)) {
2948 if (entry == (queue_entry_t)thread)
2949 return;
2950
2951 entry = queue_next(entry);
2952 }
2953
2954 panic("thread_check: not found");
2955}
2956
2957#endif /* DEBUG */
2958
2959#if MACH_KDB
2960#include <ddb/db_output.h>
2961#define printf kdbprintf
2962extern int db_indent;
2963void db_sched(void);
2964
2965void
2966db_sched(void)
2967{
2968 iprintf("Scheduling Statistics:\n");
2969 db_indent += 2;
2970 iprintf("Thread invocations: csw %d same %d\n",
2971 c_thread_invoke_csw, c_thread_invoke_same);
2972#if MACH_COUNTERS
2973 iprintf("Thread block: calls %d\n",
2974 c_thread_block_calls);
2975 iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n",
2976 c_idle_thread_handoff,
2977 c_idle_thread_block, no_dispatch_count);
2978 iprintf("Sched thread blocks: %d\n", c_sched_thread_block);
2979#endif /* MACH_COUNTERS */
2980 db_indent -= 2;
2981}
2982#endif /* MACH_KDB */