/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	sched_prim.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Scheduling primitives
 *
 */

#include <debug.h>
#include <mach_kdb.h>

#include <ddb/db_output.h>

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#include <kern/pms.h>

struct run_queue	rt_runq;
#define RT_RUNQ		((processor_t)-1)
decl_simple_lock_data(static,rt_lock);

#define		DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define		MAX_UNSAFE_QUANTA		800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define		MAX_POLL_QUANTA			2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define		SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t	max_unsafe_computation;
uint32_t	sched_safe_duration;
uint64_t	max_poll_computation;

uint32_t	std_quantum;
uint32_t	min_std_quantum;

uint32_t	std_quantum_us;

uint32_t	max_rt_quantum;
uint32_t	min_rt_quantum;

uint32_t	sched_cswtime;

unsigned	sched_tick;
uint32_t	sched_tick_interval;

uint32_t	sched_pri_shift = INT8_MAX;
uint32_t	sched_fixed_shift;

uint32_t	sched_run_count, sched_share_count;
uint32_t	sched_load_average, sched_mach_factor;

/* Forwards */
static void load_shift_init(void) __attribute__((section("__TEXT, initcode")));
static void preempt_pri_init(void) __attribute__((section("__TEXT, initcode")));

static thread_t	run_queue_dequeue(
					run_queue_t		runq,
					integer_t		options);

static thread_t	choose_thread(
					processor_t		processor,
					int				priority);

static thread_t	thread_select_idle(
					thread_t		thread,
					processor_t		processor);

static thread_t	processor_idle(
					thread_t		thread,
					processor_t		processor);

static thread_t	steal_thread(
					processor_set_t	pset);

static thread_t	steal_processor_thread(
					processor_t		processor);

static void		thread_update_scan(void);

#if	DEBUG
extern int debug_task;
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif

#if	DEBUG
static
boolean_t	thread_runnable(
				thread_t		thread);

#endif	/*DEBUG*/

/*
 *	State machine
 *
 * states are combinations of:
 *  R	running
 *  W	waiting (or on wait queue)
 *  N	non-interruptible
 *  O	swapped out
 *  I	being swapped in
 *
 * init	action
 *	assert_wait thread_block    clear_wait		swapout	swapin
 *
 * R	RW, RWN	    R;   setrun	    -			-
 * RN	RWN	    RN;  setrun	    -			-
 *
 * RW		    W		    R			-
 * RWN		    WN		    RN			-
 *
 * W				    R;   setrun		WO
 * WN				    RN;  setrun		-
 *
 * RO				    -			-	R
 *
 */

int8_t		sched_load_shifts[NRQS];
int		sched_preempt_pri[NRQBM];

void
sched_init(void)
{
	/*
	 * Calculate the timeslicing quantum
	 * in us.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	sched_safe_duration = (2 * max_unsafe_quanta / default_preemption_rate) *
											(1 << SCHED_TICK_SHIFT);

	load_shift_init();
	preempt_pri_init();
	simple_lock_init(&rt_lock, 0);
	run_queue_init(&rt_runq);
	sched_tick = 0;
	ast_init();
}
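
/*
 * Worked example (comment added, not in the original source): with the
 * default preemption rate of 100/s, std_quantum_us is 10000 us, i.e. a
 * 10 ms quantum.  Assuming SCHED_TICK_SHIFT == 3 (a 125 ms scheduler
 * tick), sched_safe_duration is (2 * 800 / 100) * 8 = 128 ticks, about
 * 16 seconds of unsafe computation before the fail-safe reverts an
 * unsafe thread back to timesharing.
 */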

void
sched_timebase_init(void)
{
	uint64_t	abstime;
	uint32_t	shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							std_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = (uint32_t)abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = (uint32_t)abstime;

	/* smallest rt computation (50 us) */
	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_rt_quantum = (uint32_t)abstime;

	/* maximum rt computation (50 ms) */
	clock_interval_to_absolutetime_interval(
							50, 1000*NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	max_rt_quantum = (uint32_t)abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
													NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_tick_interval = (uint32_t)abstime;

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
		abstime >>= 1;
	sched_fixed_shift = shift;

	max_unsafe_computation = max_unsafe_quanta * std_quantum;
	max_poll_computation = max_poll_quanta * std_quantum;
}

/*
 *	Set up values for timeshare
 *	loading factors.
 */
static void
load_shift_init(void)
{
	int8_t		k, *p = sched_load_shifts;
	uint32_t	i, j;

	*p++ = INT8_MIN; *p++ = 0;

	for (i = j = 2, k = 1; i < NRQS; ++k) {
		for (j <<= 1; i < j; ++i)
			*p++ = k;
	}
}

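/*
 * Illustration (comment added, not in the original source): the loop
 * above fills sched_load_shifts[i] with floor(log2(i)) for i >= 2, so
 * the table begins INT8_MIN, 0, 1, 1, 2, 2, 2, 2, 3, ...  A run queue
 * load of i can then be folded into priority aging with a cheap shift
 * instead of a divide.
 */
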
static void
preempt_pri_init(void)
{
	int		i, *p = sched_preempt_pri;

	for (i = BASEPRI_FOREGROUND + 1; i < MINPRI_KERNEL; ++i)
		setbit(i, p);

	for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
		setbit(i, p);
}

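/*
 * Note (added for clarity): sched_preempt_pri marks the priority bands
 * whose threads preempt immediately on becoming runnable -- the band
 * above BASEPRI_FOREGROUND up to (but not including) MINPRI_KERNEL,
 * plus BASEPRI_PREEMPT through MAXPRI.  With the customary xnu values
 * (BASEPRI_FOREGROUND 47, MINPRI_KERNEL 80, BASEPRI_PREEMPT 92,
 * MAXPRI 127 -- an assumption for this version) that covers
 * priorities 48-79 and 92-127.
 */
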
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
	void			*p0,
	__unused void	*p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (--thread->wait_timer_active == 0) {
		if (thread->wait_timer_is_set) {
			thread->wait_timer_is_set = FALSE;
			clear_wait_internal(thread, THREAD_TIMED_OUT);
		}
	}
	thread_unlock(thread);
	splx(s);
}

#ifndef	__LP64__

/*
 *	thread_set_timer:
 *
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	uint32_t		interval,
	uint32_t		scale_factor)
{
	thread_t		thread = current_thread();
	uint64_t		deadline;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		if (!timer_call_enter(&thread->wait_timer, deadline))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	splx(s);
}

void
thread_set_timer_deadline(
	uint64_t		deadline)
{
	thread_t		thread = current_thread();
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		if (!timer_call_enter(&thread->wait_timer, deadline))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
	splx(s);
}

void
thread_cancel_timer(void)
{
	thread_t		thread = current_thread();
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
	thread_unlock(thread);
	splx(s);
}

#endif	/* __LP64__ */

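/*
 * Usage sketch (illustrative only, not part of the original file): a
 * legacy caller arms the wait timer between asserting the wait and
 * blocking, then sees THREAD_TIMED_OUT if the timer fires first.
 *
 *	assert_wait(event, THREAD_UNINT);
 *	thread_set_timer(10, 1000*NSEC_PER_USEC);	// 10 ms timeout
 *	wresult = thread_block(THREAD_CONTINUE_NULL);
 *	if (wresult == THREAD_TIMED_OUT)
 *		...		// timer expired before any thread_wakeup()
 */
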
/*
 *	thread_unblock:
 *
 *	Unblock thread on wake up.
 *
 *	Returns TRUE if the thread is still running.
 *
 *	Thread must be locked.
 */
boolean_t
thread_unblock(
	thread_t		thread,
	wait_result_t	wresult)
{
	boolean_t		result = FALSE;

	/*
	 *	Set wait_result.
	 */
	thread->wait_result = wresult;

	/*
	 *	Cancel pending wait timer.
	 */
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	/*
	 *	Update scheduling state: not waiting,
	 *	set running.
	 */
	thread->state &= ~(TH_WAIT|TH_UNINT);

	if (!(thread->state & TH_RUN)) {
		thread->state |= TH_RUN;

		(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

		/*
		 *	Update run counts.
		 */
		sched_run_incr();
		if (thread->sched_mode & TH_MODE_TIMESHARE)
			sched_share_incr();
	}
	else {
		/*
		 *	Signal if idling on another processor.
		 */
		if (thread->state & TH_IDLE) {
			processor_t		processor = thread->last_processor;

			if (processor != current_processor())
				machine_signal_idle(processor);
		}

		result = TRUE;
	}

	/*
	 *	Calculate deadline for real-time threads.
	 */
	if (thread->sched_mode & TH_MODE_REALTIME) {
		thread->realtime.deadline = mach_absolute_time();
		thread->realtime.deadline += thread->realtime.constraint;
	}

	/*
	 *	Clear old quantum, fail-safe computation, etc.
	 */
	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
			(uintptr_t)thread_tid(thread), thread->sched_pri, 0, 0, 0);

	DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

	return (result);
}

/*
 *	Routine:	thread_go
 *	Purpose:
 *		Unblock and dispatch thread.
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *	Returns:
 *		KERN_SUCCESS - Thread was set running
 *		KERN_NOT_WAITING - Thread was not waiting
 */
kern_return_t
thread_go(
	thread_t		thread,
	wait_result_t	wresult)
{
	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT64);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
		if (!thread_unblock(thread, wresult))
			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

		return (KERN_SUCCESS);
	}

	return (KERN_NOT_WAITING);
}

/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
	thread_t			thread,
	wait_interrupt_t	interruptible)
{
	boolean_t		at_safe_point;

	assert(thread == current_thread());

	/*
	 *	The thread may have certain types of interrupts/aborts masked
	 *	off.  Even if the wait location says these types of interrupts
	 *	are OK, we have to honor mask settings (outer-scoped code may
	 *	not be able to handle aborts at the moment).
	 */
	if (interruptible > (thread->options & TH_OPT_INTMASK))
		interruptible = thread->options & TH_OPT_INTMASK;

	at_safe_point = (interruptible == THREAD_ABORTSAFE);

	if (	interruptible == THREAD_UNINT			||
			!(thread->sched_mode & TH_MODE_ABORT)	||
			(!at_safe_point &&
				(thread->sched_mode & TH_MODE_ABORTSAFELY))) {

		DTRACE_SCHED(sleep);

		thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
		thread->at_safe_point = at_safe_point;
		return (thread->wait_result = THREAD_WAITING);
	}
	else
	if (thread->sched_mode & TH_MODE_ABORTSAFELY)
		thread->sched_mode &= ~TH_MODE_ISABORTED;

	return (thread->wait_result = THREAD_INTERRUPTED);
}

/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
	wait_interrupt_t new_level)
{
	thread_t thread = current_thread();
	wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

	thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

	return result;
}

/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t thread;

#if	DEBUG
	if(debug_mode) return TRUE;		/* Always succeed in debug mode */
#endif

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}

/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
	event_t				event,
	wait_interrupt_t	interruptible)
{
	register wait_queue_t	wq;
	register int			index;

	assert(event != NO_EVENT);

	index = wait_hash(event);
	wq = &wait_queues[index];
	return wait_queue_assert_wait(wq, event, interruptible, 0);
}

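/*
 * Protocol sketch (added illustration, not from the original source):
 * the caller asserts the wait and then blocks; a waker posts the same
 * event to rouse it.
 *
 *	wait_result_t wres = assert_wait(&obj->flag, THREAD_UNINT);
 *	if (wres == THREAD_WAITING)
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *
 *	// elsewhere, once obj->flag changes:
 *	thread_wakeup(&obj->flag);
 */
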
wait_result_t
assert_wait_timeout(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint32_t			interval,
	uint32_t			scale_factor)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	uint64_t			deadline;
	spl_t				s;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	s = splsched();
	wait_queue_lock(wqueue);
	thread_lock(thread);

	clock_interval_to_deadline(interval, scale_factor, &deadline);
	wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t, event),
													interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);
	splx(s);

	return (wresult);
}

wait_result_t
assert_wait_deadline(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint64_t			deadline)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	spl_t				s;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	s = splsched();
	wait_queue_lock(wqueue);
	thread_lock(thread);

	wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t,event),
													interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);
	splx(s);

	return (wresult);
}

/*
 *	thread_sleep_fast_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified simple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components that use a
 *	faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
	event_t				event,
	simple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		simple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		simple_lock(lock);
	}
	return res;
}

/*
 *	thread_sleep_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified usimple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components where
 *	simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
	event_t				event,
	usimple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		usimple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		usimple_lock(lock);
	}
	return res;
}

/*
 *	thread_sleep_lock_write:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified (write) lock is unlocked before releasing
 *	the cpu.  The (write) lock will be re-acquired before returning.
 */
wait_result_t
thread_sleep_lock_write(
	event_t				event,
	lock_t				*lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lock_write_done(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		lock_write(lock);
	}
	return res;
}

/*
 *	thread_stop:
 *
 *	Force a preemption point for a thread and wait
 *	for it to stop running.  Arbitrates access among
 *	multiple stop requests. (released by unstop)
 *
 *	The thread must enter a wait state and stop via a
 *	separate means.
 *
 *	Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	while (thread->state & TH_SUSP) {
		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED)
			return (FALSE);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread->state |= TH_SUSP;

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (processor != PROCESSOR_NULL && processor->active_thread == thread)
			cause_ast_check(processor);

		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED) {
			thread_unstop(thread);
			return (FALSE);
		}

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);

	return (TRUE);
}

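/*
 * Note (added for clarity): stop/wait arbitration uses a simple
 * handshake -- a waiter sets thread->wake_active and blocks on the
 * &thread->wake_active event; whoever later clears the awaited
 * condition (see thread_unstop() and thread_dispatch()) tests
 * wake_active and issues thread_wakeup(&thread->wake_active).
 */
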
/*
 *	thread_unstop:
 *
 *	Release a previous stop request and set
 *	the thread running if appropriate.
 *
 *	Use only after a successful stop operation.
 */
void
thread_unstop(
	thread_t		thread)
{
	spl_t		s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
		thread->state &= ~TH_SUSP;
		thread_unblock(thread, THREAD_AWAKENED);

		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}
	else
	if (thread->state & TH_SUSP) {
		thread->state &= ~TH_SUSP;

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);

			thread_wakeup(&thread->wake_active);
			wake_unlock(thread);
			splx(s);

			return;
		}
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}

/*
 *	thread_wait:
 *
 *	Wait for a thread to stop running. (non-interruptible)
 *
 */
void
thread_wait(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (processor != PROCESSOR_NULL && processor->active_thread == thread)
			cause_ast_check(processor);

		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			thread_block(THREAD_CONTINUE_NULL);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}

/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		At splsched
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
	thread_t		thread,
	wait_result_t	wresult)
{
	wait_queue_t	wq = thread->wait_queue;
	int				i = LockTimeOut;

	do {
		if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
			return (KERN_FAILURE);

		if (wq != WAIT_QUEUE_NULL) {
			if (wait_queue_lock_try(wq)) {
				wait_queue_pull_thread_locked(wq, thread, TRUE);
				/* wait queue unlocked, thread still locked */
			}
			else {
				thread_unlock(thread);
				delay(1);

				thread_lock(thread);
				if (wq != thread->wait_queue)
					return (KERN_NOT_WAITING);

				continue;
			}
		}

		return (thread_go(thread, wresult));
	} while (--i > 0);

	panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
		  thread, wq, cpu_number());

	return (KERN_FAILURE);
}

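/*
 * Note (added for clarity): the usual lock order is wait queue, then
 * thread, but here the thread lock is already held.  Taking the wait
 * queue lock unconditionally would invert that order, so the loop
 * above uses wait_queue_lock_try() and, on failure, drops the thread
 * lock, backs off with delay(1), and retries -- giving up with a
 * panic after LockTimeOut iterations.
 */
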
/*
 *	clear_wait:
 *
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
	thread_t		thread,
	wait_result_t	result)
{
	kern_return_t ret;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	ret = clear_wait_internal(thread, result);
	thread_unlock(thread);
	splx(s);
	return ret;
}

/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 *
 */
kern_return_t
thread_wakeup_prim(
	event_t			event,
	boolean_t		one_thread,
	wait_result_t	result)
{
	register wait_queue_t	wq;
	register int			index;

	index = wait_hash(event);
	wq = &wait_queues[index];
	if (one_thread)
		return (wait_queue_wakeup_one(wq, event, result));
	else
		return (wait_queue_wakeup_all(wq, event, result));
}

/*
 *	thread_bind:
 *
 *	Force the current thread to execute on the specified processor.
 *
 *	Returns the previous binding.  PROCESSOR_NULL means
 *	not bound.
 *
 *	XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
	processor_t		processor)
{
	thread_t		self = current_thread();
	processor_t		prev;
	spl_t			s;

	s = splsched();
	thread_lock(self);

	prev = self->bound_processor;
	self->bound_processor = processor;

	thread_unlock(self);
	splx(s);

	return (prev);
}

/*
 *	thread_select:
 *
 *	Select a new thread for the current processor to execute.
 *
 *	May select the current thread, which must be locked.
 */
static thread_t
thread_select(
	thread_t			thread,
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			new_thread = THREAD_NULL;
	boolean_t			inactive_state;

	do {
		/*
		 *	Update the priority.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		processor->current_pri = thread->sched_pri;

		pset_lock(pset);

		inactive_state = processor->state != PROCESSOR_SHUTDOWN && machine_cpu_is_inactive(processor->cpu_id);

		simple_lock(&rt_lock);

		/*
		 *	Test to see if the current thread should continue
		 *	to run on this processor.  Must be runnable, and not
		 *	bound to a different processor, nor be in the wrong
		 *	processor set.
		 */
		if (	thread->state == TH_RUN							&&
				(thread->sched_pri >= BASEPRI_RTQUEUES		||
				 processor->processor_meta == PROCESSOR_META_NULL ||
				 processor->processor_meta->primary == processor)	&&
				(thread->bound_processor == PROCESSOR_NULL	||
				 thread->bound_processor == processor)			&&
				(thread->affinity_set == AFFINITY_SET_NULL	||
				 thread->affinity_set->aset_pset == pset)			) {
			if (	thread->sched_pri >= BASEPRI_RTQUEUES	&&
						first_timeslice(processor)				) {
				if (rt_runq.highq >= BASEPRI_RTQUEUES) {
					register run_queue_t	runq = &rt_runq;
					register queue_t		q;

					q = runq->queues + runq->highq;
					if (((thread_t)q->next)->realtime.deadline <
													processor->deadline) {
						thread = (thread_t)q->next;
						((queue_entry_t)thread)->next->prev = q;
						q->next = ((queue_entry_t)thread)->next;
						thread->runq = PROCESSOR_NULL;
						runq->count--; runq->urgency--;
						assert(runq->urgency >= 0);
						if (queue_empty(q)) {
							if (runq->highq != IDLEPRI)
								clrbit(MAXPRI - runq->highq, runq->bitmap);
							runq->highq = MAXPRI - ffsbit(runq->bitmap);
						}
					}
				}

				simple_unlock(&rt_lock);

				processor->deadline = thread->realtime.deadline;

				pset_unlock(pset);

				return (thread);
			}

			if (!inactive_state && rt_runq.highq < thread->sched_pri &&
					(new_thread = choose_thread(processor, thread->sched_pri)) == THREAD_NULL) {

				simple_unlock(&rt_lock);

				/* I am the highest priority runnable (non-idle) thread */

				pset_pri_hint(pset, processor, processor->current_pri);

				pset_count_hint(pset, processor, processor->runq.count);

				processor->deadline = UINT64_MAX;

				pset_unlock(pset);

				return (thread);
			}
		}

		if (new_thread != THREAD_NULL ||
				(processor->runq.highq >= rt_runq.highq &&
					 (new_thread = choose_thread(processor, MINPRI)) != THREAD_NULL)) {
			simple_unlock(&rt_lock);

			if (!inactive_state) {
				pset_pri_hint(pset, processor, new_thread->sched_pri);

				pset_count_hint(pset, processor, processor->runq.count);
			}

			processor->deadline = UINT64_MAX;
			pset_unlock(pset);

			return (new_thread);
		}

		if (rt_runq.count > 0) {
			thread = run_queue_dequeue(&rt_runq, SCHED_HEADQ);
			simple_unlock(&rt_lock);

			processor->deadline = thread->realtime.deadline;
			pset_unlock(pset);

			return (thread);
		}

		simple_unlock(&rt_lock);

		processor->deadline = UINT64_MAX;

		/*
		 *	Set processor inactive based on
		 *	indication from the platform code.
		 */
		if (inactive_state) {
			if (processor->state == PROCESSOR_RUNNING)
				remqueue(&pset->active_queue, (queue_entry_t)processor);
			else
			if (processor->state == PROCESSOR_IDLE)
				remqueue(&pset->idle_queue, (queue_entry_t)processor);

			processor->state = PROCESSOR_INACTIVE;

			pset_unlock(pset);

			return (processor->idle_thread);
		}

		/*
		 *	No runnable threads, attempt to steal
		 *	from other processors.
		 */
		new_thread = steal_thread(pset);
		if (new_thread != THREAD_NULL)
			return (new_thread);

		/*
		 *	If other threads have appeared, shortcut
		 *	around again.
		 */
		if (processor->runq.count > 0 || rt_runq.count > 0)
			continue;

		pset_lock(pset);

		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.
		 */
		if (processor->state == PROCESSOR_RUNNING) {
			remqueue(&pset->active_queue, (queue_entry_t)processor);
			processor->state = PROCESSOR_IDLE;

			if (processor->processor_meta == PROCESSOR_META_NULL || processor->processor_meta->primary == processor) {
				enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
				pset->low_pri = pset->low_count = processor;
			}
			else {
				enqueue_head(&processor->processor_meta->idle_queue, (queue_entry_t)processor);

				if (thread->sched_pri < BASEPRI_RTQUEUES) {
					pset_unlock(pset);

					return (processor->idle_thread);
				}
			}
		}

		pset_unlock(pset);

		/*
		 *	Choose idle thread if fast idle is not possible.
		 */
		if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active)
			return (processor->idle_thread);

		/*
		 *	Perform idling activities directly without a
		 *	context switch.  Return dispatched thread,
		 *	else check again for a runnable thread.
		 */
		new_thread = thread_select_idle(thread, processor);

	} while (new_thread == THREAD_NULL);

	return (new_thread);
}

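/*
 * Selection order, summarized (comment added for clarity): keep the
 * current thread if it remains the highest-priority runnable thread
 * here; otherwise pull from the processor run queue or the global
 * realtime queue, whichever is higher; failing that, try to steal
 * from other processors; and only then go idle, preferring the
 * in-context idle loop of thread_select_idle() below when the current
 * thread can simply wait.
 */
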
/*
 *	thread_select_idle:
 *
 *	Idle the processor using the current thread context.
 *
 *	Called with thread locked, then dropped and relocked.
 */
static thread_t
thread_select_idle(
	thread_t		thread,
	processor_t		processor)
{
	thread_t		new_thread;

	if (thread->sched_mode & TH_MODE_TIMESHARE)
		sched_share_decr();
	sched_run_decr();

	thread->state |= TH_IDLE;
	processor->current_pri = IDLEPRI;

	thread_unlock(thread);

	/*
	 *	Switch execution timing to processor idle thread.
	 */
	processor->last_dispatch = mach_absolute_time();
	thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;

	/*
	 *	Cancel the quantum timer while idling.
	 */
	timer_call_cancel(&processor->quantum_timer);
	processor->timeslice = 0;

	(*thread->sched_call)(SCHED_CALL_BLOCK, thread);

	/*
	 *	Enable interrupts and perform idling activities.  No
	 *	preemption due to TH_IDLE being set.
	 */
	spllo(); new_thread = processor_idle(thread, processor);

	/*
	 *	Return at splsched.
	 */
	(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

	thread_lock(thread);

	/*
	 *	If awakened, switch to thread timer and start a new quantum.
	 *	Otherwise skip; we will context switch to another thread or return here.
	 */
	if (!(thread->state & TH_WAIT)) {
		processor->last_dispatch = mach_absolute_time();
		thread_timer_event(processor->last_dispatch, &thread->system_timer);
		PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

		thread_quantum_init(thread);

		processor->quantum_end = processor->last_dispatch + thread->current_quantum;
		timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end);
		processor->timeslice = 1;

		thread->computation_epoch = processor->last_dispatch;
	}

	thread->state &= ~TH_IDLE;

	sched_run_incr();
	if (thread->sched_mode & TH_MODE_TIMESHARE)
		sched_share_incr();

	return (new_thread);
}

/*
 *	choose_thread:
 *
 *	Locate a thread to execute from the processor run queue
 *	and return it.  Only choose a thread with greater or equal
 *	priority.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
static thread_t
choose_thread(
	processor_t		processor,
	int				priority)
{
	run_queue_t		rq = &processor->runq;
	queue_t			queue = rq->queues + rq->highq;
	int				pri = rq->highq, count = rq->count;
	thread_t		thread;

	while (count > 0 && pri >= priority) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL ||
							thread->bound_processor == processor) {
				remqueue(queue, (queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				rq->count--;
				if (testbit(pri, sched_preempt_pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}

/*
 *	Perform a context switch and start executing the new thread.
 *
 *	Returns FALSE on failure, and the thread is re-dispatched.
 *
 *	Called at splsched.
 */

#define funnel_release_check(thread, debug)					\
MACRO_BEGIN													\
	if ((thread)->funnel_state & TH_FN_OWNED) {				\
		(thread)->funnel_state = TH_FN_REFUNNEL;			\
		KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,				\
			(thread)->funnel_lock, (debug), 0, 0, 0);		\
		funnel_unlock((thread)->funnel_lock);				\
	}														\
MACRO_END

#define funnel_refunnel_check(thread, debug)				\
MACRO_BEGIN													\
	if ((thread)->funnel_state & TH_FN_REFUNNEL) {			\
		kern_return_t	result = (thread)->wait_result;		\
															\
		(thread)->funnel_state = 0;							\
		KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,				\
			(thread)->funnel_lock, (debug), 0, 0, 0);		\
		funnel_lock((thread)->funnel_lock);					\
		KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,				\
			(thread)->funnel_lock, (debug), 0, 0, 0);		\
		(thread)->funnel_state = TH_FN_OWNED;				\
		(thread)->wait_result = result;						\
	}														\
MACRO_END

static boolean_t
thread_invoke(
	register thread_t	self,
	register thread_t	thread,
	ast_t				reason)
{
	thread_continue_t	continuation = self->continuation;
	void				*parameter = self->parameter;
	processor_t			processor;

	if (get_preemption_level() != 0) {
		int pl = get_preemption_level();
		panic("thread_invoke: preemption_level %d, possible cause: %s",
		    pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
			"blocking while holding a spinlock, or within interrupt context"));
	}

	assert(self == current_thread());

	/*
	 * Mark thread interruptible.
	 */
	thread_lock(thread);
	thread->state &= ~TH_UNINT;

#if DEBUG
	assert(thread_runnable(thread));
#endif

	/*
	 * Allow time constraint threads to hang onto
	 * a stack.
	 */
	if ((self->sched_mode & TH_MODE_REALTIME) && !self->reserved_stack)
		self->reserved_stack = self->kernel_stack;

	if (continuation != NULL) {
		if (!thread->kernel_stack) {
			/*
			 * If we are using a privileged stack,
			 * check to see whether we can exchange it with
			 * that of the other thread.
			 */
			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
				goto need_stack;

			/*
			 * Context switch by performing a stack handoff.
			 */
			continuation = thread->continuation;
			parameter = thread->parameter;

			processor = current_processor();
			processor->active_thread = thread;
			processor->current_pri = thread->sched_pri;
			if (thread->last_processor != processor && thread->last_processor != NULL) {
				if (thread->last_processor->processor_set != processor->processor_set)
					thread->ps_switch++;
				thread->p_switch++;
			}
			thread->last_processor = processor;
			thread->c_switch++;
			ast_context(thread);
			thread_unlock(thread);

			self->reason = reason;

			processor->last_dispatch = mach_absolute_time();
			thread_timer_event(processor->last_dispatch, &thread->system_timer);
			PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
					self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

			TLOG(1, "thread_invoke: calling machine_stack_handoff\n");
			machine_stack_handoff(self, thread);

			DTRACE_SCHED(on__cpu);

			thread_dispatch(self, thread);

			thread->continuation = thread->parameter = NULL;

			counter(c_thread_invoke_hits++);

			funnel_refunnel_check(thread, 2);
			(void) spllo();

			assert(continuation);
			call_continuation(continuation, parameter, thread->wait_result);
			/*NOTREACHED*/
		}
		else if (thread == self) {
			/* same thread but with continuation */
			ast_context(self);
			counter(++c_thread_invoke_same);
			thread_unlock(self);

			self->continuation = self->parameter = NULL;

			funnel_refunnel_check(self, 3);
			(void) spllo();

			call_continuation(continuation, parameter, self->wait_result);
			/*NOTREACHED*/
		}
	}
	else {
		/*
		 * Check that the other thread has a stack
		 */
		if (!thread->kernel_stack) {
need_stack:
			if (!stack_alloc_try(thread)) {
				counter(c_thread_invoke_misses++);
				thread_unlock(thread);
				thread_stack_enqueue(thread);
				return (FALSE);
			}
		}
		else if (thread == self) {
			ast_context(self);
			counter(++c_thread_invoke_same);
			thread_unlock(self);
			return (TRUE);
		}
	}

	/*
	 * Context switch by full context save.
	 */
	processor = current_processor();
	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	if (thread->last_processor != processor && thread->last_processor != NULL) {
		if (thread->last_processor->processor_set != processor->processor_set)
			thread->ps_switch++;
		thread->p_switch++;
	}
	thread->last_processor = processor;
	thread->c_switch++;
	ast_context(thread);
	thread_unlock(thread);

	counter(c_thread_invoke_csw++);

	assert(self->runq == PROCESSOR_NULL);
	self->reason = reason;

	processor->last_dispatch = mach_absolute_time();
	thread_timer_event(processor->last_dispatch, &thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
			self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

	DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

	/*
	 * This is where we actually switch register context,
	 * and address space if required.  We will next run
	 * as a result of a subsequent context switch.
	 */
	thread = machine_switch_context(self, continuation, thread);
	TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);

	DTRACE_SCHED(on__cpu);

	/*
	 * We have been resumed and are set to run.
	 */
	thread_dispatch(thread, self);

	if (continuation) {
		self->continuation = self->parameter = NULL;

		funnel_refunnel_check(self, 3);
		(void) spllo();

		call_continuation(continuation, parameter, self->wait_result);
		/*NOTREACHED*/
	}

	return (TRUE);
}

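/*
 * Note (added for clarity): thread_invoke() switches contexts one of
 * two ways.  When the caller supplied a continuation and the target
 * thread has no kernel stack, the caller's stack is handed off
 * directly (machine_stack_handoff()) and the target's continuation is
 * called -- no register state is saved.  Otherwise a full switch is
 * performed through machine_switch_context(), saving this thread's
 * register state so it can later resume exactly here.
 */
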
/*
 *	thread_dispatch:
 *
 *	Handle threads at context switch.  Re-dispatch other thread
 *	if still running, otherwise update run state and perform
 *	special actions.  Update quantum for other thread and begin
 *	the quantum for ourselves.
 *
 *	Called at splsched.
 */
void
thread_dispatch(
	thread_t		thread,
	thread_t		self)
{
	processor_t		processor = self->last_processor;

	if (thread != THREAD_NULL) {
		/*
		 *	If blocked at a continuation, discard
		 *	the stack.
		 */
		if (thread->continuation != NULL && thread->kernel_stack != 0)
			stack_free(thread);

		if (!(thread->state & TH_IDLE)) {
			wake_lock(thread);
			thread_lock(thread);

			/*
			 *	Compute remainder of current quantum.
			 */
			if (	first_timeslice(processor)							&&
					processor->quantum_end > processor->last_dispatch		)
				thread->current_quantum = (uint32_t)(processor->quantum_end - processor->last_dispatch);
			else
				thread->current_quantum = 0;

			if (thread->sched_mode & TH_MODE_REALTIME) {
				/*
				 *	Cancel the deadline if the thread has
				 *	consumed the entire quantum.
				 */
				if (thread->current_quantum == 0) {
					thread->realtime.deadline = UINT64_MAX;
					thread->reason |= AST_QUANTUM;
				}
			}
			else {
				/*
				 *	For non-realtime threads treat a tiny
				 *	remaining quantum as an expired quantum
				 *	but include what's left next time.
				 */
				if (thread->current_quantum < min_std_quantum) {
					thread->reason |= AST_QUANTUM;
					thread->current_quantum += std_quantum;
				}
			}

			/*
			 *	If we are doing a direct handoff then
			 *	take the remainder of the quantum.
			 */
			if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
				self->current_quantum = thread->current_quantum;
				thread->reason |= AST_QUANTUM;
				thread->current_quantum = 0;
			}

			thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);

			if (!(thread->state & TH_WAIT)) {
				/*
				 *	Still running.
				 */
				if (thread->reason & AST_QUANTUM)
					thread_setrun(thread, SCHED_TAILQ);
				else
				if (thread->reason & AST_PREEMPT)
					thread_setrun(thread, SCHED_HEADQ);
				else
					thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

				thread->reason = AST_NONE;

				thread_unlock(thread);
				wake_unlock(thread);
			}
			else {
				/*
				 *	Waiting.
				 */
				thread->state &= ~TH_RUN;

				if (thread->sched_mode & TH_MODE_TIMESHARE)
					sched_share_decr();
				sched_run_decr();

				if (thread->wake_active) {
					thread->wake_active = FALSE;
					thread_unlock(thread);

					thread_wakeup(&thread->wake_active);
				}
				else
					thread_unlock(thread);

				wake_unlock(thread);

				(*thread->sched_call)(SCHED_CALL_BLOCK, thread);

				if (thread->state & TH_TERMINATE)
					thread_terminate_enqueue(thread);
			}
		}
	}

	if (!(self->state & TH_IDLE)) {
		/*
		 *	Get a new quantum if none remaining.
		 */
		if (self->current_quantum == 0)
			thread_quantum_init(self);

		/*
		 *	Set up quantum timer and timeslice.
		 */
		processor->quantum_end = (processor->last_dispatch + self->current_quantum);
		timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end);

		processor->timeslice = 1;

		self->computation_epoch = processor->last_dispatch;
	}
	else {
		timer_call_cancel(&processor->quantum_timer);
		processor->timeslice = 0;
	}
}

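/*
 * Worked example (not in the original source): if a thread blocks 3 ms
 * into a 10 ms quantum, current_quantum is set to the remaining 7 ms
 * and is used as-is on its next dispatch.  If instead only ~100 us
 * remained (less than min_std_quantum, 250 us), the quantum is treated
 * as expired (AST_QUANTUM) and the leftover is credited to a fresh
 * standard quantum for next time.
 */
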
#include <libkern/OSDebug.h>

uint32_t	kdebug_thread_block = 0;

/*
 *	thread_block_reason:
 *
 *	Forces a reschedule, blocking the caller if a wait
 *	has been asserted.
 *
 *	If a continuation is specified, then thread_invoke will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)

wait_result_t
thread_block_reason(
	thread_continue_t	continuation,
	void				*parameter,
	ast_t				reason)
{
	register thread_t		self = current_thread();
	register processor_t	processor;
	register thread_t		new_thread;
	spl_t					s;

	counter(++c_thread_block_calls);

	s = splsched();

	if (!(reason & AST_PREEMPT))
		funnel_release_check(self, 2);

	processor = current_processor();

	/* If we're explicitly yielding, force a subsequent quantum */
	if (reason & AST_YIELD)
		processor->timeslice = 0;

	/* We're handling all scheduling AST's */
	ast_off(AST_SCHEDULING);

	self->continuation = continuation;
	self->parameter = parameter;

	if (kdebug_thread_block && kdebug_enable && self->state != TH_RUN) {
		uint32_t	bt[8];

		OSBacktrace((void **)&bt[0], 8);

		KERNEL_DEBUG_CONSTANT(0x140004c | DBG_FUNC_START, bt[0], bt[1], bt[2], bt[3], 0);
		KERNEL_DEBUG_CONSTANT(0x140004c | DBG_FUNC_END, bt[4], bt[5], bt[6], bt[7], 0);
	}

	do {
		thread_lock(self);
		new_thread = thread_select(self, processor);
		thread_unlock(self);
	} while (!thread_invoke(self, new_thread, reason));

	funnel_refunnel_check(self, 5);
	splx(s);

	return (self->wait_result);
}

/*
 *	thread_block:
 *
 *	Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
	thread_continue_t	continuation)
{
	return thread_block_reason(continuation, NULL, AST_NONE);
}

wait_result_t
thread_block_parameter(
	thread_continue_t	continuation,
	void				*parameter)
{
	return thread_block_reason(continuation, parameter, AST_NONE);
}

/*
 *	thread_run:
 *
 *	Switch directly from the current thread to the
 *	new thread, handing off our quantum if appropriate.
 *
 *	New thread must be runnable, and not on a run queue.
 *
 *	Called at splsched.
 */
int
thread_run(
	thread_t			self,
	thread_continue_t	continuation,
	void				*parameter,
	thread_t			new_thread)
{
	ast_t		handoff = AST_HANDOFF;

	funnel_release_check(self, 3);

	self->continuation = continuation;
	self->parameter = parameter;

	while (!thread_invoke(self, new_thread, handoff)) {
		processor_t		processor = current_processor();

		thread_lock(self);
		new_thread = thread_select(self, processor);
		thread_unlock(self);
		handoff = AST_NONE;
	}

	funnel_refunnel_check(self, 6);

	return (self->wait_result);
}
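
/*
 *	Note: only the first thread_invoke() attempt carries AST_HANDOFF;
 *	if the handoff fails the loop degenerates to an ordinary
 *	thread_select() reschedule.  A minimal sketch of a caller
 *	(example_pop_waiter() is hypothetical):
 */
#if 0	/* illustrative only */
static void
example_signal_with_handoff(void)
{
	thread_t	new_thread = example_pop_waiter();	/* hypothetical */

	if (new_thread != THREAD_NULL) {
		/* at splsched; new_thread runnable and not on a run queue */
		thread_run(current_thread(), THREAD_CONTINUE_NULL, NULL, new_thread);
	}
}
#endif
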
/*
 *	thread_continue:
 *
 *	Called at splsched when a thread first receives
 *	a new stack after a continuation.
 */
void
thread_continue(
	register thread_t	thread)
{
	register thread_t			self = current_thread();
	register thread_continue_t	continuation;
	register void				*parameter;

	DTRACE_SCHED(on__cpu);

	continuation = self->continuation;
	parameter = self->parameter;

	thread_dispatch(thread, self);

	self->continuation = self->parameter = NULL;

	funnel_refunnel_check(self, 4);

	if (thread != THREAD_NULL)
		(void)spllo();

	TLOG(1, "thread_continue: calling call_continuation\n");
	call_continuation(continuation, parameter, self->wait_result);
	/*NOTREACHED*/
}
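
/*
 *	A minimal continuation sketch (the example_ names are
 *	hypothetical): a caller that blocks with a continuation never
 *	returns through thread_block_parameter(); it resumes in the
 *	continuation on a fresh kernel stack, so no locals survive.
 */
#if 0	/* illustrative only */
static void
example_continue(void *parameter, wait_result_t wresult)
{
	/* resumes here on a new kernel stack once the event is posted */
}

static void
example_sleep(event_t event, void *parameter)
{
	assert_wait(event, THREAD_UNINT);
	thread_block_parameter(example_continue, parameter);
	/*NOTREACHED*/
}
#endif
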
/*
 *	run_queue_init:
 *
 *	Initialize a run queue before first use.
 */
void
run_queue_init(
	run_queue_t		rq)
{
	int				i;

	rq->highq = IDLEPRI;
	for (i = 0; i < NRQBM; i++)
		rq->bitmap[i] = 0;
	setbit(MAXPRI - IDLEPRI, rq->bitmap);
	rq->urgency = rq->count = 0;
	for (i = 0; i < NRQS; i++)
		queue_init(&rq->queues[i]);
}
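
/*
 *	Note on the bitmap encoding: an occupied priority pri is recorded
 *	at bit (MAXPRI - pri), so the highest runnable priority maps to
 *	the first set bit and may be recomputed as MAXPRI - ffsbit().
 *	The IDLEPRI bit is set permanently above so the search always
 *	terminates.
 */
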
/*
 *	run_queue_dequeue:
 *
 *	Perform a dequeue operation on a run queue,
 *	and return the resulting thread.
 *
 *	The run queue must be locked (see run_queue_remove()
 *	for more info), and not empty.
 */
static thread_t
run_queue_dequeue(
	run_queue_t		rq,
	integer_t		options)
{
	thread_t		thread;
	queue_t			queue = rq->queues + rq->highq;

	if (options & SCHED_HEADQ) {
		thread = (thread_t)queue->next;
		((queue_entry_t)thread)->next->prev = queue;
		queue->next = ((queue_entry_t)thread)->next;
	}
	else {
		thread = (thread_t)queue->prev;
		((queue_entry_t)thread)->prev->next = queue;
		queue->prev = ((queue_entry_t)thread)->prev;
	}

	thread->runq = PROCESSOR_NULL;
	rq->count--;
	if (testbit(rq->highq, sched_preempt_pri)) {
		rq->urgency--; assert(rq->urgency >= 0);
	}
	if (queue_empty(queue)) {
		if (rq->highq != IDLEPRI)
			clrbit(MAXPRI - rq->highq, rq->bitmap);
		rq->highq = MAXPRI - ffsbit(rq->bitmap);
	}

	return (thread);
}
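
/*
 *	Note: the open-coded unlinking above is equivalent to
 *	dequeue_head()/dequeue_tail() on queues[rq->highq]; SCHED_HEADQ
 *	removes from the front of the per-priority queue (FIFO against
 *	enqueue_tail()), the default branch from the back.
 */
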
/*
 *	realtime_queue_insert:
 *
 *	Enqueue a thread for realtime execution.
 */
static boolean_t
realtime_queue_insert(
	thread_t			thread)
{
	run_queue_t			rq = &rt_runq;
	queue_t				queue = rq->queues + thread->sched_pri;
	uint64_t			deadline = thread->realtime.deadline;
	boolean_t			preempt = FALSE;

	simple_lock(&rt_lock);

	if (queue_empty(queue)) {
		enqueue_tail(queue, (queue_entry_t)thread);

		setbit(MAXPRI - thread->sched_pri, rq->bitmap);
		if (thread->sched_pri > rq->highq)
			rq->highq = thread->sched_pri;
		preempt = TRUE;
	}
	else {
		register thread_t	entry = (thread_t)queue_first(queue);

		while (TRUE) {
			if (	queue_end(queue, (queue_entry_t)entry)	||
					deadline < entry->realtime.deadline		) {
				entry = (thread_t)queue_prev((queue_entry_t)entry);
				break;
			}

			entry = (thread_t)queue_next((queue_entry_t)entry);
		}

		if ((queue_entry_t)entry == queue)
			preempt = TRUE;

		insque((queue_entry_t)thread, (queue_entry_t)entry);
	}

	thread->runq = RT_RUNQ;
	rq->count++; rq->urgency++;

	simple_unlock(&rt_lock);

	return (preempt);
}
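
/*
 *	Note: within a realtime priority band the queue is kept sorted by
 *	earliest deadline first; the walk above stops at the first entry
 *	with a later deadline and inserts in front of it.  TRUE is
 *	returned only when the new thread lands at the head of the queue,
 *	i.e. when it should preempt the current selection.
 */
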
/*
 *	realtime_setrun:
 *
 *	Dispatch a thread for realtime execution.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
realtime_setrun(
	processor_t			processor,
	thread_t			thread)
{
	processor_set_t		pset = processor->processor_set;

	/*
	 *	Dispatch directly onto idle processor.
	 */
	if (processor->state == PROCESSOR_IDLE) {
		remqueue(&pset->idle_queue, (queue_entry_t)processor);
		enqueue_tail(&pset->active_queue, (queue_entry_t)processor);

		processor->next_thread = thread;
		processor->deadline = thread->realtime.deadline;
		processor->state = PROCESSOR_DISPATCHING;
		pset_unlock(pset);

		if (processor != current_processor())
			machine_signal_idle(processor);
		return;
	}

	if (realtime_queue_insert(thread)) {
		if (processor == current_processor())
			ast_on(AST_PREEMPT | AST_URGENT);
		else
			cause_ast_check(processor);
	}

	pset_unlock(pset);
}
/*
 *	processor_enqueue:
 *
 *	Enqueue thread on a processor run queue.  Thread must be locked,
 *	and not already be on a run queue.
 *
 *	Returns TRUE if a preemption is indicated based on the state
 *	of the run queue.
 *
 *	The run queue must be locked (see run_queue_remove()
 *	for more info).
 */
static boolean_t
processor_enqueue(
	processor_t		processor,
	thread_t		thread,
	integer_t		options)
{
	run_queue_t		rq = &processor->runq;
	queue_t			queue = rq->queues + thread->sched_pri;
	boolean_t		result = FALSE;

	if (queue_empty(queue)) {
		enqueue_tail(queue, (queue_entry_t)thread);

		setbit(MAXPRI - thread->sched_pri, rq->bitmap);
		if (thread->sched_pri > rq->highq) {
			rq->highq = thread->sched_pri;
			result = TRUE;
		}
	}
	else
	if (options & SCHED_TAILQ)
		enqueue_tail(queue, (queue_entry_t)thread);
	else
		enqueue_head(queue, (queue_entry_t)thread);

	thread->runq = processor;
	if (testbit(thread->sched_pri, sched_preempt_pri))
		rq->urgency++;
	rq->count++;

	return (result);
}

/*
 *	processor_setrun:
 *
 *	Dispatch a thread for execution on a
 *	processor.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
processor_setrun(
	processor_t			processor,
	thread_t			thread,
	integer_t			options)
{
	processor_set_t		pset = processor->processor_set;
	ast_t				preempt;

	/*
	 *	Dispatch directly onto idle processor.
	 */
	if (processor->state == PROCESSOR_IDLE) {
		remqueue(&pset->idle_queue, (queue_entry_t)processor);
		enqueue_tail(&pset->active_queue, (queue_entry_t)processor);

		processor->next_thread = thread;
		processor->deadline = UINT64_MAX;
		processor->state = PROCESSOR_DISPATCHING;
		pset_unlock(pset);

		if (processor != current_processor())
			machine_signal_idle(processor);
		return;
	}

	/*
	 *	Set preemption mode.
	 */
	if (testbit(thread->sched_pri, sched_preempt_pri))
		preempt = (AST_PREEMPT | AST_URGENT);
	else
	if (thread->sched_mode & TH_MODE_TIMESHARE && thread->sched_pri < thread->priority)
		preempt = AST_NONE;
	else
		preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;

	if (!processor_enqueue(processor, thread, options))
		preempt = AST_NONE;

	if (preempt != AST_NONE) {
		if (processor == current_processor()) {
			if (csw_check(processor) != AST_NONE)
				ast_on(preempt);
		}
		else
		if (	(processor->state == PROCESSOR_RUNNING		||
				 processor->state == PROCESSOR_SHUTDOWN)	&&
				thread->sched_pri >= processor->current_pri	) {
			cause_ast_check(processor);
		}
	}
	else
	if (	processor->state == PROCESSOR_SHUTDOWN		&&
			thread->sched_pri >= processor->current_pri	) {
		cause_ast_check(processor);
	}

	pset_unlock(pset);
}
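
/*
 *	Note on the preemption decision above: priorities marked in
 *	sched_preempt_pri always request urgent preemption; a timeshare
 *	thread running depressed below its base priority never preempts;
 *	otherwise preemption is requested only when the caller passed
 *	SCHED_PREEMPT, and it is dropped again unless the enqueue raised
 *	the top of the run queue.
 */
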
#define next_pset(p)	(((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)

/*
 *	choose_next_pset:
 *
 *	Return the next sibling pset containing
 *	available processors.
 *
 *	Returns the original pset if none other is
 *	suitable.
 */
static processor_set_t
choose_next_pset(
	processor_set_t		pset)
{
	processor_set_t		nset = pset;

	do {
		nset = next_pset(nset);
	} while (nset->processor_count < 1 && nset != pset);

	return (nset);
}
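
/*
 *	Note: next_pset() treats the processor sets of a node as a ring,
 *	following pset_list and wrapping to node->psets at the end, so
 *	the loop above visits each sibling at most once before giving up
 *	and returning the original pset.
 */
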
/*
 *	choose_processor:
 *
 *	Choose a processor for the thread, beginning at
 *	the pset.
 *
 *	Returns a processor, possibly from a different pset.
 *
 *	The thread must be locked.  The pset must be locked,
 *	and the resulting pset is locked on return.
 */
static processor_t
choose_processor(
	processor_set_t		pset,
	thread_t			thread)
{
	processor_set_t		nset, cset = pset;
	processor_t			processor = thread->last_processor;
	processor_meta_t	pmeta = PROCESSOR_META_NULL;

	/*
	 *	Prefer the last processor, when appropriate.
	 */
	if (processor != PROCESSOR_NULL) {
		if (processor->processor_meta != PROCESSOR_META_NULL)
			processor = processor->processor_meta->primary;

		if (processor->processor_set != pset || processor->state == PROCESSOR_INACTIVE ||
			processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
			processor = PROCESSOR_NULL;
		else
		if (processor->state == PROCESSOR_IDLE)
			return (processor);
	}

	/*
	 *	Iterate through the processor sets to locate
	 *	an appropriate processor.
	 */
	do {
		/*
		 *	Choose an idle processor.
		 */
		if (!queue_empty(&cset->idle_queue))
			return ((processor_t)queue_first(&cset->idle_queue));

		if (thread->sched_pri >= BASEPRI_RTQUEUES) {
			/*
			 *	For an RT thread, iterate through active processors, first fit.
			 */
			processor = (processor_t)queue_first(&cset->active_queue);
			while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
				if (thread->sched_pri > processor->current_pri ||
						thread->realtime.deadline < processor->deadline)
					return (processor);

				if (pmeta == PROCESSOR_META_NULL) {
					if (processor->processor_meta != PROCESSOR_META_NULL &&
							!queue_empty(&processor->processor_meta->idle_queue))
						pmeta = processor->processor_meta;
				}

				processor = (processor_t)queue_next((queue_entry_t)processor);
			}

			if (pmeta != PROCESSOR_META_NULL)
				return ((processor_t)queue_first(&pmeta->idle_queue));

			processor = PROCESSOR_NULL;
		}
		else {
			/*
			 *	Check any hinted processors in the processor set if available.
			 */
			if (cset->low_pri != PROCESSOR_NULL && cset->low_pri->state != PROCESSOR_INACTIVE &&
					cset->low_pri->state != PROCESSOR_SHUTDOWN && cset->low_pri->state != PROCESSOR_OFF_LINE &&
						(processor == PROCESSOR_NULL ||
							(thread->sched_pri > BASEPRI_DEFAULT && cset->low_pri->current_pri < thread->sched_pri))) {
				processor = cset->low_pri;
			}
			else
			if (cset->low_count != PROCESSOR_NULL && cset->low_count->state != PROCESSOR_INACTIVE &&
					cset->low_count->state != PROCESSOR_SHUTDOWN && cset->low_count->state != PROCESSOR_OFF_LINE &&
						(processor == PROCESSOR_NULL || (thread->sched_pri <= BASEPRI_DEFAULT &&
							cset->low_count->runq.count < processor->runq.count))) {
				processor = cset->low_count;
			}

			/*
			 *	Otherwise, choose an available processor in the set.
			 */
			if (processor == PROCESSOR_NULL) {
				processor = (processor_t)dequeue_head(&cset->active_queue);
				if (processor != PROCESSOR_NULL)
					enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
			}

			if (processor != PROCESSOR_NULL && pmeta == PROCESSOR_META_NULL) {
				if (processor->processor_meta != PROCESSOR_META_NULL &&
						!queue_empty(&processor->processor_meta->idle_queue))
					pmeta = processor->processor_meta;
			}
		}

		/*
		 *	Move onto the next processor set.
		 */
		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	/*
	 *	Make sure that we pick a running processor,
	 *	and that the correct processor set is locked.
	 */
	do {
		if (pmeta != PROCESSOR_META_NULL) {
			if (cset != pmeta->primary->processor_set) {
				pset_unlock(cset);

				cset = pmeta->primary->processor_set;
				pset_lock(cset);
			}

			if (!queue_empty(&pmeta->idle_queue))
				return ((processor_t)queue_first(&pmeta->idle_queue));

			pmeta = PROCESSOR_META_NULL;
		}

		/*
		 *	If we haven't been able to choose a processor,
		 *	pick the boot processor and return it.
		 */
		if (processor == PROCESSOR_NULL) {
			processor = master_processor;

			/*
			 *	Check that the correct processor set is
			 *	returned locked.
			 */
			if (cset != processor->processor_set) {
				pset_unlock(cset);

				cset = processor->processor_set;
				pset_lock(cset);
			}

			return (processor);
		}

		/*
		 *	Check that the processor set for the chosen
		 *	processor is locked.
		 */
		if (cset != processor->processor_set) {
			pset_unlock(cset);

			cset = processor->processor_set;
			pset_lock(cset);
		}

		/*
		 *	We must verify that the chosen processor is still available.
		 */
		if (processor->state == PROCESSOR_INACTIVE ||
				processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
			processor = PROCESSOR_NULL;
	} while (processor == PROCESSOR_NULL);

	return (processor);
}
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, onto an idle
 *	processor or run queue, and signal a preemption
 *	as appropriate.
 *
 *	Thread must be locked.
 */
void
thread_setrun(
	thread_t			thread,
	integer_t			options)
{
	processor_t			processor;
	processor_set_t		pset;

#if DEBUG
	assert(thread_runnable(thread));
#endif

	/*
	 *	Update priority if needed.
	 */
	if (thread->sched_stamp != sched_tick)
		update_priority(thread);

	assert(thread->runq == PROCESSOR_NULL);

	if (thread->bound_processor == PROCESSOR_NULL) {
		/*
		 *	Unbound case.
		 */
		if (thread->affinity_set != AFFINITY_SET_NULL) {
			/*
			 *	Use affinity set policy hint.
			 */
			pset = thread->affinity_set->aset_pset;
			pset_lock(pset);

			processor = choose_processor(pset, thread);
		}
		else
		if (thread->last_processor != PROCESSOR_NULL) {
			/*
			 *	Simple (last processor) affinity case.
			 */
			processor = thread->last_processor;
			pset = processor->processor_set;
			pset_lock(pset);

			/*
			 *	Choose a different processor in certain cases.
			 */
			if (thread->sched_pri >= BASEPRI_RTQUEUES) {
				/*
				 *	If the processor is executing an RT thread with
				 *	an earlier deadline, choose another.
				 */
				if (thread->sched_pri <= processor->current_pri ||
						thread->realtime.deadline >= processor->deadline)
					processor = choose_processor(pset, thread);
			}
			else
				processor = choose_processor(pset, thread);
		}
		else {
			/*
			 *	No Affinity case:
			 *
			 *	Utilize a per-task hint to spread threads
			 *	among the available processor sets.
			 */
			task_t		task = thread->task;

			pset = task->pset_hint;
			if (pset == PROCESSOR_SET_NULL)
				pset = current_processor()->processor_set;

			pset = choose_next_pset(pset);
			pset_lock(pset);

			processor = choose_processor(pset, thread);
			task->pset_hint = processor->processor_set;
		}
	}
	else {
		/*
		 *	Bound case:
		 *
		 *	Unconditionally dispatch on the processor.
		 */
		processor = thread->bound_processor;
		pset = processor->processor_set;
		pset_lock(pset);
	}

	/*
	 *	Dispatch the thread on the chosen processor.
	 */
	if (thread->sched_pri >= BASEPRI_RTQUEUES)
		realtime_setrun(processor, thread);
	else
		processor_setrun(processor, thread, options);
}
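
/*
 *	Note on the placement policy above: bound threads go directly to
 *	their bound processor; otherwise the affinity-set pset, the last
 *	processor's pset, or a rotating per-task pset hint seeds
 *	choose_processor(), and realtime threads are finally dispatched
 *	through the global realtime queue path.
 */
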
processor_set_t
task_choose_pset(
	task_t		task)
{
	processor_set_t		pset = task->pset_hint;

	if (pset != PROCESSOR_SET_NULL)
		pset = choose_next_pset(pset);

	return (pset);
}
/*
 *	processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by
 *	re-dispatching non-bound threads.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
void
processor_queue_shutdown(
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	run_queue_t			rq = &processor->runq;
	queue_t				queue = rq->queues + rq->highq;
	int					pri = rq->highq, count = rq->count;
	thread_t			next, thread;
	queue_head_t		tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue(queue, (queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				rq->count--;
				if (testbit(pri, sched_preempt_pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
/*
 *	Check for a preemption point in
 *	the current context.
 *
 *	Called at splsched.
 */
ast_t
csw_check(
	processor_t		processor)
{
	ast_t			result = AST_NONE;
	run_queue_t		runq;

	if (first_timeslice(processor)) {
		runq = &rt_runq;
		if (runq->highq >= BASEPRI_RTQUEUES)
			return (AST_PREEMPT | AST_URGENT);

		if (runq->highq > processor->current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}

		runq = &processor->runq;
		if (runq->highq > processor->current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}
	}
	else {
		runq = &rt_runq;
		if (runq->highq >= processor->current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}

		runq = &processor->runq;
		if (runq->highq >= processor->current_pri) {
			if (runq->urgency > 0)
				return (AST_PREEMPT | AST_URGENT);

			result |= AST_PREEMPT;
		}
	}

	if (result != AST_NONE)
		return (result);

	if (processor->current_pri < BASEPRI_RTQUEUES && processor->processor_meta != PROCESSOR_META_NULL &&
			processor->processor_meta->primary != processor)
		return (AST_PREEMPT);

	if (machine_cpu_is_inactive(processor->cpu_id))
		return (AST_PREEMPT);

	if (processor->active_thread->state & TH_SUSP)
		return (AST_PREEMPT);

	return (AST_NONE);
}
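
/*
 *	Note: the two halves above differ only in the comparison used.
 *	While the running thread still has timeslice remaining
 *	(first_timeslice()), it yields only to strictly higher
 *	priorities (>); once its quantum is used up, an equal priority
 *	(>=) suffices, which is what effects round-robin among peers.
 */
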
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
void
set_sched_pri(
	thread_t		thread,
	int				priority)
{
	boolean_t		removed = run_queue_remove(thread);

	thread->sched_pri = priority;
	if (removed)
		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	else
	if (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (thread == current_thread()) {
			ast_t		preempt;

			processor->current_pri = priority;
			if ((preempt = csw_check(processor)) != AST_NONE)
				ast_on(preempt);
		}
		else
		if (	processor != PROCESSOR_NULL &&
				processor->active_thread == thread	)
			cause_ast_check(processor);
	}
}
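
/*
 *	Note: this remove/re-enqueue sequence is the standard way to
 *	change a queued thread's priority -- pull it off its run queue
 *	under the thread lock, update sched_pri, then let thread_setrun()
 *	re-insert it at the new priority with a preemption check.
 */
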
#if	0

static void
run_queue_check(
	run_queue_t		rq,
	thread_t		thread)
{
	queue_t			q;
	queue_entry_t	qe;

	if (rq != thread->runq)
		panic("run_queue_check: thread runq");

	if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
		panic("run_queue_check: thread sched_pri");

	q = &rq->queues[thread->sched_pri];
	qe = queue_first(q);
	while (!queue_end(q, qe)) {
		if (qe == (queue_entry_t)thread)
			return;

		qe = queue_next(qe);
	}

	panic("run_queue_check: end");
}

#endif	/* 0 */
/*
 *	run_queue_remove:
 *
 *	Remove a thread from a current run queue and
 *	return TRUE if successful.
 *
 *	Thread must be locked.
 */
boolean_t
run_queue_remove(
	thread_t		thread)
{
	processor_t		processor = thread->runq;

	/*
	 *	If processor is PROCESSOR_NULL, the thread will stay out of the
	 *	run queues because the caller locked the thread.  Otherwise
	 *	the thread is on a run queue, but could be chosen for dispatch
	 *	and removed.
	 */
	if (processor != PROCESSOR_NULL) {
		void *			rqlock;
		run_queue_t		rq;

		/*
		 *	The processor run queues are locked by the
		 *	processor set.  Real-time priorities use a
		 *	global queue with a dedicated lock.
		 */
		if (thread->sched_pri < BASEPRI_RTQUEUES) {
			rqlock = &processor->processor_set->sched_lock;
			rq = &processor->runq;
		}
		else {
			rqlock = &rt_lock; rq = &rt_runq;
		}

		simple_lock(rqlock);

		if (processor == thread->runq) {
			/*
			 *	Thread is on a run queue and we have a lock on
			 *	that run queue.
			 */
			remqueue(&rq->queues[0], (queue_entry_t)thread);
			rq->count--;
			if (testbit(thread->sched_pri, sched_preempt_pri)) {
				rq->urgency--; assert(rq->urgency >= 0);
			}

			if (queue_empty(rq->queues + thread->sched_pri)) {
				/* update run queue status */
				if (thread->sched_pri != IDLEPRI)
					clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
				rq->highq = MAXPRI - ffsbit(rq->bitmap);
			}

			thread->runq = PROCESSOR_NULL;
		}
		else {
			/*
			 *	The thread left the run queue before we could
			 *	lock the run queue.
			 */
			assert(thread->runq == PROCESSOR_NULL);
			processor = PROCESSOR_NULL;
		}

		simple_unlock(rqlock);
	}

	return (processor != PROCESSOR_NULL);
}
/*
 *	steal_processor_thread:
 *
 *	Locate a thread to steal from the processor and
 *	return it.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
static thread_t
steal_processor_thread(
	processor_t		processor)
{
	run_queue_t		rq = &processor->runq;
	queue_t			queue = rq->queues + rq->highq;
	int				pri = rq->highq, count = rq->count;
	thread_t		thread;

	while (count > 0) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue(queue, (queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				rq->count--;
				if (testbit(pri, sched_preempt_pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					if (pri != IDLEPRI)
						clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}
/*
 *	Locate and steal a thread, beginning
 *	at the pset.
 *
 *	The pset must be locked, and is returned
 *	unlocked.
 *
 *	Returns the stolen thread, or THREAD_NULL on
 *	failure.
 */
static thread_t
steal_thread(
	processor_set_t		pset)
{
	processor_set_t		nset, cset = pset;
	processor_t			processor;
	thread_t			thread;

	do {
		processor = (processor_t)queue_first(&cset->active_queue);
		while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
			if (processor->runq.count > 0) {
				thread = steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
					remqueue(&cset->active_queue, (queue_entry_t)processor);
					enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

					pset_unlock(cset);

					return (thread);
				}
			}

			processor = (processor_t)queue_next((queue_entry_t)processor);
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return (THREAD_NULL);
}
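
/*
 *	Note: after a successful steal the victim processor is rotated to
 *	the tail of its pset's active queue, spreading subsequent steals
 *	across processors rather than raiding the same one repeatedly.
 */
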
/*
 *	This is the processor idle loop, which just looks for other threads
 *	to execute.  Processor idle threads invoke this without supplying a
 *	current thread; a thread may also enter it to idle without an
 *	asserted wait state.
 *
 *	Returns the next thread to execute if dispatched directly.
 */
static thread_t
processor_idle(
	thread_t			thread,
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			new_thread;
	int					state;

	(void)splsched();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (uintptr_t)thread_tid(thread), 0, 0, 0, 0);

	timer_switch(&PROCESSOR_DATA(processor, system_state),
					mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);

	while (processor->next_thread == THREAD_NULL && processor->runq.count == 0 && rt_runq.count == 0 &&
				(thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
		machine_idle();

		(void)splsched();

		if (processor->state == PROCESSOR_INACTIVE && !machine_cpu_is_inactive(processor->cpu_id))
			break;
	}

	timer_switch(&PROCESSOR_DATA(processor, idle_state),
					mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	pset_lock(pset);

	state = processor->state;
	if (state == PROCESSOR_DISPATCHING) {
		/*
		 *	Common case -- cpu dispatched.
		 */
		new_thread = processor->next_thread;
		processor->next_thread = THREAD_NULL;
		processor->state = PROCESSOR_RUNNING;

		if (	processor->runq.highq > new_thread->sched_pri					||
				(rt_runq.highq > 0 && rt_runq.highq >= new_thread->sched_pri)	) {
			processor->deadline = UINT64_MAX;

			pset_unlock(pset);

			thread_lock(new_thread);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0);

			return (THREAD_NULL);
		}

		pset_unlock(pset);

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);

		return (new_thread);
	}
	else
	if (state == PROCESSOR_IDLE) {
		remqueue(&pset->idle_queue, (queue_entry_t)processor);

		processor->state = PROCESSOR_RUNNING;
		enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
	}
	else
	if (state == PROCESSOR_INACTIVE) {
		processor->state = PROCESSOR_RUNNING;
		enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
	}
	else
	if (state == PROCESSOR_SHUTDOWN) {
		/*
		 *	Going off-line.  Force a
		 *	reschedule.
		 */
		if ((new_thread = processor->next_thread) != THREAD_NULL) {
			processor->next_thread = THREAD_NULL;
			processor->deadline = UINT64_MAX;

			pset_unlock(pset);

			thread_lock(new_thread);
			thread_setrun(new_thread, SCHED_HEADQ);
			thread_unlock(new_thread);

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0);

			return (THREAD_NULL);
		}
	}

	pset_unlock(pset);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), state, 0, 0, 0);

	return (THREAD_NULL);
}
/*
 *	Each processor has a dedicated thread which
 *	executes the idle loop when there is no suitable
 *	previous context.
 */
void
idle_thread(void)
{
	processor_t		processor = current_processor();
	thread_t		new_thread;

	new_thread = processor_idle(THREAD_NULL, processor);
	if (new_thread != THREAD_NULL) {
		thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
		/*NOTREACHED*/
	}

	thread_block((thread_continue_t)idle_thread);
	/*NOTREACHED*/
}

kern_return_t
idle_thread_create(
	processor_t		processor)
{
	kern_return_t	result;
	thread_t		thread;
	spl_t			s;

	result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	s = splsched();
	thread_lock(thread);
	thread->bound_processor = processor;
	processor->idle_thread = thread;
	thread->sched_pri = thread->priority = IDLEPRI;
	thread->state = (TH_RUN | TH_IDLE);
	thread_unlock(thread);
	splx(s);

	thread_deallocate(thread);

	return (KERN_SUCCESS);
}
static uint64_t		sched_tick_deadline;

/*
 *	sched_startup:
 *
 *	Kicks off scheduler services.
 *
 *	Called at splsched.
 */
void
sched_startup(void)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("sched_startup");

	thread_deallocate(thread);

	/*
	 *	Yield to the sched_tick_thread while it times
	 *	a series of context switches back.  It stores
	 *	the baseline value in sched_cswtime.
	 *
	 *	The current thread is the only other thread
	 *	active at this point.
	 */
	while (sched_cswtime == 0)
		thread_block(THREAD_CONTINUE_NULL);

	thread_daemon_init();

	thread_call_initialize();
}
/*
 *	sched_tick_thread:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
static void
sched_tick_continue(void)
{
	uint64_t	abstime = mach_absolute_time();

	sched_tick++;

	/*
	 *	Compute various averages.
	 */
	compute_averages();

	/*
	 *	Scan the run queues for threads which
	 *	may need to be updated.
	 */
	thread_update_scan();

	clock_deadline_for_periodic_event(sched_tick_interval, abstime,
											&sched_tick_deadline);

	assert_wait_deadline((event_t)sched_tick_thread, THREAD_UNINT, sched_tick_deadline);
	thread_block((thread_continue_t)sched_tick_continue);
	/*NOTREACHED*/
}
/*
 *	Time a series of context switches to determine
 *	a baseline.  Toss the high and low and return
 *	the one-way value.
 */
static uint32_t
time_cswitch(void)
{
	uint32_t	new, hi, low, accum;
	uint64_t	abstime;
	int			i, tries = 7;

	accum = hi = low = 0;
	for (i = 0; i < tries; ++i) {
		abstime = mach_absolute_time();
		thread_block(THREAD_CONTINUE_NULL);

		new = (uint32_t)(mach_absolute_time() - abstime);

		if (i == 0)
			accum = hi = low = new;
		else {
			if (new < low)
				low = new;
			else
			if (new > hi)
				hi = new;
			accum += new;
		}
	}

	return ((accum - hi - low) / (2 * (tries - 2)));
}
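
/*
 *	Worked example: with tries == 7, each sample is a round trip
 *	(block and switch back).  Dropping the high and low leaves five
 *	round-trip samples, and the divisor 2 * (tries - 2) == 10 both
 *	averages the five samples and halves the result to give the
 *	one-way switch time.
 */
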
void
sched_tick_thread(void)
{
	sched_cswtime = time_cswitch();

	sched_tick_deadline = mach_absolute_time();

	sched_tick_continue();
	/*NOTREACHED*/
}
/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for timesharing threads
 *	which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */

#define	THREAD_UPDATE_SIZE		128

static thread_t		thread_update_array[THREAD_UPDATE_SIZE];
static int			thread_update_count = 0;

/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 */
static boolean_t
runq_scan(
	run_queue_t				runq)
{
	register int			count;
	register queue_t		q;
	register thread_t		thread;

	if ((count = runq->count) > 0) {
		q = runq->queues + runq->highq;
		while (count > 0) {
			queue_iterate(q, thread, thread_t, links) {
				if (	thread->sched_stamp != sched_tick		&&
						(thread->sched_mode & TH_MODE_TIMESHARE)	) {
					if (thread_update_count == THREAD_UPDATE_SIZE)
						return (TRUE);

					thread_update_array[thread_update_count++] = thread;
					thread_reference_internal(thread);
				}

				count--;
			}

			q--;
		}
	}

	return (FALSE);
}
static void
thread_update_scan(void)
{
	boolean_t			restart_needed = FALSE;
	processor_t			processor = processor_list;
	processor_set_t		pset;
	thread_t			thread;
	spl_t				s;

	do {
		do {
			pset = processor->processor_set;

			s = splsched();
			pset_lock(pset);

			restart_needed = runq_scan(&processor->runq);

			pset_unlock(pset);
			splx(s);

			if (restart_needed)
				break;

			thread = processor->idle_thread;
			if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
				if (thread_update_count == THREAD_UPDATE_SIZE) {
					restart_needed = TRUE;
					break;
				}

				thread_update_array[thread_update_count++] = thread;
				thread_reference_internal(thread);
			}
		} while ((processor = processor->processor_list) != NULL);

		/*
		 *	Ok, we now have a collection of candidates -- fix them.
		 */
		while (thread_update_count > 0) {
			thread = thread_update_array[--thread_update_count];
			thread_update_array[thread_update_count] = THREAD_NULL;

			s = splsched();
			thread_lock(thread);
			if (	!(thread->state & (TH_WAIT|TH_SUSP))	&&
					thread->sched_stamp != sched_tick	)
				update_priority(thread);
			thread_unlock(thread);
			splx(s);

			thread_deallocate(thread);
		}
	} while (restart_needed);
}
/*
 *	Just in case someone doesn't use the macro
 */
#undef	thread_wakeup
void
thread_wakeup(
	event_t		x);

void
thread_wakeup(
	event_t		x)
{
	thread_wakeup_with_result(x, THREAD_AWAKENED);
}
boolean_t
preemption_enabled(void)
{
	return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
}
#if	DEBUG
static boolean_t
thread_runnable(
	thread_t	thread)
{
	return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
}
#endif	/* DEBUG */
#if	MACH_KDB
#include <ddb/db_output.h>
#define	printf		kdbprintf
void			db_sched(void);

void
db_sched(void)
{
	iprintf("Scheduling Statistics:\n");
	db_indent += 2;
	iprintf("Thread invocations:  csw %d same %d\n",
		c_thread_invoke_csw, c_thread_invoke_same);
#if	MACH_COUNTERS
	iprintf("Thread block:  calls %d\n",
		c_thread_block_calls);
	iprintf("Idle thread:\n\thandoff %d block %d\n",
		c_idle_thread_handoff,
		c_idle_thread_block);
	iprintf("Sched thread blocks:  %d\n", c_sched_thread_block);
#endif	/* MACH_COUNTERS */
	db_indent -= 2;
}

#include <ddb/db_output.h>
void		db_show_thread_log(void);

void
db_show_thread_log(void)
{
}
#endif	/* MACH_KDB */