/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Scheduling primitives
 *
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/policy.h>
#include <mach/sync_policy.h>
#include <mach/thread_act.h>

#include <machine/machine_routines.h>
#include <machine/sched_param.h>
#include <machine/machine_cpu.h>
#include <machine/machlimits.h>

#include <kern/kern_types.h>
#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/macro_help.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/syscall_subr.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>
#include <kern/ledger.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <mach/sdt.h>

#include <sys/kdebug.h>

#include <kern/pms.h>
struct rt_queue	rt_runq;
#define RT_RUNQ		((processor_t)-1)
decl_simple_lock_data(static,rt_lock);

#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)
static struct fairshare_queue	fs_runq;
#define FS_RUNQ		((processor_t)-2)
decl_simple_lock_data(static,fs_lock);
#endif

#define	DEFAULT_PREEMPTION_RATE		100		/* (1/s) */
int			default_preemption_rate = DEFAULT_PREEMPTION_RATE;

#define	DEFAULT_BG_PREEMPTION_RATE	400		/* (1/s) */
int			default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

#define	MAX_UNSAFE_QUANTA			800
int			max_unsafe_quanta = MAX_UNSAFE_QUANTA;

#define	MAX_POLL_QUANTA				2
int			max_poll_quanta = MAX_POLL_QUANTA;

#define	SCHED_POLL_YIELD_SHIFT		4		/* 1/16 */
int			sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;

uint64_t	max_poll_computation;

uint64_t	max_unsafe_computation;
uint64_t	sched_safe_duration;
#if defined(CONFIG_SCHED_TRADITIONAL)

uint32_t	std_quantum;
uint32_t	min_std_quantum;

uint32_t	std_quantum_us;
uint32_t	bg_quantum_us;

#endif /* CONFIG_SCHED_TRADITIONAL */

uint32_t	thread_depress_time;
uint32_t	default_timeshare_computation;
uint32_t	default_timeshare_constraint;

uint32_t	max_rt_quantum;
uint32_t	min_rt_quantum;

#if defined(CONFIG_SCHED_TRADITIONAL)

uint32_t	sched_tick_interval;

uint32_t	sched_pri_shift = INT8_MAX;
uint32_t	sched_fixed_shift;

static boolean_t sched_traditional_use_pset_runqueue = FALSE;
__attribute__((always_inline))
static inline run_queue_t runq_for_processor(processor_t processor)
{
	if (sched_traditional_use_pset_runqueue)
		return &processor->processor_set->pset_runq;
	else
		return &processor->runq;
}
__attribute__((always_inline))
static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count++;

	processor->runq_bound_count++;
}
__attribute__((always_inline))
static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread)
{
	if (thread->bound_processor == PROCESSOR_NULL)
		return;

	assert(thread->bound_processor == processor);

	if (sched_traditional_use_pset_runqueue)
		processor->processor_set->pset_runq_bound_count--;

	processor->runq_bound_count--;
}

#endif /* CONFIG_SCHED_TRADITIONAL */
uint64_t	sched_one_second_interval;

uint32_t	sched_run_count, sched_share_count;
uint32_t	sched_load_average, sched_mach_factor;

#if defined(CONFIG_SCHED_TRADITIONAL)

static void load_shift_init(void) __attribute__((section("__TEXT, initcode")));
static void preempt_pri_init(void) __attribute__((section("__TEXT, initcode")));

#endif /* CONFIG_SCHED_TRADITIONAL */
static thread_t	thread_select(
					thread_t			thread,
					processor_t			processor);

#if CONFIG_SCHED_IDLE_IN_PLACE
static thread_t	thread_select_idle(
					thread_t			thread,
					processor_t			processor);
#endif

thread_t	processor_idle(
					thread_t			thread,
					processor_t			processor);

#if defined(CONFIG_SCHED_TRADITIONAL)

static thread_t	steal_thread(
					processor_set_t		pset);

static thread_t	steal_thread_disabled(
					processor_set_t		pset) __attribute__((unused));

static thread_t	steal_processor_thread(
					processor_t			processor);

static void		thread_update_scan(void);
static void		processor_setrun(
					processor_t			processor,
					thread_t			thread,
					integer_t			options);

static void
processor_queue_shutdown(
					processor_t			processor);

static boolean_t
processor_queue_remove(
					processor_t			processor,
					thread_t			thread);

static boolean_t	processor_queue_empty(processor_t processor);

static boolean_t	priority_is_urgent(int priority);

static ast_t		processor_csw_check(processor_t processor);

static boolean_t	processor_queue_has_priority(processor_t	processor,
					int			priority,
					boolean_t		gte);

static boolean_t	should_current_thread_rechoose_processor(processor_t processor);

static int		sched_traditional_processor_runq_count(processor_t processor);

static boolean_t	sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor);

static uint64_t		sched_traditional_processor_runq_stats_count_sum(processor_t processor);

static uint64_t		sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor);

#endif /* CONFIG_SCHED_TRADITIONAL */
#if defined(CONFIG_SCHED_TRADITIONAL)

static void
sched_traditional_init(void);

static void
sched_traditional_timebase_init(void);

static void
sched_traditional_processor_init(processor_t processor);

static void
sched_traditional_pset_init(processor_set_t pset);

static void
sched_traditional_with_pset_runqueue_init(void);

#endif

static void
sched_realtime_init(void) __attribute__((section("__TEXT, initcode")));

static void
sched_realtime_timebase_init(void);

#if defined(CONFIG_SCHED_TRADITIONAL)

static void
sched_traditional_tick_continue(void);

static uint32_t
sched_traditional_initial_quantum_size(thread_t thread);

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task);

static boolean_t
sched_traditional_supports_timeshare_mode(void);

static thread_t
sched_traditional_choose_thread(
					processor_t		processor,
					int			priority);

#endif
#if	DEBUG
extern int	debug_task;
#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
#else
#define TLOG(a, fmt, args...) do {} while (0)
#endif
boolean_t	thread_runnable(
				thread_t		thread);

/*
 *	State machine
 *
 *	states are combinations of:
 *	  W	waiting (or on wait queue)
 *	  N	non-interruptible
 *
 *		assert_wait	thread_block	clear_wait	swapout	swapin
 *
 *	  R	RW, RWN		R; setrun	-		-
 *	  RN	RWN		RN; setrun	-		-
 */
#if defined(CONFIG_SCHED_TRADITIONAL)
int8_t		sched_load_shifts[NRQS];
int		sched_preempt_pri[NRQBM];
#endif
#if defined(CONFIG_SCHED_TRADITIONAL)

const struct sched_dispatch_table sched_traditional_dispatch = {
	sched_traditional_init,
	sched_traditional_timebase_init,
	sched_traditional_processor_init,
	sched_traditional_pset_init,
	sched_traditional_tick_continue,
	sched_traditional_choose_thread,
	processor_queue_shutdown,
	processor_queue_remove,
	processor_queue_empty,
	processor_queue_has_priority,
	sched_traditional_initial_quantum_size,
	sched_traditional_initial_thread_sched_mode,
	sched_traditional_supports_timeshare_mode,
	lightweight_update_priority,
	sched_traditional_quantum_expire,
	should_current_thread_rechoose_processor,
	sched_traditional_processor_runq_count,
	sched_traditional_processor_runq_stats_count_sum,
	sched_traditional_fairshare_init,
	sched_traditional_fairshare_runq_count,
	sched_traditional_fairshare_runq_stats_count_sum,
	sched_traditional_fairshare_enqueue,
	sched_traditional_fairshare_dequeue,
	sched_traditional_fairshare_queue_remove,
	TRUE	/* direct_dispatch_to_idle_processors */
};
const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch = {
	sched_traditional_with_pset_runqueue_init,
	sched_traditional_timebase_init,
	sched_traditional_processor_init,
	sched_traditional_pset_init,
	sched_traditional_tick_continue,
	sched_traditional_choose_thread,
	processor_queue_shutdown,
	processor_queue_remove,
	sched_traditional_with_pset_runqueue_processor_queue_empty,
	processor_queue_has_priority,
	sched_traditional_initial_quantum_size,
	sched_traditional_initial_thread_sched_mode,
	sched_traditional_supports_timeshare_mode,
	lightweight_update_priority,
	sched_traditional_quantum_expire,
	should_current_thread_rechoose_processor,
	sched_traditional_processor_runq_count,
	sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum,
	sched_traditional_fairshare_init,
	sched_traditional_fairshare_runq_count,
	sched_traditional_fairshare_runq_stats_count_sum,
	sched_traditional_fairshare_enqueue,
	sched_traditional_fairshare_dequeue,
	sched_traditional_fairshare_queue_remove,
	FALSE	/* direct_dispatch_to_idle_processors */
};

#endif /* CONFIG_SCHED_TRADITIONAL */
const struct sched_dispatch_table *sched_current_dispatch = NULL;

/*
 * Statically allocate a buffer to hold the longest possible
 * scheduler description string, as currently implemented.
 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
 * to export to userspace via sysctl(3). If either version
 * changes, update the other.
 *
 * Note that in addition to being an upper bound on the strings
 * in the kernel, it's also an exact parameter to PE_get_default(),
 * which interrogates the device tree on some platforms. That
 * API requires the caller know the exact size of the device tree
 * property, so we need both a legacy size (32) and the current size
 * (48) to deal with old and new device trees. The device tree property
 * is similarly padded to a fixed size so that the same kernel image
 * can run on multiple devices with different schedulers configured
 * in the device tree.
 */
#define SCHED_STRING_MAX_LENGTH (48)

char sched_string[SCHED_STRING_MAX_LENGTH];
static enum sched_enum _sched_enum = sched_enum_unknown;

void
sched_init(void)
{
	char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };
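	/*
	 * Illustrative note (not part of the original source): the active
	 * scheduler policy can be chosen at boot with the "sched" boot-arg,
	 * e.g. sched=traditional selects sched_traditional_dispatch below,
	 * assuming the string matches kSchedTraditionalString on this build.
	 * With no boot-arg, the "kern.sched" device-tree property is
	 * consulted, and otherwise the compile-time default applies.
	 */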
	/* Check for runtime selection of the scheduler algorithm */
	if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) {
		/* If no boot-args override, look in device tree */
		if (!PE_get_default("kern.sched", sched_arg,
							SCHED_STRING_MAX_LENGTH)) {
			sched_arg[0] = '\0';
		}
	}

	if (strlen(sched_arg) > 0) {
		if (0) {
			/* Allow pattern below */
#if defined(CONFIG_SCHED_TRADITIONAL)
		} else if (0 == strcmp(sched_arg, kSchedTraditionalString)) {
			sched_current_dispatch = &sched_traditional_dispatch;
			_sched_enum = sched_enum_traditional;
			strlcpy(sched_string, kSchedTraditionalString, sizeof(sched_string));
			kprintf("Scheduler: Runtime selection of %s\n", kSchedTraditionalString);
		} else if (0 == strcmp(sched_arg, kSchedTraditionalWithPsetRunqueueString)) {
			sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
			_sched_enum = sched_enum_traditional_with_pset_runqueue;
			strlcpy(sched_string, kSchedTraditionalWithPsetRunqueueString, sizeof(sched_string));
			kprintf("Scheduler: Runtime selection of %s\n", kSchedTraditionalWithPsetRunqueueString);
#endif
#if defined(CONFIG_SCHED_PROTO)
		} else if (0 == strcmp(sched_arg, kSchedProtoString)) {
			sched_current_dispatch = &sched_proto_dispatch;
			_sched_enum = sched_enum_proto;
			strlcpy(sched_string, kSchedProtoString, sizeof(sched_string));
			kprintf("Scheduler: Runtime selection of %s\n", kSchedProtoString);
#endif
#if defined(CONFIG_SCHED_GRRR)
		} else if (0 == strcmp(sched_arg, kSchedGRRRString)) {
			sched_current_dispatch = &sched_grrr_dispatch;
			_sched_enum = sched_enum_grrr;
			strlcpy(sched_string, kSchedGRRRString, sizeof(sched_string));
			kprintf("Scheduler: Runtime selection of %s\n", kSchedGRRRString);
#endif
#if defined(CONFIG_SCHED_FIXEDPRIORITY)
		} else if (0 == strcmp(sched_arg, kSchedFixedPriorityString)) {
			sched_current_dispatch = &sched_fixedpriority_dispatch;
			_sched_enum = sched_enum_fixedpriority;
			strlcpy(sched_string, kSchedFixedPriorityString, sizeof(sched_string));
			kprintf("Scheduler: Runtime selection of %s\n", kSchedFixedPriorityString);
		} else if (0 == strcmp(sched_arg, kSchedFixedPriorityWithPsetRunqueueString)) {
			sched_current_dispatch = &sched_fixedpriority_with_pset_runqueue_dispatch;
			_sched_enum = sched_enum_fixedpriority_with_pset_runqueue;
			strlcpy(sched_string, kSchedFixedPriorityWithPsetRunqueueString, sizeof(sched_string));
			kprintf("Scheduler: Runtime selection of %s\n", kSchedFixedPriorityWithPsetRunqueueString);
#endif
		} else {
			panic("Unrecognized scheduler algorithm: %s", sched_arg);
		}
	} else {
#if   defined(CONFIG_SCHED_TRADITIONAL)
		sched_current_dispatch = &sched_traditional_dispatch;
		_sched_enum = sched_enum_traditional;
		strlcpy(sched_string, kSchedTraditionalString, sizeof(sched_string));
		kprintf("Scheduler: Default of %s\n", kSchedTraditionalString);
#elif defined(CONFIG_SCHED_PROTO)
		sched_current_dispatch = &sched_proto_dispatch;
		_sched_enum = sched_enum_proto;
		strlcpy(sched_string, kSchedProtoString, sizeof(sched_string));
		kprintf("Scheduler: Default of %s\n", kSchedProtoString);
#elif defined(CONFIG_SCHED_GRRR)
		sched_current_dispatch = &sched_grrr_dispatch;
		_sched_enum = sched_enum_grrr;
		strlcpy(sched_string, kSchedGRRRString, sizeof(sched_string));
		kprintf("Scheduler: Default of %s\n", kSchedGRRRString);
#elif defined(CONFIG_SCHED_FIXEDPRIORITY)
		sched_current_dispatch = &sched_fixedpriority_dispatch;
		_sched_enum = sched_enum_fixedpriority;
		strlcpy(sched_string, kSchedFixedPriorityString, sizeof(sched_string));
		kprintf("Scheduler: Default of %s\n", kSchedFixedPriorityString);
#else
#error No default scheduler implementation
#endif
	}
	SCHED(fairshare_init)();
	sched_realtime_init();

	SCHED(pset_init)(&pset0);
	SCHED(processor_init)(master_processor);
}
void
sched_timebase_init(void)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
	sched_one_second_interval = abstime;

	SCHED(timebase_init)();
	sched_realtime_timebase_init();
}
#if defined(CONFIG_SCHED_TRADITIONAL)

static void
sched_traditional_init(void)
{
	/*
	 * Calculate the timeslicing quantum.
	 */
	if (default_preemption_rate < 1)
		default_preemption_rate = DEFAULT_PREEMPTION_RATE;
	std_quantum_us = (1000 * 1000) / default_preemption_rate;

	printf("standard timeslicing quantum is %d us\n", std_quantum_us);

	if (default_bg_preemption_rate < 1)
		default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
	bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;

	printf("standard background quantum is %d us\n", bg_quantum_us);
}
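/*
 * Worked example (illustrative): with the default preemption rate of
 * 100/s, std_quantum_us = 1,000,000 / 100 = 10,000 us (10 ms); with the
 * default background rate of 400/s, bg_quantum_us = 2,500 us.
 */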
static void
sched_traditional_timebase_init(void)
{
	uint64_t	abstime;
	uint32_t	shift;

	/* standard timeslicing quantum */
	clock_interval_to_absolutetime_interval(
							std_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	std_quantum = (uint32_t)abstime;

	/* smallest remaining quantum (250 us) */
	clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_std_quantum = (uint32_t)abstime;

	/* quantum for background tasks */
	clock_interval_to_absolutetime_interval(
							bg_quantum_us, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	bg_quantum = (uint32_t)abstime;

	/* scheduler tick interval */
	clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
							NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	sched_tick_interval = (uint32_t)abstime;

	/*
	 * Compute conversion factor from usage to
	 * timesharing priorities with 5/8 ** n aging.
	 */
	abstime = (abstime * 5) / 3;
	for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
		abstime >>= 1;
	sched_fixed_shift = shift;

	max_unsafe_computation = max_unsafe_quanta * std_quantum;
	sched_safe_duration = 2 * max_unsafe_quanta * std_quantum;

	max_poll_computation = max_poll_quanta * std_quantum;
	thread_depress_time = 1 * std_quantum;
	default_timeshare_computation = std_quantum / 2;
	default_timeshare_constraint = std_quantum;
}
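/*
 * Illustrative note: sched_fixed_shift is, roughly, the shift distance
 * that maps one scheduler tick's worth of CPU usage down into the
 * priority range below BASEPRI_DEFAULT; it is later used (together with
 * the load shifts) when converting accumulated usage into a timesharing
 * priority decrement.
 */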
static void
sched_traditional_processor_init(processor_t processor)
{
	if (!sched_traditional_use_pset_runqueue) {
		run_queue_init(&processor->runq);
	}
	processor->runq_bound_count = 0;
}

static void
sched_traditional_pset_init(processor_set_t pset)
{
	if (sched_traditional_use_pset_runqueue) {
		run_queue_init(&pset->pset_runq);
	}
	pset->pset_runq_bound_count = 0;
}

static void
sched_traditional_with_pset_runqueue_init(void)
{
	sched_traditional_init();
	sched_traditional_use_pset_runqueue = TRUE;
}

#endif /* CONFIG_SCHED_TRADITIONAL */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)

void
sched_traditional_fairshare_init(void)
{
	simple_lock_init(&fs_lock, 0);

	queue_init(&fs_runq.queue);
}

#endif

static void
sched_realtime_init(void)
{
	simple_lock_init(&rt_lock, 0);

	queue_init(&rt_runq.queue);
}

static void
sched_realtime_timebase_init(void)
{
	uint64_t abstime;

	/* smallest rt computation (50 us) */
	clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	min_rt_quantum = (uint32_t)abstime;

	/* maximum rt computation (50 ms) */
	clock_interval_to_absolutetime_interval(
		50, 1000*NSEC_PER_USEC, &abstime);
	assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
	max_rt_quantum = (uint32_t)abstime;
}
#if defined(CONFIG_SCHED_TRADITIONAL)

/*
 * Set up values for timeshare
 * load shifting.
 */
static void
load_shift_init(void)
{
	int8_t		k, *p = sched_load_shifts;
	uint32_t	i, j;

	*p++ = INT8_MIN; *p++ = 0;

	for (i = j = 2, k = 1; i < NRQS; ++k) {
		for (j <<= 1; i < j; ++i)
			*p++ = k;
	}
}
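/*
 * Illustrative note: after this loop sched_load_shifts[n] is roughly
 * log2(n) (with entry 0 set to INT8_MIN and entry 1 set to 0), so a run
 * queue depth can be turned into an extra right-shift applied to CPU
 * usage when the load is high.
 */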
static void
preempt_pri_init(void)
{
	int		i, *p = sched_preempt_pri;

	for (i = BASEPRI_FOREGROUND + 1; i < MINPRI_KERNEL; ++i)
		setbit(i, p);

	for (i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
		setbit(i, p);
}

#endif /* CONFIG_SCHED_TRADITIONAL */
/*
 *	Thread wait timer expiration.
 */
void
thread_timer_expire(
	void		*p0,
	__unused void	*p1)
{
	thread_t	thread = p0;

	if (--thread->wait_timer_active == 0) {
		if (thread->wait_timer_is_set) {
			thread->wait_timer_is_set = FALSE;
			clear_wait_internal(thread, THREAD_TIMED_OUT);
		}
	}
	thread_unlock(thread);
}
/*
 *	Set a timer for the current thread, if the thread
 *	is ready to wait.  Must be called between assert_wait()
 *	and thread_block().
 */
void
thread_set_timer(
	uint32_t	interval,
	uint32_t	scale_factor)
{
	thread_t	thread = current_thread();
	uint64_t	deadline;

	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		clock_interval_to_deadline(interval, scale_factor, &deadline);
		if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
}
void
thread_set_timer_deadline(
	uint64_t	deadline)
{
	thread_t	thread = current_thread();

	thread_lock(thread);
	if ((thread->state & TH_WAIT) != 0) {
		if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0))
			thread->wait_timer_active++;
		thread->wait_timer_is_set = TRUE;
	}
	thread_unlock(thread);
}
void
thread_cancel_timer(void)
{
	thread_t	thread = current_thread();

	thread_lock(thread);
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}
	thread_unlock(thread);
}

#endif /* __LP64__ */
/*
 *	Unblock thread on wake up.
 *
 *	Returns TRUE if the thread is still running.
 *
 *	Thread must be locked.
 */
boolean_t
thread_unblock(
	thread_t		thread,
	wait_result_t	wresult)
{
	boolean_t		result = FALSE;

	/*
	 *	Set wait_result.
	 */
	thread->wait_result = wresult;

	/*
	 *	Cancel pending wait timer.
	 */
	if (thread->wait_timer_is_set) {
		if (timer_call_cancel(&thread->wait_timer))
			thread->wait_timer_active--;
		thread->wait_timer_is_set = FALSE;
	}

	/*
	 *	Update scheduling state: not waiting,
	 *	set running.
	 */
	thread->state &= ~(TH_WAIT|TH_UNINT);

	if (!(thread->state & TH_RUN)) {
		thread->state |= TH_RUN;

		(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

		if (thread->sched_mode == TH_MODE_TIMESHARE)
			sched_share_incr();
	}
	else {
		/*
		 *	Signal if idling on another processor.
		 */
#if CONFIG_SCHED_IDLE_IN_PLACE
		if (thread->state & TH_IDLE) {
			processor_t		processor = thread->last_processor;

			if (processor != current_processor())
				machine_signal_idle(processor);
		}
#else
		assert((thread->state & TH_IDLE) == 0);
#endif

		result = TRUE;
	}

	/*
	 * Calculate deadline for real-time threads.
	 */
	if (thread->sched_mode == TH_MODE_REALTIME) {
		thread->realtime.deadline = mach_absolute_time();
		thread->realtime.deadline += thread->realtime.constraint;
	}

	/*
	 * Clear old quantum, fail-safe computation, etc.
	 */
	thread->current_quantum = 0;
	thread->computation_metered = 0;
	thread->reason = AST_NONE;

	/* Event should only be triggered if thread is not already running */
	if (result == FALSE) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
			(uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result, 0, 0);
	}

	DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);

	return (result);
}
/*
 *	Unblock and dispatch thread.
 *
 *	Conditions:
 *		thread lock held, IPC locks may be held.
 *		thread must have been pulled from wait queue under same lock hold.
 *	Returns:
 *		KERN_SUCCESS - Thread was set running
 *		KERN_NOT_WAITING - Thread was not waiting
 */
kern_return_t
thread_go(
	thread_t		thread,
	wait_result_t	wresult)
{
	assert(thread->at_safe_point == FALSE);
	assert(thread->wait_event == NO_EVENT64);
	assert(thread->wait_queue == WAIT_QUEUE_NULL);

	if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
		if (!thread_unblock(thread, wresult))
			thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

		return (KERN_SUCCESS);
	}

	return (KERN_NOT_WAITING);
}
/*
 *	Routine:	thread_mark_wait_locked
 *	Purpose:
 *		Mark a thread as waiting.  If, given the circumstances,
 *		it doesn't want to wait (i.e. already aborted), then
 *		indicate that in the return value.
 *	Conditions:
 *		at splsched() and thread is locked.
 */
__private_extern__
wait_result_t
thread_mark_wait_locked(
	thread_t			thread,
	wait_interrupt_t	interruptible)
{
	boolean_t		at_safe_point;

	assert(thread == current_thread());

	/*
	 *	The thread may have certain types of interrupts/aborts masked
	 *	off.  Even if the wait location says these types of interrupts
	 *	are OK, we have to honor mask settings (outer-scoped code may
	 *	not be able to handle aborts at the moment).
	 */
	if (interruptible > (thread->options & TH_OPT_INTMASK))
		interruptible = thread->options & TH_OPT_INTMASK;

	at_safe_point = (interruptible == THREAD_ABORTSAFE);

	if (	interruptible == THREAD_UNINT			||
			!(thread->sched_flags & TH_SFLAG_ABORT)	||
			(!at_safe_point &&
			 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {

		if ( !(thread->state & TH_TERMINATE))
			DTRACE_SCHED(sleep);

		thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
		thread->at_safe_point = at_safe_point;
		return (thread->wait_result = THREAD_WAITING);
	}
	else
	if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
		thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;

	return (thread->wait_result = THREAD_INTERRUPTED);
}
/*
 *	Routine:	thread_interrupt_level
 *	Purpose:
 *		Set the maximum interruptible state for the
 *		current thread.  The effective value of any
 *		interruptible flag passed into assert_wait
 *		will never exceed this.
 *
 *		Useful for code that must not be interrupted,
 *		but which calls code that doesn't know that.
 *	Returns:
 *		The old interrupt level for the thread.
 */
__private_extern__
wait_interrupt_t
thread_interrupt_level(
	wait_interrupt_t new_level)
{
	thread_t thread = current_thread();
	wait_interrupt_t result = thread->options & TH_OPT_INTMASK;

	thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);

	return result;
}
/*
 * Check to see if an assert wait is possible, without actually doing one.
 * This is used by debug code in locks and elsewhere to verify that it is
 * always OK to block when trying to take a blocking lock (since waiting
 * for the actual assert_wait to catch the case may make it hard to detect
 * this case).
 */
boolean_t
assert_wait_possible(void)
{
	thread_t thread;

#if	DEBUG
	if(debug_mode) return TRUE;		/* Always succeed in debug mode */
#endif

	thread = current_thread();

	return (thread == NULL || wait_queue_assert_possible(thread));
}
/*
 *	assert_wait:
 *
 *	Assert that the current thread is about to go to
 *	sleep until the specified event occurs.
 */
wait_result_t
assert_wait(
	event_t				event,
	wait_interrupt_t	interruptible)
{
	register wait_queue_t	wq;
	register int			index;

	assert(event != NO_EVENT);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
		VM_KERNEL_UNSLIDE(event), 0, 0, 0, 0);

	index = wait_hash(event);
	wq = &wait_queues[index];
	return wait_queue_assert_wait(wq, event, interruptible, 0);
}
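/*
 * Typical usage sketch (illustrative, not part of this file): a waiter
 * asserts the wait while still holding the lock that protects the
 * condition, drops that lock, and then blocks:
 *
 *	assert_wait((event_t)&object->flag, THREAD_UNINT);
 *	object_unlock(object);			// hypothetical caller-side lock
 *	thread_block(THREAD_CONTINUE_NULL);
 *
 * The waker calls thread_wakeup((event_t)&object->flag) after changing
 * the condition. Events are hashed into a fixed table of wait queues,
 * so unrelated events may share a queue head.
 */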
wait_result_t
assert_wait_timeout(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint32_t			interval,
	uint32_t			scale_factor)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;
	uint64_t			deadline;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	wait_queue_lock(wqueue);
	thread_lock(thread);

	clock_interval_to_deadline(interval, scale_factor, &deadline);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
		VM_KERNEL_UNSLIDE(event), interruptible, deadline, 0, 0);

	wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t, event),
											  interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);

	return (wresult);
}
wait_result_t
assert_wait_deadline(
	event_t				event,
	wait_interrupt_t	interruptible,
	uint64_t			deadline)
{
	thread_t			thread = current_thread();
	wait_result_t		wresult;
	wait_queue_t		wqueue;

	assert(event != NO_EVENT);
	wqueue = &wait_queues[wait_hash(event)];

	wait_queue_lock(wqueue);
	thread_lock(thread);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
		VM_KERNEL_UNSLIDE(event), interruptible, deadline, 0, 0);

	wresult = wait_queue_assert_wait64_locked(wqueue, CAST_DOWN(event64_t,event),
											  interruptible, deadline, thread);

	thread_unlock(thread);
	wait_queue_unlock(wqueue);

	return (wresult);
}
/*
 *	thread_sleep_fast_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified simple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components that use a
 *	faster version of simple_lock() than is provided by usimple_lock().
 */
__private_extern__ wait_result_t
thread_sleep_fast_usimple_lock(
	event_t				event,
	simple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		simple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		simple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_usimple_lock:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified usimple_lock is unlocked before releasing
 *	the cpu and re-acquired as part of waking up.
 *
 *	This is the simple lock sleep interface for components where
 *	simple_lock() is defined in terms of usimple_lock().
 */
wait_result_t
thread_sleep_usimple_lock(
	event_t				event,
	usimple_lock_t		lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		usimple_unlock(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		usimple_lock(lock);
	}
	return res;
}
/*
 *	thread_sleep_lock_write:
 *
 *	Cause the current thread to wait until the specified event
 *	occurs.  The specified (write) lock is unlocked before releasing
 *	the cpu. The (write) lock will be re-acquired before returning.
 */
wait_result_t
thread_sleep_lock_write(
	event_t				event,
	lock_t				*lock,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lock_write_done(lock);
		res = thread_block(THREAD_CONTINUE_NULL);
		lock_write(lock);
	}
	return res;
}
/*
 *	Force a preemption point for a thread and wait
 *	for it to stop running.  Arbitrates access among
 *	multiple stop requests. (released by unstop)
 *
 *	The thread must enter a wait state and stop via a
 *	separate means.
 *
 *	Returns FALSE if interrupted.
 */
boolean_t
thread_stop(
	thread_t		thread)
{
	wait_result_t	wresult;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	while (thread->state & TH_SUSP) {
		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED)
			return (FALSE);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread->state |= TH_SUSP;

	while (thread->state & TH_RUN) {
		processor_t		processor = thread->last_processor;

		if (processor != PROCESSOR_NULL && processor->active_thread == thread)
			cause_ast_check(processor);

		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			wresult = thread_block(THREAD_CONTINUE_NULL);

		if (wresult != THREAD_AWAKENED) {
			thread_unstop(thread);
			return (FALSE);
		}

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);

	return (TRUE);
}
/*
 *	Release a previous stop request and set
 *	the thread running if appropriate.
 *
 *	Use only after a successful stop operation.
 */
void
thread_unstop(
	thread_t	thread)
{
	spl_t		s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
		thread->state &= ~TH_SUSP;
		thread_unblock(thread, THREAD_AWAKENED);

		thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}
	else
	if (thread->state & TH_SUSP) {
		thread->state &= ~TH_SUSP;

		if (thread->wake_active) {
			thread->wake_active = FALSE;
			thread_unlock(thread);

			thread_wakeup(&thread->wake_active);
			wake_unlock(thread);
			splx(s);

			return;
		}
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	Thread locked, returns the same way
 */
static inline boolean_t
thread_isoncpu(thread_t thread)
{
	processor_t processor = thread->last_processor;

	return ((processor != PROCESSOR_NULL) && (processor->active_thread == thread));
}
/*
 *	Wait for a thread to stop running. (non-interruptible)
 */
void
thread_wait(
	thread_t	thread,
	boolean_t	until_not_runnable)
{
	wait_result_t	wresult;
	boolean_t		oncpu;
	processor_t		processor;
	spl_t			s = splsched();

	wake_lock(thread);
	thread_lock(thread);

	/*
	 * Wait until not running on a CPU.  If stronger requirement
	 * desired, wait until not runnable.  Assumption: if thread is
	 * on CPU, then TH_RUN is set, so we're not waiting in any case
	 * where the original, pure "TH_RUN" check would have let us
	 * finish.
	 */
	while ((oncpu = thread_isoncpu(thread)) ||
			(until_not_runnable && (thread->state & TH_RUN))) {

		if (oncpu) {
			assert(thread->state & TH_RUN);
			processor = thread->last_processor;
			cause_ast_check(processor);
		}

		thread->wake_active = TRUE;
		thread_unlock(thread);

		wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
		wake_unlock(thread);
		splx(s);

		if (wresult == THREAD_WAITING)
			thread_block(THREAD_CONTINUE_NULL);

		s = splsched();
		wake_lock(thread);
		thread_lock(thread);
	}

	thread_unlock(thread);
	wake_unlock(thread);
	splx(s);
}
/*
 *	Routine: clear_wait_internal
 *
 *		Clear the wait condition for the specified thread.
 *		Start the thread executing if that is appropriate.
 *	Arguments:
 *		thread		thread to awaken
 *		result		Wakeup result the thread should see
 *	Conditions:
 *		the thread is locked.
 *	Returns:
 *		KERN_SUCCESS		thread was rousted out a wait
 *		KERN_FAILURE		thread was waiting but could not be rousted
 *		KERN_NOT_WAITING	thread was not waiting
 */
__private_extern__ kern_return_t
clear_wait_internal(
	thread_t		thread,
	wait_result_t	wresult)
{
	wait_queue_t	wq = thread->wait_queue;
	uint32_t		i = LockTimeOut;

	do {
		if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
			return (KERN_FAILURE);

		if (wq != WAIT_QUEUE_NULL) {
			if (wait_queue_lock_try(wq)) {
				wait_queue_pull_thread_locked(wq, thread, TRUE);
				/* wait queue unlocked, thread still locked */
			}
			else {
				thread_unlock(thread);

				thread_lock(thread);
				if (wq != thread->wait_queue)
					return (KERN_NOT_WAITING);

				continue;
			}
		}

		return (thread_go(thread, wresult));
	} while ((--i > 0) || machine_timeout_suspended());

	panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
		  thread, wq, cpu_number());

	return (KERN_FAILURE);
}
/*
 *	Clear the wait condition for the specified thread.  Start the thread
 *	executing if that is appropriate.
 *
 *	parameters:
 *	  thread		thread to awaken
 *	  result		Wakeup result the thread should see
 */
kern_return_t
clear_wait(
	thread_t		thread,
	wait_result_t	result)
{
	kern_return_t	ret;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	ret = clear_wait_internal(thread, result);
	thread_unlock(thread);
	splx(s);
	return ret;
}
/*
 *	thread_wakeup_prim:
 *
 *	Common routine for thread_wakeup, thread_wakeup_with_result,
 *	and thread_wakeup_one.
 */
kern_return_t
thread_wakeup_prim(
	event_t			event,
	boolean_t		one_thread,
	wait_result_t	result)
{
	return (thread_wakeup_prim_internal(event, one_thread, result, -1));
}

kern_return_t
thread_wakeup_prim_internal(
	event_t			event,
	boolean_t		one_thread,
	wait_result_t	result,
	int				priority)
{
	register wait_queue_t	wq;
	register int			index;

	index = wait_hash(event);
	wq = &wait_queues[index];
	if (one_thread)
		return (wait_queue_wakeup_one(wq, event, result, priority));
	else
		return (wait_queue_wakeup_all(wq, event, result));
}
/*
 *	Force the current thread to execute on the specified processor.
 *
 *	Returns the previous binding.  PROCESSOR_NULL means
 *	not bound.
 *
 *	XXX - DO NOT export this to users - XXX
 */
processor_t
thread_bind(
	processor_t		processor)
{
	thread_t		self = current_thread();
	processor_t		prev;

	thread_lock(self);

	prev = self->bound_processor;
	self->bound_processor = processor;

	thread_unlock(self);

	return (prev);
}
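/*
 * Usage sketch (illustrative): callers typically save the previous
 * binding and restore it when done, e.g.
 *
 *	processor_t prev = thread_bind(processor);
 *	... work that must run on "processor" ...
 *	thread_bind(prev);
 *
 * The new binding takes effect the next time the thread is dispatched.
 */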
/*
 *	thread_select:
 *
 *	Select a new thread for the current processor to execute.
 *
 *	May select the current thread, which must be locked.
 */
static thread_t
thread_select(
	thread_t			thread,
	processor_t			processor)
{
	processor_set_t		pset = processor->processor_set;
	thread_t			new_thread = THREAD_NULL;
	boolean_t			inactive_state;

	assert(processor == current_processor());

	do {
		/*
		 *	Update the priority.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;

		pset_lock(pset);

		assert(pset->low_count);
		assert(pset->low_pri);

		inactive_state = processor->state != PROCESSOR_SHUTDOWN && machine_processor_is_inactive(processor);

		simple_lock(&rt_lock);

		/*
		 *	Test to see if the current thread should continue
		 *	to run on this processor.  Must be runnable, and not
		 *	bound to a different processor, nor be in the wrong
		 *	processor set.
		 */
		if (	((thread->state & ~TH_SUSP) == TH_RUN)			&&
				(thread->sched_pri >= BASEPRI_RTQUEUES		||
				 processor->processor_meta == PROCESSOR_META_NULL ||
				 processor->processor_meta->primary == processor)	&&
				(thread->bound_processor == PROCESSOR_NULL	||
				 thread->bound_processor == processor)		&&
				(thread->affinity_set == AFFINITY_SET_NULL	||
				 thread->affinity_set->aset_pset == pset)	) {
			if (	thread->sched_pri >= BASEPRI_RTQUEUES	&&
						first_timeslice(processor)			) {
				if (rt_runq.count > 0) {
					register queue_t	q = &rt_runq.queue;

					if (((thread_t)q->next)->realtime.deadline <
											processor->deadline) {
						thread = (thread_t)dequeue_head(q);
						thread->runq = PROCESSOR_NULL;
						SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
					}
				}

				simple_unlock(&rt_lock);

				processor->deadline = thread->realtime.deadline;

				pset_unlock(pset);

				return (thread);
			}

			if (!inactive_state && (thread->sched_mode != TH_MODE_FAIRSHARE || SCHED(fairshare_runq_count)() == 0) && (rt_runq.count == 0 || BASEPRI_RTQUEUES < thread->sched_pri) &&
					(new_thread = SCHED(choose_thread)(processor, thread->sched_mode == TH_MODE_FAIRSHARE ? MINPRI : thread->sched_pri)) == THREAD_NULL) {

				simple_unlock(&rt_lock);

				/* I am the highest priority runnable (non-idle) thread */

				pset_pri_hint(pset, processor, processor->current_pri);

				pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));

				processor->deadline = UINT64_MAX;

				pset_unlock(pset);

				return (thread);
			}
		}

		if (new_thread != THREAD_NULL ||
				(SCHED(processor_queue_has_priority)(processor, rt_runq.count == 0 ? IDLEPRI : BASEPRI_RTQUEUES, TRUE) &&
					 (new_thread = SCHED(choose_thread)(processor, MINPRI)) != THREAD_NULL)) {
			simple_unlock(&rt_lock);

			if (!inactive_state) {
				pset_pri_hint(pset, processor, new_thread->sched_pri);

				pset_count_hint(pset, processor, SCHED(processor_runq_count)(processor));
			}

			processor->deadline = UINT64_MAX;
			pset_unlock(pset);

			return (new_thread);
		}

		if (rt_runq.count > 0) {
			thread = (thread_t)dequeue_head(&rt_runq.queue);

			thread->runq = PROCESSOR_NULL;
			SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);

			simple_unlock(&rt_lock);

			processor->deadline = thread->realtime.deadline;
			pset_unlock(pset);

			return (thread);
		}

		simple_unlock(&rt_lock);

		/* No realtime threads and no normal threads on the per-processor
		 * runqueue. Finally check for global fairshare threads.
		 */
		if ((new_thread = SCHED(fairshare_dequeue)()) != THREAD_NULL) {

			processor->deadline = UINT64_MAX;
			pset_unlock(pset);

			return (new_thread);
		}

		processor->deadline = UINT64_MAX;

		/*
		 *	Set processor inactive based on
		 *	indication from the platform code.
		 */
		if (inactive_state) {
			if (processor->state == PROCESSOR_RUNNING)
				remqueue((queue_entry_t)processor);
			else
			if (processor->state == PROCESSOR_IDLE)
				remqueue((queue_entry_t)processor);

			processor->state = PROCESSOR_INACTIVE;

			pset_unlock(pset);

			return (processor->idle_thread);
		}

		/*
		 *	No runnable threads, attempt to steal
		 *	from other processors.
		 */
		new_thread = SCHED(steal_thread)(pset);
		if (new_thread != THREAD_NULL) {
			return (new_thread);
		}

		/*
		 *	If other threads have appeared, shortcut
		 *	around again.
		 */
		if (!SCHED(processor_queue_empty)(processor) || rt_runq.count > 0 || SCHED(fairshare_runq_count)() > 0)
			continue;

		pset_lock(pset);

		/*
		 *	Nothing is runnable, so set this processor idle if it
		 *	was running.
		 */
		if (processor->state == PROCESSOR_RUNNING) {
			remqueue((queue_entry_t)processor);
			processor->state = PROCESSOR_IDLE;

			if (processor->processor_meta == PROCESSOR_META_NULL || processor->processor_meta->primary == processor) {
				enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
				pset_pri_init_hint(pset, processor);
				pset_count_init_hint(pset, processor);
			}
			else {
				enqueue_head(&processor->processor_meta->idle_queue, (queue_entry_t)processor);
				pset_unlock(pset);
				return (processor->idle_thread);
			}
		}

		pset_unlock(pset);

#if CONFIG_SCHED_IDLE_IN_PLACE
		/*
		 *	Choose idle thread if fast idle is not possible.
		 */
		if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
			return (processor->idle_thread);

		/*
		 *	Perform idling activities directly without a
		 *	context switch.  Return dispatched thread,
		 *	else check again for a runnable thread.
		 */
		new_thread = thread_select_idle(thread, processor);

#else /* !CONFIG_SCHED_IDLE_IN_PLACE */

		/*
		 * Do a full context switch to idle so that the current
		 * thread can start running on another processor without
		 * waiting for the fast-idled processor to wake up.
		 */
		return (processor->idle_thread);

#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */

	} while (new_thread == THREAD_NULL);

	return (new_thread);
}
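/*
 * Summary (illustrative): thread_select() effectively searches in this
 * order -- keep running the current thread if it is still the best
 * candidate, then the real-time queue, then the processor (or pset) run
 * queue, then the global fairshare queue, then work stolen from other
 * processors; if nothing is runnable, the processor is marked idle and
 * its idle thread is returned.
 */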
#if CONFIG_SCHED_IDLE_IN_PLACE
/*
 *	thread_select_idle:
 *
 *	Idle the processor using the current thread context.
 *
 *	Called with thread locked, then dropped and relocked.
 */
static thread_t
thread_select_idle(
	thread_t		thread,
	processor_t		processor)
{
	thread_t		new_thread;

	if (thread->sched_mode == TH_MODE_TIMESHARE)
		sched_share_decr();

	thread->state |= TH_IDLE;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	thread_unlock(thread);

	/*
	 *	Switch execution timing to processor idle thread.
	 */
	processor->last_dispatch = mach_absolute_time();
	thread->last_run_time = processor->last_dispatch;
	thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;

	/*
	 *	Cancel the quantum timer while idling.
	 */
	timer_call_cancel(&processor->quantum_timer);
	processor->timeslice = 0;

	(*thread->sched_call)(SCHED_CALL_BLOCK, thread);

	thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0);

	/*
	 *	Enable interrupts and perform idling activities.  No
	 *	preemption due to TH_IDLE being set.
	 */
	spllo(); new_thread = processor_idle(thread, processor);

	/*
	 *	Return at splsched.
	 */
	(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);

	thread_lock(thread);

	/*
	 * If we idled in place, simulate a context switch back
	 * to the original priority of the thread so that the
	 * platform layer cannot distinguish this from a true
	 * switch to the idle thread.
	 */
	if (thread->sched_mode == TH_MODE_REALTIME)
		thread_tell_urgency(THREAD_URGENCY_REAL_TIME, thread->realtime.period, thread->realtime.deadline);
	/* Identify non-promoted threads which have requested a
	 * "background" priority.
	 */
	else if ((thread->sched_pri <= MAXPRI_THROTTLE) &&
		(thread->priority <= MAXPRI_THROTTLE))
		thread_tell_urgency(THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority);
	else
		thread_tell_urgency(THREAD_URGENCY_NORMAL, thread->sched_pri, thread->priority);

	/*
	 *	If awakened, switch to thread timer and start a new quantum.
	 *	Otherwise skip; we will context switch to another thread or return here.
	 */
	if (!(thread->state & TH_WAIT)) {
		processor->last_dispatch = mach_absolute_time();
		thread_timer_event(processor->last_dispatch, &thread->system_timer);
		PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

		thread_quantum_init(thread);
		thread->last_quantum_refill_time = processor->last_dispatch;

		processor->quantum_end = processor->last_dispatch + thread->current_quantum;
		timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_CRITICAL);
		processor->timeslice = 1;

		thread->computation_epoch = processor->last_dispatch;
	}

	thread->state &= ~TH_IDLE;

	if (thread->sched_mode == TH_MODE_TIMESHARE)
		sched_share_incr();

	return (new_thread);
}
#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
#if defined(CONFIG_SCHED_TRADITIONAL)
static thread_t
sched_traditional_choose_thread(
	processor_t		processor,
	int				priority)
{
	thread_t thread;

	thread = choose_thread(processor, runq_for_processor(processor), priority);
	if (thread != THREAD_NULL) {
		runq_consider_decr_bound_count(processor, thread);
	}

	return thread;
}
#endif /* defined(CONFIG_SCHED_TRADITIONAL) */
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_FIXEDPRIORITY)

/*
 *	choose_thread:
 *
 *	Locate a thread to execute from the processor run queue
 *	and return it.  Only choose a thread with greater or equal
 *	priority.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
thread_t
choose_thread(
	processor_t		processor,
	run_queue_t		rq,
	int				priority)
{
	queue_t			queue = rq->queues + rq->highq;
	int				pri = rq->highq, count = rq->count;
	thread_t		thread;

	while (count > 0 && pri >= priority) {
		thread = (thread_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			if (thread->bound_processor == PROCESSOR_NULL ||
							thread->bound_processor == processor) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					clrbit(MAXPRI - pri, rq->bitmap);
					rq->highq = MAXPRI - ffsbit(rq->bitmap);
				}

				return (thread);
			}
			count--;

			thread = (thread_t)queue_next((queue_entry_t)thread);
		}

		queue--; pri--;
	}

	return (THREAD_NULL);
}

#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_FIXEDPRIORITY) */
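/*
 * Illustrative note: the run queue keeps one list per priority plus a
 * bitmap of non-empty priorities; clrbit()/ffsbit() above keep rq->highq
 * pointing at the highest non-empty level, so the common-case selection
 * is effectively constant time.
 */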
/*
 *	Perform a context switch and start executing the new thread.
 *
 *	Returns FALSE on failure, and the thread is re-dispatched.
 *
 *	Called at splsched.
 */

#define funnel_release_check(thread, debug)				\
MACRO_BEGIN											\
	if ((thread)->funnel_state & TH_FN_OWNED) {			\
		(thread)->funnel_state = TH_FN_REFUNNEL;		\
		KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE,			\
			(thread)->funnel_lock, (debug), 0, 0, 0);	\
		funnel_unlock((thread)->funnel_lock);			\
	}												\
MACRO_END

#define funnel_refunnel_check(thread, debug)				\
MACRO_BEGIN											\
	if ((thread)->funnel_state & TH_FN_REFUNNEL) {		\
		kern_return_t	result = (thread)->wait_result;	\
													\
		(thread)->funnel_state = 0;						\
		KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE,			\
			(thread)->funnel_lock, (debug), 0, 0, 0);	\
		funnel_lock((thread)->funnel_lock);				\
		KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE,			\
			(thread)->funnel_lock, (debug), 0, 0, 0);	\
		(thread)->funnel_state = TH_FN_OWNED;			\
		(thread)->wait_result = result;					\
	}												\
MACRO_END
static boolean_t
thread_invoke(
	register thread_t	self,
	register thread_t	thread,
	ast_t				reason)
{
	thread_continue_t	continuation = self->continuation;
	void				*parameter = self->parameter;
	processor_t			processor;

	if (get_preemption_level() != 0) {
		int pl = get_preemption_level();
		panic("thread_invoke: preemption_level %d, possible cause: %s",
				pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
					"blocking while holding a spinlock, or within interrupt context"));
	}

	assert(self == current_thread());

	/*
	 * Mark thread interruptible.
	 */
	thread_lock(thread);
	thread->state &= ~TH_UNINT;

	assert(thread_runnable(thread));

	/* Reload precise timing global policy to thread-local policy */
	thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);

	/*
	 * Allow time constraint threads to hang onto
	 * a stack.
	 */
	if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
		self->reserved_stack = self->kernel_stack;

	if (continuation != NULL) {
		if (!thread->kernel_stack) {
			/*
			 * If we are using a privileged stack,
			 * check to see whether we can exchange it with
			 * that of the other thread.
			 */
			if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
				goto need_stack;

			/*
			 * Context switch by performing a stack handoff.
			 */
			continuation = thread->continuation;
			parameter = thread->parameter;

			processor = current_processor();
			processor->active_thread = thread;
			processor->current_pri = thread->sched_pri;
			processor->current_thmode = thread->sched_mode;
			if (thread->last_processor != processor && thread->last_processor != NULL) {
				if (thread->last_processor->processor_set != processor->processor_set)
					thread->ps_switch++;
			}
			thread->last_processor = processor;
			ast_context(thread);
			thread_unlock(thread);

			self->reason = reason;

			processor->last_dispatch = mach_absolute_time();
			self->last_run_time = processor->last_dispatch;
			thread_timer_event(processor->last_dispatch, &thread->system_timer);
			PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

			/*
			 * Since non-precise user/kernel time doesn't update the state timer
			 * during privilege transitions, synthesize an event now.
			 */
			if (!thread->precise_user_kernel_time) {
				timer_switch(PROCESSOR_DATA(processor, current_state),
							processor->last_dispatch,
							PROCESSOR_DATA(processor, current_state));
			}

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
				self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
				KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
					(uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
			}

			DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

			SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

			TLOG(1, "thread_invoke: calling stack_handoff\n");
			stack_handoff(self, thread);

			DTRACE_SCHED(on__cpu);

			thread_dispatch(self, thread);

			thread->continuation = thread->parameter = NULL;

			counter(c_thread_invoke_hits++);

			funnel_refunnel_check(thread, 2);

			assert(continuation);
			call_continuation(continuation, parameter, thread->wait_result);
			/*NOTREACHED*/
		}
		else if (thread == self) {
			/* same thread but with continuation */
			counter(++c_thread_invoke_same);
			thread_unlock(self);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
				self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			self->continuation = self->parameter = NULL;

			funnel_refunnel_check(self, 3);

			call_continuation(continuation, parameter, self->wait_result);
			/*NOTREACHED*/
		}
	}
	else {
		/*
		 * Check that the other thread has a stack
		 */
		if (!thread->kernel_stack) {
need_stack:
			if (!stack_alloc_try(thread)) {
				counter(c_thread_invoke_misses++);
				thread_unlock(thread);
				thread_stack_enqueue(thread);
				return (FALSE);
			}
		}
		else if (thread == self) {
			counter(++c_thread_invoke_same);
			thread_unlock(self);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
				self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

			return (TRUE);
		}
	}

	/*
	 * Context switch by full context save.
	 */
	processor = current_processor();
	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;
	if (thread->last_processor != processor && thread->last_processor != NULL) {
		if (thread->last_processor->processor_set != processor->processor_set)
			thread->ps_switch++;
	}
	thread->last_processor = processor;
	ast_context(thread);
	thread_unlock(thread);

	counter(c_thread_invoke_csw++);

	assert(self->runq == PROCESSOR_NULL);
	self->reason = reason;

	processor->last_dispatch = mach_absolute_time();
	self->last_run_time = processor->last_dispatch;
	thread_timer_event(processor->last_dispatch, &thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	/*
	 * Since non-precise user/kernel time doesn't update the state timer
	 * during privilege transitions, synthesize an event now.
	 */
	if (!thread->precise_user_kernel_time) {
		timer_switch(PROCESSOR_DATA(processor, current_state),
					processor->last_dispatch,
					PROCESSOR_DATA(processor, current_state));
	}

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
		self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);

	if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
			(uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
	}

	DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);

	SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);

	/*
	 * This is where we actually switch register context,
	 * and address space if required.  We will next run
	 * as a result of a subsequent context switch.
	 */
	assert(continuation == self->continuation);
	thread = machine_switch_context(self, continuation, thread);
	assert(self == current_thread());
	TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);

	DTRACE_SCHED(on__cpu);

	/*
	 * We have been resumed and are set to run.
	 */
	thread_dispatch(thread, self);

	if (continuation) {
		self->continuation = self->parameter = NULL;

		funnel_refunnel_check(self, 3);

		call_continuation(continuation, parameter, self->wait_result);
		/*NOTREACHED*/
	}

	return (TRUE);
}
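/*
 * Summary (illustrative): thread_invoke() switches by one of three paths --
 * a stack handoff when the outgoing thread left a continuation and the
 * incoming thread has no stack, a direct continuation call when "switching"
 * to the same thread, or a full machine_switch_context() register and
 * address-space switch otherwise.
 */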
/*
 *	thread_dispatch:
 *
 *	Handle threads at context switch.  Re-dispatch other thread
 *	if still running, otherwise update run state and perform
 *	special actions.  Update quantum for other thread and begin
 *	the quantum for ourselves.
 *
 *	Called at splsched.
 */
void
thread_dispatch(
    thread_t        thread,
    thread_t        self)
{
    processor_t     processor = self->last_processor;

    if (thread != THREAD_NULL) {
        /*
         * If blocked at a continuation, discard
         * the stack.
         */
        if (thread->continuation != NULL && thread->kernel_stack != 0)
            stack_free(thread);

        if (!(thread->state & TH_IDLE)) {
            int64_t consumed;
            int64_t remainder = 0;

            if (processor->quantum_end > processor->last_dispatch)
                remainder = processor->quantum_end -
                    processor->last_dispatch;

            consumed = thread->current_quantum - remainder;

            if ((thread->reason & AST_LEDGER) == 0) {
                /*
                 * Bill CPU time to both the individual thread
                 * and the task.
                 */
                ledger_credit(thread->t_ledger,
                    task_ledgers.cpu_time, consumed);
                ledger_credit(thread->t_threadledger,
                    thread_ledgers.cpu_time, consumed);
            }

            wake_lock(thread);
            thread_lock(thread);

            /*
             * Compute remainder of current quantum.
             */
            if (first_timeslice(processor) &&
                processor->quantum_end > processor->last_dispatch)
                thread->current_quantum = (uint32_t)remainder;
            else
                thread->current_quantum = 0;

            if (thread->sched_mode == TH_MODE_REALTIME) {
                /*
                 * Cancel the deadline if the thread has
                 * consumed the entire quantum.
                 */
                if (thread->current_quantum == 0) {
                    thread->realtime.deadline = UINT64_MAX;
                    thread->reason |= AST_QUANTUM;
                }
            }
            else {
#if defined(CONFIG_SCHED_TRADITIONAL)
                /*
                 * For non-realtime threads treat a tiny
                 * remaining quantum as an expired quantum
                 * but include what's left next time.
                 */
                if (thread->current_quantum < min_std_quantum) {
                    thread->reason |= AST_QUANTUM;
                    thread->current_quantum += SCHED(initial_quantum_size)(thread);
                }
#endif
            }

            /*
             * If we are doing a direct handoff then
             * take the remainder of the quantum.
             */
            if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
                self->current_quantum = thread->current_quantum;
                thread->reason |= AST_QUANTUM;
                thread->current_quantum = 0;
            }

            thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);

            if (!(thread->state & TH_WAIT)) {
                /*
                 * Still runnable.
                 */
                if (thread->reason & AST_QUANTUM)
                    thread_setrun(thread, SCHED_TAILQ);
                else
                if (thread->reason & AST_PREEMPT)
                    thread_setrun(thread, SCHED_HEADQ);
                else
                    thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);

                thread->reason = AST_NONE;

                if (thread->wake_active) {
                    thread->wake_active = FALSE;
                    thread_unlock(thread);

                    thread_wakeup(&thread->wake_active);
                }
                else
                    thread_unlock(thread);

                wake_unlock(thread);
            }
            else {
                /*
                 * Waiting.
                 */
                boolean_t should_terminate = FALSE;

                /* Only the first call to thread_dispatch
                 * after explicit termination should add
                 * the thread to the termination queue
                 */
                if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
                    should_terminate = TRUE;
                    thread->state |= TH_TERMINATE2;
                }

                thread->state &= ~TH_RUN;

                if (thread->sched_mode == TH_MODE_TIMESHARE)
                    sched_share_decr();
                sched_run_decr();

                (*thread->sched_call)(SCHED_CALL_BLOCK, thread);

                if (thread->wake_active) {
                    thread->wake_active = FALSE;
                    thread_unlock(thread);

                    thread_wakeup(&thread->wake_active);
                }
                else
                    thread_unlock(thread);

                wake_unlock(thread);

                if (should_terminate)
                    thread_terminate_enqueue(thread);
            }
        }
    }
    if (!(self->state & TH_IDLE)) {

        if (self->sched_mode == TH_MODE_REALTIME)
            thread_tell_urgency(THREAD_URGENCY_REAL_TIME, self->realtime.period, self->realtime.deadline);
        /* Identify non-promoted threads which have requested a
         * "background" priority.
         */
        else if ((self->sched_pri <= MAXPRI_THROTTLE) &&
            (self->priority <= MAXPRI_THROTTLE))
            thread_tell_urgency(THREAD_URGENCY_BACKGROUND, self->sched_pri, self->priority);
        else
            thread_tell_urgency(THREAD_URGENCY_NORMAL, self->sched_pri, self->priority);

        /*
         * Get a new quantum if none remaining.
         */
        if (self->current_quantum == 0) {
            thread_quantum_init(self);
            self->last_quantum_refill_time = processor->last_dispatch;
        }

        /*
         * Set up quantum timer and timeslice.
         */
        processor->quantum_end = (processor->last_dispatch + self->current_quantum);
        timer_call_enter1(&processor->quantum_timer, self, processor->quantum_end, TIMER_CALL_CRITICAL);

        processor->timeslice = 1;

        self->computation_epoch = processor->last_dispatch;
    }
    else {
        timer_call_cancel(&processor->quantum_timer);
        processor->timeslice = 0;

        thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0);
    }
}
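/*
 * Illustrative arithmetic (not part of the original source): the quantum
 * accounting above can be read as a small worked example.  Suppose the
 * outgoing thread was granted current_quantum = 10ms and the processor's
 * quantum_end lies 3ms past last_dispatch when it switches out:
 *
 *	remainder = quantum_end - last_dispatch;    3ms of the slice is left
 *	consumed  = current_quantum - remainder;    10ms - 3ms = 7ms is billed
 *
 * The 7ms is credited to the thread and task CPU-time ledgers, while the
 * 3ms remainder (when this was still the first timeslice) is carried back
 * into thread->current_quantum for its next dispatch.
 */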
#include <libkern/OSDebug.h>

uint32_t    kdebug_thread_block = 0;

/*
 *	thread_block_reason:
 *
 *	Forces a reschedule, blocking the caller if a wait
 *	has been asserted.
 *
 *	If a continuation is specified, then thread_invoke will
 *	attempt to discard the thread's kernel stack.  When the
 *	thread resumes, it will execute the continuation function
 *	on a new kernel stack.
 */
counter(mach_counter_t  c_thread_block_calls = 0;)

wait_result_t
thread_block_reason(
    thread_continue_t   continuation,
    void                *parameter,
    ast_t               reason)
{
    register thread_t       self = current_thread();
    register processor_t    processor;
    register thread_t       new_thread;
    spl_t                   s;

    counter(++c_thread_block_calls);

    s = splsched();

    if (!(reason & AST_PREEMPT))
        funnel_release_check(self, 2);

    processor = current_processor();

    /* If we're explicitly yielding, force a subsequent quantum */
    if (reason & AST_YIELD)
        processor->timeslice = 0;

    /* We're handling all scheduling AST's */
    ast_off(AST_SCHEDULING);

    self->continuation = continuation;
    self->parameter = parameter;

    if (__improbable(kdebug_thread_block && kdebug_enable && self->state != TH_RUN)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
            reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
    }

    do {
        thread_lock(self);
        new_thread = thread_select(self, processor);
        thread_unlock(self);
    } while (!thread_invoke(self, new_thread, reason));

    funnel_refunnel_check(self, 5);
    splx(s);

    return (self->wait_result);
}

/*
 *	thread_block:
 *
 *	Block the current thread if a wait has been asserted.
 */
wait_result_t
thread_block(
    thread_continue_t   continuation)
{
    return thread_block_reason(continuation, NULL, AST_NONE);
}

wait_result_t
thread_block_parameter(
    thread_continue_t   continuation,
    void                *parameter)
{
    return thread_block_reason(continuation, parameter, AST_NONE);
}
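/*
 * Illustrative sketch (hypothetical caller, not part of this file): a typical
 * wait site pairs an assert_wait() with thread_block(), optionally passing a
 * continuation so the kernel stack can be discarded while the thread sleeps.
 * The event and continuation names below are made up for the example:
 *
 *	assert_wait((event_t)&example_object, THREAD_UNINT);
 *	... drop any locks that cover example_object ...
 *	wait_result_t wr = thread_block(example_continuation);
 *
 * With a continuation supplied, the thread resumes in example_continuation()
 * on a fresh kernel stack rather than returning from thread_block().
 */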
/*
 *	thread_run:
 *
 *	Switch directly from the current thread to the
 *	new thread, handing off our quantum if appropriate.
 *
 *	New thread must be runnable, and not on a run queue.
 *
 *	Called at splsched.
 */
wait_result_t
thread_run(
    thread_t            self,
    thread_continue_t   continuation,
    void                *parameter,
    thread_t            new_thread)
{
    ast_t       handoff = AST_HANDOFF;

    funnel_release_check(self, 3);

    self->continuation = continuation;
    self->parameter = parameter;

    while (!thread_invoke(self, new_thread, handoff)) {
        processor_t     processor = current_processor();

        thread_lock(self);
        new_thread = thread_select(self, processor);
        thread_unlock(self);
        handoff = AST_NONE;
    }

    funnel_refunnel_check(self, 6);

    return (self->wait_result);
}
/*
 *	thread_continue:
 *
 *	Called at splsched when a thread first receives
 *	a new stack after a continuation.
 */
void
thread_continue(
    register thread_t   thread)
{
    register thread_t           self = current_thread();
    register thread_continue_t  continuation;
    register void               *parameter;

    DTRACE_SCHED(on__cpu);

    continuation = self->continuation;
    parameter = self->parameter;

    thread_dispatch(thread, self);

    self->continuation = self->parameter = NULL;

    funnel_refunnel_check(self, 4);

    if (thread != THREAD_NULL)
        (void)spllo();

    TLOG(1, "thread_continue: calling call_continuation \n");
    call_continuation(continuation, parameter, self->wait_result);
    /*NOTREACHED*/
}
void
thread_quantum_init(thread_t thread)
{
    if (thread->sched_mode == TH_MODE_REALTIME) {
        thread->current_quantum = thread->realtime.computation;
    } else {
        thread->current_quantum = SCHED(initial_quantum_size)(thread);
    }
}

#if defined(CONFIG_SCHED_TRADITIONAL)
static uint32_t
sched_traditional_initial_quantum_size(thread_t thread)
{
    if ((thread == THREAD_NULL) || thread->priority > MAXPRI_THROTTLE)
        return std_quantum;
    else
        return bg_quantum;
}

static sched_mode_t
sched_traditional_initial_thread_sched_mode(task_t parent_task)
{
    if (parent_task == kernel_task)
        return TH_MODE_FIXED;
    else
        return TH_MODE_TIMESHARE;
}

static boolean_t
sched_traditional_supports_timeshare_mode(void)
{
    return TRUE;
}

#endif /* CONFIG_SCHED_TRADITIONAL */
/*
 *	run_queue_init:
 *
 *	Initialize a run queue before first use.
 */
void
run_queue_init(
    run_queue_t     rq)
{
    int             i;

    rq->highq = IDLEPRI;
    for (i = 0; i < NRQBM; i++)
        rq->bitmap[i] = 0;
    setbit(MAXPRI - IDLEPRI, rq->bitmap);
    rq->urgency = rq->count = 0;
    for (i = 0; i < NRQS; i++)
        queue_init(&rq->queues[i]);
}
#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY)

int
sched_traditional_fairshare_runq_count(void)
{
    return fs_runq.count;
}

uint64_t
sched_traditional_fairshare_runq_stats_count_sum(void)
{
    return fs_runq.runq_stats.count_sum;
}

void
sched_traditional_fairshare_enqueue(thread_t thread)
{
    queue_t     queue = &fs_runq.queue;

    simple_lock(&fs_lock);

    enqueue_tail(queue, (queue_entry_t)thread);

    thread->runq = FS_RUNQ;
    SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count);
    fs_runq.count++;

    simple_unlock(&fs_lock);
}

thread_t
sched_traditional_fairshare_dequeue(void)
{
    thread_t thread;

    simple_lock(&fs_lock);
    if (fs_runq.count > 0) {
        thread = (thread_t)dequeue_head(&fs_runq.queue);

        thread->runq = PROCESSOR_NULL;
        SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count);
        fs_runq.count--;

        simple_unlock(&fs_lock);

        return (thread);
    }
    simple_unlock(&fs_lock);

    return THREAD_NULL;
}

boolean_t
sched_traditional_fairshare_queue_remove(thread_t thread)
{
    simple_lock(&fs_lock);

    if (FS_RUNQ == thread->runq) {
        remqueue((queue_entry_t)thread);
        SCHED_STATS_RUNQ_CHANGE(&fs_runq.runq_stats, fs_runq.count);
        fs_runq.count--;

        thread->runq = PROCESSOR_NULL;
        simple_unlock(&fs_lock);
        return (TRUE);
    }
    else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        simple_unlock(&fs_lock);
        return (FALSE);
    }
}

#endif /* defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_PROTO) || defined(CONFIG_SCHED_GRRR) || defined(CONFIG_SCHED_FIXEDPRIORITY) */
/*
 *	run_queue_dequeue:
 *
 *	Perform a dequeue operation on a run queue,
 *	and return the resulting thread.
 *
 *	The run queue must be locked (see thread_run_queue_remove()
 *	for more info), and not empty.
 */
thread_t
run_queue_dequeue(
    run_queue_t     rq,
    integer_t       options)
{
    thread_t        thread;
    queue_t         queue = rq->queues + rq->highq;

    if (options & SCHED_HEADQ) {
        thread = (thread_t)dequeue_head(queue);
    }
    else {
        thread = (thread_t)dequeue_tail(queue);
    }

    thread->runq = PROCESSOR_NULL;
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    rq->count--;
    if (SCHED(priority_is_urgent)(rq->highq)) {
        rq->urgency--; assert(rq->urgency >= 0);
    }
    if (queue_empty(queue)) {
        if (rq->highq != IDLEPRI)
            clrbit(MAXPRI - rq->highq, rq->bitmap);
        rq->highq = MAXPRI - ffsbit(rq->bitmap);
    }

    return (thread);
}
/*
 *	run_queue_enqueue:
 *
 *	Perform an enqueue operation on a run queue.
 *
 *	The run queue must be locked (see thread_run_queue_remove()
 *	for more info).
 */
boolean_t
run_queue_enqueue(
    run_queue_t     rq,
    thread_t        thread,
    integer_t       options)
{
    queue_t         queue = rq->queues + thread->sched_pri;
    boolean_t       result = FALSE;

    if (queue_empty(queue)) {
        enqueue_tail(queue, (queue_entry_t)thread);

        setbit(MAXPRI - thread->sched_pri, rq->bitmap);
        if (thread->sched_pri > rq->highq) {
            rq->highq = thread->sched_pri;
            result = TRUE;
        }
    }
    else
    if (options & SCHED_TAILQ)
        enqueue_tail(queue, (queue_entry_t)thread);
    else
        enqueue_head(queue, (queue_entry_t)thread);

    if (SCHED(priority_is_urgent)(thread->sched_pri))
        rq->urgency++;
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    rq->count++;

    return (result);
}
/*
 *	run_queue_remove:
 *
 *	Remove a specific thread from a runqueue.
 *
 *	The run queue must be locked.
 */
void
run_queue_remove(
    run_queue_t     rq,
    thread_t        thread)
{
    remqueue((queue_entry_t)thread);
    SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
    rq->count--;
    if (SCHED(priority_is_urgent)(thread->sched_pri)) {
        rq->urgency--; assert(rq->urgency >= 0);
    }

    if (queue_empty(rq->queues + thread->sched_pri)) {
        /* update run queue status */
        if (thread->sched_pri != IDLEPRI)
            clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
        rq->highq = MAXPRI - ffsbit(rq->bitmap);
    }

    thread->runq = PROCESSOR_NULL;
}
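/*
 * Illustrative note (not part of the original source): the enqueue, dequeue
 * and remove operations above keep rq->bitmap in sync with which priority
 * levels are occupied, with bit (MAXPRI - pri) standing for level pri.  The
 * highest runnable level can then be recovered with a single find-first-set:
 *
 *	setbit(MAXPRI - thread->sched_pri, rq->bitmap);    level becomes occupied
 *	clrbit(MAXPRI - thread->sched_pri, rq->bitmap);    level becomes empty
 *	rq->highq = MAXPRI - ffsbit(rq->bitmap);           recompute cached maximum
 *
 * IDLEPRI's bit is set at init and never cleared, so ffsbit() always has a
 * floor to land on.
 */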
/*
 *	fairshare_setrun:
 *
 *	Dispatch a thread for round-robin execution.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
fairshare_setrun(
    processor_t     processor,
    thread_t        thread)
{
    processor_set_t     pset = processor->processor_set;

    thread->chosen_processor = processor;

    SCHED(fairshare_enqueue)(thread);

    if (processor != current_processor())
        machine_signal_idle(processor);

    pset_unlock(pset);
}
/*
 *	realtime_queue_insert:
 *
 *	Enqueue a thread for realtime execution.
 */
static boolean_t
realtime_queue_insert(
    thread_t            thread)
{
    queue_t             queue = &rt_runq.queue;
    uint64_t            deadline = thread->realtime.deadline;
    boolean_t           preempt = FALSE;

    simple_lock(&rt_lock);

    if (queue_empty(queue)) {
        enqueue_tail(queue, (queue_entry_t)thread);
        preempt = TRUE;
    }
    else {
        register thread_t   entry = (thread_t)queue_first(queue);

        while (TRUE) {
            if (queue_end(queue, (queue_entry_t)entry) ||
                    deadline < entry->realtime.deadline) {
                entry = (thread_t)queue_prev((queue_entry_t)entry);
                break;
            }

            entry = (thread_t)queue_next((queue_entry_t)entry);
        }

        if ((queue_entry_t)entry == queue)
            preempt = TRUE;

        insque((queue_entry_t)thread, (queue_entry_t)entry);
    }

    thread->runq = RT_RUNQ;
    SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
    rt_runq.count++;

    simple_unlock(&rt_lock);

    return (preempt);
}
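/*
 * Illustrative note (not part of the original source): rt_runq is kept
 * sorted earliest-deadline-first, so the insertion above walks forward until
 * it meets the first entry with a later deadline and inserts in front of it.
 * For example, with queued deadlines {100, 250, 400}, a new thread whose
 * realtime.deadline is 300 lands between 250 and 400, while a deadline of 50
 * lands at the head of the queue, which is also the case that reports
 * preempt = TRUE to the caller.
 */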
/*
 *	realtime_setrun:
 *
 *	Dispatch a thread for realtime execution.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
realtime_setrun(
    processor_t     processor,
    thread_t        thread)
{
    processor_set_t     pset = processor->processor_set;

    thread->chosen_processor = processor;

    /*
     * Dispatch directly onto idle processor.
     */
    if ((thread->bound_processor == processor)
        && processor->state == PROCESSOR_IDLE) {
        remqueue((queue_entry_t)processor);
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);

        processor->next_thread = thread;
        processor->deadline = thread->realtime.deadline;
        processor->state = PROCESSOR_DISPATCHING;
        pset_unlock(pset);

        if (processor != current_processor())
            machine_signal_idle(processor);
        return;
    }

    if (realtime_queue_insert(thread)) {
        int prstate = processor->state;
        if (processor == current_processor())
            ast_on(AST_PREEMPT | AST_URGENT);
        else if ((prstate == PROCESSOR_IDLE) || (prstate == PROCESSOR_DISPATCHING))
            machine_signal_idle(processor);
        else
            cause_ast_check(processor);
    }

    pset_unlock(pset);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

static boolean_t
priority_is_urgent(int priority)
{
    return testbit(priority, sched_preempt_pri) ? TRUE : FALSE;
}

/*
 *	processor_enqueue:
 *
 *	Enqueue thread on a processor run queue.  Thread must be locked,
 *	and not already be on a run queue.
 *
 *	Returns TRUE if a preemption is indicated based on the state
 *	of the run queue.
 *
 *	The run queue must be locked (see thread_run_queue_remove()
 *	for more info).
 */
static boolean_t
processor_enqueue(
    processor_t     processor,
    thread_t        thread,
    integer_t       options)
{
    run_queue_t     rq = runq_for_processor(processor);
    boolean_t       result;

    result = run_queue_enqueue(rq, thread, options);
    thread->runq = processor;
    runq_consider_incr_bound_count(processor, thread);

    return (result);
}

#endif /* CONFIG_SCHED_TRADITIONAL */
/*
 *	processor_setrun:
 *
 *	Dispatch a thread for execution on a
 *	processor.
 *
 *	Thread must be locked.  Associated pset must
 *	be locked, and is returned unlocked.
 */
static void
processor_setrun(
    processor_t     processor,
    thread_t        thread,
    integer_t       options)
{
    processor_set_t     pset = processor->processor_set;
    ast_t               preempt;

    thread->chosen_processor = processor;

    /*
     * Dispatch directly onto idle processor.
     */
    if ((SCHED(direct_dispatch_to_idle_processors) ||
            thread->bound_processor == processor)
        && processor->state == PROCESSOR_IDLE) {
        remqueue((queue_entry_t)processor);
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);

        processor->next_thread = thread;
        processor->deadline = UINT64_MAX;
        processor->state = PROCESSOR_DISPATCHING;
        pset_unlock(pset);

        if (processor != current_processor())
            machine_signal_idle(processor);
        return;
    }

    /*
     * Set preemption mode.
     */
    if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
        preempt = (AST_PREEMPT | AST_URGENT);
    else if (processor->active_thread && thread_eager_preemption(processor->active_thread))
        preempt = (AST_PREEMPT | AST_URGENT);
    else
    if ((thread->sched_mode == TH_MODE_TIMESHARE) && thread->sched_pri < thread->priority)
        preempt = AST_NONE;
    else
        preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;

    if (!SCHED(processor_enqueue)(processor, thread, options))
        preempt = AST_NONE;

    if (preempt != AST_NONE) {
        if (processor == current_processor()) {
            if (csw_check(processor) != AST_NONE)
                ast_on(preempt);
        }
        else
        if (processor->state == PROCESSOR_IDLE || processor->state == PROCESSOR_DISPATCHING) {
            machine_signal_idle(processor);
        }
        else
        if ((processor->state == PROCESSOR_RUNNING ||
                processor->state == PROCESSOR_SHUTDOWN) &&
            (thread->sched_pri >= processor->current_pri ||
                processor->current_thmode == TH_MODE_FAIRSHARE)) {
            cause_ast_check(processor);
        }
    }
    else
    if (processor->state == PROCESSOR_SHUTDOWN &&
            thread->sched_pri >= processor->current_pri) {
        cause_ast_check(processor);
    }
    else
    if (processor->state == PROCESSOR_IDLE &&
            processor != current_processor()) {
        machine_signal_idle(processor);
    }

    pset_unlock(pset);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

static boolean_t
processor_queue_empty(processor_t processor)
{
    return runq_for_processor(processor)->count == 0;
}

static boolean_t
sched_traditional_with_pset_runqueue_processor_queue_empty(processor_t processor)
{
    processor_set_t pset = processor->processor_set;
    int count = runq_for_processor(processor)->count;

    /*
     * The pset runq contains the count of all runnable threads
     * for all processors in the pset. However, for threads that
     * are bound to another processor, the current "processor"
     * is not eligible to execute the thread. So we only
     * include bound threads that are bound to the current
     * "processor". This allows the processor to idle when the
     * count of eligible threads drops to 0, even if there's
     * a runnable thread bound to a different processor in the
     * shared run queue.
     */

    count -= pset->pset_runq_bound_count;
    count += processor->runq_bound_count;

    return count == 0;
}

static ast_t
processor_csw_check(processor_t processor)
{
    run_queue_t     runq;
    boolean_t       has_higher;

    assert(processor->active_thread != NULL);

    runq = runq_for_processor(processor);
    if (first_timeslice(processor)) {
        has_higher = (runq->highq > processor->current_pri);
    } else {
        has_higher = (runq->highq >= processor->current_pri);
    }
    if (has_higher) {
        if (runq->urgency > 0)
            return (AST_PREEMPT | AST_URGENT);

        if (processor->active_thread && thread_eager_preemption(processor->active_thread))
            return (AST_PREEMPT | AST_URGENT);

        return AST_PREEMPT;
    }

    return AST_NONE;
}

static boolean_t
processor_queue_has_priority(processor_t   processor,
                             int           priority,
                             boolean_t     gte)
{
    if (gte)
        return runq_for_processor(processor)->highq >= priority;
    else
        return runq_for_processor(processor)->highq > priority;
}

static boolean_t
should_current_thread_rechoose_processor(processor_t processor)
{
    return (processor->current_pri < BASEPRI_RTQUEUES
        && processor->processor_meta != PROCESSOR_META_NULL
        && processor->processor_meta->primary != processor);
}

static int
sched_traditional_processor_runq_count(processor_t processor)
{
    return runq_for_processor(processor)->count;
}

static uint64_t
sched_traditional_processor_runq_stats_count_sum(processor_t processor)
{
    return runq_for_processor(processor)->runq_stats.count_sum;
}

static uint64_t
sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor)
{
    if (processor->cpu_id == processor->processor_set->cpu_set_low)
        return runq_for_processor(processor)->runq_stats.count_sum;
    else
        return 0ULL;
}

#endif /* CONFIG_SCHED_TRADITIONAL */
#define next_pset(p)	(((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)

/*
 *	choose_next_pset:
 *
 *	Return the next sibling pset containing
 *	available processors.
 *
 *	Returns the original pset if none other is
 *	suitable.
 */
static processor_set_t
choose_next_pset(
    processor_set_t     pset)
{
    processor_set_t     nset = pset;

    do {
        nset = next_pset(nset);
    } while (nset->online_processor_count < 1 && nset != pset);

    return (nset);
}
/*
 *	choose_processor:
 *
 *	Choose a processor for the thread, beginning at
 *	the pset.  Accepts an optional processor hint in
 *	the pset.
 *
 *	Returns a processor, possibly from a different pset.
 *
 *	The thread must be locked.  The pset must be locked,
 *	and the resulting pset is locked on return.
 */
processor_t
choose_processor(
    processor_set_t     pset,
    processor_t         processor,
    thread_t            thread)
{
    processor_set_t     nset, cset = pset;
    processor_meta_t    pmeta = PROCESSOR_META_NULL;
    processor_t         mprocessor;

    /*
     * Prefer the hinted processor, when appropriate.
     */

    if (processor != PROCESSOR_NULL) {
        if (processor->processor_meta != PROCESSOR_META_NULL)
            processor = processor->processor_meta->primary;
    }

    mprocessor = machine_choose_processor(pset, processor);
    if (mprocessor != PROCESSOR_NULL)
        processor = mprocessor;

    if (processor != PROCESSOR_NULL) {
        if (processor->processor_set != pset ||
                processor->state == PROCESSOR_INACTIVE ||
                processor->state == PROCESSOR_SHUTDOWN ||
                processor->state == PROCESSOR_OFF_LINE)
            processor = PROCESSOR_NULL;
        else
            if (processor->state == PROCESSOR_IDLE ||
                    ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
                    (processor->current_pri < BASEPRI_RTQUEUES)))
                return (processor);
    }

    /*
     * Iterate through the processor sets to locate
     * an appropriate processor.
     */
    do {
        /*
         * Choose an idle processor.
         */
        if (!queue_empty(&cset->idle_queue))
            return ((processor_t)queue_first(&cset->idle_queue));

        if (thread->sched_pri >= BASEPRI_RTQUEUES) {
            integer_t lowest_priority = MAXPRI + 1;
            integer_t lowest_unpaired = MAXPRI + 1;
            uint64_t  furthest_deadline = 1;
            processor_t lp_processor = PROCESSOR_NULL;
            processor_t lp_unpaired = PROCESSOR_NULL;
            processor_t fd_processor = PROCESSOR_NULL;

            lp_processor = cset->low_pri;
            /* Consider hinted processor */
            if (lp_processor != PROCESSOR_NULL &&
                ((lp_processor->processor_meta == PROCESSOR_META_NULL) ||
                ((lp_processor == lp_processor->processor_meta->primary) &&
                !queue_empty(&lp_processor->processor_meta->idle_queue))) &&
                lp_processor->state != PROCESSOR_INACTIVE &&
                lp_processor->state != PROCESSOR_SHUTDOWN &&
                lp_processor->state != PROCESSOR_OFF_LINE &&
                (lp_processor->current_pri < thread->sched_pri))
                return lp_processor;

            processor = (processor_t)queue_first(&cset->active_queue);
            while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
                /* Discover the processor executing the
                 * thread with the lowest priority within
                 * this pset, or the one with the furthest
                 * deadline
                 */
                integer_t cpri = processor->current_pri;
                if (cpri < lowest_priority) {
                    lowest_priority = cpri;
                    lp_processor = processor;
                }

                if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
                    furthest_deadline = processor->deadline;
                    fd_processor = processor;
                }

                if (processor->processor_meta != PROCESSOR_META_NULL &&
                    !queue_empty(&processor->processor_meta->idle_queue)) {
                    if (cpri < lowest_unpaired) {
                        lowest_unpaired = cpri;
                        lp_unpaired = processor;
                        pmeta = processor->processor_meta;
                    }
                    else
                        if (pmeta == PROCESSOR_META_NULL)
                            pmeta = processor->processor_meta;
                }

                processor = (processor_t)queue_next((queue_entry_t)processor);
            }

            if (thread->sched_pri > lowest_unpaired)
                return lp_unpaired;

            if (pmeta != PROCESSOR_META_NULL)
                return ((processor_t)queue_first(&pmeta->idle_queue));
            if (thread->sched_pri > lowest_priority)
                return lp_processor;
            if (thread->realtime.deadline < furthest_deadline)
                return fd_processor;

            processor = PROCESSOR_NULL;
        }
        else {
            /*
             * Check any hinted processors in the processor set if available.
             */
            if (cset->low_pri != PROCESSOR_NULL && cset->low_pri->state != PROCESSOR_INACTIVE &&
                    cset->low_pri->state != PROCESSOR_SHUTDOWN && cset->low_pri->state != PROCESSOR_OFF_LINE &&
                    (processor == PROCESSOR_NULL ||
                    (thread->sched_pri > BASEPRI_DEFAULT && cset->low_pri->current_pri < thread->sched_pri))) {
                processor = cset->low_pri;
            }
            else
            if (cset->low_count != PROCESSOR_NULL && cset->low_count->state != PROCESSOR_INACTIVE &&
                    cset->low_count->state != PROCESSOR_SHUTDOWN && cset->low_count->state != PROCESSOR_OFF_LINE &&
                    (processor == PROCESSOR_NULL || (thread->sched_pri <= BASEPRI_DEFAULT &&
                    SCHED(processor_runq_count)(cset->low_count) < SCHED(processor_runq_count)(processor)))) {
                processor = cset->low_count;
            }

            /*
             * Otherwise, choose an available processor in the set.
             */
            if (processor == PROCESSOR_NULL) {
                processor = (processor_t)dequeue_head(&cset->active_queue);
                if (processor != PROCESSOR_NULL)
                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
            }

            if (processor != PROCESSOR_NULL && pmeta == PROCESSOR_META_NULL) {
                if (processor->processor_meta != PROCESSOR_META_NULL &&
                        !queue_empty(&processor->processor_meta->idle_queue))
                    pmeta = processor->processor_meta;
            }
        }

        /*
         * Move onto the next processor set.
         */
        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    /*
     * Make sure that we pick a running processor,
     * and that the correct processor set is locked.
     */
    do {
        if (pmeta != PROCESSOR_META_NULL) {
            if (cset != pmeta->primary->processor_set) {
                pset_unlock(cset);

                cset = pmeta->primary->processor_set;
                pset_lock(cset);
            }

            if (!queue_empty(&pmeta->idle_queue))
                return ((processor_t)queue_first(&pmeta->idle_queue));

            pmeta = PROCESSOR_META_NULL;
        }

        /*
         * If we haven't been able to choose a processor,
         * pick the boot processor and return it.
         */
        if (processor == PROCESSOR_NULL) {
            processor = master_processor;

            /*
             * Check that the correct processor set is
             * returned locked.
             */
            if (cset != processor->processor_set) {
                pset_unlock(cset);

                cset = processor->processor_set;
                pset_lock(cset);
            }

            return (processor);
        }

        /*
         * Check that the processor set for the chosen
         * processor is locked.
         */
        if (cset != processor->processor_set) {
            pset_unlock(cset);

            cset = processor->processor_set;
            pset_lock(cset);
        }

        /*
         * We must verify that the chosen processor is still available.
         */
        if (processor->state == PROCESSOR_INACTIVE ||
                processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
            processor = PROCESSOR_NULL;
    } while (processor == PROCESSOR_NULL);

    return (processor);
}
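/*
 * Illustrative summary (not part of the original source) of roughly the
 * selection order the routine above applies when placing a realtime thread
 * within a pset:
 *
 *	1. any idle processor in the pset;
 *	2. a suitable processor whose paired sibling is idle, or the one
 *	   running the lowest-priority thread, when that priority is below
 *	   the incoming thread's sched_pri;
 *	3. the processor whose realtime deadline lies furthest in the future,
 *	   when the incoming deadline is earlier;
 *	4. otherwise fall through to the next pset, and ultimately the boot
 *	   processor.
 */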
/*
 *	thread_setrun:
 *
 *	Dispatch thread for execution, onto an idle
 *	processor or run queue, and signal a preemption
 *	as appropriate.
 *
 *	Thread must be locked.
 */
void
thread_setrun(
    thread_t        thread,
    integer_t       options)
{
    processor_t         processor;
    processor_set_t     pset;

    assert(thread_runnable(thread));

    /*
     * Update priority if needed.
     */
    if (SCHED(can_update_priority)(thread))
        SCHED(update_priority)(thread);

    assert(thread->runq == PROCESSOR_NULL);

    if (thread->bound_processor == PROCESSOR_NULL) {
        /*
         * Unbound case.
         */
        if (thread->affinity_set != AFFINITY_SET_NULL) {
            /*
             * Use affinity set policy hint.
             */
            pset = thread->affinity_set->aset_pset;
            pset_lock(pset);

            processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
        }
        else
        if (thread->last_processor != PROCESSOR_NULL) {
            /*
             * Simple (last processor) affinity case.
             */
            processor = thread->last_processor;
            pset = processor->processor_set;
            pset_lock(pset);
            processor = SCHED(choose_processor)(pset, processor, thread);

            if ((thread->last_processor != processor) && (thread->last_processor != PROCESSOR_NULL)) {
                KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LPA_BROKEN)|DBG_FUNC_NONE,
                    (uintptr_t)thread_tid(thread), (uintptr_t)thread->last_processor->cpu_id, (uintptr_t)processor->cpu_id, thread->last_processor->state, 0);
            }
        }
        else {
            /*
             * No affinity case:
             *
             * Utilize a per task hint to spread threads
             * among the available processor sets.
             */
            task_t      task = thread->task;

            pset = task->pset_hint;
            if (pset == PROCESSOR_SET_NULL)
                pset = current_processor()->processor_set;

            pset = choose_next_pset(pset);
            pset_lock(pset);

            processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
            task->pset_hint = processor->processor_set;
        }
    }
    else {
        /*
         * Bound case:
         *
         * Unconditionally dispatch on the processor.
         */
        processor = thread->bound_processor;
        pset = processor->processor_set;
        pset_lock(pset);
    }

    /*
     * Dispatch the thread on the chosen processor.
     */
    if (thread->sched_pri >= BASEPRI_RTQUEUES)
        realtime_setrun(processor, thread);
    else if (thread->sched_mode == TH_MODE_FAIRSHARE)
        fairshare_setrun(processor, thread);
    else
        processor_setrun(processor, thread, options);
}
processor_set_t
task_choose_pset(
    task_t      task)
{
    processor_set_t     pset = task->pset_hint;

    if (pset != PROCESSOR_SET_NULL)
        pset = choose_next_pset(pset);

    return (pset);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

/*
 *	processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by
 *	re-dispatching non-bound threads.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
void
processor_queue_shutdown(
    processor_t         processor)
{
    processor_set_t     pset = processor->processor_set;
    run_queue_t         rq = runq_for_processor(processor);
    queue_t             queue = rq->queues + rq->highq;
    int                 pri = rq->highq, count = rq->count;
    thread_t            next, thread;
    queue_head_t        tqueue;

    queue_init(&tqueue);

    while (count > 0) {
        thread = (thread_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            next = (thread_t)queue_next((queue_entry_t)thread);

            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    if (pri != IDLEPRI)
                        clrbit(MAXPRI - pri, rq->bitmap);
                    rq->highq = MAXPRI - ffsbit(rq->bitmap);
                }

                enqueue_tail(&tqueue, (queue_entry_t)thread);
            }
            count--;

            thread = next;
        }

        queue--; pri--;
    }

    pset_unlock(pset);

    while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
        thread_lock(thread);

        thread_setrun(thread, SCHED_TAILQ);

        thread_unlock(thread);
    }
}

#endif /* CONFIG_SCHED_TRADITIONAL */
/*
 *	Check for a preemption point in
 *	the current context.
 *
 *	Called at splsched.
 */
ast_t
csw_check(
    processor_t     processor)
{
    ast_t           result = AST_NONE;
    thread_t        thread = processor->active_thread;

    if (first_timeslice(processor)) {
        if (rt_runq.count > 0)
            return (AST_PREEMPT | AST_URGENT);
    }
    else {
        if (rt_runq.count > 0 && BASEPRI_RTQUEUES >= processor->current_pri)
            return (AST_PREEMPT | AST_URGENT);
    }

    result = SCHED(processor_csw_check)(processor);
    if (result != AST_NONE)
        return (result);

    if (SCHED(should_current_thread_rechoose_processor)(processor))
        return (AST_PREEMPT);

    if (machine_processor_is_inactive(processor))
        return (AST_PREEMPT);

    if (thread->state & TH_SUSP)
        return (AST_PREEMPT);

    return (AST_NONE);
}
/*
 *	set_sched_pri:
 *
 *	Set the scheduled priority of the specified thread.
 *
 *	This may cause the thread to change queues.
 *
 *	Thread must be locked.
 */
void
set_sched_pri(
    thread_t        thread,
    int             priority)
{
    boolean_t       removed = thread_run_queue_remove(thread);

    thread->sched_pri = priority;
    if (removed)
        thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
    else
    if (thread->state & TH_RUN) {
        processor_t     processor = thread->last_processor;

        if (thread == current_thread()) {
            ast_t       preempt;

            processor->current_pri = priority;
            processor->current_thmode = thread->sched_mode;
            if ((preempt = csw_check(processor)) != AST_NONE)
                ast_on(preempt);
        }
        else
        if (processor != PROCESSOR_NULL &&
                processor->active_thread == thread)
            cause_ast_check(processor);
    }
}
static void
run_queue_check(
    run_queue_t     rq,
    thread_t        thread)
{
    queue_t         q;
    queue_entry_t   qe;

    if (rq != thread->runq)
        panic("run_queue_check: thread runq");

    if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
        panic("run_queue_check: thread sched_pri");

    q = &rq->queues[thread->sched_pri];
    qe = queue_first(q);
    while (!queue_end(q, qe)) {
        if (qe == (queue_entry_t)thread)
            return;

        qe = queue_next(qe);
    }

    panic("run_queue_check: end");
}
#if defined(CONFIG_SCHED_TRADITIONAL)

/* locks the runqueue itself */

static boolean_t
processor_queue_remove(
    processor_t     processor,
    thread_t        thread)
{
    void *          rqlock;
    run_queue_t     rq;

    rqlock = &processor->processor_set->sched_lock;
    rq = runq_for_processor(processor);

    simple_lock(rqlock);
    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    }
    else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    simple_unlock(rqlock);

    return (processor != PROCESSOR_NULL);
}

#endif /* CONFIG_SCHED_TRADITIONAL */
/*
 *	thread_run_queue_remove:
 *
 *	Remove a thread from a current run queue and
 *	return TRUE if successful.
 *
 *	Thread must be locked.
 */
boolean_t
thread_run_queue_remove(
    thread_t        thread)
{
    processor_t     processor = thread->runq;

    /*
     * If processor is PROCESSOR_NULL, the thread will stay out of the
     * run queues because the caller locked the thread.  Otherwise
     * the thread is on a run queue, but could be chosen for dispatch
     * and removed.
     */
    if (processor != PROCESSOR_NULL) {
        /*
         * The processor run queues are locked by the
         * processor set.  Real-time priorities use a
         * global queue with a dedicated lock.
         */
        if (thread->sched_mode == TH_MODE_FAIRSHARE) {
            return SCHED(fairshare_queue_remove)(thread);
        }

        if (thread->sched_pri < BASEPRI_RTQUEUES) {
            return SCHED(processor_queue_remove)(processor, thread);
        }

        simple_lock(&rt_lock);

        if (processor == thread->runq) {
            /*
             * Thread is on a run queue and we have a lock on
             * that run queue.
             */
            remqueue((queue_entry_t)thread);
            SCHED_STATS_RUNQ_CHANGE(&rt_runq.runq_stats, rt_runq.count);
            rt_runq.count--;

            thread->runq = PROCESSOR_NULL;
        }
        else {
            /*
             * The thread left the run queue before we could
             * lock the run queue.
             */
            assert(thread->runq == PROCESSOR_NULL);
            processor = PROCESSOR_NULL;
        }

        simple_unlock(&rt_lock);
    }

    return (processor != PROCESSOR_NULL);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

/*
 *	steal_processor_thread:
 *
 *	Locate a thread to steal from the processor and
 *	return it.
 *
 *	Associated pset must be locked.  Returns THREAD_NULL
 *	on failure.
 */
static thread_t
steal_processor_thread(
    processor_t     processor)
{
    run_queue_t     rq = runq_for_processor(processor);
    queue_t         queue = rq->queues + rq->highq;
    int             pri = rq->highq, count = rq->count;
    thread_t        thread;

    while (count > 0) {
        thread = (thread_t)queue_first(queue);
        while (!queue_end(queue, (queue_entry_t)thread)) {
            if (thread->bound_processor == PROCESSOR_NULL) {
                remqueue((queue_entry_t)thread);

                thread->runq = PROCESSOR_NULL;
                SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
                runq_consider_decr_bound_count(processor, thread);
                rq->count--;
                if (SCHED(priority_is_urgent)(pri)) {
                    rq->urgency--; assert(rq->urgency >= 0);
                }
                if (queue_empty(queue)) {
                    if (pri != IDLEPRI)
                        clrbit(MAXPRI - pri, rq->bitmap);
                    rq->highq = MAXPRI - ffsbit(rq->bitmap);
                }

                return (thread);
            }
            count--;

            thread = (thread_t)queue_next((queue_entry_t)thread);
        }

        queue--; pri--;
    }

    return (THREAD_NULL);
}

/*
 *	Locate and steal a thread, beginning
 *	at the pset.
 *
 *	The pset must be locked, and is returned
 *	unlocked.
 *
 *	Returns the stolen thread, or THREAD_NULL on
 *	failure.
 */
static thread_t
steal_thread(
    processor_set_t     pset)
{
    processor_set_t     nset, cset = pset;
    processor_t         processor;
    thread_t            thread;

    do {
        processor = (processor_t)queue_first(&cset->active_queue);
        while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
            if (runq_for_processor(processor)->count > 0) {
                thread = steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    remqueue((queue_entry_t)processor);
                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

                    pset_unlock(cset);

                    return (thread);
                }
            }

            processor = (processor_t)queue_next((queue_entry_t)processor);
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}

static thread_t
steal_thread_disabled(
    processor_set_t     pset)
{
    pset_unlock(pset);

    return (THREAD_NULL);
}

#endif /* CONFIG_SCHED_TRADITIONAL */
int
thread_get_urgency(uint64_t *rt_period, uint64_t *rt_deadline)
{
    processor_t     processor;
    thread_t        thread;

    processor = current_processor();

    thread = processor->next_thread;

    if (thread != NULL) {
        if (thread->sched_mode == TH_MODE_REALTIME) {

            if (rt_period != NULL)
                *rt_period = thread->realtime.period;
            if (rt_deadline != NULL)
                *rt_deadline = thread->realtime.deadline;

            KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_REAL_TIME, thread->realtime.period,
                (thread->realtime.deadline >> 32), thread->realtime.deadline, 0);

            return (THREAD_URGENCY_REAL_TIME);
        } else if ((thread->sched_pri <= MAXPRI_THROTTLE) &&
            (thread->priority <= MAXPRI_THROTTLE)) {
            KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority, 0, 0);
            return (THREAD_URGENCY_BACKGROUND);
        } else {
            KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_NORMAL, 0, 0, 0, 0);
            return (THREAD_URGENCY_NORMAL);
        }
    } else {
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_NONE, 0, 0, 0, 0);
        return (THREAD_URGENCY_NONE);
    }
}
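/*
 * Illustrative sketch (hypothetical caller, not part of this file): platform
 * idle and power-management code can query the urgency of the thread about
 * to be dispatched and feed any realtime constraints into its own policy:
 *
 *	uint64_t period, deadline;
 *	int urgency = thread_get_urgency(&period, &deadline);
 *	if (urgency == THREAD_URGENCY_REAL_TIME)
 *		... prefer a shallow idle state, using period/deadline ...
 */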
/*
 *	This is the processor idle loop, which just looks for other threads
 *	to execute.  Processor idle threads invoke this without supplying a
 *	current thread to idle without an asserted wait state.
 *
 *	Returns the next thread to execute if dispatched directly.
 */

#if 0
#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
#else
#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
#endif

thread_t
processor_idle(
    thread_t            thread,
    processor_t         processor)
{
    processor_set_t     pset = processor->processor_set;
    thread_t            new_thread;
    int                 state;
    (void)splsched();

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START,
        (uintptr_t)thread_tid(thread), 0, 0, 0, 0);

    SCHED_STATS_CPU_IDLE_START(processor);

    timer_switch(&PROCESSOR_DATA(processor, system_state),
        mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
    PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);

    while (processor->next_thread == THREAD_NULL && SCHED(processor_queue_empty)(processor) && rt_runq.count == 0 && SCHED(fairshare_runq_count)() == 0 &&
            (thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
        IDLE_KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0);

        machine_idle();

        (void)splsched();

        IDLE_KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0);

        if (processor->state == PROCESSOR_INACTIVE && !machine_processor_is_inactive(processor))
            break;
    }

    timer_switch(&PROCESSOR_DATA(processor, idle_state),
        mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
    PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

    pset_lock(pset);

    state = processor->state;
    if (state == PROCESSOR_DISPATCHING) {
        /*
         * Common case -- cpu dispatched.
         */
        new_thread = processor->next_thread;
        processor->next_thread = THREAD_NULL;
        processor->state = PROCESSOR_RUNNING;

        if (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
            (rt_runq.count > 0 && BASEPRI_RTQUEUES >= new_thread->sched_pri)) {
            processor->deadline = UINT64_MAX;

            pset_unlock(pset);

            thread_lock(new_thread);
            KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq.count, 0, 0);
            thread_setrun(new_thread, SCHED_HEADQ);
            thread_unlock(new_thread);

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
                (uintptr_t)thread_tid(thread), state, 0, 0, 0);

            return (THREAD_NULL);
        }

        pset_unlock(pset);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
            (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);

        return (new_thread);
    }
    else
    if (state == PROCESSOR_IDLE) {
        remqueue((queue_entry_t)processor);

        processor->state = PROCESSOR_RUNNING;
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
    }
    else
    if (state == PROCESSOR_INACTIVE) {
        processor->state = PROCESSOR_RUNNING;
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
    }
    else
    if (state == PROCESSOR_SHUTDOWN) {
        /*
         * Going off-line.  Force a
         * reschedule.
         */
        if ((new_thread = processor->next_thread) != THREAD_NULL) {
            processor->next_thread = THREAD_NULL;
            processor->deadline = UINT64_MAX;

            pset_unlock(pset);

            thread_lock(new_thread);
            thread_setrun(new_thread, SCHED_HEADQ);
            thread_unlock(new_thread);

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
                (uintptr_t)thread_tid(thread), state, 0, 0, 0);

            return (THREAD_NULL);
        }
    }

    pset_unlock(pset);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
        (uintptr_t)thread_tid(thread), state, 0, 0, 0);

    return (THREAD_NULL);
}
/*
 *	Each processor has a dedicated thread which
 *	executes the idle loop when there is no suitable
 *	previous context.
 */
void
idle_thread(void)
{
    processor_t     processor = current_processor();
    thread_t        new_thread;

    new_thread = processor_idle(THREAD_NULL, processor);
    if (new_thread != THREAD_NULL) {
        thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
        /*NOTREACHED*/
    }

    thread_block((thread_continue_t)idle_thread);
    /*NOTREACHED*/
}
kern_return_t
idle_thread_create(
    processor_t     processor)
{
    kern_return_t   result;
    thread_t        thread;
    spl_t           s;

    result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    s = splsched();
    thread_lock(thread);
    thread->bound_processor = processor;
    processor->idle_thread = thread;
    thread->sched_pri = thread->priority = IDLEPRI;
    thread->state = (TH_RUN | TH_IDLE);
    thread_unlock(thread);
    splx(s);

    thread_deallocate(thread);

    return (KERN_SUCCESS);
}
/*
 * sched_startup:
 *
 * Kicks off scheduler services.
 *
 * Called at splsched.
 */
void
sched_startup(void)
{
    kern_return_t   result;
    thread_t        thread;

    result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
        (void *)SCHED(maintenance_continuation),
        MAXPRI_KERNEL, &thread);
    if (result != KERN_SUCCESS)
        panic("sched_startup");

    thread_deallocate(thread);

    /*
     * Yield to the sched_init_thread once, to
     * initialize our own thread after being switched
     * to for the first time.
     *
     * The current thread is the only other thread
     * active at this point.
     */
    thread_block(THREAD_CONTINUE_NULL);
}
#if defined(CONFIG_SCHED_TRADITIONAL)

static uint64_t sched_tick_deadline = 0;

/*
 *	sched_init_thread:
 *
 *	Perform periodic bookkeeping functions about ten
 *	times per second.
 */
static void
sched_traditional_tick_continue(void)
{
    uint64_t        abstime = mach_absolute_time();

    sched_tick++;

    /*
     *  Compute various averages.
     */
    compute_averages();

    /*
     *  Scan the run queues for threads which
     *  may need to be updated.
     */
    thread_update_scan();

    if (sched_tick_deadline == 0)
        sched_tick_deadline = abstime;

    clock_deadline_for_periodic_event(sched_tick_interval, abstime,
        &sched_tick_deadline);

    assert_wait_deadline((event_t)sched_traditional_tick_continue, THREAD_UNINT, sched_tick_deadline);
    thread_block((thread_continue_t)sched_traditional_tick_continue);
    /*NOTREACHED*/
}

#endif /* CONFIG_SCHED_TRADITIONAL */
void
sched_init_thread(void (*continuation)(void))
{
    thread_block(THREAD_CONTINUE_NULL);

    continuation();

    /*NOTREACHED*/
}
#if defined(CONFIG_SCHED_TRADITIONAL)

/*
 *	thread_update_scan / runq_scan:
 *
 *	Scan the run queues to account for timesharing threads
 *	which need to be updated.
 *
 *	Scanner runs in two passes.  Pass one squirrels likely
 *	threads away in an array, pass two does the update.
 *
 *	This is necessary because the run queue is locked for
 *	the candidate scan, but the thread is locked for the update.
 *
 *	Array should be sized to make forward progress, without
 *	disabling preemption for long periods.
 */
#define THREAD_UPDATE_SIZE      128

static thread_t     thread_update_array[THREAD_UPDATE_SIZE];
static int          thread_update_count = 0;

/*
 *	Scan a runq for candidate threads.
 *
 *	Returns TRUE if retry is needed.
 */
static boolean_t
runq_scan(
    run_queue_t     runq)
{
    register int            count;
    register queue_t        q;
    register thread_t       thread;

    if ((count = runq->count) > 0) {
        q = runq->queues + runq->highq;
        while (count > 0) {
            queue_iterate(q, thread, thread_t, links) {
                if (thread->sched_stamp != sched_tick &&
                    (thread->sched_mode == TH_MODE_TIMESHARE)) {
                    if (thread_update_count == THREAD_UPDATE_SIZE)
                        return (TRUE);

                    thread_update_array[thread_update_count++] = thread;
                    thread_reference_internal(thread);
                }

                count--;
            }

            q--;
        }
    }

    return (FALSE);
}

static void
thread_update_scan(void)
{
    boolean_t           restart_needed = FALSE;
    processor_t         processor = processor_list;
    processor_set_t     pset;
    thread_t            thread;
    spl_t               s;

    do {
        do {
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor));

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_count == THREAD_UPDATE_SIZE) {
                    restart_needed = TRUE;
                    break;
                }

                thread_update_array[thread_update_count++] = thread;
                thread_reference_internal(thread);
            }
        } while ((processor = processor->processor_list) != NULL);

        /*
         * Ok, we now have a collection of candidates -- fix them.
         */
        while (thread_update_count > 0) {
            thread = thread_update_array[--thread_update_count];
            thread_update_array[thread_update_count] = THREAD_NULL;

            s = splsched();
            thread_lock(thread);
            if (!(thread->state & (TH_WAIT))) {
                if (SCHED(can_update_priority)(thread))
                    SCHED(update_priority)(thread);
            }
            thread_unlock(thread);
            splx(s);

            thread_deallocate(thread);
        }
    } while (restart_needed);
}

#endif /* CONFIG_SCHED_TRADITIONAL */
boolean_t
thread_eager_preemption(thread_t thread)
{
    return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
}

void
thread_set_eager_preempt(thread_t thread)
{
    spl_t x;
    processor_t p;
    ast_t ast = AST_NONE;

    x = splsched();
    p = current_processor();

    thread_lock(thread);
    thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;

    if (thread == current_thread()) {
        thread_unlock(thread);

        ast = csw_check(p);
        if (ast != AST_NONE) {
            (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
        }
    } else {
        p = thread->last_processor;

        if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
            p->active_thread == thread) {
            cause_ast_check(p);
        }

        thread_unlock(thread);
    }

    splx(x);
}

void
thread_clear_eager_preempt(thread_t thread)
{
    spl_t x;

    x = splsched();
    thread_lock(thread);

    thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;

    thread_unlock(thread);
    splx(x);
}
/*
 * Scheduling statistics
 */
void
sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
{
    struct processor_sched_statistics *stats;
    boolean_t to_realtime = FALSE;

    stats = &processor->processor_data.sched_stats;
    stats->csw_count++;

    if (otherpri >= BASEPRI_REALTIME) {
        stats->rt_sched_count++;
        to_realtime = TRUE;
    }

    if ((reasons & AST_PREEMPT) != 0) {
        stats->preempt_count++;

        if (selfpri >= BASEPRI_REALTIME) {
            stats->preempted_rt_count++;
        }

        if (to_realtime) {
            stats->preempted_by_rt_count++;
        }
    }
}

void
sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
{
    uint64_t timestamp = mach_absolute_time();

    stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
    stats->last_change_timestamp = timestamp;
}
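/*
 * Illustrative note (not part of the original source): count_sum integrates
 * the instantaneous run-queue depth over time, so a consumer can recover the
 * mean depth over an interval by differencing two samples.  A hypothetical
 * reader might do:
 *
 *	uint64_t sum0 = stats->count_sum, t0 = mach_absolute_time();
 *	... later ...
 *	uint64_t sum1 = stats->count_sum, t1 = mach_absolute_time();
 *	average_depth = (sum1 - sum0) / (t1 - t0);
 */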
/*
 *	For calls from assembly code
 */
#undef thread_wakeup
void
thread_wakeup(
    event_t     x);

void
thread_wakeup(
    event_t     x)
{
    thread_wakeup_with_result(x, THREAD_AWAKENED);
}

boolean_t
preemption_enabled(void)
{
    return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
}

static boolean_t
thread_runnable(
    thread_t    thread)
{
    return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
}