1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/limits.h>
79 #include <machine/atomic.h>
80
81 #include <machine/commpage.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/backtrace.h>
85 #include <kern/clock.h>
86 #include <kern/counters.h>
87 #include <kern/cpu_number.h>
88 #include <kern/cpu_data.h>
89 #include <kern/smp.h>
90 #include <kern/debug.h>
91 #include <kern/macro_help.h>
92 #include <kern/machine.h>
93 #include <kern/misc_protos.h>
94 #if MONOTONIC
95 #include <kern/monotonic.h>
96 #endif /* MONOTONIC */
97 #include <kern/processor.h>
98 #include <kern/queue.h>
99 #include <kern/sched.h>
100 #include <kern/sched_prim.h>
101 #include <kern/sfi.h>
102 #include <kern/syscall_subr.h>
103 #include <kern/task.h>
104 #include <kern/thread.h>
105 #include <kern/ledger.h>
106 #include <kern/timer_queue.h>
107 #include <kern/waitq.h>
108 #include <kern/policy_internal.h>
109 #include <kern/cpu_quiesce.h>
110
111 #include <vm/pmap.h>
112 #include <vm/vm_kern.h>
113 #include <vm/vm_map.h>
114 #include <vm/vm_pageout.h>
115
116 #include <mach/sdt.h>
117 #include <mach/mach_host.h>
118 #include <mach/host_info.h>
119
120 #include <sys/kdebug.h>
121 #include <kperf/kperf.h>
122 #include <kern/kpc.h>
123 #include <san/kasan.h>
124 #include <kern/pms.h>
125 #include <kern/host.h>
126 #include <stdatomic.h>
127
128 struct sched_statistics PERCPU_DATA(sched_stats);
129 bool sched_stats_active;
130
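/*
 * Note: the realtime run-queue count is kept with relaxed atomics. The queue
 * itself is manipulated under the pset lock; the counter is read lock-free
 * only as a hint (e.g. by sched_thread_should_yield below), so no stronger
 * memory ordering should be required here.
 */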
131 int
132 rt_runq_count(processor_set_t pset)
133 {
134 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
135 }
136
137 void
138 rt_runq_count_incr(processor_set_t pset)
139 {
140 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
141 }
142
143 void
144 rt_runq_count_decr(processor_set_t pset)
145 {
146 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
147 }
148
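/*
 * Scheduler tunables. Each TUNABLE below can be overridden at boot via the
 * boot-arg named in its third argument (e.g. preempt=, bg_preempt=, unsafe=,
 * poll=).
 */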
149 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
150 TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);
151
152 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
153 TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);
154
155 #define MAX_UNSAFE_QUANTA 800
156 TUNABLE(int, max_unsafe_quanta, "unsafe", MAX_UNSAFE_QUANTA);
157
158 #define MAX_POLL_QUANTA 2
159 TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);
160
161 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
162 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
163
164 uint64_t max_poll_computation;
165
166 uint64_t max_unsafe_computation;
167 uint64_t sched_safe_duration;
168
169 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
170
171 uint32_t std_quantum;
172 uint32_t min_std_quantum;
173 uint32_t bg_quantum;
174
175 uint32_t std_quantum_us;
176 uint32_t bg_quantum_us;
177
178 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
179
180 uint32_t thread_depress_time;
181 uint32_t default_timeshare_computation;
182 uint32_t default_timeshare_constraint;
183
184 uint32_t max_rt_quantum;
185 uint32_t min_rt_quantum;
186
187 uint32_t rt_constraint_threshold;
188
189 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
190
191 unsigned sched_tick;
192 uint32_t sched_tick_interval;
193
194 /* Timeshare load calculation interval (15ms) */
195 uint32_t sched_load_compute_interval_us = 15000;
196 uint64_t sched_load_compute_interval_abs;
197 static _Atomic uint64_t sched_load_compute_deadline;
198
199 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
200 uint32_t sched_fixed_shift;
201
202 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
203
204 /* Allow foreground to decay past default to resolve inversions */
205 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
206 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
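/* With the usual priority band layout (BASEPRI_DEFAULT 31, BASEPRI_FOREGROUND 47), this limit is 18. */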
207
208 /* Defaults for timer deadline profiling */
209 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
210 * 2ms */
211 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
212 * <= 5ms */
213
214 uint64_t timer_deadline_tracking_bin_1;
215 uint64_t timer_deadline_tracking_bin_2;
216
217 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
218
219 thread_t sched_maintenance_thread;
220
221 /* interrupts disabled lock to guard recommended cores state */
222 decl_simple_lock_data(static, sched_recommended_cores_lock);
223 static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
224 static void sched_update_recommended_cores(uint64_t recommended_cores);
225
226 #if __arm__ || __arm64__
227 static void sched_recommended_cores_maintenance(void);
228 uint64_t perfcontrol_failsafe_starvation_threshold;
229 extern char *proc_name_address(struct proc *p);
230 #endif /* __arm__ || __arm64__ */
231
232 uint64_t sched_one_second_interval;
233 boolean_t allow_direct_handoff = TRUE;
234
235 /* Forwards */
236
237 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
238
239 static void load_shift_init(void);
240 static void preempt_pri_init(void);
241
242 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
243
244 thread_t processor_idle(
245 thread_t thread,
246 processor_t processor);
247
248 static ast_t
249 csw_check_locked(
250 thread_t thread,
251 processor_t processor,
252 processor_set_t pset,
253 ast_t check_reason);
254
255 static void processor_setrun(
256 processor_t processor,
257 thread_t thread,
258 integer_t options);
259
260 static void
261 sched_realtime_timebase_init(void);
262
263 static void
264 sched_timer_deadline_tracking_init(void);
265
266 #if DEBUG
267 extern int debug_task;
268 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
269 #else
270 #define TLOG(a, fmt, args...) do {} while (0)
271 #endif
272
273 static processor_t
274 thread_bind_internal(
275 thread_t thread,
276 processor_t processor);
277
278 static void
279 sched_vm_group_maintenance(void);
280
281 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
282 int8_t sched_load_shifts[NRQS];
283 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
284 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
285
286 /*
287 * Statically allocate a buffer to hold the longest possible
288 * scheduler description string, as currently implemented.
289	 * bsd/kern/kern_sysctl.c has a corresponding definition that exports
290	 * it to userspace via sysctl(3). If either version
291 * changes, update the other.
292 *
293 * Note that in addition to being an upper bound on the strings
294 * in the kernel, it's also an exact parameter to PE_get_default(),
295 * which interrogates the device tree on some platforms. That
296 * API requires the caller know the exact size of the device tree
297 * property, so we need both a legacy size (32) and the current size
298 * (48) to deal with old and new device trees. The device tree property
299 * is similarly padded to a fixed size so that the same kernel image
300 * can run on multiple devices with different schedulers configured
301 * in the device tree.
302 */
303 char sched_string[SCHED_STRING_MAX_LENGTH];
304
305 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
306
307 /* Global flag which indicates whether Background Stepper Context is enabled */
308 static int cpu_throttle_enabled = 1;
309
310 void
311 sched_init(void)
312 {
313 boolean_t direct_handoff = FALSE;
314 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
315
316 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
317 /* No boot-args, check in device tree */
318 if (!PE_get_default("kern.sched_pri_decay_limit",
319 &sched_pri_decay_band_limit,
320 sizeof(sched_pri_decay_band_limit))) {
321 /* Allow decay all the way to normal limits */
322 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
323 }
324 }
325
326 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
327
328 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
329 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
330 }
331 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
332
333 cpu_quiescent_counter_init();
334
335 SCHED(init)();
336 SCHED(rt_init)(&pset0);
337 sched_timer_deadline_tracking_init();
338
339 SCHED(pset_init)(&pset0);
340 SCHED(processor_init)(master_processor);
341
342 if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
343 allow_direct_handoff = direct_handoff;
344 }
345 }
346
347 void
348 sched_timebase_init(void)
349 {
350 uint64_t abstime;
351
352 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
353 sched_one_second_interval = abstime;
354
355 SCHED(timebase_init)();
356 sched_realtime_timebase_init();
357 }
358
359 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
360
361 void
362 sched_timeshare_init(void)
363 {
364 /*
365 * Calculate the timeslicing quantum
366 * in us.
367 */
368 if (default_preemption_rate < 1) {
369 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
370 }
371 std_quantum_us = (1000 * 1000) / default_preemption_rate;
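	/* With the default preemption rate of 100 (1/s), this yields a 10 ms (10000 us) quantum. */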
372
373 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
374
375 if (default_bg_preemption_rate < 1) {
376 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
377 }
378 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
379
380 printf("standard background quantum is %d us\n", bg_quantum_us);
381
382 load_shift_init();
383 preempt_pri_init();
384 sched_tick = 0;
385 }
386
387 void
388 sched_timeshare_timebase_init(void)
389 {
390 uint64_t abstime;
391 uint32_t shift;
392
393 /* standard timeslicing quantum */
394 clock_interval_to_absolutetime_interval(
395 std_quantum_us, NSEC_PER_USEC, &abstime);
396 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
397 std_quantum = (uint32_t)abstime;
398
399 /* smallest remaining quantum (250 us) */
400 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
401 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
402 min_std_quantum = (uint32_t)abstime;
403
404 /* quantum for background tasks */
405 clock_interval_to_absolutetime_interval(
406 bg_quantum_us, NSEC_PER_USEC, &abstime);
407 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
408 bg_quantum = (uint32_t)abstime;
409
410 /* scheduler tick interval */
411 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
412 NSEC_PER_USEC, &abstime);
413 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
414 sched_tick_interval = (uint32_t)abstime;
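	/* With the customary SCHED_TICK_SHIFT of 3, this works out to 125 ms between scheduler ticks. */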
415
416 /* timeshare load calculation interval & deadline initialization */
417 clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
418 os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);
419
420 /*
421 * Compute conversion factor from usage to
422 * timesharing priorities with 5/8 ** n aging.
423 */
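	/*
	 * Concretely: the loop below finds the smallest shift such that 5/3 of
	 * the scheduler tick interval (abstime still holds sched_tick_interval
	 * here), shifted right by that amount, is no more than BASEPRI_DEFAULT.
	 * sched_fixed_shift is later combined with the load-dependent shifts
	 * when converting sched_usage into a priority penalty.
	 */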
424 abstime = (abstime * 5) / 3;
425 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
426 abstime >>= 1;
427 }
428 sched_fixed_shift = shift;
429
430 for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
431 sched_pri_shifts[i] = INT8_MAX;
432 }
433
434 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
435 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
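	/*
	 * With the default tunables (800 unsafe quanta, 10 ms standard quantum),
	 * this allows roughly 8 s of "unsafe" computation and a 16 s safe duration.
	 */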
436
437 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
438 thread_depress_time = 1 * std_quantum;
439 default_timeshare_computation = std_quantum / 2;
440 default_timeshare_constraint = std_quantum;
441
442 #if __arm__ || __arm64__
443 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
444 #endif /* __arm__ || __arm64__ */
445 }
446
447 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
448
449 void
450 pset_rt_init(processor_set_t pset)
451 {
452 os_atomic_init(&pset->rt_runq.count, 0);
453 queue_init(&pset->rt_runq.queue);
454 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
455 }
456
457 static void
458 sched_realtime_timebase_init(void)
459 {
460 uint64_t abstime;
461
462	/* smallest rt computation (50 us) */
463 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
464 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
465 min_rt_quantum = (uint32_t)abstime;
466
467 /* maximum rt computation (50 ms) */
468 clock_interval_to_absolutetime_interval(
469 50, 1000 * NSEC_PER_USEC, &abstime);
470 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
471 max_rt_quantum = (uint32_t)abstime;
472
473 /* constraint threshold for sending backup IPIs (4 ms) */
474 clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime);
475 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
476 rt_constraint_threshold = (uint32_t)abstime;
477 }
478
479 void
480 sched_check_spill(processor_set_t pset, thread_t thread)
481 {
482 (void)pset;
483 (void)thread;
484
485 return;
486 }
487
488 bool
489 sched_thread_should_yield(processor_t processor, thread_t thread)
490 {
491 (void)thread;
492
493 return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
494 }
495
496 /* Default implementations of .steal_thread_enabled */
497 bool
498 sched_steal_thread_DISABLED(processor_set_t pset)
499 {
500 (void)pset;
501 return false;
502 }
503
504 bool
505 sched_steal_thread_enabled(processor_set_t pset)
506 {
507 return bit_count(pset->node->pset_map) > 1;
508 }
509
510 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
511
512 /*
513 * Set up values for timeshare
514 * loading factors.
515 */
516 static void
517 load_shift_init(void)
518 {
519 int8_t k, *p = sched_load_shifts;
520 uint32_t i, j;
521
522 uint32_t sched_decay_penalty = 1;
523
524 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
525 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
526 }
527
528 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
529 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
530 }
531
532 if (sched_decay_penalty == 0) {
533 /*
534 * There is no penalty for timeshare threads for using too much
535 * CPU, so set all load shifts to INT8_MIN. Even under high load,
536 * sched_pri_shift will be >INT8_MAX, and there will be no
537 * penalty applied to threads (nor will sched_usage be updated per
538 * thread).
539 */
540 for (i = 0; i < NRQS; i++) {
541 sched_load_shifts[i] = INT8_MIN;
542 }
543
544 return;
545 }
546
547 *p++ = INT8_MIN; *p++ = 0;
548
549 /*
550 * For a given system load "i", the per-thread priority
551 * penalty per quantum of CPU usage is ~2^k priority
552 * levels. "sched_decay_penalty" can cause more
553	 * array entries to be filled with smaller "k" values.
554 */
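	/*
	 * For example, with the default sched_decay_penalty of 1, the loop below
	 * fills entries 2-3 with a shift of 1, 4-7 with 2, 8-15 with 3, and so
	 * on, doubling the load range covered by each successive value of "k".
	 */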
555 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
556 for (j <<= 1; (i < j) && (i < NRQS); ++i) {
557 *p++ = k;
558 }
559 }
560 }
561
562 static void
563 preempt_pri_init(void)
564 {
565 bitmap_t *p = sched_preempt_pri;
566
567 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
568 bitmap_set(p, i);
569 }
570
571 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
572 bitmap_set(p, i);
573 }
574 }
575
576 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
577
578 /*
579 * Thread wait timer expiration.
580 */
581 void
582 thread_timer_expire(
583 void *p0,
584 __unused void *p1)
585 {
586 thread_t thread = p0;
587 spl_t s;
588
589 assert_thread_magic(thread);
590
591 s = splsched();
592 thread_lock(thread);
593 if (--thread->wait_timer_active == 0) {
594 if (thread->wait_timer_is_set) {
595 thread->wait_timer_is_set = FALSE;
596 clear_wait_internal(thread, THREAD_TIMED_OUT);
597 }
598 }
599 thread_unlock(thread);
600 splx(s);
601 }
602
603 /*
604 * thread_unblock:
605 *
606 * Unblock thread on wake up.
607 *
608 * Returns TRUE if the thread should now be placed on the runqueue.
609 *
610 * Thread must be locked.
611 *
612 * Called at splsched().
613 */
614 boolean_t
615 thread_unblock(
616 thread_t thread,
617 wait_result_t wresult)
618 {
619 boolean_t ready_for_runq = FALSE;
620 thread_t cthread = current_thread();
621 uint32_t new_run_count;
622 int old_thread_state;
623
624 /*
625 * Set wait_result.
626 */
627 thread->wait_result = wresult;
628
629 /*
630 * Cancel pending wait timer.
631 */
632 if (thread->wait_timer_is_set) {
633 if (timer_call_cancel(&thread->wait_timer)) {
634 thread->wait_timer_active--;
635 }
636 thread->wait_timer_is_set = FALSE;
637 }
638
639 boolean_t aticontext, pidle;
640 ml_get_power_state(&aticontext, &pidle);
641
642 /*
643 * Update scheduling state: not waiting,
644 * set running.
645 */
646 old_thread_state = thread->state;
647 thread->state = (old_thread_state | TH_RUN) &
648 ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT);
649
650 if ((old_thread_state & TH_RUN) == 0) {
651 uint64_t ctime = mach_approximate_time();
652 thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
653 timer_start(&thread->runnable_timer, ctime);
654
655 ready_for_runq = TRUE;
656
657 if (old_thread_state & TH_WAIT_REPORT) {
658 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
659 }
660
661 /* Update the runnable thread count */
662 new_run_count = SCHED(run_count_incr)(thread);
663
664 #if CONFIG_SCHED_AUTO_JOIN
665 if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
666 work_interval_auto_join_propagate(cthread, thread);
667 }
668 #endif /*CONFIG_SCHED_AUTO_JOIN */
669 } else {
670 /*
671 * Either the thread is idling in place on another processor,
672 * or it hasn't finished context switching yet.
673 */
674 assert((thread->state & TH_IDLE) == 0);
675 /*
676 * The run count is only dropped after the context switch completes
677 * and the thread is still waiting, so we should not run_incr here
678 */
679 new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
680 }
681
682 /*
683 * Calculate deadline for real-time threads.
684 */
685 if (thread->sched_mode == TH_MODE_REALTIME) {
686 uint64_t ctime;
687
688 ctime = mach_absolute_time();
689 thread->realtime.deadline = thread->realtime.constraint + ctime;
690 }
691
692 /*
693 * Clear old quantum, fail-safe computation, etc.
694 */
695 thread->quantum_remaining = 0;
696 thread->computation_metered = 0;
697 thread->reason = AST_NONE;
698 thread->block_hint = kThreadWaitNone;
699
700 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
701 * We also account for "double hop" thread signaling via
702 * the thread callout infrastructure.
703	 * DRK: consider removing the callout wakeup counters in the future;
704	 * they're present for verification at the moment.
705 */
706
707 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
708 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
709
710 uint64_t ttd = current_processor()->timer_call_ttd;
711
712 if (ttd) {
713 if (ttd <= timer_deadline_tracking_bin_1) {
714 thread->thread_timer_wakeups_bin_1++;
715 } else if (ttd <= timer_deadline_tracking_bin_2) {
716 thread->thread_timer_wakeups_bin_2++;
717 }
718 }
719
720 ledger_credit_thread(thread, thread->t_ledger,
721 task_ledgers.interrupt_wakeups, 1);
722 if (pidle) {
723 ledger_credit_thread(thread, thread->t_ledger,
724 task_ledgers.platform_idle_wakeups, 1);
725 }
726 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
727 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
728 if (cthread->callout_woken_from_icontext) {
729 ledger_credit_thread(thread, thread->t_ledger,
730 task_ledgers.interrupt_wakeups, 1);
731 thread->thread_callout_interrupt_wakeups++;
732
733 if (cthread->callout_woken_from_platform_idle) {
734 ledger_credit_thread(thread, thread->t_ledger,
735 task_ledgers.platform_idle_wakeups, 1);
736 thread->thread_callout_platform_idle_wakeups++;
737 }
738
739 cthread->callout_woke_thread = TRUE;
740 }
741 }
742
743 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
744 thread->callout_woken_from_icontext = !!aticontext;
745 thread->callout_woken_from_platform_idle = !!pidle;
746 thread->callout_woke_thread = FALSE;
747 }
748
749 #if KPERF
750 if (ready_for_runq) {
751 kperf_make_runnable(thread, aticontext);
752 }
753 #endif /* KPERF */
754
755 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
756 MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
757 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
758 sched_run_buckets[TH_BUCKET_RUN], 0);
759
760 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
761
762 return ready_for_runq;
763 }
764
765 /*
766 * Routine: thread_allowed_for_handoff
767 * Purpose:
768 * Check if the thread is allowed for handoff operation
769 * Conditions:
770 * thread lock held, IPC locks may be held.
771 * TODO: In future, do not allow handoff if threads have different cluster
772 * recommendations.
773 */
774 boolean_t
775 thread_allowed_for_handoff(
776 thread_t thread)
777 {
778 thread_t self = current_thread();
779
780 if (allow_direct_handoff &&
781 thread->sched_mode == TH_MODE_REALTIME &&
782 self->sched_mode == TH_MODE_REALTIME) {
783 return TRUE;
784 }
785
786 return FALSE;
787 }
788
789 /*
790 * Routine: thread_go
791 * Purpose:
792 * Unblock and dispatch thread.
793 * Conditions:
794 * thread lock held, IPC locks may be held.
795 * thread must have been pulled from wait queue under same lock hold.
796 * thread must have been waiting
797 * Returns:
798 * KERN_SUCCESS - Thread was set running
799 *
800 * TODO: This should return void
801 */
802 kern_return_t
803 thread_go(
804 thread_t thread,
805 wait_result_t wresult,
806 waitq_options_t option)
807 {
808 thread_t self = current_thread();
809
810 assert_thread_magic(thread);
811
812 assert(thread->at_safe_point == FALSE);
813 assert(thread->wait_event == NO_EVENT64);
814 assert(thread->waitq == NULL);
815
816 assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
817 assert(thread->state & TH_WAIT);
818
819
820 if (thread_unblock(thread, wresult)) {
821 #if SCHED_TRACE_THREAD_WAKEUPS
822 backtrace(&thread->thread_wakeup_bt[0],
823 (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL);
824 #endif
825 if ((option & WQ_OPTION_HANDOFF) &&
826 thread_allowed_for_handoff(thread)) {
827 thread_reference(thread);
828 assert(self->handoff_thread == NULL);
829 self->handoff_thread = thread;
830 } else {
831 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
832 }
833 }
834
835 return KERN_SUCCESS;
836 }
837
838 /*
839 * Routine: thread_mark_wait_locked
840 * Purpose:
841 * Mark a thread as waiting. If, given the circumstances,
842 * it doesn't want to wait (i.e. already aborted), then
843 * indicate that in the return value.
844 * Conditions:
845 * at splsched() and thread is locked.
846 */
847 __private_extern__
848 wait_result_t
849 thread_mark_wait_locked(
850 thread_t thread,
851 wait_interrupt_t interruptible_orig)
852 {
853 boolean_t at_safe_point;
854 wait_interrupt_t interruptible = interruptible_orig;
855
856 if (thread->state & TH_IDLE) {
857 panic("Invalid attempt to wait while running the idle thread");
858 }
859
860 assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
861
862 /*
863 * The thread may have certain types of interrupts/aborts masked
864 * off. Even if the wait location says these types of interrupts
865 * are OK, we have to honor mask settings (outer-scoped code may
866 * not be able to handle aborts at the moment).
867 */
868 interruptible &= TH_OPT_INTMASK;
869 if (interruptible > (thread->options & TH_OPT_INTMASK)) {
870 interruptible = thread->options & TH_OPT_INTMASK;
871 }
872
873 at_safe_point = (interruptible == THREAD_ABORTSAFE);
874
875 if (interruptible == THREAD_UNINT ||
876 !(thread->sched_flags & TH_SFLAG_ABORT) ||
877 (!at_safe_point &&
878 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
879 if (!(thread->state & TH_TERMINATE)) {
880 DTRACE_SCHED(sleep);
881 }
882
883 int state_bits = TH_WAIT;
884 if (!interruptible) {
885 state_bits |= TH_UNINT;
886 }
887 if (thread->sched_call) {
888 wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
889 if (is_kerneltask(thread->task)) {
890 mask = THREAD_WAIT_NOREPORT_KERNEL;
891 }
892 if ((interruptible_orig & mask) == 0) {
893 state_bits |= TH_WAIT_REPORT;
894 }
895 }
896 thread->state |= state_bits;
897 thread->at_safe_point = at_safe_point;
898
899 /* TODO: pass this through assert_wait instead, have
900 * assert_wait just take a struct as an argument */
901 assert(!thread->block_hint);
902 thread->block_hint = thread->pending_block_hint;
903 thread->pending_block_hint = kThreadWaitNone;
904
905 return thread->wait_result = THREAD_WAITING;
906 } else {
907 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
908 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
909 }
910 }
911 thread->pending_block_hint = kThreadWaitNone;
912
913 return thread->wait_result = THREAD_INTERRUPTED;
914 }
915
916 /*
917 * Routine: thread_interrupt_level
918 * Purpose:
919 * Set the maximum interruptible state for the
920 * current thread. The effective value of any
921 * interruptible flag passed into assert_wait
922 * will never exceed this.
923 *
924 * Useful for code that must not be interrupted,
925 * but which calls code that doesn't know that.
926 * Returns:
927 * The old interrupt level for the thread.
928 */
929 __private_extern__
930 wait_interrupt_t
931 thread_interrupt_level(
932 wait_interrupt_t new_level)
933 {
934 thread_t thread = current_thread();
935 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
936
937 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
938
939 return result;
940 }
941
942 /*
943 * assert_wait:
944 *
945 * Assert that the current thread is about to go to
946 * sleep until the specified event occurs.
947 */
948 wait_result_t
949 assert_wait(
950 event_t event,
951 wait_interrupt_t interruptible)
952 {
953 if (__improbable(event == NO_EVENT)) {
954 panic("%s() called with NO_EVENT", __func__);
955 }
956
957 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
958 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
959 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
960
961 struct waitq *waitq;
962 waitq = global_eventq(event);
963 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
964 }
965
966 /*
967 * assert_wait_queue:
968 *
969 * Return the global waitq for the specified event
970 */
971 struct waitq *
972 assert_wait_queue(
973 event_t event)
974 {
975 return global_eventq(event);
976 }
977
978 wait_result_t
979 assert_wait_timeout(
980 event_t event,
981 wait_interrupt_t interruptible,
982 uint32_t interval,
983 uint32_t scale_factor)
984 {
985 thread_t thread = current_thread();
986 wait_result_t wresult;
987 uint64_t deadline;
988 spl_t s;
989
990 if (__improbable(event == NO_EVENT)) {
991 panic("%s() called with NO_EVENT", __func__);
992 }
993
994 struct waitq *waitq;
995 waitq = global_eventq(event);
996
997 s = splsched();
998 waitq_lock(waitq);
999
1000 clock_interval_to_deadline(interval, scale_factor, &deadline);
1001
1002 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1003 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1004 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1005
1006 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1007 interruptible,
1008 TIMEOUT_URGENCY_SYS_NORMAL,
1009 deadline, TIMEOUT_NO_LEEWAY,
1010 thread);
1011
1012 waitq_unlock(waitq);
1013 splx(s);
1014 return wresult;
1015 }
1016
1017 wait_result_t
1018 assert_wait_timeout_with_leeway(
1019 event_t event,
1020 wait_interrupt_t interruptible,
1021 wait_timeout_urgency_t urgency,
1022 uint32_t interval,
1023 uint32_t leeway,
1024 uint32_t scale_factor)
1025 {
1026 thread_t thread = current_thread();
1027 wait_result_t wresult;
1028 uint64_t deadline;
1029 uint64_t abstime;
1030 uint64_t slop;
1031 uint64_t now;
1032 spl_t s;
1033
1034 if (__improbable(event == NO_EVENT)) {
1035 panic("%s() called with NO_EVENT", __func__);
1036 }
1037
1038 now = mach_absolute_time();
1039 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1040 deadline = now + abstime;
1041
1042 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1043
1044 struct waitq *waitq;
1045 waitq = global_eventq(event);
1046
1047 s = splsched();
1048 waitq_lock(waitq);
1049
1050 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1051 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1052 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1053
1054 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1055 interruptible,
1056 urgency, deadline, slop,
1057 thread);
1058
1059 waitq_unlock(waitq);
1060 splx(s);
1061 return wresult;
1062 }
1063
1064 wait_result_t
1065 assert_wait_deadline(
1066 event_t event,
1067 wait_interrupt_t interruptible,
1068 uint64_t deadline)
1069 {
1070 thread_t thread = current_thread();
1071 wait_result_t wresult;
1072 spl_t s;
1073
1074 if (__improbable(event == NO_EVENT)) {
1075 panic("%s() called with NO_EVENT", __func__);
1076 }
1077
1078 struct waitq *waitq;
1079 waitq = global_eventq(event);
1080
1081 s = splsched();
1082 waitq_lock(waitq);
1083
1084 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1085 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1086 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1087
1088 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1089 interruptible,
1090 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1091 TIMEOUT_NO_LEEWAY, thread);
1092 waitq_unlock(waitq);
1093 splx(s);
1094 return wresult;
1095 }
1096
1097 wait_result_t
1098 assert_wait_deadline_with_leeway(
1099 event_t event,
1100 wait_interrupt_t interruptible,
1101 wait_timeout_urgency_t urgency,
1102 uint64_t deadline,
1103 uint64_t leeway)
1104 {
1105 thread_t thread = current_thread();
1106 wait_result_t wresult;
1107 spl_t s;
1108
1109 if (__improbable(event == NO_EVENT)) {
1110 panic("%s() called with NO_EVENT", __func__);
1111 }
1112
1113 struct waitq *waitq;
1114 waitq = global_eventq(event);
1115
1116 s = splsched();
1117 waitq_lock(waitq);
1118
1119 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1120 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1121 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1122
1123 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1124 interruptible,
1125 urgency, deadline, leeway,
1126 thread);
1127 waitq_unlock(waitq);
1128 splx(s);
1129 return wresult;
1130 }
1131
1132 /*
1133 * thread_isoncpu:
1134 *
1135 * Return TRUE if a thread is running on a processor such that an AST
1136	 * is needed to pull it out of userspace execution, or, if executing in
1137	 * the kernel, to bring it to a context switch boundary that would cause
1138	 * its thread state to be serialized in the thread PCB.
1139 *
1140 * Thread locked, returns the same way. While locked, fields
1141 * like "state" cannot change. "runq" can change only from set to unset.
1142 */
1143 static inline boolean_t
1144 thread_isoncpu(thread_t thread)
1145 {
1146 /* Not running or runnable */
1147 if (!(thread->state & TH_RUN)) {
1148 return FALSE;
1149 }
1150
1151 /* Waiting on a runqueue, not currently running */
1152 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1153 if (thread->runq != PROCESSOR_NULL) {
1154 return FALSE;
1155 }
1156
1157 /*
1158 * Thread does not have a stack yet
1159 * It could be on the stack alloc queue or preparing to be invoked
1160 */
1161 if (!thread->kernel_stack) {
1162 return FALSE;
1163 }
1164
1165 /*
1166 * Thread must be running on a processor, or
1167 * about to run, or just did run. In all these
1168 * cases, an AST to the processor is needed
1169 * to guarantee that the thread is kicked out
1170 * of userspace and the processor has
1171 * context switched (and saved register state).
1172 */
1173 return TRUE;
1174 }
1175
1176 /*
1177 * thread_stop:
1178 *
1179 * Force a preemption point for a thread and wait
1180 * for it to stop running on a CPU. If a stronger
1181 * guarantee is requested, wait until no longer
1182 * runnable. Arbitrates access among
1183 * multiple stop requests. (released by unstop)
1184 *
1185 * The thread must enter a wait state and stop via a
1186 * separate means.
1187 *
1188 * Returns FALSE if interrupted.
1189 */
1190 boolean_t
1191 thread_stop(
1192 thread_t thread,
1193 boolean_t until_not_runnable)
1194 {
1195 wait_result_t wresult;
1196 spl_t s = splsched();
1197 boolean_t oncpu;
1198
1199 wake_lock(thread);
1200 thread_lock(thread);
1201
1202 while (thread->state & TH_SUSP) {
1203 thread->wake_active = TRUE;
1204 thread_unlock(thread);
1205
1206 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1207 wake_unlock(thread);
1208 splx(s);
1209
1210 if (wresult == THREAD_WAITING) {
1211 wresult = thread_block(THREAD_CONTINUE_NULL);
1212 }
1213
1214 if (wresult != THREAD_AWAKENED) {
1215 return FALSE;
1216 }
1217
1218 s = splsched();
1219 wake_lock(thread);
1220 thread_lock(thread);
1221 }
1222
1223 thread->state |= TH_SUSP;
1224
1225 while ((oncpu = thread_isoncpu(thread)) ||
1226 (until_not_runnable && (thread->state & TH_RUN))) {
1227 processor_t processor;
1228
1229 if (oncpu) {
1230 assert(thread->state & TH_RUN);
1231 processor = thread->chosen_processor;
1232 cause_ast_check(processor);
1233 }
1234
1235 thread->wake_active = TRUE;
1236 thread_unlock(thread);
1237
1238 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1239 wake_unlock(thread);
1240 splx(s);
1241
1242 if (wresult == THREAD_WAITING) {
1243 wresult = thread_block(THREAD_CONTINUE_NULL);
1244 }
1245
1246 if (wresult != THREAD_AWAKENED) {
1247 thread_unstop(thread);
1248 return FALSE;
1249 }
1250
1251 s = splsched();
1252 wake_lock(thread);
1253 thread_lock(thread);
1254 }
1255
1256 thread_unlock(thread);
1257 wake_unlock(thread);
1258 splx(s);
1259
1260 /*
1261 * We return with the thread unlocked. To prevent it from
1262 * transitioning to a runnable state (or from TH_RUN to
1263 * being on the CPU), the caller must ensure the thread
1264 * is stopped via an external means (such as an AST)
1265 */
1266
1267 return TRUE;
1268 }
1269
1270 /*
1271 * thread_unstop:
1272 *
1273 * Release a previous stop request and set
1274 * the thread running if appropriate.
1275 *
1276 * Use only after a successful stop operation.
1277 */
1278 void
1279 thread_unstop(
1280 thread_t thread)
1281 {
1282 spl_t s = splsched();
1283
1284 wake_lock(thread);
1285 thread_lock(thread);
1286
1287 assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1288
1289 if (thread->state & TH_SUSP) {
1290 thread->state &= ~TH_SUSP;
1291
1292 if (thread->wake_active) {
1293 thread->wake_active = FALSE;
1294 thread_unlock(thread);
1295
1296 thread_wakeup(&thread->wake_active);
1297 wake_unlock(thread);
1298 splx(s);
1299
1300 return;
1301 }
1302 }
1303
1304 thread_unlock(thread);
1305 wake_unlock(thread);
1306 splx(s);
1307 }
1308
1309 /*
1310 * thread_wait:
1311 *
1312 * Wait for a thread to stop running. (non-interruptible)
1313 *
1314 */
1315 void
1316 thread_wait(
1317 thread_t thread,
1318 boolean_t until_not_runnable)
1319 {
1320 wait_result_t wresult;
1321 boolean_t oncpu;
1322 processor_t processor;
1323 spl_t s = splsched();
1324
1325 wake_lock(thread);
1326 thread_lock(thread);
1327
1328 /*
1329 * Wait until not running on a CPU. If stronger requirement
1330 * desired, wait until not runnable. Assumption: if thread is
1331 * on CPU, then TH_RUN is set, so we're not waiting in any case
1332 * where the original, pure "TH_RUN" check would have let us
1333 * finish.
1334 */
1335 while ((oncpu = thread_isoncpu(thread)) ||
1336 (until_not_runnable && (thread->state & TH_RUN))) {
1337 if (oncpu) {
1338 assert(thread->state & TH_RUN);
1339 processor = thread->chosen_processor;
1340 cause_ast_check(processor);
1341 }
1342
1343 thread->wake_active = TRUE;
1344 thread_unlock(thread);
1345
1346 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1347 wake_unlock(thread);
1348 splx(s);
1349
1350 if (wresult == THREAD_WAITING) {
1351 thread_block(THREAD_CONTINUE_NULL);
1352 }
1353
1354 s = splsched();
1355 wake_lock(thread);
1356 thread_lock(thread);
1357 }
1358
1359 thread_unlock(thread);
1360 wake_unlock(thread);
1361 splx(s);
1362 }
1363
1364 /*
1365 * Routine: clear_wait_internal
1366 *
1367 * Clear the wait condition for the specified thread.
1368 * Start the thread executing if that is appropriate.
1369 * Arguments:
1370 * thread thread to awaken
1371 * result Wakeup result the thread should see
1372 * Conditions:
1373 * At splsched
1374 * the thread is locked.
1375 * Returns:
1376	 * KERN_SUCCESS thread was rousted out of a wait
1377 * KERN_FAILURE thread was waiting but could not be rousted
1378 * KERN_NOT_WAITING thread was not waiting
1379 */
1380 __private_extern__ kern_return_t
1381 clear_wait_internal(
1382 thread_t thread,
1383 wait_result_t wresult)
1384 {
1385 uint32_t i = LockTimeOutUsec;
1386 struct waitq *waitq = thread->waitq;
1387
1388 do {
1389 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1390 return KERN_FAILURE;
1391 }
1392
1393 if (waitq != NULL) {
1394 if (!waitq_pull_thread_locked(waitq, thread)) {
1395 thread_unlock(thread);
1396 delay(1);
1397 if (i > 0 && !machine_timeout_suspended()) {
1398 i--;
1399 }
1400 thread_lock(thread);
1401 if (waitq != thread->waitq) {
1402 return KERN_NOT_WAITING;
1403 }
1404 continue;
1405 }
1406 }
1407
1408 /* TODO: Can we instead assert TH_TERMINATE is not set? */
1409 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
1410 return thread_go(thread, wresult, WQ_OPTION_NONE);
1411 } else {
1412 return KERN_NOT_WAITING;
1413 }
1414 } while (i > 0);
1415
1416 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1417 thread, waitq, cpu_number());
1418
1419 return KERN_FAILURE;
1420 }
1421
1422
1423 /*
1424 * clear_wait:
1425 *
1426 * Clear the wait condition for the specified thread. Start the thread
1427 * executing if that is appropriate.
1428 *
1429 * parameters:
1430 * thread thread to awaken
1431 * result Wakeup result the thread should see
1432 */
1433 kern_return_t
1434 clear_wait(
1435 thread_t thread,
1436 wait_result_t result)
1437 {
1438 kern_return_t ret;
1439 spl_t s;
1440
1441 s = splsched();
1442 thread_lock(thread);
1443 ret = clear_wait_internal(thread, result);
1444 thread_unlock(thread);
1445 splx(s);
1446 return ret;
1447 }
1448
1449
1450 /*
1451 * thread_wakeup_prim:
1452 *
1453 * Common routine for thread_wakeup, thread_wakeup_with_result,
1454 * and thread_wakeup_one.
1455 *
1456 */
1457 kern_return_t
1458 thread_wakeup_prim(
1459 event_t event,
1460 boolean_t one_thread,
1461 wait_result_t result)
1462 {
1463 if (__improbable(event == NO_EVENT)) {
1464 panic("%s() called with NO_EVENT", __func__);
1465 }
1466
1467 struct waitq *wq = global_eventq(event);
1468
1469 if (one_thread) {
1470 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1471 } else {
1472 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1473 }
1474 }
1475
1476 /*
1477 * Wakeup a specified thread if and only if it's waiting for this event
1478 */
1479 kern_return_t
1480 thread_wakeup_thread(
1481 event_t event,
1482 thread_t thread)
1483 {
1484 if (__improbable(event == NO_EVENT)) {
1485 panic("%s() called with NO_EVENT", __func__);
1486 }
1487
1488 if (__improbable(thread == THREAD_NULL)) {
1489 panic("%s() called with THREAD_NULL", __func__);
1490 }
1491
1492 struct waitq *wq = global_eventq(event);
1493
1494 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1495 }
1496
1497 /*
1498 * Wakeup a thread waiting on an event and promote it to a priority.
1499 *
1500 * Requires woken thread to un-promote itself when done.
1501 */
1502 kern_return_t
1503 thread_wakeup_one_with_pri(
1504 event_t event,
1505 int priority)
1506 {
1507 if (__improbable(event == NO_EVENT)) {
1508 panic("%s() called with NO_EVENT", __func__);
1509 }
1510
1511 struct waitq *wq = global_eventq(event);
1512
1513 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1514 }
1515
1516 /*
1517 * Wakeup a thread waiting on an event,
1518 * promote it to a priority,
1519 * and return a reference to the woken thread.
1520 *
1521 * Requires woken thread to un-promote itself when done.
1522 */
1523 thread_t
1524 thread_wakeup_identify(event_t event,
1525 int priority)
1526 {
1527 if (__improbable(event == NO_EVENT)) {
1528 panic("%s() called with NO_EVENT", __func__);
1529 }
1530
1531 struct waitq *wq = global_eventq(event);
1532
1533 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1534 }
1535
1536 /*
1537 * thread_bind:
1538 *
1539 * Force the current thread to execute on the specified processor.
1540 * Takes effect after the next thread_block().
1541 *
1542 * Returns the previous binding. PROCESSOR_NULL means
1543 * not bound.
1544 *
1545 * XXX - DO NOT export this to users - XXX
1546 */
1547 processor_t
1548 thread_bind(
1549 processor_t processor)
1550 {
1551 thread_t self = current_thread();
1552 processor_t prev;
1553 spl_t s;
1554
1555 s = splsched();
1556 thread_lock(self);
1557
1558 prev = thread_bind_internal(self, processor);
1559
1560 thread_unlock(self);
1561 splx(s);
1562
1563 return prev;
1564 }
1565
1566 /*
1567 * thread_bind_internal:
1568 *
1569 * If the specified thread is not the current thread, and it is currently
1570 * running on another CPU, a remote AST must be sent to that CPU to cause
1571 * the thread to migrate to its bound processor. Otherwise, the migration
1572 * will occur at the next quantum expiration or blocking point.
1573 *
1574	 * When the thread is the current thread, an explicit thread_block() should
1575 * be used to force the current processor to context switch away and
1576 * let the thread migrate to the bound processor.
1577 *
1578 * Thread must be locked, and at splsched.
1579 */
1580
1581 static processor_t
1582 thread_bind_internal(
1583 thread_t thread,
1584 processor_t processor)
1585 {
1586 processor_t prev;
1587
1588 /* <rdar://problem/15102234> */
1589 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1590 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1591 assert(thread->runq == PROCESSOR_NULL);
1592
1593 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1594
1595 prev = thread->bound_processor;
1596 thread->bound_processor = processor;
1597
1598 return prev;
1599 }
1600
1601 /*
1602 * thread_vm_bind_group_add:
1603 *
1604 * The "VM bind group" is a special mechanism to mark a collection
1605 * of threads from the VM subsystem that, in general, should be scheduled
1606 * with only one CPU of parallelism. To accomplish this, we initially
1607 * bind all the threads to the master processor, which has the effect
1608 * that only one of the threads in the group can execute at once, including
1609	 * preempting threads in the group that have a lower priority. Future
1610	 * implementations may use more dynamic mechanisms to prevent the collection
1611 * of VM threads from using more CPU time than desired.
1612 *
1613 * The current implementation can result in priority inversions where
1614 * compute-bound priority 95 or realtime threads that happen to have
1615 * landed on the master processor prevent the VM threads from running.
1616 * When this situation is detected, we unbind the threads for one
1617	 * scheduler tick to allow the scheduler to run the threads on
1618	 * additional CPUs before restoring the binding (assuming high latency
1619 * is no longer a problem).
1620 */
1621
1622 /*
1623 * The current max is provisioned for:
1624 * vm_compressor_swap_trigger_thread (92)
1625 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1626 * vm_pageout_continue (92)
1627 * memorystatus_thread (95)
1628 */
1629 #define MAX_VM_BIND_GROUP_COUNT (5)
1630 decl_simple_lock_data(static, sched_vm_group_list_lock);
1631 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1632 static int sched_vm_group_thread_count;
1633 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1634
1635 void
1636 thread_vm_bind_group_add(void)
1637 {
1638 thread_t self = current_thread();
1639
1640 thread_reference_internal(self);
1641 self->options |= TH_OPT_SCHED_VM_GROUP;
1642
1643 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1644 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1645 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1646 simple_unlock(&sched_vm_group_list_lock);
1647
1648 thread_bind(master_processor);
1649
1650 /* Switch to bound processor if not already there */
1651 thread_block(THREAD_CONTINUE_NULL);
1652 }
1653
1654 static void
1655 sched_vm_group_maintenance(void)
1656 {
1657 uint64_t ctime = mach_absolute_time();
1658 uint64_t longtime = ctime - sched_tick_interval;
1659 int i;
1660 spl_t s;
1661 boolean_t high_latency_observed = FALSE;
1662 boolean_t runnable_and_not_on_runq_observed = FALSE;
1663 boolean_t bind_target_changed = FALSE;
1664 processor_t bind_target = PROCESSOR_NULL;
1665
1666 /* Make sure nobody attempts to add new threads while we are enumerating them */
1667 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1668
1669 s = splsched();
1670
1671 for (i = 0; i < sched_vm_group_thread_count; i++) {
1672 thread_t thread = sched_vm_group_thread_list[i];
1673 assert(thread != THREAD_NULL);
1674 thread_lock(thread);
1675 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
1676 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1677 high_latency_observed = TRUE;
1678 } else if (thread->runq == PROCESSOR_NULL) {
1679	/* There are some cases where a thread that is transitioning also falls into this case */
1680 runnable_and_not_on_runq_observed = TRUE;
1681 }
1682 }
1683 thread_unlock(thread);
1684
1685 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1686 /* All the things we are looking for are true, stop looking */
1687 break;
1688 }
1689 }
1690
1691 splx(s);
1692
1693 if (sched_vm_group_temporarily_unbound) {
1694 /* If we turned off binding, make sure everything is OK before rebinding */
1695 if (!high_latency_observed) {
1696 /* rebind */
1697 bind_target_changed = TRUE;
1698 bind_target = master_processor;
1699 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1700 }
1701 } else {
1702 /*
1703 * Check if we're in a bad state, which is defined by high
1704 * latency with no core currently executing a thread. If a
1705 * single thread is making progress on a CPU, that means the
1706 * binding concept to reduce parallelism is working as
1707 * designed.
1708 */
1709 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1710 /* unbind */
1711 bind_target_changed = TRUE;
1712 bind_target = PROCESSOR_NULL;
1713 sched_vm_group_temporarily_unbound = TRUE;
1714 }
1715 }
1716
1717 if (bind_target_changed) {
1718 s = splsched();
1719 for (i = 0; i < sched_vm_group_thread_count; i++) {
1720 thread_t thread = sched_vm_group_thread_list[i];
1721 boolean_t removed;
1722 assert(thread != THREAD_NULL);
1723
1724 thread_lock(thread);
1725 removed = thread_run_queue_remove(thread);
1726 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1727 thread_bind_internal(thread, bind_target);
1728 } else {
1729 /*
1730 * Thread was in the middle of being context-switched-to,
1731 * or was in the process of blocking. To avoid switching the bind
1732 * state out mid-flight, defer the change if possible.
1733 */
1734 if (bind_target == PROCESSOR_NULL) {
1735 thread_bind_internal(thread, bind_target);
1736 } else {
1737 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1738 }
1739 }
1740
1741 if (removed) {
1742 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1743 }
1744 thread_unlock(thread);
1745 }
1746 splx(s);
1747 }
1748
1749 simple_unlock(&sched_vm_group_list_lock);
1750 }
1751
1752 /* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
1753 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1754 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1755	 * - IPI thrash if this core does not remain idle following the load balancing ASTs.
1756	 * - Idle "thrash", when IPI issue is followed by idle entry/core power down
1757	 *   followed by a wakeup shortly thereafter.
1758 */
1759
1760 #if (DEVELOPMENT || DEBUG)
1761 int sched_smt_balance = 1;
1762 #endif
1763
1764 /* Invoked with pset locked, returns with pset unlocked */
1765 void
1766 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
1767 {
1768 processor_t ast_processor = NULL;
1769
1770 #if (DEVELOPMENT || DEBUG)
1771 if (__improbable(sched_smt_balance == 0)) {
1772 goto smt_balance_exit;
1773 }
1774 #endif
1775
1776 assert(cprocessor == current_processor());
1777 if (cprocessor->is_SMT == FALSE) {
1778 goto smt_balance_exit;
1779 }
1780
1781 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1782
1783 /* Determine if both this processor and its sibling are idle,
1784 * indicating an SMT rebalancing opportunity.
1785 */
1786 if (sib_processor->state != PROCESSOR_IDLE) {
1787 goto smt_balance_exit;
1788 }
1789
1790 processor_t sprocessor;
1791
1792 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
1793 uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
1794 ~cpset->primary_map);
1795 for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
1796 sprocessor = processor_array[cpuid];
1797 if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
1798 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
1799 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1800 if (ipi_type != SCHED_IPI_NONE) {
1801 assert(sprocessor != cprocessor);
1802 ast_processor = sprocessor;
1803 break;
1804 }
1805 }
1806 }
1807
1808 smt_balance_exit:
1809 pset_unlock(cpset);
1810
1811 if (ast_processor) {
1812 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
1813 sched_ipi_perform(ast_processor, ipi_type);
1814 }
1815 }
1816
1817 static cpumap_t
1818 pset_available_cpumap(processor_set_t pset)
1819 {
1820 return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING] | pset->cpu_state_map[PROCESSOR_RUNNING]) &
1821 pset->recommended_bitmask;
1822 }
1823
1824 static cpumap_t
1825 pset_available_but_not_running_cpumap(processor_set_t pset)
1826 {
1827 return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
1828 pset->recommended_bitmask;
1829 }
1830
1831 bool
1832 pset_has_stealable_threads(processor_set_t pset)
1833 {
1834 pset_assert_locked(pset);
1835
1836 cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
1837 /*
1838 * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
1839 * available primary CPUs
1840 */
1841 avail_map &= pset->primary_map;
1842
1843 return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
1844 }
1845
1846 /*
1847 * Called with pset locked, on a processor that is committing to run a new thread
1848 * Will transition an idle or dispatching processor to running as it picks up
1849 * the first new thread from the idle thread.
1850 */
1851 static void
1852 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
1853 {
1854 pset_assert_locked(pset);
1855
1856 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
1857 assert(current_thread() == processor->idle_thread);
1858
1859 /*
1860 * Dispatching processor is now committed to running new_thread,
1861 * so change its state to PROCESSOR_RUNNING.
1862 */
1863 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
1864 } else {
1865 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
1866 }
1867
1868 processor_state_update_from_thread(processor, new_thread);
1869
1870 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
1871 bit_set(pset->realtime_map, processor->cpu_id);
1872 } else {
1873 bit_clear(pset->realtime_map, processor->cpu_id);
1874 }
1875
1876 pset_node_t node = pset->node;
1877
1878 if (bit_count(node->pset_map) == 1) {
1879 /* Node has only a single pset, so skip node pset map updates */
1880 return;
1881 }
1882
1883 cpumap_t avail_map = pset_available_cpumap(pset);
1884
1885 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
1886 if ((avail_map & pset->realtime_map) == avail_map) {
1887 /* No more non-RT CPUs in this pset */
1888 atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
1889 }
1890 avail_map &= pset->primary_map;
1891 if ((avail_map & pset->realtime_map) == avail_map) {
1892 /* No more non-RT primary CPUs in this pset */
1893 atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
1894 }
1895 } else {
1896 if ((avail_map & pset->realtime_map) != avail_map) {
1897 if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
1898 atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
1899 }
1900 }
1901 avail_map &= pset->primary_map;
1902 if ((avail_map & pset->realtime_map) != avail_map) {
1903 if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
1904 atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
1905 }
1906 }
1907 }
1908 }
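/*
 * Editor's summary (not part of the original source): the tail of the function
 * above maintains two node-level hint bitmaps, pset_non_rt_map and
 * pset_non_rt_primary_map, which record per pset whether any available
 * (respectively, available primary) CPU is still running something other than
 * a realtime thread. Committing a realtime thread may clear the pset's bits;
 * committing a non-realtime thread sets them again if needed.
 */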
1909
1910 static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries);
1911 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
1912 #if defined(__x86_64__)
1913 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map);
1914 #endif
1915 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor);
1916 static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor);
1917 int sched_allow_rt_smt = 1;
1918 int sched_avoid_cpu0 = 1;
1919
1920 /*
1921 * thread_select:
1922 *
1923 * Select a new thread for the current processor to execute.
1924 *
1925 * May select the current thread, which must be locked.
1926 */
1927 static thread_t
1928 thread_select(thread_t thread,
1929 processor_t processor,
1930 ast_t *reason)
1931 {
1932 processor_set_t pset = processor->processor_set;
1933 thread_t new_thread = THREAD_NULL;
1934
1935 assert(processor == current_processor());
1936 assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
1937
1938 do {
1939 /*
1940 * Update the priority.
1941 */
1942 if (SCHED(can_update_priority)(thread)) {
1943 SCHED(update_priority)(thread);
1944 }
1945
1946 pset_lock(pset);
1947
1948 processor_state_update_from_thread(processor, thread);
1949
1950 restart:
1951 /* Acknowledge any pending IPIs here with pset lock held */
1952 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
1953 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
1954
1955 #if defined(CONFIG_SCHED_DEFERRED_AST)
1956 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
1957 #endif
1958
1959 bool secondary_can_only_run_realtime_thread = false;
1960
1961 assert(processor->state != PROCESSOR_OFF_LINE);
1962
1963 if (!processor->is_recommended) {
1964 /*
1965 * The performance controller has provided a hint to not dispatch more threads,
1966 * unless they are bound to us (and thus we are the only option).
1967 */
1968 if (!SCHED(processor_bound_count)(processor)) {
1969 goto idle;
1970 }
1971 } else if (processor->processor_primary != processor) {
1972 /*
1973 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1974 * we should look for work only under the same conditions that choose_processor()
1975 * would have assigned work, which is when all primary processors have been assigned work.
1976 *
1977 * An exception is that bound threads are dispatched to a processor without going through
1978 * choose_processor(), so in those cases we should continue trying to dequeue work.
1979 */
1980 if (!SCHED(processor_bound_count)(processor)) {
1981 if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
1982 goto idle;
1983 }
1984
1985 /*
1986 * TODO: What if a secondary core beat an idle primary to waking up from an IPI?
1987 * Should it dequeue immediately, or spin waiting for the primary to wake up?
1988 */
1989
1990 /* There are no idle primaries */
1991
1992 if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) {
1993 bool secondary_can_run_realtime_thread = sched_allow_rt_smt && rt_runq_count(pset) && all_available_primaries_are_running_realtime_threads(pset);
1994 if (!secondary_can_run_realtime_thread) {
1995 goto idle;
1996 }
1997 secondary_can_only_run_realtime_thread = true;
1998 }
1999 }
2000 }
2001
2002 /*
2003 * Test to see if the current thread should continue
2004 * to run on this processor. Must not be attempting to wait, and not
2005 * bound to a different processor, nor be in the wrong
2006 * processor set, nor be forced to context switch by TH_SUSP.
2007 *
2008 * Note that there are never any RT threads in the regular runqueue.
2009 *
2010 * This code is insanely tricky.
2011 */
2012
2013 /* i.e. not waiting, not TH_SUSP'ed */
2014 bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
2015
2016 /*
2017 * Threads running on secondary SMT processors are forced to context switch. Don't rebalance realtime threads.
2018 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
2019 * <rdar://problem/47907700>
2020 *
2021 * A yielding thread shouldn't be forced to context switch.
2022 */
2023
2024 bool is_yielding = (*reason & AST_YIELD) == AST_YIELD;
2025
2026 bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
2027
2028 bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
2029
2030 bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2031
2032 bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread);
2033
2034 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
2035 /*
2036 * This thread is eligible to keep running on this processor.
2037 *
2038 * RT threads with un-expired quantum stay on processor,
2039 * unless there's a valid RT thread with an earlier deadline.
2040 */
2041 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
2042 if (rt_runq_count(pset) > 0) {
2043 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2044
2045 if (next_rt->realtime.deadline < processor->deadline &&
2046 (next_rt->bound_processor == PROCESSOR_NULL ||
2047 next_rt->bound_processor == processor)) {
2048 /* The next RT thread is better, so pick it off the runqueue. */
2049 goto pick_new_rt_thread;
2050 }
2051 }
2052
2053 /* This is still the best RT thread to run. */
2054 processor->deadline = thread->realtime.deadline;
2055
2056 sched_update_pset_load_average(pset, 0);
2057
2058 processor_t next_rt_processor = PROCESSOR_NULL;
2059 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2060
2061 if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) {
2062 next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true);
2063 if (next_rt_processor) {
2064 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
2065 (uintptr_t)0, (uintptr_t)-4, next_rt_processor->cpu_id, next_rt_processor->state, 0);
2066 if (next_rt_processor->state == PROCESSOR_IDLE) {
2067 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
2068 }
2069 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
2070 }
2071 }
2072 pset_unlock(pset);
2073
2074 if (next_rt_processor) {
2075 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2076 }
2077
2078 return thread;
2079 }
2080
2081 if ((rt_runq_count(pset) == 0) &&
2082 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
2083 /* This thread is still the highest priority runnable (non-idle) thread */
2084 processor->deadline = UINT64_MAX;
2085
2086 sched_update_pset_load_average(pset, 0);
2087 pset_unlock(pset);
2088
2089 return thread;
2090 }
2091 } else {
2092 /*
2093 * This processor must context switch.
2094 * If it's due to a rebalance, we should aggressively find this thread a new home.
2095 */
2096 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
2097 *reason |= AST_REBALANCE;
2098 }
2099 }
2100
2101 /* OK, so we're not going to run the current thread. Look at the RT queue. */
2102 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor);
2103 if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
2104 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2105
2106 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
2107 (next_rt->bound_processor == processor)))) {
2108 pick_new_rt_thread:
2109 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2110
2111 new_thread->runq = PROCESSOR_NULL;
2112 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
2113 rt_runq_count_decr(pset);
2114
2115 processor->deadline = new_thread->realtime.deadline;
2116
2117 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2118
2119 sched_update_pset_load_average(pset, 0);
2120
2121 processor_t ast_processor = PROCESSOR_NULL;
2122 processor_t next_rt_processor = PROCESSOR_NULL;
2123 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2124 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2125
2126 if (processor->processor_secondary != NULL) {
2127 processor_t sprocessor = processor->processor_secondary;
2128 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2129 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2130 ast_processor = sprocessor;
2131 }
2132 }
2133 if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) {
2134 next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true);
2135 if (next_rt_processor) {
2136 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
2137 (uintptr_t)0, (uintptr_t)-5, next_rt_processor->cpu_id, next_rt_processor->state, 0);
2138 if (next_rt_processor->state == PROCESSOR_IDLE) {
2139 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
2140 }
2141 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
2142 }
2143 }
2144 pset_unlock(pset);
2145
2146 if (ast_processor) {
2147 sched_ipi_perform(ast_processor, ipi_type);
2148 }
2149
2150 if (next_rt_processor) {
2151 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2152 }
2153
2154 return new_thread;
2155 }
2156 }
2157 if (secondary_can_only_run_realtime_thread) {
2158 goto idle;
2159 }
2160
2161 processor->deadline = UINT64_MAX;
2162
2163 /* No RT threads, so let's look at the regular threads. */
2164 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
2165 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2166 sched_update_pset_load_average(pset, 0);
2167
2168 processor_t ast_processor = PROCESSOR_NULL;
2169 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2170
2171 processor_t sprocessor = processor->processor_secondary;
2172 if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) {
2173 if (thread_no_smt(new_thread)) {
2174 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2175 ast_processor = sprocessor;
2176 }
2177 }
2178 pset_unlock(pset);
2179
2180 if (ast_processor) {
2181 sched_ipi_perform(ast_processor, ipi_type);
2182 }
2183 return new_thread;
2184 }
2185
2186 if (processor->must_idle) {
2187 processor->must_idle = false;
2188 goto idle;
2189 }
2190
2191 if (SCHED(steal_thread_enabled)(pset) && (processor->processor_primary == processor)) {
2192 /*
2193 * No runnable threads, attempt to steal
2194 * from other processors. Returns with pset lock dropped.
2195 */
2196
2197 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
2198 /*
2199 * Avoid taking the pset_lock unless it is necessary to change state.
2200 * It's safe to read processor->state here, as only the current processor can change state
2201 * from this point (interrupts are disabled and this processor is committed to run new_thread).
2202 */
2203 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2204 pset_lock(pset);
2205 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2206 pset_unlock(pset);
2207 } else {
2208 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
2209 processor_state_update_from_thread(processor, new_thread);
2210 }
2211
2212 return new_thread;
2213 }
2214
2215 /*
2216 * If other threads have appeared, shortcut
2217 * around again.
2218 */
2219 if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) {
2220 continue;
2221 }
2222
2223 pset_lock(pset);
2224
2225 /* Someone selected this processor while we had dropped the lock */
2226 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2227 goto restart;
2228 }
2229 }
2230
2231 idle:
2232 /*
2233 * Nothing is runnable, so set this processor idle if it
2234 * was running.
2235 */
2236 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
2237 pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
2238 processor_state_update_idle(processor);
2239 }
2240
2241 /* Invoked with pset locked, returns with pset unlocked */
2242 SCHED(processor_balance)(processor, pset);
2243
2244 new_thread = processor->idle_thread;
2245 } while (new_thread == THREAD_NULL);
2246
2247 return new_thread;
2248 }
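/*
 * Editor's summary (not part of the original source): thread_select() above
 * effectively walks a fixed preference order while holding the pset lock:
 *
 *     1. keep running the current thread if it is still eligible here (not
 *        waiting, not bound or affine elsewhere, no SMT rebalance needed);
 *     2. otherwise take the earliest-deadline realtime thread, if this
 *        processor is allowed to run one;
 *     3. otherwise take a thread from the regular run queue via
 *        SCHED(choose_thread)();
 *     4. otherwise, on a primary processor, try SCHED(steal_thread)();
 *     5. otherwise go idle and return the processor's idle thread.
 */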
2249
2250 /*
2251 * thread_invoke
2252 *
2253 * Called at splsched with neither thread locked.
2254 *
2255 * Perform a context switch and start executing the new thread.
2256 *
2257 * Returns FALSE when the context switch didn't happen.
2258 * The reference to the new thread is still consumed.
2259 *
2260 * "self" is what is currently running on the processor,
2261 * "thread" is the new thread to context switch to
2262 * (which may be the same thread in some cases)
2263 */
2264 static boolean_t
2265 thread_invoke(
2266 thread_t self,
2267 thread_t thread,
2268 ast_t reason)
2269 {
2270 if (__improbable(get_preemption_level() != 0)) {
2271 int pl = get_preemption_level();
2272 panic("thread_invoke: preemption_level %d, possible cause: %s",
2273 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2274 "blocking while holding a spinlock, or within interrupt context"));
2275 }
2276
2277 thread_continue_t continuation = self->continuation;
2278 void *parameter = self->parameter;
2279 processor_t processor;
2280
2281 uint64_t ctime = mach_absolute_time();
2282
2283 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2284 commpage_update_mach_approximate_time(ctime);
2285 #endif
2286
2287 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2288 if (!((thread->state & TH_IDLE) != 0 ||
2289 ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
2290 sched_timeshare_consider_maintenance(ctime);
2291 }
2292 #endif
2293
2294 #if MONOTONIC
2295 mt_sched_update(self);
2296 #endif /* MONOTONIC */
2297
2298 assert_thread_magic(self);
2299 assert(self == current_thread());
2300 assert(self->runq == PROCESSOR_NULL);
2301 assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2302
2303 thread_lock(thread);
2304
2305 assert_thread_magic(thread);
2306 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
2307 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2308 assert(thread->runq == PROCESSOR_NULL);
2309
2310 /* Reload precise timing global policy to thread-local policy */
2311 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2312
2313 /* Update SFI class based on other factors */
2314 thread->sfi_class = sfi_thread_classify(thread);
2315
2316 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2317 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2318 /*
2319 * In case a base_pri update happened between the timestamp and
2320 * taking the thread lock
2321 */
2322 if (ctime <= thread->last_basepri_change_time) {
2323 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2324 }
2325
2326 /* Allow realtime threads to hang onto a stack. */
2327 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
2328 self->reserved_stack = self->kernel_stack;
2329 }
2330
2331 /* Prepare for spin debugging */
2332 #if INTERRUPT_MASKED_DEBUG
2333 ml_spin_debug_clear(thread);
2334 #endif
2335
2336 if (continuation != NULL) {
2337 if (!thread->kernel_stack) {
2338 /*
2339 * If we are using a privileged stack,
2340 * check to see whether we can exchange it with
2341 * that of the other thread.
2342 */
2343 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
2344 goto need_stack;
2345 }
2346
2347 /*
2348 * Context switch by performing a stack handoff.
2349 * Requires both threads to be parked in a continuation.
2350 */
2351 continuation = thread->continuation;
2352 parameter = thread->parameter;
2353
2354 processor = current_processor();
2355 processor->active_thread = thread;
2356 processor_state_update_from_thread(processor, thread);
2357
2358 if (thread->last_processor != processor && thread->last_processor != NULL) {
2359 if (thread->last_processor->processor_set != processor->processor_set) {
2360 thread->ps_switch++;
2361 }
2362 thread->p_switch++;
2363 }
2364 thread->last_processor = processor;
2365 thread->c_switch++;
2366 ast_context(thread);
2367
2368 thread_unlock(thread);
2369
2370 self->reason = reason;
2371
2372 processor->last_dispatch = ctime;
2373 self->last_run_time = ctime;
2374 processor_timer_switch_thread(ctime, &thread->system_timer);
2375 timer_update(&thread->runnable_timer, ctime);
2376 processor->kernel_timer = &thread->system_timer;
2377
2378 /*
2379 * Since non-precise user/kernel time doesn't update the state timer
2380 * during privilege transitions, synthesize an event now.
2381 */
2382 if (!thread->precise_user_kernel_time) {
2383 timer_update(processor->current_state, ctime);
2384 }
2385
2386 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2387 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
2388 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2389
2390 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2391 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2392 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2393 }
2394
2395 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2396
2397 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2398
2399 #if KPERF
2400 kperf_off_cpu(self);
2401 #endif /* KPERF */
2402
2403 /*
2404 * This is where we actually switch thread identity,
2405 * and address space if required. However, register
2406 * state is not switched - this routine leaves the
2407 * stack and register state active on the current CPU.
2408 */
2409 TLOG(1, "thread_invoke: calling stack_handoff\n");
2410 stack_handoff(self, thread);
2411
2412 /* 'self' is now off core */
2413 assert(thread == current_thread_volatile());
2414
2415 DTRACE_SCHED(on__cpu);
2416
2417 #if KPERF
2418 kperf_on_cpu(thread, continuation, NULL);
2419 #endif /* KPERF */
2420
2421 thread_dispatch(self, thread);
2422
2423 #if KASAN
2424 /* Old thread's stack has been moved to the new thread, so explicitly
2425 * unpoison it. */
2426 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2427 #endif
2428
2429 thread->continuation = thread->parameter = NULL;
2430
2431 counter(c_thread_invoke_hits++);
2432
2433 boolean_t enable_interrupts = TRUE;
2434
2435 /* idle thread needs to stay interrupts-disabled */
2436 if ((thread->state & TH_IDLE)) {
2437 enable_interrupts = FALSE;
2438 }
2439
2440 assert(continuation);
2441 call_continuation(continuation, parameter,
2442 thread->wait_result, enable_interrupts);
2443 /*NOTREACHED*/
2444 } else if (thread == self) {
2445 /* same thread but with continuation */
2446 ast_context(self);
2447 counter(++c_thread_invoke_same);
2448
2449 thread_unlock(self);
2450
2451 #if KPERF
2452 kperf_on_cpu(thread, continuation, NULL);
2453 #endif /* KPERF */
2454
2455 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2456 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2457 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2458
2459 #if KASAN
2460 /* stack handoff to self - no thread_dispatch(), so clear the stack
2461 * and free the fakestack directly */
2462 kasan_fakestack_drop(self);
2463 kasan_fakestack_gc(self);
2464 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2465 #endif
2466
2467 self->continuation = self->parameter = NULL;
2468
2469 boolean_t enable_interrupts = TRUE;
2470
2471 /* idle thread needs to stay interrupts-disabled */
2472 if ((self->state & TH_IDLE)) {
2473 enable_interrupts = FALSE;
2474 }
2475
2476 call_continuation(continuation, parameter,
2477 self->wait_result, enable_interrupts);
2478 /*NOTREACHED*/
2479 }
2480 } else {
2481 /*
2482 * Check that the other thread has a stack
2483 */
2484 if (!thread->kernel_stack) {
2485 need_stack:
2486 if (!stack_alloc_try(thread)) {
2487 counter(c_thread_invoke_misses++);
2488 thread_unlock(thread);
2489 thread_stack_enqueue(thread);
2490 return FALSE;
2491 }
2492 } else if (thread == self) {
2493 ast_context(self);
2494 counter(++c_thread_invoke_same);
2495 thread_unlock(self);
2496
2497 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2498 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2499 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2500
2501 return TRUE;
2502 }
2503 }
2504
2505 /*
2506 * Context switch by full context save.
2507 */
2508 processor = current_processor();
2509 processor->active_thread = thread;
2510 processor_state_update_from_thread(processor, thread);
2511
2512 if (thread->last_processor != processor && thread->last_processor != NULL) {
2513 if (thread->last_processor->processor_set != processor->processor_set) {
2514 thread->ps_switch++;
2515 }
2516 thread->p_switch++;
2517 }
2518 thread->last_processor = processor;
2519 thread->c_switch++;
2520 ast_context(thread);
2521
2522 thread_unlock(thread);
2523
2524 counter(c_thread_invoke_csw++);
2525
2526 self->reason = reason;
2527
2528 processor->last_dispatch = ctime;
2529 self->last_run_time = ctime;
2530 processor_timer_switch_thread(ctime, &thread->system_timer);
2531 timer_update(&thread->runnable_timer, ctime);
2532 processor->kernel_timer = &thread->system_timer;
2533
2534 /*
2535 * Since non-precise user/kernel time doesn't update the state timer
2536 * during privilege transitions, synthesize an event now.
2537 */
2538 if (!thread->precise_user_kernel_time) {
2539 timer_update(processor->current_state, ctime);
2540 }
2541
2542 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2543 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2544 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2545
2546 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2547 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2548 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2549 }
2550
2551 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2552
2553 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2554
2555 #if KPERF
2556 kperf_off_cpu(self);
2557 #endif /* KPERF */
2558
2559 /*
2560 * This is where we actually switch register context,
2561 * and address space if required. We will next run
2562 * as a result of a subsequent context switch.
2563 *
2564 * Once registers are switched and the processor is running "thread",
2565 * the stack variables and non-volatile registers will contain whatever
2566 * was there the last time that thread blocked. No local variables should
2567 * be used after this point, except for the special case of "thread", which
2568 * the platform layer returns as the previous thread running on the processor
2569 * via the function call ABI as a return register, and "self", which may have
2570 * been stored on the stack or in a non-volatile register; its notion of what
2571 * is running on the CPU went stale while this thread was blocked, but is
2572 * accurate again now that the thread is once more running on the CPU.
2573 *
2574 * If one of the threads is using a continuation, thread_continue
2575 * is used to stitch up its context.
2576 *
2577 * If we are invoking a thread which is resuming from a continuation,
2578 * the CPU will invoke thread_continue next.
2579 *
2580 * If the current thread is parking in a continuation, then its state
2581 * won't be saved and the stack will be discarded. When the stack is
2582 * re-allocated, it will be configured to resume from thread_continue.
2583 */
2584 assert(continuation == self->continuation);
2585 thread = machine_switch_context(self, continuation, thread);
2586 assert(self == current_thread_volatile());
2587 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2588
2589 assert(continuation == NULL && self->continuation == NULL);
2590
2591 DTRACE_SCHED(on__cpu);
2592
2593 #if KPERF
2594 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2595 #endif /* KPERF */
2596
2597 /* We have been resumed and are set to run. */
2598 thread_dispatch(thread, self);
2599
2600 return TRUE;
2601 }
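/*
 * Editor's note (illustrative, not part of the original source): the decision
 * between the two switch mechanisms above boils down to roughly
 *
 *     if (self->continuation != NULL && thread->kernel_stack == 0)
 *         stack_handoff(self, thread);          // donate self's stack; no register save
 *     else
 *         machine_switch_context(self, ...);    // full register/stack switch
 *
 * i.e. the cheap stack handoff is only possible when the outgoing thread is
 * parking in a continuation and the incoming thread has no stack of its own.
 */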
2602
2603 #if defined(CONFIG_SCHED_DEFERRED_AST)
2604 /*
2605 * pset_cancel_deferred_dispatch:
2606 *
2607 * Cancels all ASTs that we can cancel for the given processor set
2608 * if the current processor is running the last runnable thread in the
2609 * system.
2610 *
2611 * This function assumes the current thread is runnable. This must
2612 * be called with the pset unlocked.
2613 */
2614 static void
2615 pset_cancel_deferred_dispatch(
2616 processor_set_t pset,
2617 processor_t processor)
2618 {
2619 processor_t active_processor = NULL;
2620 uint32_t sampled_sched_run_count;
2621
2622 pset_lock(pset);
2623 sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
2624
2625 /*
2626 * If we have emptied the run queue, and our current thread is runnable, we
2627 * should tell any processors that are still DISPATCHING that they will
2628 * probably not have any work to do. In the event that there are no
2629 * pending signals that we can cancel, this is also uninteresting.
2630 *
2631 * In the unlikely event that another thread becomes runnable while we are
2632 * doing this (sched_run_count is atomically updated, not guarded), the
2633 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2634 * in order to dispatch it to a processor in our pset. So, the other
2635 * codepath will wait while we squash all cancelable ASTs, get the pset
2636 * lock, and then dispatch the freshly runnable thread. So this should be
2637 * correct (we won't accidentally have a runnable thread that hasn't been
2638 * dispatched to an idle processor), if not ideal (we may be restarting the
2639 * dispatch process, which could have some overhead).
2640 */
2641
2642 if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
2643 uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
2644 pset->pending_deferred_AST_cpu_mask &
2645 ~pset->pending_AST_URGENT_cpu_mask);
2646 for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
2647 active_processor = processor_array[cpuid];
2648 /*
2649 * If a processor is DISPATCHING, it could be because of
2650 * a cancelable signal.
2651 *
2652 * IF the processor is not our
2653 * current processor (the current processor should not
2654 * be DISPATCHING, so this is a bit paranoid), AND there
2655 * is a cancelable signal pending on the processor, AND
2656 * there is no non-cancelable signal pending (as there is
2657 * no point trying to backtrack on bringing the processor
2658 * up if a signal we cannot cancel is outstanding), THEN
2659 * it should make sense to roll back the processor state
2660 * to the IDLE state.
2661 *
2662 * If the racy nature of this approach (as the signal
2663 * will be arbitrated by hardware, and can fire as we
2664 * roll back state) results in the core responding
2665 * despite being pushed back to the IDLE state, it
2666 * should be no different than if the core took some
2667 * interrupt while IDLE.
2668 */
2669 if (active_processor != processor) {
2670 /*
2671 * Squash all of the processor state back to some
2672 * reasonable facsimile of PROCESSOR_IDLE.
2673 */
2674
2675 processor_state_update_idle(active_processor);
2676 active_processor->deadline = UINT64_MAX;
2677 pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
2678 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
2679 machine_signal_idle_cancel(active_processor);
2680 }
2681 }
2682 }
2683
2684 pset_unlock(pset);
2685 }
2686 #else
2687 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
2688 #endif
2689
2690 static void
2691 thread_csw_callout(
2692 thread_t old,
2693 thread_t new,
2694 uint64_t timestamp)
2695 {
2696 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2697 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
2698 machine_switch_perfcontrol_context(event, timestamp, 0,
2699 same_pri_latency, old, new);
2700 }
2701
2702
2703 /*
2704 * thread_dispatch:
2705 *
2706 * Handle threads at context switch. Re-dispatch other thread
2707 * if still running, otherwise update run state and perform
2708 * special actions. Update quantum for other thread and begin
2709 * the quantum for ourselves.
2710 *
2711 * "thread" is the old thread that we have switched away from.
2712 * "self" is the new current thread that we have context switched to
2713 *
2714 * Called at splsched.
2715 *
2716 */
2717 void
2718 thread_dispatch(
2719 thread_t thread,
2720 thread_t self)
2721 {
2722 processor_t processor = self->last_processor;
2723 bool was_idle = false;
2724
2725 assert(processor == current_processor());
2726 assert(self == current_thread_volatile());
2727 assert(thread != self);
2728
2729 if (thread != THREAD_NULL) {
2730 /*
2731 * Do the perfcontrol callout for context switch.
2732 * The reason we do this here is:
2733 * - thread_dispatch() is called from various places that are not
2734 * the direct context switch path for eg. processor shutdown etc.
2735 * So adding the callout here covers all those cases.
2736 * - We want this callout as early as possible to be close
2737 * to the timestamp taken in thread_invoke()
2738 * - We want to avoid holding the thread lock while doing the
2739 * callout
2740 * - We do not want to callout if "thread" is NULL.
2741 */
2742 thread_csw_callout(thread, self, processor->last_dispatch);
2743
2744 #if KASAN
2745 if (thread->continuation != NULL) {
2746 /*
2747 * Thread has a continuation and the normal stack is going away.
2748 * Unpoison the stack and mark all fakestack objects as unused.
2749 */
2750 kasan_fakestack_drop(thread);
2751 if (thread->kernel_stack) {
2752 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2753 }
2754 }
2755
2756 /*
2757 * Free all unused fakestack objects.
2758 */
2759 kasan_fakestack_gc(thread);
2760 #endif
2761
2762 /*
2763 * If blocked at a continuation, discard
2764 * the stack.
2765 */
2766 if (thread->continuation != NULL && thread->kernel_stack != 0) {
2767 stack_free(thread);
2768 }
2769
2770 if (thread->state & TH_IDLE) {
2771 was_idle = true;
2772 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2773 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2774 (uintptr_t)thread_tid(thread), 0, thread->state,
2775 sched_run_buckets[TH_BUCKET_RUN], 0);
2776 } else {
2777 int64_t consumed;
2778 int64_t remainder = 0;
2779
2780 if (processor->quantum_end > processor->last_dispatch) {
2781 remainder = processor->quantum_end -
2782 processor->last_dispatch;
2783 }
2784
2785 consumed = thread->quantum_remaining - remainder;
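/*
 * Editor's worked example (not part of the original source): if this thread
 * went on core with quantum_remaining equivalent to 10 ms and is being
 * switched out 6 ms later, then remainder == quantum_end - last_dispatch is
 * the 4 ms left in the quantum, and consumed == 10 ms - 4 ms == 6 ms of CPU
 * time to bill below (all values are in mach absolute time units).
 */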
2786
2787 if ((thread->reason & AST_LEDGER) == 0) {
2788 /*
2789 * Bill CPU time to both the task and
2790 * the individual thread.
2791 */
2792 ledger_credit_thread(thread, thread->t_ledger,
2793 task_ledgers.cpu_time, consumed);
2794 ledger_credit_thread(thread, thread->t_threadledger,
2795 thread_ledgers.cpu_time, consumed);
2796 if (thread->t_bankledger) {
2797 ledger_credit_thread(thread, thread->t_bankledger,
2798 bank_ledgers.cpu_time,
2799 (consumed - thread->t_deduct_bank_ledger_time));
2800 }
2801 thread->t_deduct_bank_ledger_time = 0;
2802 if (consumed > 0) {
2803 /*
2804 * This should never be negative, but in traces we are seeing some instances
2805 * of consumed being negative.
2806 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
2807 */
2808 sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
2809 }
2810 }
2811
2812 wake_lock(thread);
2813 thread_lock(thread);
2814
2815 /*
2816 * Apply a priority floor if the thread holds a kernel resource
2817 * Do this before checking starting_pri to avoid overpenalizing
2818 * repeated rwlock blockers.
2819 */
2820 if (__improbable(thread->rwlock_count != 0)) {
2821 lck_rw_set_promotion_locked(thread);
2822 }
2823
2824 boolean_t keep_quantum = processor->first_timeslice;
2825
2826 /*
2827 * Treat a thread which has dropped priority since it got on core
2828 * as having expired its quantum.
2829 */
2830 if (processor->starting_pri > thread->sched_pri) {
2831 keep_quantum = FALSE;
2832 }
2833
2834 /* Compute remainder of current quantum. */
2835 if (keep_quantum &&
2836 processor->quantum_end > processor->last_dispatch) {
2837 thread->quantum_remaining = (uint32_t)remainder;
2838 } else {
2839 thread->quantum_remaining = 0;
2840 }
2841
2842 if (thread->sched_mode == TH_MODE_REALTIME) {
2843 /*
2844 * Cancel the deadline if the thread has
2845 * consumed the entire quantum.
2846 */
2847 if (thread->quantum_remaining == 0) {
2848 thread->realtime.deadline = UINT64_MAX;
2849 }
2850 } else {
2851 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2852 /*
2853 * For non-realtime threads treat a tiny
2854 * remaining quantum as an expired quantum
2855 * but include what's left next time.
2856 */
2857 if (thread->quantum_remaining < min_std_quantum) {
2858 thread->reason |= AST_QUANTUM;
2859 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2860 }
2861 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2862 }
2863
2864 /*
2865 * If we are doing a direct handoff then
2866 * take the remainder of the quantum.
2867 */
2868 if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
2869 self->quantum_remaining = thread->quantum_remaining;
2870 thread->reason |= AST_QUANTUM;
2871 thread->quantum_remaining = 0;
2872 } else {
2873 #if defined(CONFIG_SCHED_MULTIQ)
2874 if (SCHED(sched_groups_enabled) &&
2875 thread->sched_group == self->sched_group) {
2876 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2877 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
2878 self->reason, (uintptr_t)thread_tid(thread),
2879 self->quantum_remaining, thread->quantum_remaining, 0);
2880
2881 self->quantum_remaining = thread->quantum_remaining;
2882 thread->quantum_remaining = 0;
2883 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2884 }
2885 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2886 }
2887
2888 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2889
2890 if (!(thread->state & TH_WAIT)) {
2891 /*
2892 * Still runnable.
2893 */
2894 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
2895
2896 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
2897
2898 ast_t reason = thread->reason;
2899 sched_options_t options = SCHED_NONE;
2900
2901 if (reason & AST_REBALANCE) {
2902 options |= SCHED_REBALANCE;
2903 if (reason & AST_QUANTUM) {
2904 /*
2905 * Having gone to the trouble of forcing this thread off a less preferred core,
2906 * we should force the preferable core to reschedule immediately to give this
2907 * thread a chance to run instead of just sitting on the run queue where
2908 * it may just be stolen back by the idle core we just forced it off.
2909 * But only do this at the end of a quantum to prevent cascading effects.
2910 */
2911 options |= SCHED_PREEMPT;
2912 }
2913 }
2914
2915 if (reason & AST_QUANTUM) {
2916 options |= SCHED_TAILQ;
2917 } else if (reason & AST_PREEMPT) {
2918 options |= SCHED_HEADQ;
2919 } else {
2920 options |= (SCHED_PREEMPT | SCHED_TAILQ);
2921 }
2922
2923 thread_setrun(thread, options);
2924
2925 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2926 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2927 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2928 sched_run_buckets[TH_BUCKET_RUN], 0);
2929
2930 if (thread->wake_active) {
2931 thread->wake_active = FALSE;
2932 thread_unlock(thread);
2933
2934 thread_wakeup(&thread->wake_active);
2935 } else {
2936 thread_unlock(thread);
2937 }
2938
2939 wake_unlock(thread);
2940 } else {
2941 /*
2942 * Waiting.
2943 */
2944 boolean_t should_terminate = FALSE;
2945 uint32_t new_run_count;
2946 int thread_state = thread->state;
2947
2948 /* Only the first call to thread_dispatch
2949 * after explicit termination should add
2950 * the thread to the termination queue
2951 */
2952 if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
2953 should_terminate = TRUE;
2954 thread_state |= TH_TERMINATE2;
2955 }
2956
2957 timer_stop(&thread->runnable_timer, processor->last_dispatch);
2958
2959 thread_state &= ~TH_RUN;
2960 thread->state = thread_state;
2961
2962 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
2963 thread->chosen_processor = PROCESSOR_NULL;
2964
2965 new_run_count = SCHED(run_count_decr)(thread);
2966
2967 #if CONFIG_SCHED_AUTO_JOIN
2968 if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
2969 work_interval_auto_join_unwind(thread);
2970 }
2971 #endif /* CONFIG_SCHED_AUTO_JOIN */
2972
2973 #if CONFIG_SCHED_SFI
2974 if (thread->reason & AST_SFI) {
2975 thread->wait_sfi_begin_time = processor->last_dispatch;
2976 }
2977 #endif
2978 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
2979
2980 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2981 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2982 (uintptr_t)thread_tid(thread), thread->reason, thread_state,
2983 new_run_count, 0);
2984
2985 if (thread_state & TH_WAIT_REPORT) {
2986 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2987 }
2988
2989 if (thread->wake_active) {
2990 thread->wake_active = FALSE;
2991 thread_unlock(thread);
2992
2993 thread_wakeup(&thread->wake_active);
2994 } else {
2995 thread_unlock(thread);
2996 }
2997
2998 wake_unlock(thread);
2999
3000 if (should_terminate) {
3001 thread_terminate_enqueue(thread);
3002 }
3003 }
3004 }
3005 /*
3006 * The thread could have been added to the termination queue, so it's
3007 * unsafe to use after this point.
3008 */
3009 thread = THREAD_NULL;
3010 }
3011
3012 int urgency = THREAD_URGENCY_NONE;
3013 uint64_t latency = 0;
3014
3015 /* Update (new) current thread and reprogram running timers */
3016 thread_lock(self);
3017
3018 if (!(self->state & TH_IDLE)) {
3019 uint64_t arg1, arg2;
3020
3021 #if CONFIG_SCHED_SFI
3022 ast_t new_ast;
3023
3024 new_ast = sfi_thread_needs_ast(self, NULL);
3025
3026 if (new_ast != AST_NONE) {
3027 ast_on(new_ast);
3028 }
3029 #endif
3030
3031 assertf(processor->last_dispatch >= self->last_made_runnable_time,
3032 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
3033 processor->last_dispatch, self->last_made_runnable_time);
3034
3035 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
3036
3037 latency = processor->last_dispatch - self->last_made_runnable_time;
3038 assert(latency >= self->same_pri_latency);
3039
3040 urgency = thread_get_urgency(self, &arg1, &arg2);
3041
3042 thread_tell_urgency(urgency, arg1, arg2, latency, self);
3043
3044 /*
3045 * Get a new quantum if none remaining.
3046 */
3047 if (self->quantum_remaining == 0) {
3048 thread_quantum_init(self);
3049 }
3050
3051 /*
3052 * Set up quantum timer and timeslice.
3053 */
3054 processor->quantum_end = processor->last_dispatch +
3055 self->quantum_remaining;
3056
3057 running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
3058 processor->quantum_end, processor->last_dispatch);
3059 if (was_idle) {
3060 /*
3061 * kperf's running timer is active whenever the idle thread for a
3062 * CPU is not running.
3063 */
3064 kperf_running_setup(processor, processor->last_dispatch);
3065 }
3066 running_timers_activate(processor);
3067 processor->first_timeslice = TRUE;
3068 } else {
3069 running_timers_deactivate(processor);
3070 processor->first_timeslice = FALSE;
3071 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
3072 }
3073
3074 assert(self->block_hint == kThreadWaitNone);
3075 self->computation_epoch = processor->last_dispatch;
3076 self->reason = AST_NONE;
3077 processor->starting_pri = self->sched_pri;
3078
3079 thread_unlock(self);
3080
3081 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
3082 processor->last_dispatch);
3083
3084 #if defined(CONFIG_SCHED_DEFERRED_AST)
3085 /*
3086 * TODO: Can we state that redispatching our old thread is also
3087 * uninteresting?
3088 */
3089 if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
3090 pset_cancel_deferred_dispatch(processor->processor_set, processor);
3091 }
3092 #endif
3093 }
3094
3095 /*
3096 * thread_block_reason:
3097 *
3098 * Forces a reschedule, blocking the caller if a wait
3099 * has been asserted.
3100 *
3101 * If a continuation is specified, then thread_invoke will
3102 * attempt to discard the thread's kernel stack. When the
3103 * thread resumes, it will execute the continuation function
3104 * on a new kernel stack.
3105 */
3106 counter(mach_counter_t c_thread_block_calls = 0; )
3107
3108 wait_result_t
3109 thread_block_reason(
3110 thread_continue_t continuation,
3111 void *parameter,
3112 ast_t reason)
3113 {
3114 thread_t self = current_thread();
3115 processor_t processor;
3116 thread_t new_thread;
3117 spl_t s;
3118
3119 counter(++c_thread_block_calls);
3120
3121 s = splsched();
3122
3123 processor = current_processor();
3124
3125 /* If we're explicitly yielding, force a subsequent quantum */
3126 if (reason & AST_YIELD) {
3127 processor->first_timeslice = FALSE;
3128 }
3129
3130 /* We're handling all scheduling ASTs */
3131 ast_off(AST_SCHEDULING);
3132
3133 #if PROC_REF_DEBUG
3134 if ((continuation != NULL) && (self->task != kernel_task)) {
3135 if (uthread_get_proc_refcount(self->uthread) != 0) {
3136 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
3137 }
3138 }
3139 #endif
3140
3141 self->continuation = continuation;
3142 self->parameter = parameter;
3143
3144 if (self->state & ~(TH_RUN | TH_IDLE)) {
3145 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3146 MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
3147 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
3148 }
3149
3150 do {
3151 thread_lock(self);
3152 new_thread = thread_select(self, processor, &reason);
3153 thread_unlock(self);
3154 } while (!thread_invoke(self, new_thread, reason));
3155
3156 splx(s);
3157
3158 return self->wait_result;
3159 }
3160
3161 /*
3162 * thread_block:
3163 *
3164 * Block the current thread if a wait has been asserted.
3165 */
3166 wait_result_t
3167 thread_block(
3168 thread_continue_t continuation)
3169 {
3170 return thread_block_reason(continuation, NULL, AST_NONE);
3171 }
3172
3173 wait_result_t
3174 thread_block_parameter(
3175 thread_continue_t continuation,
3176 void *parameter)
3177 {
3178 return thread_block_reason(continuation, parameter, AST_NONE);
3179 }
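/*
 * Editor's usage sketch (not part of the original source; 'object' and its
 * 'flag' field are hypothetical): the usual wait pattern built on these
 * primitives is
 *
 *     assert_wait((event_t)&object->flag, THREAD_UNINT);      // declare the wait
 *     // ... drop the lock protecting 'object' ...
 *     wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);  // block here
 *     if (wr == THREAD_AWAKENED) {
 *         // woken by thread_wakeup((event_t)&object->flag)
 *     }
 */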
3180
3181 /*
3182 * thread_run:
3183 *
3184 * Switch directly from the current thread to the
3185 * new thread, handing off our quantum if appropriate.
3186 *
3187 * New thread must be runnable, and not on a run queue.
3188 *
3189 * Called at splsched.
3190 */
3191 int
3192 thread_run(
3193 thread_t self,
3194 thread_continue_t continuation,
3195 void *parameter,
3196 thread_t new_thread)
3197 {
3198 ast_t reason = AST_NONE;
3199
3200 if ((self->state & TH_IDLE) == 0) {
3201 reason = AST_HANDOFF;
3202 }
3203
3204 /*
3205 * If this thread hadn't been setrun'ed, it
3206 * might not have a chosen processor, so give it one
3207 */
3208 if (new_thread->chosen_processor == PROCESSOR_NULL) {
3209 new_thread->chosen_processor = current_processor();
3210 }
3211
3212 self->continuation = continuation;
3213 self->parameter = parameter;
3214
3215 while (!thread_invoke(self, new_thread, reason)) {
3216 /* the handoff failed, so we have to fall back to the normal block path */
3217 processor_t processor = current_processor();
3218
3219 reason = AST_NONE;
3220
3221 thread_lock(self);
3222 new_thread = thread_select(self, processor, &reason);
3223 thread_unlock(self);
3224 }
3225
3226 return self->wait_result;
3227 }
3228
3229 /*
3230 * thread_continue:
3231 *
3232 * Called at splsched when a thread first receives
3233 * a new stack after a continuation.
3234 *
3235 * Called with THREAD_NULL as the old thread when
3236 * invoked by machine_load_context.
3237 */
3238 void
3239 thread_continue(
3240 thread_t thread)
3241 {
3242 thread_t self = current_thread();
3243 thread_continue_t continuation;
3244 void *parameter;
3245
3246 DTRACE_SCHED(on__cpu);
3247
3248 continuation = self->continuation;
3249 parameter = self->parameter;
3250
3251 assert(continuation != NULL);
3252
3253 #if KPERF
3254 kperf_on_cpu(self, continuation, NULL);
3255 #endif
3256
3257 thread_dispatch(thread, self);
3258
3259 self->continuation = self->parameter = NULL;
3260
3261 #if INTERRUPT_MASKED_DEBUG
3262 /* Reset interrupt-masked spin debugging timeout */
3263 ml_spin_debug_clear(self);
3264 #endif
3265
3266 TLOG(1, "thread_continue: calling call_continuation\n");
3267
3268 boolean_t enable_interrupts = TRUE;
3269
3270 /* bootstrap and idle threads need to stay interrupts-disabled */
3271 if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
3272 enable_interrupts = FALSE;
3273 }
3274
3275 call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
3276 /*NOTREACHED*/
3277 }
3278
3279 void
3280 thread_quantum_init(thread_t thread)
3281 {
3282 if (thread->sched_mode == TH_MODE_REALTIME) {
3283 thread->quantum_remaining = thread->realtime.computation;
3284 } else {
3285 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
3286 }
3287 }
3288
3289 uint32_t
3290 sched_timeshare_initial_quantum_size(thread_t thread)
3291 {
3292 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
3293 return bg_quantum;
3294 } else {
3295 return std_quantum;
3296 }
3297 }
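/*
 * Editor's note (not part of the original source): timeshare threads therefore
 * begin each quantum with std_quantum, except threads in the background bucket
 * (TH_BUCKET_SHARE_BG), which get the separate bg_quantum; realtime threads
 * instead use their declared realtime.computation as the quantum, as set up in
 * thread_quantum_init() above.
 */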
3298
3299 /*
3300 * run_queue_init:
3301 *
3302 * Initialize a run queue before first use.
3303 */
3304 void
3305 run_queue_init(
3306 run_queue_t rq)
3307 {
3308 rq->highq = NOPRI;
3309 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
3310 rq->bitmap[i] = 0;
3311 }
3312 rq->urgency = rq->count = 0;
3313 for (int i = 0; i < NRQS; i++) {
3314 circle_queue_init(&rq->queues[i]);
3315 }
3316 }
3317
3318 /*
3319 * run_queue_dequeue:
3320 *
3321 * Perform a dequeue operation on a run queue,
3322 * and return the resulting thread.
3323 *
3324 * The run queue must be locked (see thread_run_queue_remove()
3325 * for more info), and not empty.
3326 */
3327 thread_t
3328 run_queue_dequeue(
3329 run_queue_t rq,
3330 sched_options_t options)
3331 {
3332 thread_t thread;
3333 circle_queue_t queue = &rq->queues[rq->highq];
3334
3335 if (options & SCHED_HEADQ) {
3336 thread = cqe_dequeue_head(queue, struct thread, runq_links);
3337 } else {
3338 thread = cqe_dequeue_tail(queue, struct thread, runq_links);
3339 }
3340
3341 assert(thread != THREAD_NULL);
3342 assert_thread_magic(thread);
3343
3344 thread->runq = PROCESSOR_NULL;
3345 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3346 rq->count--;
3347 if (SCHED(priority_is_urgent)(rq->highq)) {
3348 rq->urgency--; assert(rq->urgency >= 0);
3349 }
3350 if (circle_queue_empty(queue)) {
3351 bitmap_clear(rq->bitmap, rq->highq);
3352 rq->highq = bitmap_first(rq->bitmap, NRQS);
3353 }
3354
3355 return thread;
3356 }
3357
3358 /*
3359 * run_queue_enqueue:
3360 *
3361 * Perform an enqueue operation on a run queue.
3362 *
3363 * The run queue must be locked (see thread_run_queue_remove()
3364 * for more info).
3365 */
3366 boolean_t
3367 run_queue_enqueue(
3368 run_queue_t rq,
3369 thread_t thread,
3370 sched_options_t options)
3371 {
3372 circle_queue_t queue = &rq->queues[thread->sched_pri];
3373 boolean_t result = FALSE;
3374
3375 assert_thread_magic(thread);
3376
3377 if (circle_queue_empty(queue)) {
3378 circle_enqueue_tail(queue, &thread->runq_links);
3379
3380 rq_bitmap_set(rq->bitmap, thread->sched_pri);
3381 if (thread->sched_pri > rq->highq) {
3382 rq->highq = thread->sched_pri;
3383 result = TRUE;
3384 }
3385 } else {
3386 if (options & SCHED_TAILQ) {
3387 circle_enqueue_tail(queue, &thread->runq_links);
3388 } else {
3389 circle_enqueue_head(queue, &thread->runq_links);
3390 }
3391 }
3392 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3393 rq->urgency++;
3394 }
3395 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3396 rq->count++;
3397
3398 return result;
3399 }
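/*
 * Editor's note (not part of the original source): the boolean result above is
 * TRUE only when this enqueue raised rq->highq, i.e. the inserted thread is now
 * the highest-priority runnable entry in the queue, which is the case where a
 * caller is most likely to need a preemption check.
 */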
3400
3401 /*
3402 * run_queue_remove:
3403 *
3404 * Remove a specific thread from a runqueue.
3405 *
3406 * The run queue must be locked.
3407 */
3408 void
3409 run_queue_remove(
3410 run_queue_t rq,
3411 thread_t thread)
3412 {
3413 circle_queue_t queue = &rq->queues[thread->sched_pri];
3414
3415 assert(thread->runq != PROCESSOR_NULL);
3416 assert_thread_magic(thread);
3417
3418 circle_dequeue(queue, &thread->runq_links);
3419 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3420 rq->count--;
3421 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3422 rq->urgency--; assert(rq->urgency >= 0);
3423 }
3424
3425 if (circle_queue_empty(queue)) {
3426 /* update run queue status */
3427 bitmap_clear(rq->bitmap, thread->sched_pri);
3428 rq->highq = bitmap_first(rq->bitmap, NRQS);
3429 }
3430
3431 thread->runq = PROCESSOR_NULL;
3432 }
3433
3434 /*
3435 * run_queue_peek
3436 *
3437 * Peek at the runq and return the highest
3438 * priority thread from the runq.
3439 *
3440 * The run queue must be locked.
3441 */
3442 thread_t
3443 run_queue_peek(
3444 run_queue_t rq)
3445 {
3446 if (rq->count > 0) {
3447 circle_queue_t queue = &rq->queues[rq->highq];
3448 thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
3449 assert_thread_magic(thread);
3450 return thread;
3451 } else {
3452 return THREAD_NULL;
3453 }
3454 }
3455
3456 rt_queue_t
3457 sched_rtlocal_runq(processor_set_t pset)
3458 {
3459 return &pset->rt_runq;
3460 }
3461
3462 void
3463 sched_rtlocal_init(processor_set_t pset)
3464 {
3465 pset_rt_init(pset);
3466 }
3467
3468 void
3469 sched_rtlocal_queue_shutdown(processor_t processor)
3470 {
3471 processor_set_t pset = processor->processor_set;
3472 thread_t thread;
3473 queue_head_t tqueue;
3474
3475 pset_lock(pset);
3476
3477 /* We only need to migrate threads if this is the last active or last recommended processor in the pset */
3478 if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) {
3479 pset_unlock(pset);
3480 return;
3481 }
3482
3483 queue_init(&tqueue);
3484
3485 while (rt_runq_count(pset) > 0) {
3486 thread = qe_dequeue_head(&pset->rt_runq.queue, struct thread, runq_links);
3487 thread->runq = PROCESSOR_NULL;
3488 SCHED_STATS_RUNQ_CHANGE(&pset->rt_runq.runq_stats, rt_runq_count(pset));
3489 rt_runq_count_decr(pset);
3490 enqueue_tail(&tqueue, &thread->runq_links);
3491 }
3492 sched_update_pset_load_average(pset, 0);
3493 pset_unlock(pset);
3494
3495 qe_foreach_element_safe(thread, &tqueue, runq_links) {
3496 remqueue(&thread->runq_links);
3497
3498 thread_lock(thread);
3499
3500 thread_setrun(thread, SCHED_TAILQ);
3501
3502 thread_unlock(thread);
3503 }
3504 }
3505
3506 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3507 void
3508 sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context)
3509 {
3510 thread_t thread;
3511
3512 pset_node_t node = &pset_node0;
3513 processor_set_t pset = node->psets;
3514
3515 spl_t s = splsched();
3516 do {
3517 while (pset != NULL) {
3518 pset_lock(pset);
3519
3520 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3521 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3522 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3523 }
3524 }
3525
3526 pset_unlock(pset);
3527
3528 pset = pset->pset_list;
3529 }
3530 } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
3531 splx(s);
3532 }
3533
3534 int64_t
3535 sched_rtlocal_runq_count_sum(void)
3536 {
3537 pset_node_t node = &pset_node0;
3538 processor_set_t pset = node->psets;
3539 int64_t count = 0;
3540
3541 do {
3542 while (pset != NULL) {
3543 count += pset->rt_runq.runq_stats.count_sum;
3544
3545 pset = pset->pset_list;
3546 }
3547 } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
3548
3549 return count;
3550 }
3551
3552 /*
3553 * realtime_queue_insert:
3554 *
3555 * Enqueue a thread for realtime execution.
3556 */
3557 static boolean_t
3558 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
3559 {
3560 queue_t queue = &SCHED(rt_runq)(pset)->queue;
3561 uint64_t deadline = thread->realtime.deadline;
3562 boolean_t preempt = FALSE;
3563
3564 pset_assert_locked(pset);
3565
3566 if (queue_empty(queue)) {
3567 enqueue_tail(queue, &thread->runq_links);
3568 preempt = TRUE;
3569 } else {
3570 /* Insert into rt_runq in thread deadline order */
3571 queue_entry_t iter;
3572 qe_foreach(iter, queue) {
3573 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3574 assert_thread_magic(iter_thread);
3575
3576 if (deadline < iter_thread->realtime.deadline) {
3577 if (iter == queue_first(queue)) {
3578 preempt = TRUE;
3579 }
3580 insque(&thread->runq_links, queue_prev(iter));
3581 break;
3582 } else if (iter == queue_last(queue)) {
3583 enqueue_tail(queue, &thread->runq_links);
3584 break;
3585 }
3586 }
3587 }
3588
3589 thread->runq = processor;
3590 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3591 rt_runq_count_incr(pset);
3592
3593 return preempt;
3594 }
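/*
 * Editor's worked example (not part of the original source): the realtime run
 * queue is kept in earliest-deadline-first order and 'preempt' is TRUE only
 * when the new thread becomes the head. With queued deadlines {5, 9, 12}:
 *
 *     insert deadline 7       ->  {5, 7, 9, 12},  preempt == FALSE
 *     insert deadline 3       ->  {3, 5, 9, 12},  preempt == TRUE
 *     insert into empty queue ->  {deadline},     preempt == TRUE
 */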
3595
3596 #define MAX_BACKUP_PROCESSORS 7
3597 #if defined(__x86_64__)
3598 #define DEFAULT_BACKUP_PROCESSORS 1
3599 #else
3600 #define DEFAULT_BACKUP_PROCESSORS 0
3601 #endif
3602
3603 int sched_rt_n_backup_processors = DEFAULT_BACKUP_PROCESSORS;
3604
3605 int
3606 sched_get_rt_n_backup_processors(void)
3607 {
3608 return sched_rt_n_backup_processors;
3609 }
3610
3611 void
3612 sched_set_rt_n_backup_processors(int n)
3613 {
3614 if (n < 0) {
3615 n = 0;
3616 } else if (n > MAX_BACKUP_PROCESSORS) {
3617 n = MAX_BACKUP_PROCESSORS;
3618 }
3619
3620 sched_rt_n_backup_processors = n;
3621 }
3622
3623 /*
3624 * realtime_setrun:
3625 *
3626 * Dispatch a thread for realtime execution.
3627 *
3628 * Thread must be locked. Associated pset must
3629 * be locked, and is returned unlocked.
3630 */
3631 static void
3632 realtime_setrun(
3633 processor_t chosen_processor,
3634 thread_t thread)
3635 {
3636 processor_set_t pset = chosen_processor->processor_set;
3637 pset_assert_locked(pset);
3638 ast_t preempt;
3639
3640 int n_backup = 0;
3641
3642 if (thread->realtime.constraint <= rt_constraint_threshold) {
3643 n_backup = sched_rt_n_backup_processors;
3644 }
3645 assert((n_backup >= 0) && (n_backup <= MAX_BACKUP_PROCESSORS));
3646
3647 sched_ipi_type_t ipi_type[MAX_BACKUP_PROCESSORS + 1] = {};
3648 processor_t ipi_processor[MAX_BACKUP_PROCESSORS + 1] = {};
3649
3650 thread->chosen_processor = chosen_processor;
3651
3652 /* <rdar://problem/15102234> */
3653 assert(thread->bound_processor == PROCESSOR_NULL);
3654
3655 realtime_queue_insert(chosen_processor, pset, thread);
3656
3657 processor_t processor = chosen_processor;
3658 bool chosen_process_is_secondary = chosen_processor->processor_primary != chosen_processor;
3659
3660 int count = 0;
3661 for (int i = 0; i <= n_backup; i++) {
3662 if (i > 0) {
3663 processor = choose_processor_for_realtime_thread(pset, chosen_processor, chosen_process_is_secondary);
3664 if ((processor == PROCESSOR_NULL) || (sched_avoid_cpu0 && (processor->cpu_id == 0))) {
3665 break;
3666 }
3667 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
3668 (uintptr_t)thread_tid(thread), (uintptr_t)-3, processor->cpu_id, processor->state, 0);
3669 }
3670 ipi_type[i] = SCHED_IPI_NONE;
3671 ipi_processor[i] = processor;
3672 count++;
3673
3674 if (processor->current_pri < BASEPRI_RTQUEUES) {
3675 preempt = (AST_PREEMPT | AST_URGENT);
3676 } else if (thread->realtime.deadline < processor->deadline) {
3677 preempt = (AST_PREEMPT | AST_URGENT);
3678 } else {
3679 preempt = AST_NONE;
3680 }
3681
3682 if (preempt != AST_NONE) {
3683 if (processor->state == PROCESSOR_IDLE) {
3684 processor_state_update_from_thread(processor, thread);
3685 processor->deadline = thread->realtime.deadline;
3686 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3687 if (processor == current_processor()) {
3688 ast_on(preempt);
3689
3690 if ((preempt & AST_URGENT) == AST_URGENT) {
3691 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3692 }
3693
3694 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3695 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3696 }
3697 } else {
3698 ipi_type[i] = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
3699 }
3700 } else if (processor->state == PROCESSOR_DISPATCHING) {
3701 if ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline)) {
3702 processor_state_update_from_thread(processor, thread);
3703 processor->deadline = thread->realtime.deadline;
3704 }
3705 } else {
3706 if (processor == current_processor()) {
3707 ast_on(preempt);
3708
3709 if ((preempt & AST_URGENT) == AST_URGENT) {
3710 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3711 }
3712
3713 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3714 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3715 }
3716 } else {
3717 ipi_type[i] = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
3718 }
3719 }
3720 } else {
3721 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
3722 }
3723 }
3724
3725 pset_unlock(pset);
3726
3727 assert((count > 0) && (count <= (n_backup + 1)));
3728 for (int i = 0; i < count; i++) {
3729 assert(ipi_processor[i] != PROCESSOR_NULL);
3730 sched_ipi_perform(ipi_processor[i], ipi_type[i]);
3731 }
3732 }
3733
3734
3735 sched_ipi_type_t
3736 sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3737 __unused sched_ipi_event_t event)
3738 {
3739 #if defined(CONFIG_SCHED_DEFERRED_AST)
3740 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3741 return SCHED_IPI_DEFERRED;
3742 }
3743 #else /* CONFIG_SCHED_DEFERRED_AST */
3744 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
3745 #endif /* CONFIG_SCHED_DEFERRED_AST */
3746 return SCHED_IPI_NONE;
3747 }
3748
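/*
 * sched_ipi_action:
 *
 * Decide which IPI (if any) to send to 'dst' for the given event via the
 * scheduler's ipi_policy, and set the pset's pending-AST bits so duplicate
 * IPIs are not sent. Returns SCHED_IPI_NONE when dst is the current
 * processor or already has an urgent AST pending.
 */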
3749 sched_ipi_type_t
3750 sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3751 {
3752 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3753 assert(dst != NULL);
3754
3755 processor_set_t pset = dst->processor_set;
3756 if (current_processor() == dst) {
3757 return SCHED_IPI_NONE;
3758 }
3759
3760 if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
3761 return SCHED_IPI_NONE;
3762 }
3763
3764 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3765 switch (ipi_type) {
3766 case SCHED_IPI_NONE:
3767 return SCHED_IPI_NONE;
3768 #if defined(CONFIG_SCHED_DEFERRED_AST)
3769 case SCHED_IPI_DEFERRED:
3770 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3771 break;
3772 #endif /* CONFIG_SCHED_DEFERRED_AST */
3773 default:
3774 bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id);
3775 bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
3776 break;
3777 }
3778 return ipi_type;
3779 }
3780
3781 sched_ipi_type_t
3782 sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3783 {
3784 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3785 boolean_t deferred_ipi_supported = false;
3786 processor_set_t pset = dst->processor_set;
3787
3788 #if defined(CONFIG_SCHED_DEFERRED_AST)
3789 deferred_ipi_supported = true;
3790 #endif /* CONFIG_SCHED_DEFERRED_AST */
3791
3792 switch (event) {
3793 case SCHED_IPI_EVENT_SPILL:
3794 case SCHED_IPI_EVENT_SMT_REBAL:
3795 case SCHED_IPI_EVENT_REBALANCE:
3796 case SCHED_IPI_EVENT_BOUND_THR:
3797 /*
3798 * The spill, SMT rebalance, rebalance, and bound thread
3799 * scenarios always use immediate IPIs.
3800 */
3801 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3802 break;
3803 case SCHED_IPI_EVENT_PREEMPT:
3804 /* In the preemption case, use immediate IPIs for RT threads */
3805 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3806 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3807 break;
3808 }
3809
3810 /*
3811 * For non-RT thread preemption:
3812 * If the core is active, use immediate IPIs.
3813 * If the core is idle, use deferred IPIs if supported; otherwise, an immediate IPI.
3814 */
3815 if (deferred_ipi_supported && dst_idle) {
3816 return sched_ipi_deferred_policy(pset, dst, event);
3817 }
3818 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3819 break;
3820 default:
3821 panic("Unrecognized scheduler IPI event type %d", event);
3822 }
3823 assert(ipi_type != SCHED_IPI_NONE);
3824 return ipi_type;
3825 }
3826
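/*
 * sched_ipi_perform:
 *
 * Deliver a previously chosen IPI to the destination processor: an idle
 * wakeup, an immediate cross-processor AST check, or a deferred idle
 * signal. SCHED_IPI_NONE is a no-op.
 */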
3827 void
3828 sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
3829 {
3830 switch (ipi) {
3831 case SCHED_IPI_NONE:
3832 break;
3833 case SCHED_IPI_IDLE:
3834 machine_signal_idle(dst);
3835 break;
3836 case SCHED_IPI_IMMEDIATE:
3837 cause_ast_check(dst);
3838 break;
3839 case SCHED_IPI_DEFERRED:
3840 machine_signal_idle_deferred(dst);
3841 break;
3842 default:
3843 panic("Unrecognized scheduler IPI type: %d", ipi);
3844 }
3845 }
3846
3847 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3848
3849 boolean_t
3850 priority_is_urgent(int priority)
3851 {
3852 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
3853 }
3854
3855 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3856
3857 /*
3858 * processor_setrun:
3859 *
3860 * Dispatch a thread for execution on a
3861 * processor.
3862 *
3863 * Thread must be locked. Associated pset must
3864 * be locked, and is returned unlocked.
3865 */
3866 static void
3867 processor_setrun(
3868 processor_t processor,
3869 thread_t thread,
3870 integer_t options)
3871 {
3872 processor_set_t pset = processor->processor_set;
3873 pset_assert_locked(pset);
3874 ast_t preempt;
3875 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
3876
3877 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3878
3879 thread->chosen_processor = processor;
3880
3881 /*
3882 * Set preemption mode.
3883 */
3884 #if defined(CONFIG_SCHED_DEFERRED_AST)
3885 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3886 #endif
3887 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
3888 preempt = (AST_PREEMPT | AST_URGENT);
3889 } else if (processor->active_thread && thread_eager_preemption(processor->active_thread)) {
3890 preempt = (AST_PREEMPT | AST_URGENT);
3891 } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3892 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
3893 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3894 } else {
3895 preempt = AST_NONE;
3896 }
3897 } else {
3898 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3899 }
3900
3901 if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
3902 /*
3903 * Having gone to the trouble of forcing this thread off a less preferred core,
3904 * we should force the preferable core to reschedule immediately to give this
3905 * thread a chance to run instead of just sitting on the run queue where
3906 * it may just be stolen back by the idle core we just forced it off.
3907 */
3908 preempt |= AST_PREEMPT;
3909 }
3910
3911 SCHED(processor_enqueue)(processor, thread, options);
3912 sched_update_pset_load_average(pset, 0);
3913
3914 if (preempt != AST_NONE) {
3915 if (processor->state == PROCESSOR_IDLE) {
3916 processor_state_update_from_thread(processor, thread);
3917 processor->deadline = UINT64_MAX;
3918 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3919 ipi_action = eExitIdle;
3920 } else if (processor->state == PROCESSOR_DISPATCHING) {
3921 if (processor->current_pri < thread->sched_pri) {
3922 processor_state_update_from_thread(processor, thread);
3923 processor->deadline = UINT64_MAX;
3924 }
3925 } else if ((processor->state == PROCESSOR_RUNNING ||
3926 processor->state == PROCESSOR_SHUTDOWN) &&
3927 (thread->sched_pri >= processor->current_pri)) {
3928 ipi_action = eInterruptRunning;
3929 }
3930 } else {
3931 /*
3932 * New thread is not important enough to preempt what is running, but
3933 * special processor states may need special handling
3934 */
3935 if (processor->state == PROCESSOR_SHUTDOWN &&
3936 thread->sched_pri >= processor->current_pri) {
3937 ipi_action = eInterruptRunning;
3938 } else if (processor->state == PROCESSOR_IDLE) {
3939 processor_state_update_from_thread(processor, thread);
3940 processor->deadline = UINT64_MAX;
3941 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3942
3943 ipi_action = eExitIdle;
3944 }
3945 }
3946
3947 if (ipi_action != eDoNothing) {
3948 if (processor == current_processor()) {
3949 if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
3950 ast_on(preempt);
3951 }
3952
3953 if ((preempt & AST_URGENT) == AST_URGENT) {
3954 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3955 } else {
3956 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3957 }
3958
3959 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3960 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3961 } else {
3962 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3963 }
3964 } else {
3965 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3966 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3967 }
3968 }
3969 pset_unlock(pset);
3970 sched_ipi_perform(processor, ipi_type);
3971 }
3972
3973 /*
3974 * choose_next_pset:
3975 *
3976 * Return the next sibling pset containing
3977 * available processors.
3978 *
3979 * Returns the original pset if none other is
3980 * suitable.
3981 */
3982 static processor_set_t
3983 choose_next_pset(
3984 processor_set_t pset)
3985 {
3986 processor_set_t nset = pset;
3987
3988 do {
3989 nset = next_pset(nset);
3990 } while (nset->online_processor_count < 1 && nset != pset);
3991
3992 return nset;
3993 }
3994
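/*
 * change_locked_pset:
 *
 * Drop the lock on current_pset and take the lock on new_pset (a no-op
 * when they are the same pset). Returns the pset that is now locked.
 */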
3995 inline static processor_set_t
3996 change_locked_pset(processor_set_t current_pset, processor_set_t new_pset)
3997 {
3998 if (current_pset != new_pset) {
3999 pset_unlock(current_pset);
4000 pset_lock(new_pset);
4001 }
4002
4003 return new_pset;
4004 }
4005
4006 /*
4007 * choose_processor:
4008 *
4009 * Choose a processor for the thread, beginning at
4010 * the pset. Accepts an optional processor hint in
4011 * the pset.
4012 *
4013 * Returns a processor, possibly from a different pset.
4014 *
4015 * The thread must be locked. The pset must be locked,
4016 * and the resulting pset is locked on return.
4017 */
4018 processor_t
4019 choose_processor(
4020 processor_set_t starting_pset,
4021 processor_t processor,
4022 thread_t thread)
4023 {
4024 processor_set_t pset = starting_pset;
4025 processor_set_t nset;
4026
4027 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
4028
4029 /*
4030 * Prefer the hinted processor, when appropriate.
4031 */
4032
4033 /* Fold last processor hint from secondary processor to its primary */
4034 if (processor != PROCESSOR_NULL) {
4035 processor = processor->processor_primary;
4036 }
4037
4038 /*
4039 * Only consult platform layer if pset is active, which
4040 * it may not be in some cases when a multi-set system
4041 * is going to sleep.
4042 */
4043 if (pset->online_processor_count) {
4044 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
4045 processor_t mc_processor = machine_choose_processor(pset, processor);
4046 if (mc_processor != PROCESSOR_NULL) {
4047 processor = mc_processor->processor_primary;
4048 }
4049 }
4050 }
4051
4052 /*
4053 * At this point, we may have a processor hint, and we may have
4054 * an initial starting pset. If the hint is not in the pset, or
4055 * if the hint is for a processor in an invalid state, discard
4056 * the hint.
4057 */
4058 if (processor != PROCESSOR_NULL) {
4059 if (processor->processor_set != pset) {
4060 processor = PROCESSOR_NULL;
4061 } else if (!processor->is_recommended) {
4062 processor = PROCESSOR_NULL;
4063 } else {
4064 switch (processor->state) {
4065 case PROCESSOR_START:
4066 case PROCESSOR_SHUTDOWN:
4067 case PROCESSOR_OFF_LINE:
4068 /*
4069 * Hint is for a processor that cannot support running new threads.
4070 */
4071 processor = PROCESSOR_NULL;
4072 break;
4073 case PROCESSOR_IDLE:
4074 /*
4075 * Hint is for an idle processor. Assume it is no worse than any other
4076 * idle processor. The platform layer had an opportunity to provide
4077 * the "least cost idle" processor above.
4078 */
4079 if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
4080 return processor;
4081 }
4082 processor = PROCESSOR_NULL;
4083 break;
4084 case PROCESSOR_RUNNING:
4085 case PROCESSOR_DISPATCHING:
4086 /*
4087 * Hint is for an active CPU. This fast-path allows
4088 * realtime threads to preempt non-realtime threads
4089 * to regain their previous executing processor.
4090 */
4091 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
4092 processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
4093 return processor;
4094 }
4095
4096 /* Otherwise, use hint as part of search below */
4097 break;
4098 default:
4099 processor = PROCESSOR_NULL;
4100 break;
4101 }
4102 }
4103 }
4104
4105 /*
4106 * Iterate through the processor sets to locate
4107 * an appropriate processor. Seed results with
4108 * a last-processor hint, if available, so that
4109 * a search must find something strictly better
4110 * to replace it.
4111 *
4112 * A primary/secondary pair of SMT processors are
4113 * "unpaired" if the primary is busy but its
4114 * corresponding secondary is idle (so the physical
4115 * core has full use of its resources).
4116 */
4117
4118 integer_t lowest_priority = MAXPRI + 1;
4119 integer_t lowest_secondary_priority = MAXPRI + 1;
4120 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
4121 integer_t lowest_idle_secondary_priority = MAXPRI + 1;
4122 integer_t lowest_count = INT_MAX;
4123 uint64_t furthest_deadline = 1;
4124 processor_t lp_processor = PROCESSOR_NULL;
4125 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
4126 processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
4127 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
4128 processor_t lc_processor = PROCESSOR_NULL;
4129 processor_t fd_processor = PROCESSOR_NULL;
4130
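/*
 * Bookkeeping for the scan below: the lp_* pointers track the lowest
 * priority candidate of each flavor (active primary, paired secondary,
 * unpaired primary, idle secondary), fd_processor tracks the realtime
 * processor with the furthest deadline, and lc_processor tracks the
 * processor with the shortest run queue as a last resort.
 */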
4131 if (processor != PROCESSOR_NULL) {
4132 /* All other states should be enumerated above. */
4133 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
4134
4135 lowest_priority = processor->current_pri;
4136 lp_processor = processor;
4137
4138 if (processor->current_pri >= BASEPRI_RTQUEUES) {
4139 furthest_deadline = processor->deadline;
4140 fd_processor = processor;
4141 }
4142
4143 lowest_count = SCHED(processor_runq_count)(processor);
4144 lc_processor = processor;
4145 }
4146
4147 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4148 pset_node_t node = pset->node;
4149 int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0);
4150 for (; consider_secondaries < 2; consider_secondaries++) {
4151 pset = change_locked_pset(pset, starting_pset);
4152 do {
4153 processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, consider_secondaries);
4154 if (processor) {
4155 return processor;
4156 }
4157
4158 /* NRG Collect processor stats for furthest deadline etc. here */
4159
4160 nset = next_pset(pset);
4161
4162 if (nset != starting_pset) {
4163 pset = change_locked_pset(pset, nset);
4164 }
4165 } while (nset != starting_pset);
4166 }
4167 /* Or we could just let it change to starting_pset in the loop above */
4168 pset = change_locked_pset(pset, starting_pset);
4169 }
4170
4171 do {
4172 /*
4173 * Choose an idle processor, in pset traversal order
4174 */
4175
4176 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
4177 pset->primary_map &
4178 pset->recommended_bitmask);
4179
4180 /* there shouldn't be a pending AST if the processor is idle */
4181 assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4182
4183 int cpuid = lsb_first(idle_primary_map);
4184 if (cpuid >= 0) {
4185 processor = processor_array[cpuid];
4186 return processor;
4187 }
4188
4189 /*
4190 * Otherwise, enumerate active and idle processors to find primary candidates
4191 * with lower priority/etc.
4192 */
4193
4194 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
4195 pset->recommended_bitmask &
4196 ~pset->pending_AST_URGENT_cpu_mask);
4197
4198 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
4199 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
4200 }
4201
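/*
 * Rotate the active map so the scan starts just after the last chosen
 * CPU; the (rotid + last_chosen + 1) & 63 below recovers the real cpuid,
 * yielding a round-robin starting point among active processors.
 */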
4202 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
4203 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
4204 cpuid = ((rotid + pset->last_chosen + 1) & 63);
4205 processor = processor_array[cpuid];
4206
4207 integer_t cpri = processor->current_pri;
4208 processor_t primary = processor->processor_primary;
4209 if (primary != processor) {
4210 /* If primary is running a NO_SMT thread, don't choose its secondary */
4211 if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
4212 if (cpri < lowest_secondary_priority) {
4213 lowest_secondary_priority = cpri;
4214 lp_paired_secondary_processor = processor;
4215 }
4216 }
4217 } else {
4218 if (cpri < lowest_priority) {
4219 lowest_priority = cpri;
4220 lp_processor = processor;
4221 }
4222 }
4223
4224 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
4225 furthest_deadline = processor->deadline;
4226 fd_processor = processor;
4227 }
4228
4229 integer_t ccount = SCHED(processor_runq_count)(processor);
4230 if (ccount < lowest_count) {
4231 lowest_count = ccount;
4232 lc_processor = processor;
4233 }
4234 }
4235
4236 /*
4237 * For SMT configs, these idle secondary processors must have an active primary. Otherwise
4238 * the idle primary would have short-circuited the loop above.
4239 */
4240 uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
4241 ~pset->primary_map &
4242 pset->recommended_bitmask);
4243
4244 /* there shouldn't be a pending AST if the processor is idle */
4245 assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4246 assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
4247
4248 for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
4249 processor = processor_array[cpuid];
4250
4251 processor_t cprimary = processor->processor_primary;
4252
4253 integer_t primary_pri = cprimary->current_pri;
4254
4255 /*
4256 * TODO: This should also make the same decisions
4257 * as secondary_can_run_realtime_thread
4258 *
4259 * TODO: Keep track of the pending preemption priority
4260 * of the primary to make this more accurate.
4261 */
4262
4263 /* If the primary is running a no-smt thread, then don't choose its secondary */
4264 if (cprimary->state == PROCESSOR_RUNNING &&
4265 processor_active_thread_no_smt(cprimary)) {
4266 continue;
4267 }
4268
4269 /*
4270 * Find the idle secondary processor with the lowest priority primary
4271 *
4272 * We will choose this processor as a fallback if we find no better
4273 * primary to preempt.
4274 */
4275 if (primary_pri < lowest_idle_secondary_priority) {
4276 lp_idle_secondary_processor = processor;
4277 lowest_idle_secondary_priority = primary_pri;
4278 }
4279
4280 /* Find the lowest priority active primary with an idle secondary */
4281 if (primary_pri < lowest_unpaired_primary_priority) {
4282 /* If the primary processor is offline or starting up, it's not a candidate for this path */
4283 if (cprimary->state != PROCESSOR_RUNNING &&
4284 cprimary->state != PROCESSOR_DISPATCHING) {
4285 continue;
4286 }
4287
4288 if (!cprimary->is_recommended) {
4289 continue;
4290 }
4291
4292 /* if the primary is pending preemption, don't try to re-preempt it */
4293 if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
4294 continue;
4295 }
4296
4297 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
4298 bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
4299 continue;
4300 }
4301
4302 lowest_unpaired_primary_priority = primary_pri;
4303 lp_unpaired_primary_processor = cprimary;
4304 }
4305 }
4306
4307 /*
4308 * We prefer preempting a primary processor over waking up its secondary.
4309 * The secondary will then be woken up by the preempted thread.
4310 */
4311 if (thread->sched_pri > lowest_unpaired_primary_priority) {
4312 pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
4313 return lp_unpaired_primary_processor;
4314 }
4315
4316 /*
4317 * We prefer preempting a lower priority active processor over directly
4318 * waking up an idle secondary.
4319 * The preempted thread will then find the idle secondary.
4320 */
4321 if (thread->sched_pri > lowest_priority) {
4322 pset->last_chosen = lp_processor->cpu_id;
4323 return lp_processor;
4324 }
4325
4326 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4327 /*
4328 * For realtime threads, the most important aspect is
4329 * scheduling latency, so we will pick an active
4330 * secondary processor in this pset, or preempt
4331 * another RT thread with a further deadline before
4332 * going to the next pset.
4333 */
4334
4335 if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) {
4336 pset->last_chosen = lp_paired_secondary_processor->cpu_id;
4337 return lp_paired_secondary_processor;
4338 }
4339
4340 if (thread->realtime.deadline < furthest_deadline) {
4341 return fd_processor;
4342 }
4343 }
4344
4345 /*
4346 * lc_processor is used to indicate the best processor set run queue
4347 * on which to enqueue a thread when all available CPUs are busy with
4348 * higher priority threads, so try to make sure it is initialized.
4349 */
4350 if (lc_processor == PROCESSOR_NULL) {
4351 cpumap_t available_map = ((pset->cpu_state_map[PROCESSOR_IDLE] |
4352 pset->cpu_state_map[PROCESSOR_RUNNING] |
4353 pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
4354 pset->recommended_bitmask);
4355 cpuid = lsb_first(available_map);
4356 if (cpuid >= 0) {
4357 lc_processor = processor_array[cpuid];
4358 lowest_count = SCHED(processor_runq_count)(lc_processor);
4359 }
4360 }
4361
4362 /*
4363 * Move onto the next processor set.
4364 *
4365 * If all primary processors in this pset are running a higher
4366 * priority thread, move on to next pset. Only when we have
4367 * exhausted the search for primary processors do we
4368 * fall back to secondaries.
4369 */
4370 nset = next_pset(pset);
4371
4372 if (nset != starting_pset) {
4373 pset = change_locked_pset(pset, nset);
4374 }
4375 } while (nset != starting_pset);
4376
4377 /*
4378 * Make sure that we pick a running processor,
4379 * and that the correct processor set is locked.
4380 * Since we may have unlocked the candidate processor's
4381 * pset, it may have changed state.
4382 *
4383 * All primary processors are running a higher priority
4384 * thread, so the only options left are enqueuing on
4385 * the secondary processor that would perturb the lowest priority
4386 * primary, or the least busy primary.
4387 */
4388 boolean_t fallback_processor = false;
4389 do {
4390 /* lowest_priority is evaluated in the main loops above */
4391 if (lp_idle_secondary_processor != PROCESSOR_NULL) {
4392 processor = lp_idle_secondary_processor;
4393 lp_idle_secondary_processor = PROCESSOR_NULL;
4394 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
4395 processor = lp_paired_secondary_processor;
4396 lp_paired_secondary_processor = PROCESSOR_NULL;
4397 } else if (lc_processor != PROCESSOR_NULL) {
4398 processor = lc_processor;
4399 lc_processor = PROCESSOR_NULL;
4400 } else {
4401 /*
4402 * All processors are executing higher priority threads, and
4403 * the lowest_count candidate was not usable.
4404 *
4405 * For AMP platforms running the clutch scheduler, always
4406 * return a processor from the requested pset to allow the
4407 * thread to be enqueued in the correct runq. For non-AMP
4408 * platforms, simply return the master_processor.
4409 */
4410 fallback_processor = true;
4411 #if CONFIG_SCHED_EDGE
4412 processor = processor_array[lsb_first(starting_pset->primary_map)];
4413 #else /* CONFIG_SCHED_EDGE */
4414 processor = master_processor;
4415 #endif /* CONFIG_SCHED_EDGE */
4416 }
4417
4418 /*
4419 * Check that the correct processor set is
4420 * returned locked.
4421 */
4422 pset = change_locked_pset(pset, processor->processor_set);
4423
4424 /*
4425 * We must verify that the chosen processor is still available.
4426 * The cases where we pick the master_processor or the fallback
4427 * processor are exceptions, since we may need to enqueue a thread
4428 * on its runqueue if this is the last remaining processor
4429 * during pset shutdown.
4430 *
4431 * <rdar://problem/47559304> would really help here since it
4432 * gets rid of the weird last processor SHUTDOWN case where
4433 * the pset is still schedulable.
4434 */
4435 if (processor != master_processor && (fallback_processor == false) && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) {
4436 processor = PROCESSOR_NULL;
4437 }
4438 } while (processor == PROCESSOR_NULL);
4439
4440 pset->last_chosen = processor->cpu_id;
4441 return processor;
4442 }
4443
4444 /*
4445 * Default implementation of SCHED(choose_node)()
4446 * for single node systems
4447 */
4448 pset_node_t
4449 sched_choose_node(__unused thread_t thread)
4450 {
4451 return &pset_node0;
4452 }
4453
4454 /*
4455 * choose_starting_pset:
4456 *
4457 * Choose a starting processor set for the thread.
4458 * May return a processor hint within the pset.
4459 *
4460 * Returns a starting processor set, to be used by
4461 * choose_processor.
4462 *
4463 * The thread must be locked. The resulting pset is unlocked on return,
4464 * and is chosen without taking any pset locks.
4465 */
4466 processor_set_t
4467 choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
4468 {
4469 processor_set_t pset;
4470 processor_t processor = PROCESSOR_NULL;
4471
4472 if (thread->affinity_set != AFFINITY_SET_NULL) {
4473 /*
4474 * Use affinity set policy hint.
4475 */
4476 pset = thread->affinity_set->aset_pset;
4477 } else if (thread->last_processor != PROCESSOR_NULL) {
4478 /*
4479 * Simple (last processor) affinity case.
4480 */
4481 processor = thread->last_processor;
4482 pset = processor->processor_set;
4483 } else {
4484 /*
4485 * No Affinity case:
4486 *
4487 * Utilize a per-task hint to spread threads
4488 * among the available processor sets.
4489 * NRG this seems like the wrong thing to do.
4490 * See also task->pset_hint = pset in thread_setrun()
4491 */
4492 task_t task = thread->task;
4493
4494 pset = task->pset_hint;
4495 if (pset == PROCESSOR_SET_NULL) {
4496 pset = current_processor()->processor_set;
4497 }
4498
4499 pset = choose_next_pset(pset);
4500 }
4501
4502 if (!bit_test(node->pset_map, pset->pset_id)) {
4503 /* pset is not from this node so choose one that is */
4504 int id = lsb_first(node->pset_map);
4505 assert(id >= 0);
4506 pset = pset_array[id];
4507 }
4508
4509 if (bit_count(node->pset_map) == 1) {
4510 /* Only a single pset in this node */
4511 goto out;
4512 }
4513
4514 bool avoid_cpu0 = false;
4515
4516 #if defined(__x86_64__)
4517 if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
4518 /* Avoid the pset containing cpu0 */
4519 avoid_cpu0 = true;
4520 /* Assert that cpu0 is in pset0. I expect this to be true on __x86_64__ */
4521 assert(bit_test(pset_array[0]->cpu_bitmask, 0));
4522 }
4523 #endif
4524
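/*
 * For realtime threads, keep the hint pset if it still has a non-RT
 * primary; otherwise prefer another pset from pset_non_rt_primary_map,
 * and, when RT-on-SMT is allowed, fall back to any pset with a non-RT
 * processor (pset_non_rt_map). With avoid_cpu0 the maps are rotated by
 * one bit so pset 0 is considered last.
 */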
4525 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4526 pset_map_t rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
4527 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
4528 if (avoid_cpu0) {
4529 rt_target_map = bit_ror64(rt_target_map, 1);
4530 }
4531 int rotid = lsb_first(rt_target_map);
4532 if (rotid >= 0) {
4533 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
4534 pset = pset_array[id];
4535 goto out;
4536 }
4537 }
4538 if (!pset->is_SMT || !sched_allow_rt_smt) {
4539 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
4540 goto out;
4541 }
4542 rt_target_map = atomic_load(&node->pset_non_rt_map);
4543 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
4544 if (avoid_cpu0) {
4545 rt_target_map = bit_ror64(rt_target_map, 1);
4546 }
4547 int rotid = lsb_first(rt_target_map);
4548 if (rotid >= 0) {
4549 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
4550 pset = pset_array[id];
4551 goto out;
4552 }
4553 }
4554 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
4555 } else {
4556 pset_map_t idle_map = atomic_load(&node->pset_idle_map);
4557 if (!bit_test(idle_map, pset->pset_id)) {
4558 int next_idle_pset_id = lsb_first(idle_map);
4559 if (next_idle_pset_id >= 0) {
4560 pset = pset_array[next_idle_pset_id];
4561 }
4562 }
4563 }
4564
4565 out:
4566 if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
4567 processor = PROCESSOR_NULL;
4568 }
4569 if (processor != PROCESSOR_NULL) {
4570 *processor_hint = processor;
4571 }
4572
4573 return pset;
4574 }
4575
4576 /*
4577 * thread_setrun:
4578 *
4579 * Dispatch thread for execution, onto an idle
4580 * processor or run queue, and signal a preemption
4581 * as appropriate.
4582 *
4583 * Thread must be locked.
4584 */
4585 void
4586 thread_setrun(
4587 thread_t thread,
4588 sched_options_t options)
4589 {
4590 processor_t processor;
4591 processor_set_t pset;
4592
4593 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
4594 assert(thread->runq == PROCESSOR_NULL);
4595
4596 /*
4597 * Update priority if needed.
4598 */
4599 if (SCHED(can_update_priority)(thread)) {
4600 SCHED(update_priority)(thread);
4601 }
4602
4603 thread->sfi_class = sfi_thread_classify(thread);
4604
4605 assert(thread->runq == PROCESSOR_NULL);
4606
4607 if (thread->bound_processor == PROCESSOR_NULL) {
4608 /*
4609 * Unbound case.
4610 */
4611 processor_t processor_hint = PROCESSOR_NULL;
4612 pset_node_t node = SCHED(choose_node)(thread);
4613 processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);
4614
4615 pset_lock(starting_pset);
4616
4617 processor = SCHED(choose_processor)(starting_pset, processor_hint, thread);
4618 pset = processor->processor_set;
4619 task_t task = thread->task;
4620 task->pset_hint = pset; /* NRG this is done without holding the task lock */
4621
4622 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4623 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4624 } else {
4625 /*
4626 * Bound case:
4627 *
4628 * Unconditionally dispatch on the processor.
4629 */
4630 processor = thread->bound_processor;
4631 pset = processor->processor_set;
4632 pset_lock(pset);
4633
4634 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4635 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
4636 }
4637
4638 /*
4639 * Dispatch the thread on the chosen processor.
4640 * TODO: This should be based on sched_mode, not sched_pri
4641 */
4642 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4643 realtime_setrun(processor, thread);
4644 } else {
4645 processor_setrun(processor, thread, options);
4646 }
4647 /* pset is now unlocked */
4648 if (thread->bound_processor == PROCESSOR_NULL) {
4649 SCHED(check_spill)(pset, thread);
4650 }
4651 }
4652
4653 processor_set_t
4654 task_choose_pset(
4655 task_t task)
4656 {
4657 processor_set_t pset = task->pset_hint;
4658
4659 if (pset != PROCESSOR_SET_NULL) {
4660 pset = choose_next_pset(pset);
4661 }
4662
4663 return pset;
4664 }
4665
4666 /*
4667 * Check for a preemption point in
4668 * the current context.
4669 *
4670 * Called at splsched with thread locked.
4671 */
4672 ast_t
4673 csw_check(
4674 thread_t thread,
4675 processor_t processor,
4676 ast_t check_reason)
4677 {
4678 processor_set_t pset = processor->processor_set;
4679
4680 assert(thread == processor->active_thread);
4681
4682 pset_lock(pset);
4683
4684 processor_state_update_from_thread(processor, thread);
4685
4686 ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
4687
4688 /* Acknowledge the IPI if we decided not to preempt */
4689
4690 if ((preempt & AST_URGENT) == 0) {
4691 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
4692 }
4693
4694 if ((preempt & AST_PREEMPT) == 0) {
4695 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4696 }
4697
4698 pset_unlock(pset);
4699
4700 return preempt;
4701 }
4702
4703 /*
4704 * Check for preemption at splsched with
4705 * pset and thread locked
4706 */
4707 ast_t
4708 csw_check_locked(
4709 thread_t thread,
4710 processor_t processor,
4711 processor_set_t pset,
4712 ast_t check_reason)
4713 {
4714 ast_t result;
4715
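/*
 * Threads waiting on this pset's realtime run queue trigger preemption:
 * urgently while the current thread is still in its first timeslice, and
 * after the quantum expires, urgently only when the running thread is
 * below the realtime band.
 */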
4716 if (processor->first_timeslice) {
4717 if (rt_runq_count(pset) > 0) {
4718 return check_reason | AST_PREEMPT | AST_URGENT;
4719 }
4720 } else {
4721 if (rt_runq_count(pset) > 0) {
4722 if (BASEPRI_RTQUEUES > processor->current_pri) {
4723 return check_reason | AST_PREEMPT | AST_URGENT;
4724 } else {
4725 return check_reason | AST_PREEMPT;
4726 }
4727 }
4728 }
4729
4730 /*
4731 * If the current thread is running on a processor that is no longer recommended,
4732 * urgently preempt it, at which point thread_select() should
4733 * try to idle the processor and re-dispatch the thread to a recommended processor.
4734 */
4735 if (!processor->is_recommended) {
4736 return check_reason | AST_PREEMPT | AST_URGENT;
4737 }
4738
4739 result = SCHED(processor_csw_check)(processor);
4740 if (result != AST_NONE) {
4741 return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE);
4742 }
4743
4744 /*
4745 * Same for avoid-processor
4746 *
4747 * TODO: Should these set AST_REBALANCE?
4748 */
4749 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
4750 return check_reason | AST_PREEMPT;
4751 }
4752
4753 /*
4754 * Even though we could continue executing on this processor, a
4755 * secondary SMT core should try to shed load to another primary core.
4756 *
4757 * TODO: Should this do the same check that thread_select does? i.e.
4758 * if no bound threads target this processor, and idle primaries exist, preempt
4759 * The case of RT threads existing is already taken care of above
4760 */
4761
4762 if (processor->current_pri < BASEPRI_RTQUEUES &&
4763 processor->processor_primary != processor) {
4764 return check_reason | AST_PREEMPT;
4765 }
4766
4767 if (thread->state & TH_SUSP) {
4768 return check_reason | AST_PREEMPT;
4769 }
4770
4771 #if CONFIG_SCHED_SFI
4772 /*
4773 * Current thread may not need to be preempted, but maybe needs
4774 * an SFI wait?
4775 */
4776 result = sfi_thread_needs_ast(thread, NULL);
4777 if (result != AST_NONE) {
4778 return check_reason | result;
4779 }
4780 #endif
4781
4782 return AST_NONE;
4783 }
4784
4785 /*
4786 * Handle preemption IPI or IPI in response to setting an AST flag
4787 * Triggered by cause_ast_check
4788 * Called at splsched
4789 */
4790 void
4791 ast_check(processor_t processor)
4792 {
4793 if (processor->state != PROCESSOR_RUNNING &&
4794 processor->state != PROCESSOR_SHUTDOWN) {
4795 return;
4796 }
4797
4798 thread_t thread = processor->active_thread;
4799
4800 assert(thread == current_thread());
4801
4802 thread_lock(thread);
4803
4804 /*
4805 * Propagate thread ast to processor.
4806 * (handles IPI in response to setting AST flag)
4807 */
4808 ast_propagate(thread);
4809
4810 /*
4811 * Stash the old urgency and perfctl values to find out if
4812 * csw_check updates them.
4813 */
4814 thread_urgency_t old_urgency = processor->current_urgency;
4815 perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
4816
4817 ast_t preempt;
4818
4819 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4820 ast_on(preempt);
4821 }
4822
4823 if (old_urgency != processor->current_urgency) {
4824 /*
4825 * Urgency updates happen with the thread lock held (ugh).
4826 * TODO: This doesn't notice QoS changes...
4827 */
4828 uint64_t urgency_param1, urgency_param2;
4829
4830 thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4831 thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
4832 }
4833
4834 thread_unlock(thread);
4835
4836 if (old_perfctl_class != processor->current_perfctl_class) {
4837 /*
4838 * We updated the perfctl class of this thread from another core.
4839 * Let CLPC know that the currently running thread has a new
4840 * class.
4841 */
4842
4843 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
4844 mach_approximate_time(), 0, thread);
4845 }
4846 }
4847
4848
4849 /*
4850 * set_sched_pri:
4851 *
4852 * Set the scheduled priority of the specified thread.
4853 *
4854 * This may cause the thread to change queues.
4855 *
4856 * Thread must be locked.
4857 */
4858 void
4859 set_sched_pri(
4860 thread_t thread,
4861 int16_t new_priority,
4862 set_sched_pri_options_t options)
4863 {
4864 bool is_current_thread = (thread == current_thread());
4865 bool removed_from_runq = false;
4866 bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
4867
4868 int16_t old_priority = thread->sched_pri;
4869
4870 /* If we're already at this priority, no need to mess with the runqueue */
4871 if (new_priority == old_priority) {
4872 #if CONFIG_SCHED_CLUTCH
4873 /* For the first thread in the system, the priority is correct but
4874 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
4875 * scheduler relies on the bucket being set for all threads, update
4876 * its bucket here.
4877 */
4878 if (thread->th_sched_bucket == TH_BUCKET_RUN) {
4879 assert(is_current_thread);
4880 SCHED(update_thread_bucket)(thread);
4881 }
4882 #endif /* CONFIG_SCHED_CLUTCH */
4883
4884 return;
4885 }
4886
4887 if (is_current_thread) {
4888 assert(thread->state & TH_RUN);
4889 assert(thread->runq == PROCESSOR_NULL);
4890 } else {
4891 removed_from_runq = thread_run_queue_remove(thread);
4892 }
4893
4894 thread->sched_pri = new_priority;
4895
4896 #if CONFIG_SCHED_CLUTCH
4897 /*
4898 * Since for the clutch scheduler, the thread's bucket determines its runq
4899 * in the hierarchy it is important to update the bucket when the thread
4900 * lock is held and the thread has been removed from the runq hierarchy.
4901 */
4902 SCHED(update_thread_bucket)(thread);
4903
4904 #endif /* CONFIG_SCHED_CLUTCH */
4905
4906 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
4907 (uintptr_t)thread_tid(thread),
4908 thread->base_pri,
4909 thread->sched_pri,
4910 thread->sched_usage,
4911 0);
4912
4913 if (removed_from_runq) {
4914 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4915 } else if (is_current_thread) {
4916 processor_t processor = thread->last_processor;
4917 assert(processor == current_processor());
4918
4919 thread_urgency_t old_urgency = processor->current_urgency;
4920
4921 /*
4922 * When dropping in priority, check if the thread no longer belongs on core.
4923 * If a thread raises its own priority, don't aggressively rebalance it.
4924 * <rdar://problem/31699165>
4925 *
4926 * csw_check does a processor_state_update_from_thread, but
4927 * we should do our own if we're being lazy.
4928 */
4929 if (!lazy_update && new_priority < old_priority) {
4930 ast_t preempt;
4931
4932 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4933 ast_on(preempt);
4934 }
4935 } else {
4936 processor_state_update_from_thread(processor, thread);
4937 }
4938
4939 /*
4940 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4941 * class alterations from user space to occur relatively infrequently, hence
4942 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4943 * inheritance is expected to involve priority changes.
4944 */
4945 if (processor->current_urgency != old_urgency) {
4946 uint64_t urgency_param1, urgency_param2;
4947
4948 thread_urgency_t new_urgency = thread_get_urgency(thread,
4949 &urgency_param1, &urgency_param2);
4950
4951 thread_tell_urgency(new_urgency, urgency_param1,
4952 urgency_param2, 0, thread);
4953 }
4954
4955 /* TODO: only call this if current_perfctl_class changed */
4956 uint64_t ctime = mach_approximate_time();
4957 machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
4958 } else if (thread->state & TH_RUN) {
4959 processor_t processor = thread->last_processor;
4960
4961 if (!lazy_update &&
4962 processor != PROCESSOR_NULL &&
4963 processor != current_processor() &&
4964 processor->active_thread == thread) {
4965 cause_ast_check(processor);
4966 }
4967 }
4968 }
4969
4970 /*
4971 * thread_run_queue_remove_for_handoff
4972 *
4973 * Pull a thread or its (recursive) push target out of the runqueue
4974 * so that it is ready for thread_run()
4975 *
4976 * Called at splsched
4977 *
4978 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4979 * This may be different than the thread that was passed in.
4980 */
4981 thread_t
4982 thread_run_queue_remove_for_handoff(thread_t thread)
4983 {
4984 thread_t pulled_thread = THREAD_NULL;
4985
4986 thread_lock(thread);
4987
4988 /*
4989 * Check that the thread is not bound to a different processor,
4990 * that the NO_SMT flag is not set on the thread, that the processor's
4991 * cluster type matches the thread's if the thread is pinned to a
4992 * particular cluster, and that realtime is not involved.
4993 *
4994 * Next, pull it off its run queue. If it doesn't come, it's not eligible.
4995 */
4996 processor_t processor = current_processor();
4997 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
4998 && (!thread_no_smt(thread))
4999 && (processor->current_pri < BASEPRI_RTQUEUES)
5000 && (thread->sched_pri < BASEPRI_RTQUEUES)
5001 #if __AMP__
5002 && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) ||
5003 processor->processor_set->pset_cluster_type == PSET_AMP_P)
5004 && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) ||
5005 processor->processor_set->pset_cluster_type == PSET_AMP_E)
5006 #endif /* __AMP__ */
5007 ) {
5008 if (thread_run_queue_remove(thread)) {
5009 pulled_thread = thread;
5010 }
5011 }
5012
5013 thread_unlock(thread);
5014
5015 return pulled_thread;
5016 }
5017
5018 /*
5019 * thread_prepare_for_handoff
5020 *
5021 * Make the thread ready for handoff.
5022 * If the thread was runnable, pull it off the runq; if the thread could
5023 * not be pulled, return NULL.
5024 *
5025 * If the thread was woken up from a wait for handoff, make sure it is not bound to a
5026 * different processor.
5027 *
5028 * Called at splsched
5029 *
5030 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
5031 * This may be different than the thread that was passed in.
5032 */
5033 thread_t
5034 thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
5035 {
5036 thread_t pulled_thread = THREAD_NULL;
5037
5038 if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
5039 processor_t processor = current_processor();
5040 thread_lock(thread);
5041
5042 /*
5043 * Check that the thread is not bound to a different processor,
5044 * that the NO_SMT flag is not set on the thread, and that the processor's
5045 * cluster type matches the thread's if the thread is pinned to a
5046 * particular cluster. Call thread_setrun instead if the above conditions
5047 * are not satisfied.
5048 */
5049 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
5050 && (!thread_no_smt(thread))
5051 #if __AMP__
5052 && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) ||
5053 processor->processor_set->pset_cluster_type == PSET_AMP_P)
5054 && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) ||
5055 processor->processor_set->pset_cluster_type == PSET_AMP_E)
5056 #endif /* __AMP__ */
5057 ) {
5058 pulled_thread = thread;
5059 } else {
5060 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
5061 }
5062 thread_unlock(thread);
5063 } else {
5064 pulled_thread = thread_run_queue_remove_for_handoff(thread);
5065 }
5066
5067 return pulled_thread;
5068 }
5069
5070 /*
5071 * thread_run_queue_remove:
5072 *
5073 * Remove a thread from its current run queue and
5074 * return TRUE if successful.
5075 *
5076 * Thread must be locked.
5077 *
5078 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
5079 * run queues because the caller locked the thread. Otherwise
5080 * the thread is on a run queue, but could be chosen for dispatch
5081 * and removed by another processor under a different lock, which
5082 * will set thread->runq to PROCESSOR_NULL.
5083 *
5084 * Hence the thread select path must not rely on anything that could
5085 * be changed under the thread lock after calling this function,
5086 * most importantly thread->sched_pri.
5087 */
5088 boolean_t
5089 thread_run_queue_remove(
5090 thread_t thread)
5091 {
5092 boolean_t removed = FALSE;
5093 processor_t processor = thread->runq;
5094
5095 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
5096 /* Thread isn't runnable */
5097 assert(thread->runq == PROCESSOR_NULL);
5098 return FALSE;
5099 }
5100
5101 if (processor == PROCESSOR_NULL) {
5102 /*
5103 * The thread is either not on the runq,
5104 * or is in the midst of being removed from the runq.
5105 *
5106 * runq is set to NULL under the pset lock, not the thread
5107 * lock, so the thread may still be in the process of being dequeued
5108 * from the runq. It will wait in invoke for the thread lock to be
5109 * dropped.
5110 */
5111
5112 return FALSE;
5113 }
5114
5115 if (thread->sched_pri < BASEPRI_RTQUEUES) {
5116 return SCHED(processor_queue_remove)(processor, thread);
5117 }
5118
5119 processor_set_t pset = processor->processor_set;
5120
5121 pset_lock(pset);
5122
5123 if (thread->runq != PROCESSOR_NULL) {
5124 /*
5125 * Thread is on the RT run queue and we have a lock on
5126 * that run queue.
5127 */
5128
5129 remqueue(&thread->runq_links);
5130 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
5131 rt_runq_count_decr(pset);
5132
5133 thread->runq = PROCESSOR_NULL;
5134
5135 removed = TRUE;
5136 }
5137
5138 pset_unlock(pset);
5139
5140 return removed;
5141 }
5142
5143 /*
5144 * Put the thread back where it goes after a thread_run_queue_remove
5145 *
5146 * Thread must have been removed under the same thread lock hold
5147 *
5148 * thread locked, at splsched
5149 */
5150 void
5151 thread_run_queue_reinsert(thread_t thread, sched_options_t options)
5152 {
5153 assert(thread->runq == PROCESSOR_NULL);
5154 assert(thread->state & (TH_RUN));
5155
5156 thread_setrun(thread, options);
5157 }
5158
5159 void
5160 sys_override_cpu_throttle(boolean_t enable_override)
5161 {
5162 if (enable_override) {
5163 cpu_throttle_enabled = 0;
5164 } else {
5165 cpu_throttle_enabled = 1;
5166 }
5167 }
5168
5169 thread_urgency_t
5170 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
5171 {
5172 uint64_t urgency_param1 = 0, urgency_param2 = 0;
5173
5174 thread_urgency_t urgency;
5175
5176 if (thread == NULL || (thread->state & TH_IDLE)) {
5177 urgency_param1 = 0;
5178 urgency_param2 = 0;
5179
5180 urgency = THREAD_URGENCY_NONE;
5181 } else if (thread->sched_mode == TH_MODE_REALTIME) {
5182 urgency_param1 = thread->realtime.period;
5183 urgency_param2 = thread->realtime.deadline;
5184
5185 urgency = THREAD_URGENCY_REAL_TIME;
5186 } else if (cpu_throttle_enabled &&
5187 (thread->sched_pri <= MAXPRI_THROTTLE) &&
5188 (thread->base_pri <= MAXPRI_THROTTLE)) {
5189 /*
5190 * Threads that are running at low priority but are not
5191 * tagged with a specific QoS are separated out from
5192 * the "background" urgency. The performance management
5193 * subsystem can decide to either treat these threads
5194 * as normal threads or look at other signals like thermal
5195 * levels for optimal power/perf tradeoffs for a platform.
5196 */
5197 boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
5198 boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1);
5199
5200 /*
5201 * Background urgency applied when thread priority is
5202 * MAXPRI_THROTTLE or lower and thread is not promoted
5203 * and thread has a QoS specified
5204 */
5205 urgency_param1 = thread->sched_pri;
5206 urgency_param2 = thread->base_pri;
5207
5208 if (thread_lacks_qos && !task_is_suppressed) {
5209 urgency = THREAD_URGENCY_LOWPRI;
5210 } else {
5211 urgency = THREAD_URGENCY_BACKGROUND;
5212 }
5213 } else {
5214 /* For otherwise unclassified threads, report throughput QoS parameters */
5215 urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
5216 urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
5217 urgency = THREAD_URGENCY_NORMAL;
5218 }
5219
5220 if (arg1 != NULL) {
5221 *arg1 = urgency_param1;
5222 }
5223 if (arg2 != NULL) {
5224 *arg2 = urgency_param2;
5225 }
5226
5227 return urgency;
5228 }
5229
5230 perfcontrol_class_t
5231 thread_get_perfcontrol_class(thread_t thread)
5232 {
5233 /* Special case handling */
5234 if (thread->state & TH_IDLE) {
5235 return PERFCONTROL_CLASS_IDLE;
5236 }
5237 if (thread->task == kernel_task) {
5238 return PERFCONTROL_CLASS_KERNEL;
5239 }
5240 if (thread->sched_mode == TH_MODE_REALTIME) {
5241 return PERFCONTROL_CLASS_REALTIME;
5242 }
5243
5244 /* perfcontrol_class based on base_pri */
5245 if (thread->base_pri <= MAXPRI_THROTTLE) {
5246 return PERFCONTROL_CLASS_BACKGROUND;
5247 } else if (thread->base_pri <= BASEPRI_UTILITY) {
5248 return PERFCONTROL_CLASS_UTILITY;
5249 } else if (thread->base_pri <= BASEPRI_DEFAULT) {
5250 return PERFCONTROL_CLASS_NONUI;
5251 } else if (thread->base_pri <= BASEPRI_FOREGROUND) {
5252 return PERFCONTROL_CLASS_UI;
5253 } else {
5254 return PERFCONTROL_CLASS_ABOVEUI;
5255 }
5256 }
5257
5258 /*
5259 * This is the processor idle loop, which just looks for other threads
5260 * to execute. Processor idle threads invoke this without supplying a
5261 * current thread to idle without an asserted wait state.
5262 *
5263 * Returns the next thread to execute if dispatched directly.
5264 */
5265
5266 #if 0
5267 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
5268 #else
5269 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
5270 #endif
5271
5272 thread_t
5273 processor_idle(
5274 thread_t thread,
5275 processor_t processor)
5276 {
5277 processor_set_t pset = processor->processor_set;
5278
5279 (void)splsched();
5280
5281 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5282 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
5283 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
5284
5285 SCHED_STATS_INC(idle_transitions);
5286 assert(processor->running_timers_active == false);
5287
5288 uint64_t ctime = mach_absolute_time();
5289
5290 timer_switch(&processor->system_state, ctime, &processor->idle_state);
5291 processor->current_state = &processor->idle_state;
5292
5293 cpu_quiescent_counter_leave(ctime);
5294
5295 while (1) {
5296 /*
5297 * Ensure that updates to my processor and pset state,
5298 * made by the IPI source processor before sending the IPI,
5299 * are visible on this processor now (even though we don't
5300 * take the pset lock yet).
5301 */
5302 atomic_thread_fence(memory_order_acquire);
5303
5304 if (processor->state != PROCESSOR_IDLE) {
5305 break;
5306 }
5307 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5308 break;
5309 }
5310 #if defined(CONFIG_SCHED_DEFERRED_AST)
5311 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
5312 break;
5313 }
5314 #endif
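/*
 * A recommended primary processor also wakes for pending realtime work;
 * secondary or de-recommended processors wake only for threads bound
 * directly to them.
 */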
5315 if (processor->is_recommended && (processor->processor_primary == processor)) {
5316 if (rt_runq_count(pset)) {
5317 break;
5318 }
5319 } else {
5320 if (SCHED(processor_bound_count)(processor)) {
5321 break;
5322 }
5323 }
5324
5325 IDLE_KERNEL_DEBUG_CONSTANT(
5326 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
5327
5328 machine_track_platform_idle(TRUE);
5329
5330 machine_idle();
5331 /* returns with interrupts enabled */
5332
5333 machine_track_platform_idle(FALSE);
5334
5335 (void)splsched();
5336
5337 /*
5338 * Check if we should call sched_timeshare_consider_maintenance() here.
5339 * The CPU was woken out of idle due to an interrupt and we should do the
5340 * call only if the processor is still idle. If the processor is non-idle,
5341 * the threads running on the processor would do the call as part of
5342 * context switching.
5343 */
5344 if (processor->state == PROCESSOR_IDLE) {
5345 sched_timeshare_consider_maintenance(mach_absolute_time());
5346 }
5347
5348 IDLE_KERNEL_DEBUG_CONSTANT(
5349 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
5350
5351 if (!SCHED(processor_queue_empty)(processor)) {
5352 /* Secondary SMT processors respond to directed wakeups
5353 * exclusively. Some platforms induce 'spurious' SMT wakeups.
5354 */
5355 if (processor->processor_primary == processor) {
5356 break;
5357 }
5358 }
5359 }
5360
5361 ctime = mach_absolute_time();
5362
5363 timer_switch(&processor->idle_state, ctime, &processor->system_state);
5364 processor->current_state = &processor->system_state;
5365
5366 cpu_quiescent_counter_join(ctime);
5367
5368 ast_t reason = AST_NONE;
5369
5370 /* We're handling all scheduling AST's */
5371 ast_off(AST_SCHEDULING);
5372
5373 /*
5374 * thread_select will move the processor from dispatching to running,
5375 * or put it in idle if there's nothing to do.
5376 */
5377 thread_t current_thread = current_thread();
5378
5379 thread_lock(current_thread);
5380 thread_t new_thread = thread_select(current_thread, processor, &reason);
5381 thread_unlock(current_thread);
5382
5383 assert(processor->running_timers_active == false);
5384
5385 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5386 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
5387 (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
5388
5389 return new_thread;
5390 }
5391
5392 /*
5393 * Each processor has a dedicated thread which
5394 * executes the idle loop when there is no suitable
5395 * previous context.
5396 *
5397 * This continuation is entered with interrupts disabled.
5398 */
5399 void
5400 idle_thread(__assert_only void* parameter,
5401 __unused wait_result_t result)
5402 {
5403 assert(ml_get_interrupts_enabled() == FALSE);
5404 assert(parameter == NULL);
5405
5406 processor_t processor = current_processor();
5407
5408 /*
5409 * Ensure that anything running in idle context triggers
5410 * preemption-disabled checks.
5411 */
5412 disable_preemption();
5413
5414 /*
5415 * Enable interrupts temporarily to handle any pending interrupts
5416 * or IPIs before deciding to sleep
5417 */
5418 spllo();
5419
5420 thread_t new_thread = processor_idle(THREAD_NULL, processor);
5421 /* returns with interrupts disabled */
5422
5423 enable_preemption();
5424
5425 if (new_thread != THREAD_NULL) {
5426 thread_run(processor->idle_thread,
5427 idle_thread, NULL, new_thread);
5428 /*NOTREACHED*/
5429 }
5430
5431 thread_block(idle_thread);
5432 /*NOTREACHED*/
5433 }
5434
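/*
 *	idle_thread_create:
 *
 *	Create the dedicated idle thread for a processor: a kernel thread
 *	named "idle #<cpu>" that is bound to the processor, dropped to
 *	IDLEPRI, and marked TH_RUN | TH_IDLE with TH_OPT_IDLE_THREAD so the
 *	scheduler can recognize it.  The extra reference returned by
 *	kernel_thread_create() is released here.
 */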
5435 kern_return_t
5436 idle_thread_create(
5437 processor_t processor)
5438 {
5439 kern_return_t result;
5440 thread_t thread;
5441 spl_t s;
5442 char name[MAXTHREADNAMESIZE];
5443
5444 result = kernel_thread_create(idle_thread, NULL, MAXPRI_KERNEL, &thread);
5445 if (result != KERN_SUCCESS) {
5446 return result;
5447 }
5448
5449 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
5450 thread_set_thread_name(thread, name);
5451
5452 s = splsched();
5453 thread_lock(thread);
5454 thread->bound_processor = processor;
5455 processor->idle_thread = thread;
5456 thread->sched_pri = thread->base_pri = IDLEPRI;
5457 thread->state = (TH_RUN | TH_IDLE);
5458 thread->options |= TH_OPT_IDLE_THREAD;
5459 thread_unlock(thread);
5460 splx(s);
5461
5462 thread_deallocate(thread);
5463
5464 return KERN_SUCCESS;
5465 }
5466
5467 /*
5468 * sched_startup:
5469 *
5470 * Kicks off scheduler services.
5471 *
5472 * Called at splsched.
5473 */
5474 void
5475 sched_startup(void)
5476 {
5477 kern_return_t result;
5478 thread_t thread;
5479
5480 simple_lock_init(&sched_vm_group_list_lock, 0);
5481
5482 #if __arm__ || __arm64__
5483 simple_lock_init(&sched_recommended_cores_lock, 0);
5484 #endif /* __arm__ || __arm64__ */
5485
5486 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
5487 NULL, MAXPRI_KERNEL, &thread);
5488 if (result != KERN_SUCCESS) {
5489 panic("sched_startup");
5490 }
5491
5492 thread_deallocate(thread);
5493
5494 assert_thread_magic(thread);
5495
5496 /*
5497 * Yield to the sched_init_thread once, to
5498 * initialize our own thread after being switched
5499 * back to.
5500 *
5501 * The current thread is the only other thread
5502 * active at this point.
5503 */
5504 thread_block(THREAD_CONTINUE_NULL);
5505 }
5506
5507 #if __arm64__
5508 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
5509 #endif /* __arm64__ */
5510
5511
5512 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5513
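/*
 * sched_maintenance_deadline:  next absolute time at which
 *	sched_timeshare_consider_maintenance() should wake the maintenance thread.
 * sched_tick_last_abstime:     timestamp of the previous maintenance pass.
 * sched_tick_delta:            pseudo-ticks credited to the current pass,
 *	clamped to [1, SCHED_TICK_MAX_DELTA].
 * sched_tick_max_delta:        largest delta observed so far.
 */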
5514 static volatile uint64_t sched_maintenance_deadline;
5515 static uint64_t sched_tick_last_abstime;
5516 static uint64_t sched_tick_delta;
5517 uint64_t sched_tick_max_delta;
5518
5519
5520 /*
5521 * sched_timeshare_maintenance_continue:
5522 *
5523 * Perform periodic bookkeeping functions about ten
5524 * times per second.
5525 */
5526 void
5527 sched_timeshare_maintenance_continue(void)
5528 {
5529 uint64_t sched_tick_ctime, late_time;
5530
5531 struct sched_update_scan_context scan_context = {
5532 .earliest_bg_make_runnable_time = UINT64_MAX,
5533 .earliest_normal_make_runnable_time = UINT64_MAX,
5534 .earliest_rt_make_runnable_time = UINT64_MAX
5535 };
5536
5537 sched_tick_ctime = mach_absolute_time();
5538
5539 if (__improbable(sched_tick_last_abstime == 0)) {
5540 sched_tick_last_abstime = sched_tick_ctime;
5541 late_time = 0;
5542 sched_tick_delta = 1;
5543 } else {
5544 late_time = sched_tick_ctime - sched_tick_last_abstime;
5545 sched_tick_delta = late_time / sched_tick_interval;
5546 /* Ensure a delta of at least 1, since the interval could be slightly
5547 * smaller than the sched_tick_interval due to dispatch
5548 * latencies.
5549 */
5550 sched_tick_delta = MAX(sched_tick_delta, 1);
5551
5552 /* In the event interrupt latencies or platform
5553 * idle events that advanced the timebase resulted
5554 * in periods where no threads were dispatched,
5555 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
5556 * iterations.
5557 */
5558 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
5559
5560 sched_tick_last_abstime = sched_tick_ctime;
5561 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
5562 }
5563
5564 scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
5565 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
5566 sched_tick_delta, late_time, 0, 0, 0);
5567
5568 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
5569 * This could be greater than 1 if substantial intervals occur in which
5570 * all processors are idle, which is rare in practice.
5571 */
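/* For example, if 3.4 tick intervals elapsed since the previous pass
 * (e.g. after a stretch of idle), late_time / sched_tick_interval is 3,
 * so sched_tick advances by 3 (capped at SCHED_TICK_MAX_DELTA) rather
 * than by 1.
 */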
5572
5573 sched_tick += sched_tick_delta;
5574
5575 update_vm_info();
5576
5577 /*
5578 * Compute various averages.
5579 */
5580 compute_averages(sched_tick_delta);
5581
5582 /*
5583 * Scan the run queues for threads which
5584 * may need to be updated, and find the earliest runnable thread on the runqueue
5585 * to report its latency.
5586 */
5587 SCHED(thread_update_scan)(&scan_context);
5588
5589 SCHED(rt_runq_scan)(&scan_context);
5590
5591 uint64_t ctime = mach_absolute_time();
5592
5593 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
5594 ctime - scan_context.earliest_bg_make_runnable_time : 0;
5595
5596 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
5597 ctime - scan_context.earliest_normal_make_runnable_time : 0;
5598
5599 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
5600 ctime - scan_context.earliest_rt_make_runnable_time : 0;
5601
5602 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
5603
5604 /*
5605 * Check to see if the special sched VM group needs attention.
5606 */
5607 sched_vm_group_maintenance();
5608
5609 #if __arm__ || __arm64__
5610 /* Check to see if the recommended cores failsafe is active */
5611 sched_recommended_cores_maintenance();
5612 #endif /* __arm__ || __arm64__ */
5613
5614
5615 #if DEBUG || DEVELOPMENT
5616 #if __x86_64__
5617 #include <i386/misc_protos.h>
5618 /* Check for long-duration interrupts */
5619 mp_interrupt_watchdog();
5620 #endif /* __x86_64__ */
5621 #endif /* DEBUG || DEVELOPMENT */
5622
5623 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
5624 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
5625 sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
5626
5627 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
5628 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
5629 /*NOTREACHED*/
5630 }
5631
5632 static uint64_t sched_maintenance_wakeups;
5633
5634 /*
5635 * Determine if the set of routines formerly driven by a maintenance timer
5636 * must be invoked, based on a deadline comparison. Signals the scheduler
5637 * maintenance thread on deadline expiration. Must be invoked at an interval
5638 * lower than the "sched_tick_interval", currently accomplished by
5639 * invocation via the quantum expiration timer and at context switch time.
5640 * Performance matters: this routine reuses a timestamp approximating the
5641 * current absolute time received from the caller, and should perform
5642 * no more than a comparison against the deadline in the common case.
5643 */
5644 void
5645 sched_timeshare_consider_maintenance(uint64_t ctime)
5646 {
5647 cpu_quiescent_counter_checkin(ctime);
5648
5649 uint64_t deadline = sched_maintenance_deadline;
5650
5651 if (__improbable(ctime >= deadline)) {
5652 if (__improbable(current_thread() == sched_maintenance_thread)) {
5653 return;
5654 }
5655 OSMemoryBarrier();
5656
5657 uint64_t ndeadline = ctime + sched_tick_interval;
5658
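/* Only the CPU that wins this compare-and-swap advances the deadline
 * and issues the wakeup; CPUs that lose the race simply observe the
 * new deadline and continue.
 */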
5659 if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
5660 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
5661 sched_maintenance_wakeups++;
5662 }
5663 }
5664
5665 #if !CONFIG_SCHED_CLUTCH
5666 /*
5667 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
5668 * scheduler, the load is maintained at the thread group and bucket level.
5669 */
5670 uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
5671
5672 if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
5673 uint64_t new_deadline = 0;
5674 if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
5675 compute_sched_load();
5676 new_deadline = ctime + sched_load_compute_interval_abs;
5677 os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
5678 }
5679 }
5680 #endif /* CONFIG_SCHED_CLUTCH */
5681
5682 #if __arm64__
5683 uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
5684
5685 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
5686 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
5687 if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
5688 machine_perfcontrol_deadline_passed(perf_deadline);
5689 }
5690 }
5691 #endif /* __arm64__ */
5692 }
5693
5694 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5695
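/*
 *	sched_init_thread:
 *
 *	Continuation for the thread created by sched_startup().  It blocks
 *	once so that startup can finish on the original thread, then names
 *	itself, publishes itself as sched_maintenance_thread, and enters the
 *	scheduler's maintenance continuation, which does not return.
 */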
5696 void
5697 sched_init_thread(void)
5698 {
5699 thread_block(THREAD_CONTINUE_NULL);
5700
5701 thread_t thread = current_thread();
5702
5703 thread_set_thread_name(thread, "sched_maintenance_thread");
5704
5705 sched_maintenance_thread = thread;
5706
5707 SCHED(maintenance_continuation)();
5708
5709 /*NOTREACHED*/
5710 }
5711
5712 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5713
5714 /*
5715 * thread_update_scan / runq_scan:
5716 *
5717 * Scan the run queues to account for timesharing threads
5718 * which need to be updated.
5719 *
5720 * Scanner runs in two passes. Pass one squirrels likely
5721 * threads away in an array, pass two does the update.
5722 *
5723 * This is necessary because the run queue is locked for
5724 * the candidate scan, but the thread is locked for the update.
5725 *
5726 * Array should be sized to make forward progress, without
5727 * disabling preemption for long periods.
5728 */
5729
5730 #define THREAD_UPDATE_SIZE 128
5731
5732 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
5733 static uint32_t thread_update_count = 0;
5734
5735 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
5736 boolean_t
5737 thread_update_add_thread(thread_t thread)
5738 {
5739 if (thread_update_count == THREAD_UPDATE_SIZE) {
5740 return FALSE;
5741 }
5742
5743 thread_update_array[thread_update_count++] = thread;
5744 thread_reference_internal(thread);
5745 return TRUE;
5746 }
5747
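/* Pass two of the update scan: drain thread_update_array, updating the
 * priority of each thread that is not waiting and has not been updated
 * this sched_tick, then drop the reference taken by
 * thread_update_add_thread().
 */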
5748 void
5749 thread_update_process_threads(void)
5750 {
5751 assert(thread_update_count <= THREAD_UPDATE_SIZE);
5752
5753 for (uint32_t i = 0; i < thread_update_count; i++) {
5754 thread_t thread = thread_update_array[i];
5755 assert_thread_magic(thread);
5756 thread_update_array[i] = THREAD_NULL;
5757
5758 spl_t s = splsched();
5759 thread_lock(thread);
5760 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
5761 SCHED(update_priority)(thread);
5762 }
5763 thread_unlock(thread);
5764 splx(s);
5765
5766 thread_deallocate(thread);
5767 }
5768
5769 thread_update_count = 0;
5770 }
5771
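/* Examine one runnable thread with its run queue locked: queue stale
 * timeshare threads in thread_update_array for pass two, and track the
 * earliest last-made-runnable times in the background and normal bands
 * for latency reporting.  Returns TRUE when the array is full and the
 * caller must retry the scan.
 */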
5772 static boolean_t
5773 runq_scan_thread(
5774 thread_t thread,
5775 sched_update_scan_context_t scan_context)
5776 {
5777 assert_thread_magic(thread);
5778
5779 if (thread->sched_stamp != sched_tick &&
5780 thread->sched_mode == TH_MODE_TIMESHARE) {
5781 if (thread_update_add_thread(thread) == FALSE) {
5782 return TRUE;
5783 }
5784 }
5785
5786 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5787 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5788 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5789 }
5790 } else {
5791 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5792 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5793 }
5794 }
5795
5796 return FALSE;
5797 }
5798
5799 /*
5800 * Scan a runq for candidate threads.
5801 *
5802 * Returns TRUE if retry is needed.
5803 */
5804 boolean_t
5805 runq_scan(
5806 run_queue_t runq,
5807 sched_update_scan_context_t scan_context)
5808 {
5809 int count = runq->count;
5810 int queue_index;
5811
5812 assert(count >= 0);
5813
5814 if (count == 0) {
5815 return FALSE;
5816 }
5817
5818 for (queue_index = bitmap_first(runq->bitmap, NRQS);
5819 queue_index >= 0;
5820 queue_index = bitmap_next(runq->bitmap, queue_index)) {
5821 thread_t thread;
5822 circle_queue_t queue = &runq->queues[queue_index];
5823
5824 cqe_foreach_element(thread, queue, runq_links) {
5825 assert(count > 0);
5826 if (runq_scan_thread(thread, scan_context) == TRUE) {
5827 return TRUE;
5828 }
5829 count--;
5830 }
5831 }
5832
5833 return FALSE;
5834 }
5835
5836 #if CONFIG_SCHED_CLUTCH
5837
5838 boolean_t
5839 sched_clutch_timeshare_scan(
5840 queue_t thread_queue,
5841 uint16_t thread_count,
5842 sched_update_scan_context_t scan_context)
5843 {
5844 if (thread_count == 0) {
5845 return FALSE;
5846 }
5847
5848 thread_t thread;
5849 qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
5850 if (runq_scan_thread(thread, scan_context) == TRUE) {
5851 return TRUE;
5852 }
5853 thread_count--;
5854 }
5855
5856 assert(thread_count == 0);
5857 return FALSE;
5858 }
5859
5860
5861 #endif /* CONFIG_SCHED_CLUTCH */
5862
5863 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5864
5865 boolean_t
5866 thread_eager_preemption(thread_t thread)
5867 {
5868 return (thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0;
5869 }
5870
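/* Flag a thread for eager preemption.  If it is the current thread, run
 * csw_check() immediately and block if a preemption AST results;
 * otherwise, if it is on core elsewhere, poke that processor with an AST
 * check so it notices the new flag.
 */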
5871 void
5872 thread_set_eager_preempt(thread_t thread)
5873 {
5874 spl_t x;
5875 processor_t p;
5876 ast_t ast = AST_NONE;
5877
5878 x = splsched();
5879 p = current_processor();
5880
5881 thread_lock(thread);
5882 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5883
5884 if (thread == current_thread()) {
5885 ast = csw_check(thread, p, AST_NONE);
5886 thread_unlock(thread);
5887 if (ast != AST_NONE) {
5888 (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
5889 }
5890 } else {
5891 p = thread->last_processor;
5892
5893 if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
5894 p->active_thread == thread) {
5895 cause_ast_check(p);
5896 }
5897
5898 thread_unlock(thread);
5899 }
5900
5901 splx(x);
5902 }
5903
5904 void
5905 thread_clear_eager_preempt(thread_t thread)
5906 {
5907 spl_t x;
5908
5909 x = splsched();
5910 thread_lock(thread);
5911
5912 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5913
5914 thread_unlock(thread);
5915 splx(x);
5916 }
5917
5918 /*
5919 * Scheduling statistics
5920 */
5921 void
5922 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5923 {
5924 struct sched_statistics *stats;
5925 boolean_t to_realtime = FALSE;
5926
5927 stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
5928 stats->csw_count++;
5929
5930 if (otherpri >= BASEPRI_REALTIME) {
5931 stats->rt_sched_count++;
5932 to_realtime = TRUE;
5933 }
5934
5935 if ((reasons & AST_PREEMPT) != 0) {
5936 stats->preempt_count++;
5937
5938 if (selfpri >= BASEPRI_REALTIME) {
5939 stats->preempted_rt_count++;
5940 }
5941
5942 if (to_realtime) {
5943 stats->preempted_by_rt_count++;
5944 }
5945 }
5946 }
5947
5948 void
5949 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
5950 {
5951 uint64_t timestamp = mach_absolute_time();
5952
5953 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5954 stats->last_change_timestamp = timestamp;
5955 }
5956
5957 /*
5958 * For calls from assembly code
5959 */
5960 #undef thread_wakeup
5961 void
5962 thread_wakeup(
5963 event_t x);
5964
5965 void
5966 thread_wakeup(
5967 event_t x)
5968 {
5969 thread_wakeup_with_result(x, THREAD_AWAKENED);
5970 }
5971
5972 boolean_t
5973 preemption_enabled(void)
5974 {
5975 return get_preemption_level() == 0 && ml_get_interrupts_enabled();
5976 }
5977
5978 static void
5979 sched_timer_deadline_tracking_init(void)
5980 {
5981 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5982 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5983 }
5984
5985 #if __arm__ || __arm64__
5986
5987 uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5988 uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
5989 bool perfcontrol_failsafe_active = false;
5990 bool perfcontrol_sleep_override = false;
5991
5992 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5993 uint64_t perfcontrol_failsafe_activation_time;
5994 uint64_t perfcontrol_failsafe_deactivation_time;
5995
5996 /* data covering who likely caused it and how long they ran */
5997 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5998 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
5999 int perfcontrol_failsafe_pid;
6000 uint64_t perfcontrol_failsafe_tid;
6001 uint64_t perfcontrol_failsafe_thread_timer_at_start;
6002 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
6003 uint32_t perfcontrol_failsafe_recommended_at_trigger;
6004
6005 /*
6006 * Perf controller calls here to update the recommended core bitmask.
6007 * If the failsafe is active, we don't immediately apply the new value.
6008 * Instead, we store the new request and use it after the failsafe deactivates.
6009 *
6010 * If the failsafe is not active, immediately apply the update.
6011 *
6012 * No scheduler locks are held, no other locks that the scheduler might
6013 * depend on are held, and interrupts are enabled.
6014 *
6015 * The prototype is currently in osfmk/arm/machine_routines.h.
6016 */
6017 void
6018 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
6019 {
6020 assert(preemption_enabled());
6021
6022 spl_t s = splsched();
6023 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6024
6025 perfcontrol_requested_recommended_cores = recommended_cores;
6026 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
6027
6028 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
6029 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
6030 } else {
6031 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6032 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
6033 perfcontrol_requested_recommended_cores,
6034 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
6035 }
6036
6037 simple_unlock(&sched_recommended_cores_lock);
6038 splx(s);
6039 }
6040
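/*
 * Recommend every core while the system goes to sleep; the requested
 * perf-controller and user masks are reinstated by
 * sched_restore_recommended_cores_after_sleep().
 */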
6041 void
6042 sched_override_recommended_cores_for_sleep(void)
6043 {
6044 spl_t s = splsched();
6045 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6046
6047 if (perfcontrol_sleep_override == false) {
6048 perfcontrol_sleep_override = true;
6049 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
6050 }
6051
6052 simple_unlock(&sched_recommended_cores_lock);
6053 splx(s);
6054 }
6055
6056 void
6057 sched_restore_recommended_cores_after_sleep(void)
6058 {
6059 spl_t s = splsched();
6060 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6061
6062 if (perfcontrol_sleep_override == true) {
6063 perfcontrol_sleep_override = false;
6064 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
6065 }
6066
6067 simple_unlock(&sched_recommended_cores_lock);
6068 splx(s);
6069 }
6070
6071 /*
6072 * Consider whether we need to activate the recommended cores failsafe
6073 *
6074 * Called from quantum timer interrupt context of a realtime thread
6075 * No scheduler locks are held, interrupts are disabled
6076 */
6077 void
6078 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
6079 {
6080 /*
6081 * Check if a realtime thread is starving the system
6082 * and bringing up non-recommended cores would help
6083 *
6084 * TODO: Is this the correct check for recommended == possible cores?
6085 * TODO: Validate the checks without the relevant lock are OK.
6086 */
6087
6088 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
6089 /* keep track of how long the responsible thread runs */
6090
6091 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6092
6093 if (perfcontrol_failsafe_active == TRUE &&
6094 cur_thread->thread_id == perfcontrol_failsafe_tid) {
6095 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
6096 timer_grab(&cur_thread->system_timer);
6097 }
6098
6099 simple_unlock(&sched_recommended_cores_lock);
6100
6101 /* we're already trying to solve the problem, so bail */
6102 return;
6103 }
6104
6105 /* The failsafe won't help if there are no more processors to enable */
6106 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
6107 return;
6108 }
6109
6110 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
6111
6112 /* Use the maintenance thread as our canary in the coal mine */
6113 thread_t m_thread = sched_maintenance_thread;
6114
6115 /* If it doesn't look bad, nothing to see here */
6116 if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
6117 return;
6118 }
6119
6120 /* It looks bad, take the lock to be sure */
6121 thread_lock(m_thread);
6122
6123 if (m_thread->runq == PROCESSOR_NULL ||
6124 (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
6125 m_thread->last_made_runnable_time >= too_long_ago) {
6126 /*
6127 * Maintenance thread is either on cpu or blocked, and
6128 * therefore wouldn't benefit from more cores
6129 */
6130 thread_unlock(m_thread);
6131 return;
6132 }
6133
6134 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
6135
6136 thread_unlock(m_thread);
6137
6138 /*
6139 * There are cores disabled at perfcontrol's recommendation, but the
6140 * system is so overloaded that the maintenance thread can't run.
6141 * That likely means that perfcontrol can't run either, so it can't fix
6142 * the recommendation. We have to kick in a failsafe to keep from starving.
6143 *
6144 * When the maintenance thread has been starved for too long,
6145 * ignore the recommendation from perfcontrol and light up all the cores.
6146 *
6147 * TODO: Consider weird states like boot, sleep, or debugger
6148 */
6149
6150 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6151
6152 if (perfcontrol_failsafe_active == TRUE) {
6153 simple_unlock(&sched_recommended_cores_lock);
6154 return;
6155 }
6156
6157 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6158 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
6159 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
6160
6161 perfcontrol_failsafe_active = TRUE;
6162 perfcontrol_failsafe_activation_time = mach_absolute_time();
6163 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
6164 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
6165
6166 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
6167 task_t task = cur_thread->task;
6168 perfcontrol_failsafe_pid = task_pid(task);
6169 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
6170
6171 perfcontrol_failsafe_tid = cur_thread->thread_id;
6172
6173 /* Blame the thread for time it has run recently */
6174 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
6175
6176 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
6177
6178 /* Compute the start time of the bad behavior in terms of the thread's on core time */
6179 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
6180 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
6181
6182 /* Ignore the previously recommended core configuration */
6183 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
6184
6185 simple_unlock(&sched_recommended_cores_lock);
6186 }
6187
6188 /*
6189 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
6190 *
6191 * Runs in the context of the maintenance thread, no locks held
6192 */
6193 static void
6194 sched_recommended_cores_maintenance(void)
6195 {
6196 /* Common case - no failsafe, nothing to be done here */
6197 if (__probable(perfcontrol_failsafe_active == FALSE)) {
6198 return;
6199 }
6200
6201 uint64_t ctime = mach_absolute_time();
6202
6203 boolean_t print_diagnostic = FALSE;
6204 char p_name[FAILSAFE_NAME_LEN] = "";
6205
6206 spl_t s = splsched();
6207 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6208
6209 /* Check again, under the lock, to avoid races */
6210 if (perfcontrol_failsafe_active == FALSE) {
6211 goto out;
6212 }
6213
6214 /*
6215 * Ensure that the other cores get another few ticks to run some threads.
6216 * If we don't have this hysteresis, the maintenance thread is the first
6217 * to run, and then it immediately kills the other cores.
6218 */
6219 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
6220 goto out;
6221 }
6222
6223 /* Capture some diagnostic state under the lock so we can print it out later */
6224
6225 int pid = perfcontrol_failsafe_pid;
6226 uint64_t tid = perfcontrol_failsafe_tid;
6227
6228 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
6229 perfcontrol_failsafe_thread_timer_at_start;
6230 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
6231 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
6232 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
6233 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
6234
6235 print_diagnostic = TRUE;
6236
6237 /* Deactivate the failsafe and reinstate the requested recommendation settings */
6238
6239 perfcontrol_failsafe_deactivation_time = ctime;
6240 perfcontrol_failsafe_active = FALSE;
6241
6242 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6243 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
6244 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
6245
6246 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
6247
6248 out:
6249 simple_unlock(&sched_recommended_cores_lock);
6250 splx(s);
6251
6252 if (print_diagnostic) {
6253 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
6254
6255 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
6256 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
6257
6258 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
6259 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
6260
6261 printf("recommended core failsafe kicked in for %lld ms "
6262 "likely due to %s[%d] thread 0x%llx spending "
6263 "%lld ms on cpu at realtime priority - "
6264 "new recommendation: 0x%x -> 0x%x\n",
6265 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
6266 rec_cores_before, rec_cores_after);
6267 }
6268 }
6269
6270 #endif /* __arm__ || __arm64__ */
6271
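/*
 * Honor a user request to enable or disable a processor by updating
 * usercontrol_requested_recommended_cores.  The applied set is the
 * intersection of the user mask and the perf controller's mask; on ARM
 * the update is deferred while the failsafe or sleep override is active.
 */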
6272 kern_return_t
6273 sched_processor_enable(processor_t processor, boolean_t enable)
6274 {
6275 assert(preemption_enabled());
6276
6277 spl_t s = splsched();
6278 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6279
6280 if (enable) {
6281 bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
6282 } else {
6283 bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
6284 }
6285
6286 #if __arm__ || __arm64__
6287 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
6288 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
6289 } else {
6290 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6291 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
6292 perfcontrol_requested_recommended_cores,
6293 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
6294 }
6295 #else /* __arm__ || __arm64__ */
6296 sched_update_recommended_cores(usercontrol_requested_recommended_cores);
6297 #endif /* !__arm__ || __arm64__ */
6298
6299 simple_unlock(&sched_recommended_cores_lock);
6300 splx(s);
6301
6302 return KERN_SUCCESS;
6303 }
6304
6305
6306 /*
6307 * Apply a new recommended cores mask to the processors it affects
6308 * Runs after considering failsafes and such
6309 *
6310 * Iterate over processors and update their ->is_recommended field.
6311 * If a processor is running, we let it drain out at its next
6312 * quantum expiration or blocking point. If a processor is idle, there
6313 * may be more work for it to do, so IPI it.
6314 *
6315 * interrupts disabled, sched_recommended_cores_lock is held
6316 */
6317 static void
6318 sched_update_recommended_cores(uint64_t recommended_cores)
6319 {
6320 processor_set_t pset, nset;
6321 processor_t processor;
6322 uint64_t needs_exit_idle_mask = 0x0;
6323 uint32_t avail_count;
6324
6325 processor = processor_list;
6326 pset = processor->processor_set;
6327
6328 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
6329 recommended_cores,
6330 #if __arm__ || __arm64__
6331 perfcontrol_failsafe_active, 0, 0);
6332 #else /* __arm__ || __arm64__ */
6333 0, 0, 0);
6334 #endif /* ! __arm__ || __arm64__ */
6335
6336 if (__builtin_popcountll(recommended_cores) == 0) {
6337 bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
6338 }
6339
6340 /* First set recommended cores */
6341 pset_lock(pset);
6342 avail_count = 0;
6343 do {
6344 nset = processor->processor_set;
6345 if (nset != pset) {
6346 pset_unlock(pset);
6347 pset = nset;
6348 pset_lock(pset);
6349 }
6350
6351 if (bit_test(recommended_cores, processor->cpu_id)) {
6352 processor->is_recommended = TRUE;
6353 bit_set(pset->recommended_bitmask, processor->cpu_id);
6354
6355 if (processor->state == PROCESSOR_IDLE) {
6356 if (processor != current_processor()) {
6357 bit_set(needs_exit_idle_mask, processor->cpu_id);
6358 }
6359 }
6360 if (processor->state != PROCESSOR_OFF_LINE) {
6361 avail_count++;
6362 SCHED(pset_made_schedulable)(processor, pset, false);
6363 }
6364 }
6365 } while ((processor = processor->processor_list) != NULL);
6366 pset_unlock(pset);
6367
6368 /* Now shutdown not recommended cores */
6369 processor = processor_list;
6370 pset = processor->processor_set;
6371
6372 pset_lock(pset);
6373 do {
6374 nset = processor->processor_set;
6375 if (nset != pset) {
6376 pset_unlock(pset);
6377 pset = nset;
6378 pset_lock(pset);
6379 }
6380
6381 if (!bit_test(recommended_cores, processor->cpu_id)) {
6382 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
6383
6384 processor->is_recommended = FALSE;
6385 bit_clear(pset->recommended_bitmask, processor->cpu_id);
6386
6387 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
6388 ipi_type = SCHED_IPI_IMMEDIATE;
6389 }
6390 SCHED(processor_queue_shutdown)(processor);
6391 /* pset unlocked */
6392
6393 SCHED(rt_queue_shutdown)(processor);
6394
6395 if (ipi_type != SCHED_IPI_NONE) {
6396 if (processor == current_processor()) {
6397 ast_on(AST_PREEMPT);
6398 } else {
6399 sched_ipi_perform(processor, ipi_type);
6400 }
6401 }
6402
6403 pset_lock(pset);
6404 }
6405 } while ((processor = processor->processor_list) != NULL);
6406
6407 processor_avail_count_user = avail_count;
6408 #if defined(__x86_64__)
6409 commpage_update_active_cpus();
6410 #endif
6411
6412 pset_unlock(pset);
6413
6414 /* Issue all pending IPIs now that the pset lock has been dropped */
6415 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
6416 processor = processor_array[cpuid];
6417 machine_signal_idle(processor);
6418 }
6419
6420 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
6421 needs_exit_idle_mask, 0, 0, 0);
6422 }
6423
6424 void
6425 thread_set_options(uint32_t thopt)
6426 {
6427 spl_t x;
6428 thread_t t = current_thread();
6429
6430 x = splsched();
6431 thread_lock(t);
6432
6433 t->options |= thopt;
6434
6435 thread_unlock(t);
6436 splx(x);
6437 }
6438
6439 void
6440 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
6441 {
6442 thread->pending_block_hint = block_hint;
6443 }
6444
6445 uint32_t
6446 qos_max_parallelism(int qos, uint64_t options)
6447 {
6448 return SCHED(qos_max_parallelism)(qos, options);
6449 }
6450
6451 uint32_t
6452 sched_qos_max_parallelism(__unused int qos, uint64_t options)
6453 {
6454 host_basic_info_data_t hinfo;
6455 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
6456 /* Query the machine layer for core information */
6457 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
6458 (host_info_t)&hinfo, &count);
6459 assert(kret == KERN_SUCCESS);
6460
6461 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
6462 return hinfo.logical_cpu;
6463 } else {
6464 return hinfo.physical_cpu;
6465 }
6466 }
6467
6468 int sched_allow_NO_SMT_threads = 1;
6469 bool
6470 thread_no_smt(thread_t thread)
6471 {
6472 return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT));
6473 }
6474
6475 bool
6476 processor_active_thread_no_smt(processor_t processor)
6477 {
6478 return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
6479 }
6480
6481 #if __arm64__
6482
6483 /*
6484 * Set up or replace old timer with new timer
6485 *
6486 * Returns true if it canceled the old timer, false if it did not
6487 */
6488 boolean_t
6489 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
6490 {
6491 /*
6492 * Exchange the old deadline for the new one; if the old deadline was
6493 * nonzero, the pending callback was cancelled, otherwise nothing was pending.
6494 */
6495
6496 return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
6497 relaxed) != 0;
6498 }
6499
6500 #endif /* __arm64__ */
6501
6502 #if CONFIG_SCHED_EDGE
6503
6504 #define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u
6505
6506 /*
6507 * sched_edge_pset_running_higher_bucket()
6508 *
6509 * Routine to calculate cumulative running counts for each scheduling
6510 * bucket. This effectively lets the load calculation determine whether a
6511 * cluster is running any threads at a QoS lower than the thread being
6512 * migrated, etc.
6513 */
6514
6515 static void
6516 sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
6517 {
6518 bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];
6519
6520 /* Edge Scheduler Optimization */
6521 for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
6522 sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
6523 for (sched_bucket_t bucket = cpu_bucket; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
6524 running_higher[bucket]++;
6525 }
6526 }
6527 }
6528
6529 /*
6530 * sched_update_pset_load_average()
6531 *
6532 * Updates the load average for each sched bucket for a cluster.
6533 * This routine must be called with the pset lock held.
6534 */
6535 void
6536 sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
6537 {
6538 if (pset->online_processor_count == 0) {
6539 /* Looks like the pset is not runnable any more; nothing to do here */
6540 return;
6541 }
6542
6543 /*
6544 * Edge Scheduler Optimization
6545 *
6546 * See if more callers of this routine can pass in timestamps to avoid the
6547 * mach_absolute_time() call here.
6548 */
6549
6550 if (!curtime) {
6551 curtime = mach_absolute_time();
6552 }
6553 uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
6554 int64_t delta_ticks = curtime - last_update;
6555 if (delta_ticks < 0) {
6556 return;
6557 }
6558
6559 uint64_t delta_nsecs = 0;
6560 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
6561
6562 if (__improbable(delta_nsecs > UINT32_MAX)) {
6563 delta_nsecs = UINT32_MAX;
6564 }
6565
6566 uint32_t running_higher[TH_BUCKET_SCHED_MAX] = {0};
6567 sched_edge_pset_running_higher_bucket(pset, running_higher);
6568
6569 for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
6570 uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
6571 uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
6572 uint32_t current_runq_depth = (sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) + rt_runq_count(pset) + running_higher[sched_bucket]) / pset->online_processor_count;
6573
6574 /*
6575 * For the new load average, multiply current_runq_depth by delta_nsecs (which results in a 32.0 value).
6576 * Since we want to maintain the load average as a 24.8 fixed-point value for precision, the
6577 * new load average needs to be shifted before it can be added to the old load average.
6578 */
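/*
 * With TC = SCHED_PSET_LOAD_EWMA_TC_NSECS and F = SCHED_PSET_LOAD_EWMA_FRACTION_BITS,
 * the steady-state update below works out to
 *
 *   load_avg' = (load_avg * TC + ((depth * delta_nsecs) << F)) / (delta_nsecs + TC)
 *
 * i.e. an EWMA whose weight on the new sample grows with the elapsed
 * interval relative to the 10ms time constant.
 */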
6579 uint64_t new_load_average_factor = (current_runq_depth * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
6580
6581 /*
6582 * For extremely parallel workloads, it is important that the load average on a cluster moves from zero to non-zero
6583 * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
6584 * when the system is already loaded; otherwise for an idle system use the latest load average immediately.
6585 */
6586 int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
6587 boolean_t load_uptick = (old_load_shifted == 0) && (current_runq_depth != 0);
6588 boolean_t load_downtick = (old_load_shifted != 0) && (current_runq_depth == 0);
6589 uint64_t load_average;
6590 if (load_uptick || load_downtick) {
6591 load_average = (current_runq_depth << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
6592 } else {
6593 /* Indicates a loaded system; use EWMA for load average calculation */
6594 load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
6595 }
6596 os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
6597 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
6598 }
6599 os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
6600 }
6601
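/*
 * Maintain a per-bucket EWMA of thread execution time (in microseconds)
 * for the cluster, using the same time constant as the load average:
 *
 *   avg' = (avg * TC + sample_us * delta_nsecs) / (delta_nsecs + TC)
 *
 * where TC is SCHED_PSET_LOAD_EWMA_TC_NSECS.  The update is a lockless
 * atomic RMW loop; a sample that races with a newer update is dropped.
 */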
6602 void
6603 sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
6604 {
6605 pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
6606 uint64_t avg_thread_execution_time = 0;
6607
6608 os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
6609 old_execution_time_packed.pset_execution_time_packed,
6610 new_execution_time_packed.pset_execution_time_packed, relaxed, {
6611 uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
6612 int64_t delta_ticks = curtime - last_update;
6613 if (delta_ticks < 0) {
6614 /*
6615 * It's possible that another CPU came in and updated the pset_execution_time
6616 * before this CPU could do it. Since the average execution time is meant to
6617 * be an approximate measure per cluster, ignore the older update.
6618 */
6619 os_atomic_rmw_loop_give_up(return );
6620 }
6621 uint64_t delta_nsecs = 0;
6622 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
6623
6624 uint64_t nanotime = 0;
6625 absolutetime_to_nanoseconds(execution_time, &nanotime);
6626 uint64_t execution_time_us = nanotime / NSEC_PER_USEC;
6627
6628 uint64_t old_execution_time = (old_execution_time_packed.pset_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
6629 uint64_t new_execution_time = (execution_time_us * delta_nsecs);
6630
6631 avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
6632 new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
6633 new_execution_time_packed.pset_execution_time_last_update = curtime;
6634 });
6635 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
6636 }
6637
6638 #else /* CONFIG_SCHED_EDGE */
6639
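/*
 * Non-edge schedulers keep a single load average per pset: the
 * instantaneous load (running CPUs plus runnable timeshare and realtime
 * threads, scaled by PSET_LOAD_NUMERATOR_SHIFT) is averaged 50/50 with
 * the previous value on every update.
 */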
6640 void
6641 sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
6642 {
6643 int non_rt_load = pset->pset_runq.count;
6644 int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
6645 int new_load_average = ((int)pset->load_average + load) >> 1;
6646
6647 pset->load_average = new_load_average;
6648 #if (DEVELOPMENT || DEBUG)
6649 #if __AMP__
6650 if (pset->pset_cluster_type == PSET_AMP_P) {
6651 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
6652 }
6653 #endif
6654 #endif
6655 }
6656
6657 void
6658 sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
6659 {
6660 }
6661 #endif /* CONFIG_SCHED_EDGE */
6662
6663 /* pset is locked */
6664 static bool
6665 processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
6666 {
6667 int cpuid = processor->cpu_id;
6668 #if defined(__x86_64__)
6669 if (sched_avoid_cpu0 && (cpuid == 0)) {
6670 return false;
6671 }
6672 #endif
6673
6674 cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
6675
6676 return bit_test(fasttrack_map, cpuid);
6677 }
6678
6679 /* pset is locked */
6680 static processor_t
6681 choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries)
6682 {
6683 #if defined(__x86_64__)
6684 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
6685 #else
6686 const bool avoid_cpu0 = false;
6687 #endif
6688
6689 cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
6690 if (skip_processor) {
6691 bit_clear(cpu_map, skip_processor->cpu_id);
6692 }
6693
6694 cpumap_t primary_map = cpu_map & pset->primary_map;
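/* When avoiding cpu0, rotate the map right by one so cpu0's bit lands
 * in the most significant position: lsb_first() then prefers any other
 * primary and falls back to cpu0 only when it is the sole candidate.
 * The (rotid + 1) & 63 below maps the rotated index back to a cpu id.
 */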
6695 if (avoid_cpu0) {
6696 primary_map = bit_ror64(primary_map, 1);
6697 }
6698
6699 int rotid = lsb_first(primary_map);
6700 if (rotid >= 0) {
6701 int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
6702
6703 processor_t processor = processor_array[cpuid];
6704
6705 return processor;
6706 }
6707
6708 if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
6709 goto out;
6710 }
6711
6712 /* Consider secondary processors */
6713 cpumap_t secondary_map = cpu_map & ~pset->primary_map;
6714 if (avoid_cpu0) {
6715 /* Also avoid cpu1 */
6716 secondary_map = bit_ror64(secondary_map, 2);
6717 }
6718 rotid = lsb_first(secondary_map);
6719 if (rotid >= 0) {
6720 int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;
6721
6722 processor_t processor = processor_array[cpuid];
6723
6724 return processor;
6725 }
6726
6727 out:
6728 if (skip_processor) {
6729 return PROCESSOR_NULL;
6730 }
6731
6732 /*
6733 * If we didn't find an obvious processor to choose, but there are still more CPUs
6734 * not already running realtime threads than realtime threads in the realtime run queue,
6735 * this thread belongs in this pset, so choose some other processor in this pset
6736 * to ensure the thread is enqueued here.
6737 */
6738 cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
6739 if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
6740 cpu_map = non_realtime_map;
6741 assert(cpu_map != 0);
6742 int cpuid = bit_first(cpu_map);
6743 assert(cpuid >= 0);
6744 return processor_array[cpuid];
6745 }
6746
6747 if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
6748 goto skip_secondaries;
6749 }
6750
6751 non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
6752 if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
6753 cpu_map = non_realtime_map;
6754 assert(cpu_map != 0);
6755 int cpuid = bit_first(cpu_map);
6756 assert(cpuid >= 0);
6757 return processor_array[cpuid];
6758 }
6759
6760 skip_secondaries:
6761 return PROCESSOR_NULL;
6762 }
6763
6764 /* pset is locked */
6765 static bool
6766 all_available_primaries_are_running_realtime_threads(processor_set_t pset)
6767 {
6768 cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
6769 return rt_runq_count(pset) > bit_count(cpu_map);
6770 }
6771
6772 #if defined(__x86_64__)
6773 /* pset is locked */
6774 static bool
6775 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map)
6776 {
6777 cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
6778 return rt_runq_count(pset) > bit_count(cpu_map);
6779 }
6780 #endif
6781
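/*
 * Decide whether this processor should pick up a realtime thread.  On
 * x86, cpu0 (and, with SMT, cpu1) takes realtime work only when the
 * other eligible CPUs are already saturated with realtime threads, and
 * secondary SMT processors only when sched_allow_rt_smt is set and all
 * available primaries are busy with realtime work.  Other platforms
 * always allow it.
 */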
6782 static bool
6783 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor)
6784 {
6785 bool ok_to_run_realtime_thread = true;
6786 #if defined(__x86_64__)
6787 if (sched_avoid_cpu0 && processor->cpu_id == 0) {
6788 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1);
6789 } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
6790 ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2);
6791 } else if (processor->processor_primary != processor) {
6792 ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset));
6793 }
6794 #else
6795 (void)pset;
6796 (void)processor;
6797 #endif
6798 return ok_to_run_realtime_thread;
6799 }
6800
6801 void
6802 sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
6803 {
6804 if (drop_lock) {
6805 pset_unlock(pset);
6806 }
6807 }
6808
6809 void
6810 thread_set_no_smt(bool set)
6811 {
6812 if (!system_is_SMT) {
6813 /* Not a machine that supports SMT */
6814 return;
6815 }
6816
6817 thread_t thread = current_thread();
6818
6819 spl_t s = splsched();
6820 thread_lock(thread);
6821 if (set) {
6822 thread->sched_flags |= TH_SFLAG_NO_SMT;
6823 }
6824 thread_unlock(thread);
6825 splx(s);
6826 }
6827
6828 bool
6829 thread_get_no_smt(void)
6830 {
6831 return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
6832 }
6833
6834 extern void task_set_no_smt(task_t);
6835 void
6836 task_set_no_smt(task_t task)
6837 {
6838 if (!system_is_SMT) {
6839 /* Not a machine that supports SMT */
6840 return;
6841 }
6842
6843 if (task == TASK_NULL) {
6844 task = current_task();
6845 }
6846
6847 task_lock(task);
6848 task->t_flags |= TF_NO_SMT;
6849 task_unlock(task);
6850 }
6851
6852 #if DEBUG || DEVELOPMENT
6853 extern void sysctl_task_set_no_smt(char no_smt);
6854 void
6855 sysctl_task_set_no_smt(char no_smt)
6856 {
6857 if (!system_is_SMT) {
6858 /* Not a machine that supports SMT */
6859 return;
6860 }
6861
6862 task_t task = current_task();
6863
6864 task_lock(task);
6865 if (no_smt == '1') {
6866 task->t_flags |= TF_NO_SMT;
6867 }
6868 task_unlock(task);
6869 }
6870
6871 extern char sysctl_task_get_no_smt(void);
6872 char
6873 sysctl_task_get_no_smt(void)
6874 {
6875 task_t task = current_task();
6876
6877 if (task->t_flags & TF_NO_SMT) {
6878 return '1';
6879 }
6880 return '0';
6881 }
6882 #endif /* DEVELOPMENT || DEBUG */
6883
6884
6885 __private_extern__ void
6886 thread_bind_cluster_type(thread_t thread, char cluster_type, bool soft_bound)
6887 {
6888 #if __AMP__
6889 spl_t s = splsched();
6890 thread_lock(thread);
6891 thread->sched_flags &= ~(TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY | TH_SFLAG_BOUND_SOFT);
6892 if (soft_bound) {
6893 thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
6894 }
6895 switch (cluster_type) {
6896 case 'e':
6897 case 'E':
6898 thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
6899 break;
6900 case 'p':
6901 case 'P':
6902 thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
6903 break;
6904 default:
6905 break;
6906 }
6907 thread_unlock(thread);
6908 splx(s);
6909
6910 if (thread == current_thread()) {
6911 thread_block(THREAD_CONTINUE_NULL);
6912 }
6913 #else /* __AMP__ */
6914 (void)thread;
6915 (void)cluster_type;
6916 (void)soft_bound;
6917 #endif /* __AMP__ */
6918 }