apple/xnu.git: osfmk/kern/sched_prim.c (blob e5a3d2e2eac35616ee345d60d62e0a4e2ac3a951)
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/machlimits.h>
79 #include <machine/atomic.h>
80
81 #include <machine/commpage.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/backtrace.h>
85 #include <kern/clock.h>
86 #include <kern/counters.h>
87 #include <kern/cpu_number.h>
88 #include <kern/cpu_data.h>
89 #include <kern/smp.h>
90 #include <kern/debug.h>
91 #include <kern/macro_help.h>
92 #include <kern/machine.h>
93 #include <kern/misc_protos.h>
94 #if MONOTONIC
95 #include <kern/monotonic.h>
96 #endif /* MONOTONIC */
97 #include <kern/processor.h>
98 #include <kern/queue.h>
99 #include <kern/sched.h>
100 #include <kern/sched_prim.h>
101 #include <kern/sfi.h>
102 #include <kern/syscall_subr.h>
103 #include <kern/task.h>
104 #include <kern/thread.h>
105 #include <kern/ledger.h>
106 #include <kern/timer_queue.h>
107 #include <kern/waitq.h>
108 #include <kern/policy_internal.h>
109 #include <kern/cpu_quiesce.h>
110
111 #include <vm/pmap.h>
112 #include <vm/vm_kern.h>
113 #include <vm/vm_map.h>
114 #include <vm/vm_pageout.h>
115
116 #include <mach/sdt.h>
117 #include <mach/mach_host.h>
118 #include <mach/host_info.h>
119
120 #include <sys/kdebug.h>
121 #include <kperf/kperf.h>
122 #include <kern/kpc.h>
123 #include <san/kasan.h>
124 #include <kern/pms.h>
125 #include <kern/host.h>
126 #include <stdatomic.h>
127
128 int
129 rt_runq_count(processor_set_t pset)
130 {
131 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
132 }
133
134 void
135 rt_runq_count_incr(processor_set_t pset)
136 {
137 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
138 }
139
140 void
141 rt_runq_count_decr(processor_set_t pset)
142 {
143 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
144 }
145
146 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
147 int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
148
149 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
150 int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
151
152 #define MAX_UNSAFE_QUANTA 800
153 int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
154
155 #define MAX_POLL_QUANTA 2
156 int max_poll_quanta = MAX_POLL_QUANTA;
157
158 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
159 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
160
161 uint64_t max_poll_computation;
162
163 uint64_t max_unsafe_computation;
164 uint64_t sched_safe_duration;
165
166 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
167
168 uint32_t std_quantum;
169 uint32_t min_std_quantum;
170 uint32_t bg_quantum;
171
172 uint32_t std_quantum_us;
173 uint32_t bg_quantum_us;
174
175 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
176
177 uint32_t thread_depress_time;
178 uint32_t default_timeshare_computation;
179 uint32_t default_timeshare_constraint;
180
181 uint32_t max_rt_quantum;
182 uint32_t min_rt_quantum;
183
184 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
185
186 unsigned sched_tick;
187 uint32_t sched_tick_interval;
188
189 /* Timeshare load calculation interval (15ms) */
190 uint32_t sched_load_compute_interval_us = 15000;
191 uint64_t sched_load_compute_interval_abs;
192 static _Atomic uint64_t sched_load_compute_deadline;
193
194 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
195 uint32_t sched_fixed_shift;
196
197 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
198
199 /* Allow foreground to decay past default to resolve inversions */
200 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
201 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
202
203 /* Defaults for timer deadline profiling */
204 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
205 * 2ms */
206 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
207 * <= 5ms */
208
209 uint64_t timer_deadline_tracking_bin_1;
210 uint64_t timer_deadline_tracking_bin_2;
211
212 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
213
214 thread_t sched_maintenance_thread;
215
216 /* interrupts disabled lock to guard recommended cores state */
217 decl_simple_lock_data(static, sched_recommended_cores_lock);
218 static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
219 static void sched_update_recommended_cores(uint64_t recommended_cores);
220
221 #if __arm__ || __arm64__
222 static void sched_recommended_cores_maintenance(void);
223 uint64_t perfcontrol_failsafe_starvation_threshold;
224 extern char *proc_name_address(struct proc *p);
225 #endif /* __arm__ || __arm64__ */
226
227 uint64_t sched_one_second_interval;
228
229 /* Forwards */
230
231 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
232
233 static void load_shift_init(void);
234 static void preempt_pri_init(void);
235
236 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
237
238 #if CONFIG_SCHED_IDLE_IN_PLACE
239 static thread_t thread_select_idle(
240 thread_t thread,
241 processor_t processor);
242 #endif
243
244 thread_t processor_idle(
245 thread_t thread,
246 processor_t processor);
247
248 static ast_t
249 csw_check_locked(
250 thread_t thread,
251 processor_t processor,
252 processor_set_t pset,
253 ast_t check_reason);
254
255 static void processor_setrun(
256 processor_t processor,
257 thread_t thread,
258 integer_t options);
259
260 static void
261 sched_realtime_timebase_init(void);
262
263 static void
264 sched_timer_deadline_tracking_init(void);
265
266 #if DEBUG
267 extern int debug_task;
268 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
269 #else
270 #define TLOG(a, fmt, args...) do {} while (0)
271 #endif
272
273 static processor_t
274 thread_bind_internal(
275 thread_t thread,
276 processor_t processor);
277
278 static void
279 sched_vm_group_maintenance(void);
280
281 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
282 int8_t sched_load_shifts[NRQS];
283 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS)];
284 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
285
286 const struct sched_dispatch_table *sched_current_dispatch = NULL;
287
288 /*
289 * Statically allocate a buffer to hold the longest possible
290 * scheduler description string, as currently implemented.
291 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
292 * to export to userspace via sysctl(3). If either version
293 * changes, update the other.
294 *
295 * Note that in addition to being an upper bound on the strings
296 * in the kernel, it's also an exact parameter to PE_get_default(),
297 * which interrogates the device tree on some platforms. That
298 * API requires the caller know the exact size of the device tree
299 * property, so we need both a legacy size (32) and the current size
300 * (48) to deal with old and new device trees. The device tree property
301 * is similarly padded to a fixed size so that the same kernel image
302 * can run on multiple devices with different schedulers configured
303 * in the device tree.
304 */
305 char sched_string[SCHED_STRING_MAX_LENGTH];
306
307 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
308
309 /* Global flag which indicates whether Background Stepper Context is enabled */
310 static int cpu_throttle_enabled = 1;
311
312 #if DEBUG
313
314 /* Since using the indirect function dispatch table has a negative impact on
315 * context switch performance, only allow DEBUG kernels to use that mechanism.
316 */
317 static void
318 sched_init_override(void)
319 {
320 char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };
321
322 /* Check for runtime selection of the scheduler algorithm */
323 if (!PE_parse_boot_argn("sched", sched_arg, sizeof(sched_arg))) {
324 sched_arg[0] = '\0';
325 }
326 if (strlen(sched_arg) > 0) {
327 if (0) {
328 /* Allow pattern below */
329 #if defined(CONFIG_SCHED_TRADITIONAL)
330 } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
331 sched_current_dispatch = &sched_traditional_dispatch;
332 } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
333 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
334 #endif
335 #if defined(CONFIG_SCHED_MULTIQ)
336 } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
337 sched_current_dispatch = &sched_multiq_dispatch;
338 } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
339 sched_current_dispatch = &sched_dualq_dispatch;
340 #endif
341 } else {
342 #if defined(CONFIG_SCHED_TRADITIONAL)
343 printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
344 printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
345 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
346 #else
347 panic("Unrecognized scheduler algorithm: %s", sched_arg);
348 #endif
349 }
350 kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
351 } else {
352 #if defined(CONFIG_SCHED_MULTIQ)
353 sched_current_dispatch = &sched_dualq_dispatch;
354 #elif defined(CONFIG_SCHED_TRADITIONAL)
355 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
356 #else
357 #error No default scheduler implementation
358 #endif
359 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
360 }
361 }
362
363 #endif /* DEBUG */
364
365 void
366 sched_init(void)
367 {
368 #if DEBUG
369 sched_init_override();
370 #else /* DEBUG */
371 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
372 #endif /* DEBUG */
373
374 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
375 /* No boot-args, check in device tree */
376 if (!PE_get_default("kern.sched_pri_decay_limit",
377 &sched_pri_decay_band_limit,
378 sizeof(sched_pri_decay_band_limit))) {
379 /* Allow decay all the way to normal limits */
380 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
381 }
382 }
383
384 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
385
386 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
387 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
388 }
389 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
390
391 cpu_quiescent_counter_init();
392
393 SCHED(init)();
394 SCHED(rt_init)(&pset0);
395 sched_timer_deadline_tracking_init();
396
397 SCHED(pset_init)(&pset0);
398 SCHED(processor_init)(master_processor);
399 }
400
401 void
402 sched_timebase_init(void)
403 {
404 uint64_t abstime;
405
406 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
407 sched_one_second_interval = abstime;
408
409 SCHED(timebase_init)();
410 sched_realtime_timebase_init();
411 }
412
413 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
414
415 void
416 sched_timeshare_init(void)
417 {
418 /*
419 * Calculate the timeslicing quantum
420 * in us.
421 */
422 if (default_preemption_rate < 1) {
423 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
424 }
425 std_quantum_us = (1000 * 1000) / default_preemption_rate;
426
427 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
428
429 if (default_bg_preemption_rate < 1) {
430 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
431 }
432 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
433
434 printf("standard background quantum is %d us\n", bg_quantum_us);
435
436 load_shift_init();
437 preempt_pri_init();
438 sched_tick = 0;
439 }
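/*
 * Worked example (illustrative sketch, not part of the upstream file): with
 * the default rates defined above, the arithmetic in sched_timeshare_init()
 * yields a 10 ms standard quantum and a 2.5 ms background quantum. The
 * function name below is hypothetical.
 */
#if 0   /* example only */
static void
sched_quantum_example(void)
{
	/* 1,000,000 us per second / 100 preemptions per second = 10,000 us (10 ms) */
	uint32_t example_std_quantum_us = (1000 * 1000) / DEFAULT_PREEMPTION_RATE;

	/* 1,000,000 us per second / 400 preemptions per second = 2,500 us (2.5 ms) */
	uint32_t example_bg_quantum_us = (1000 * 1000) / DEFAULT_BG_PREEMPTION_RATE;

	kprintf("example quanta: std %u us, bg %u us\n",
	    example_std_quantum_us, example_bg_quantum_us);
}
#endif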
440
441 void
442 sched_timeshare_timebase_init(void)
443 {
444 uint64_t abstime;
445 uint32_t shift;
446
447 /* standard timeslicing quantum */
448 clock_interval_to_absolutetime_interval(
449 std_quantum_us, NSEC_PER_USEC, &abstime);
450 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
451 std_quantum = (uint32_t)abstime;
452
453 /* smallest remaining quantum (250 us) */
454 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
455 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
456 min_std_quantum = (uint32_t)abstime;
457
458 /* quantum for background tasks */
459 clock_interval_to_absolutetime_interval(
460 bg_quantum_us, NSEC_PER_USEC, &abstime);
461 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
462 bg_quantum = (uint32_t)abstime;
463
464 /* scheduler tick interval */
465 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
466 NSEC_PER_USEC, &abstime);
467 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
468 sched_tick_interval = (uint32_t)abstime;
469
470 /* timeshare load calculation interval & deadline initialization */
471 clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
472 sched_load_compute_deadline = sched_load_compute_interval_abs;
473
474 /*
475 * Compute conversion factor from usage to
476 * timesharing priorities with 5/8 ** n aging.
477 */
478 abstime = (abstime * 5) / 3;
479 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
480 abstime >>= 1;
481 }
482 sched_fixed_shift = shift;
483
484 for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
485 sched_pri_shifts[i] = INT8_MAX;
486 }
487
488 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
489 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
490
491 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
492 thread_depress_time = 1 * std_quantum;
493 default_timeshare_computation = std_quantum / 2;
494 default_timeshare_constraint = std_quantum;
495
496 #if __arm__ || __arm64__
497 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
498 #endif /* __arm__ || __arm64__ */
499 }
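/*
 * Illustrative arithmetic for the conversion-factor loop above (assumptions:
 * SCHED_TICK_SHIFT is 3, i.e. a 125 ms scheduler tick, and an absolute-time
 * unit of 1 ns): abstime enters the loop as roughly 125,000,000 * 5 / 3, or
 * about 208,333,333, which takes 23 right shifts to fall to
 * <= BASEPRI_DEFAULT (31), so sched_fixed_shift would come out as 23 on such
 * a configuration. Platforms with a different timebase or tick shift arrive
 * at a different value.
 */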
500
501 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
502
503 void
504 pset_rt_init(processor_set_t pset)
505 {
506 rt_lock_init(pset);
507
508 pset->rt_runq.count = 0;
509 queue_init(&pset->rt_runq.queue);
510 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
511 }
512
513 rt_queue_t
514 sched_rtglobal_runq(processor_set_t pset)
515 {
516 (void)pset;
517
518 return &pset0.rt_runq;
519 }
520
521 void
522 sched_rtglobal_init(processor_set_t pset)
523 {
524 if (pset == &pset0) {
525 return pset_rt_init(pset);
526 }
527
528 /* Only pset0 rt_runq is used, so make it easy to detect
529 * buggy accesses to others.
530 */
531 memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
532 }
533
534 void
535 sched_rtglobal_queue_shutdown(processor_t processor)
536 {
537 (void)processor;
538 }
539
540 static void
541 sched_realtime_timebase_init(void)
542 {
543 uint64_t abstime;
544
545 /* smallest rt computation (50 us) */

546 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
547 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
548 min_rt_quantum = (uint32_t)abstime;
549
550 /* maximum rt computation (50 ms) */
551 clock_interval_to_absolutetime_interval(
552 50, 1000 * NSEC_PER_USEC, &abstime);
553 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
554 max_rt_quantum = (uint32_t)abstime;
555 }
556
557 void
558 sched_check_spill(processor_set_t pset, thread_t thread)
559 {
560 (void)pset;
561 (void)thread;
562
563 return;
564 }
565
566 bool
567 sched_thread_should_yield(processor_t processor, thread_t thread)
568 {
569 (void)thread;
570
571 return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
572 }
573
574 /* Default implementations of .steal_thread_enabled */
575 bool
576 sched_steal_thread_DISABLED(processor_set_t pset)
577 {
578 (void)pset;
579 return false;
580 }
581
582 bool
583 sched_steal_thread_enabled(processor_set_t pset)
584 {
585 return pset->node->pset_count > 1;
586 }
587
588 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
589
590 /*
591 * Set up values for timeshare
592 * loading factors.
593 */
594 static void
595 load_shift_init(void)
596 {
597 int8_t k, *p = sched_load_shifts;
598 uint32_t i, j;
599
600 uint32_t sched_decay_penalty = 1;
601
602 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
603 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
604 }
605
606 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
607 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
608 }
609
610 if (sched_decay_penalty == 0) {
611 /*
612 * There is no penalty for timeshare threads for using too much
613 * CPU, so set all load shifts to INT8_MIN. Even under high load,
614 * sched_pri_shift will be >INT8_MAX, and there will be no
615 * penalty applied to threads (nor will sched_usage be updated per
616 * thread).
617 */
618 for (i = 0; i < NRQS; i++) {
619 sched_load_shifts[i] = INT8_MIN;
620 }
621
622 return;
623 }
624
625 *p++ = INT8_MIN; *p++ = 0;
626
627 /*
628 * For a given system load "i", the per-thread priority
629 * penalty per quantum of CPU usage is ~2^k priority
630 * levels. "sched_decay_penalty" can cause more
631 * array entries to be filled with smaller "k" values
632 */
633 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
634 for (j <<= 1; (i < j) && (i < NRQS); ++i) {
635 *p++ = k;
636 }
637 }
638 }
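/*
 * Illustrative result (with the default sched_decay_penalty of 1): the table
 * works out to sched_load_shifts[0] == INT8_MIN, [1] == 0, [2..3] == 1,
 * [4..7] == 2, [8..15] == 3, and so on, i.e. roughly floor(log2(load)), so
 * each doubling of the runnable load roughly doubles the per-quantum
 * priority penalty described in the comment above.
 */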
639
640 static void
641 preempt_pri_init(void)
642 {
643 bitmap_t *p = sched_preempt_pri;
644
645 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
646 bitmap_set(p, i);
647 }
648
649 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
650 bitmap_set(p, i);
651 }
652 }
653
654 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
655
656 /*
657 * Thread wait timer expiration.
658 */
659 void
660 thread_timer_expire(
661 void *p0,
662 __unused void *p1)
663 {
664 thread_t thread = p0;
665 spl_t s;
666
667 assert_thread_magic(thread);
668
669 s = splsched();
670 thread_lock(thread);
671 if (--thread->wait_timer_active == 0) {
672 if (thread->wait_timer_is_set) {
673 thread->wait_timer_is_set = FALSE;
674 clear_wait_internal(thread, THREAD_TIMED_OUT);
675 }
676 }
677 thread_unlock(thread);
678 splx(s);
679 }
680
681 /*
682 * thread_unblock:
683 *
684 * Unblock thread on wake up.
685 *
686 * Returns TRUE if the thread should now be placed on the runqueue.
687 *
688 * Thread must be locked.
689 *
690 * Called at splsched().
691 */
692 boolean_t
693 thread_unblock(
694 thread_t thread,
695 wait_result_t wresult)
696 {
697 boolean_t ready_for_runq = FALSE;
698 thread_t cthread = current_thread();
699 uint32_t new_run_count;
700 int old_thread_state;
701
702 /*
703 * Set wait_result.
704 */
705 thread->wait_result = wresult;
706
707 /*
708 * Cancel pending wait timer.
709 */
710 if (thread->wait_timer_is_set) {
711 if (timer_call_cancel(&thread->wait_timer)) {
712 thread->wait_timer_active--;
713 }
714 thread->wait_timer_is_set = FALSE;
715 }
716
717 /*
718 * Update scheduling state: not waiting,
719 * set running.
720 */
721 old_thread_state = thread->state;
722 thread->state = (old_thread_state | TH_RUN) &
723 ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT);
724
725 if ((old_thread_state & TH_RUN) == 0) {
726 uint64_t ctime = mach_approximate_time();
727 thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
728 timer_start(&thread->runnable_timer, ctime);
729
730 ready_for_runq = TRUE;
731
732 if (old_thread_state & TH_WAIT_REPORT) {
733 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
734 }
735
736 /* Update the runnable thread count */
737 new_run_count = sched_run_incr(thread);
738 } else {
739 /*
740 * Either the thread is idling in place on another processor,
741 * or it hasn't finished context switching yet.
742 */
743 #if CONFIG_SCHED_IDLE_IN_PLACE
744 if (thread->state & TH_IDLE) {
745 processor_t processor = thread->last_processor;
746
747 if (processor != current_processor()) {
748 machine_signal_idle(processor);
749 }
750 }
751 #else
752 assert((thread->state & TH_IDLE) == 0);
753 #endif
754 /*
755 * The run count is only dropped after the context switch completes
756 * and the thread is still waiting, so we should not run_incr here
757 */
758 new_run_count = sched_run_buckets[TH_BUCKET_RUN];
759 }
760
761
762 /*
763 * Calculate deadline for real-time threads.
764 */
765 if (thread->sched_mode == TH_MODE_REALTIME) {
766 uint64_t ctime;
767
768 ctime = mach_absolute_time();
769 thread->realtime.deadline = thread->realtime.constraint + ctime;
770 }
771
772 /*
773 * Clear old quantum, fail-safe computation, etc.
774 */
775 thread->quantum_remaining = 0;
776 thread->computation_metered = 0;
777 thread->reason = AST_NONE;
778 thread->block_hint = kThreadWaitNone;
779
780 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
781 * We also account for "double hop" thread signaling via
782 * the thread callout infrastructure.
783 * DRK: consider removing the callout wakeup counters in the future;
784 * they're present for verification at the moment.
785 */
786 boolean_t aticontext, pidle;
787 ml_get_power_state(&aticontext, &pidle);
788
789 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
790 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
791
792 uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);
793
794 if (ttd) {
795 if (ttd <= timer_deadline_tracking_bin_1) {
796 thread->thread_timer_wakeups_bin_1++;
797 } else if (ttd <= timer_deadline_tracking_bin_2) {
798 thread->thread_timer_wakeups_bin_2++;
799 }
800 }
801
802 ledger_credit_thread(thread, thread->t_ledger,
803 task_ledgers.interrupt_wakeups, 1);
804 if (pidle) {
805 ledger_credit_thread(thread, thread->t_ledger,
806 task_ledgers.platform_idle_wakeups, 1);
807 }
808 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
809 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
810 if (cthread->callout_woken_from_icontext) {
811 ledger_credit_thread(thread, thread->t_ledger,
812 task_ledgers.interrupt_wakeups, 1);
813 thread->thread_callout_interrupt_wakeups++;
814
815 if (cthread->callout_woken_from_platform_idle) {
816 ledger_credit_thread(thread, thread->t_ledger,
817 task_ledgers.platform_idle_wakeups, 1);
818 thread->thread_callout_platform_idle_wakeups++;
819 }
820
821 cthread->callout_woke_thread = TRUE;
822 }
823 }
824
825 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
826 thread->callout_woken_from_icontext = aticontext;
827 thread->callout_woken_from_platform_idle = pidle;
828 thread->callout_woke_thread = FALSE;
829 }
830
831 #if KPERF
832 if (ready_for_runq) {
833 kperf_make_runnable(thread, aticontext);
834 }
835 #endif /* KPERF */
836
837 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
838 MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
839 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
840 sched_run_buckets[TH_BUCKET_RUN], 0);
841
842 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
843
844 return ready_for_runq;
845 }
846
847 /*
848 * Routine: thread_go
849 * Purpose:
850 * Unblock and dispatch thread.
851 * Conditions:
852 * thread lock held, IPC locks may be held.
853 * thread must have been pulled from wait queue under same lock hold.
854 * thread must have been waiting
855 * Returns:
856 * KERN_SUCCESS - Thread was set running
857 *
858 * TODO: This should return void
859 */
860 kern_return_t
861 thread_go(
862 thread_t thread,
863 wait_result_t wresult)
864 {
865 assert_thread_magic(thread);
866
867 assert(thread->at_safe_point == FALSE);
868 assert(thread->wait_event == NO_EVENT64);
869 assert(thread->waitq == NULL);
870
871 assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
872 assert(thread->state & TH_WAIT);
873
874
875 if (thread_unblock(thread, wresult)) {
876 #if SCHED_TRACE_THREAD_WAKEUPS
877 backtrace(&thread->thread_wakeup_bt[0],
878 (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)));
879 #endif
880 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
881 }
882
883 return KERN_SUCCESS;
884 }
885
886 /*
887 * Routine: thread_mark_wait_locked
888 * Purpose:
889 * Mark a thread as waiting. If, given the circumstances,
890 * it doesn't want to wait (i.e. already aborted), then
891 * indicate that in the return value.
892 * Conditions:
893 * at splsched() and thread is locked.
894 */
895 __private_extern__
896 wait_result_t
897 thread_mark_wait_locked(
898 thread_t thread,
899 wait_interrupt_t interruptible_orig)
900 {
901 boolean_t at_safe_point;
902 wait_interrupt_t interruptible = interruptible_orig;
903
904 assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
905
906 /*
907 * The thread may have certain types of interrupts/aborts masked
908 * off. Even if the wait location says these types of interrupts
909 * are OK, we have to honor mask settings (outer-scoped code may
910 * not be able to handle aborts at the moment).
911 */
912 interruptible &= TH_OPT_INTMASK;
913 if (interruptible > (thread->options & TH_OPT_INTMASK)) {
914 interruptible = thread->options & TH_OPT_INTMASK;
915 }
916
917 at_safe_point = (interruptible == THREAD_ABORTSAFE);
918
919 if (interruptible == THREAD_UNINT ||
920 !(thread->sched_flags & TH_SFLAG_ABORT) ||
921 (!at_safe_point &&
922 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
923 if (!(thread->state & TH_TERMINATE)) {
924 DTRACE_SCHED(sleep);
925 }
926
927 int state_bits = TH_WAIT;
928 if (!interruptible) {
929 state_bits |= TH_UNINT;
930 }
931 if (thread->sched_call) {
932 wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
933 if (is_kerneltask(thread->task)) {
934 mask = THREAD_WAIT_NOREPORT_KERNEL;
935 }
936 if ((interruptible_orig & mask) == 0) {
937 state_bits |= TH_WAIT_REPORT;
938 }
939 }
940 thread->state |= state_bits;
941 thread->at_safe_point = at_safe_point;
942
943 /* TODO: pass this through assert_wait instead, have
944 * assert_wait just take a struct as an argument */
945 assert(!thread->block_hint);
946 thread->block_hint = thread->pending_block_hint;
947 thread->pending_block_hint = kThreadWaitNone;
948
949 return thread->wait_result = THREAD_WAITING;
950 } else {
951 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
952 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
953 }
954 }
955 thread->pending_block_hint = kThreadWaitNone;
956
957 return thread->wait_result = THREAD_INTERRUPTED;
958 }
959
960 /*
961 * Routine: thread_interrupt_level
962 * Purpose:
963 * Set the maximum interruptible state for the
964 * current thread. The effective value of any
965 * interruptible flag passed into assert_wait
966 * will never exceed this.
967 *
968 * Useful for code that must not be interrupted,
969 * but which calls code that doesn't know that.
970 * Returns:
971 * The old interrupt level for the thread.
972 */
973 __private_extern__
974 wait_interrupt_t
975 thread_interrupt_level(
976 wait_interrupt_t new_level)
977 {
978 thread_t thread = current_thread();
979 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
980
981 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
982
983 return result;
984 }
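/*
 * Illustrative usage sketch (the caller and helper below are hypothetical):
 * raise the interrupt floor to THREAD_UNINT around a call into code that may
 * assert_wait() with a more permissive level, then restore the previous
 * level on the way out.
 */
#if 0   /* example only */
extern void some_subsystem_call_that_may_block(void);  /* hypothetical */

static void
uninterruptible_section_example(void)
{
	wait_interrupt_t previous = thread_interrupt_level(THREAD_UNINT);

	/* Any assert_wait() performed in here is clamped to THREAD_UNINT. */
	some_subsystem_call_that_may_block();

	thread_interrupt_level(previous);
}
#endif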
985
986 /*
987 * assert_wait:
988 *
989 * Assert that the current thread is about to go to
990 * sleep until the specified event occurs.
991 */
992 wait_result_t
993 assert_wait(
994 event_t event,
995 wait_interrupt_t interruptible)
996 {
997 if (__improbable(event == NO_EVENT)) {
998 panic("%s() called with NO_EVENT", __func__);
999 }
1000
1001 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1002 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1003 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
1004
1005 struct waitq *waitq;
1006 waitq = global_eventq(event);
1007 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
1008 }
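/*
 * Illustrative sketch of the wait/wakeup pairing built on assert_wait()
 * (the shared flag and both functions below are hypothetical; real callers
 * normally protect the condition with a lock): the waiter asserts the wait,
 * re-checks its condition, and only then blocks; the waker updates the
 * condition before issuing the wakeup.
 */
#if 0   /* example only */
static int example_condition;

static void
example_waiter(void)
{
	while (!example_condition) {
		assert_wait((event_t)&example_condition, THREAD_UNINT);
		if (example_condition) {
			/* Condition became true after the assert; abandon the wait. */
			clear_wait(current_thread(), THREAD_AWAKENED);
			break;
		}
		thread_block(THREAD_CONTINUE_NULL);
	}
}

static void
example_waker(void)
{
	example_condition = 1;
	thread_wakeup((event_t)&example_condition);
}
#endif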
1009
1010 /*
1011 * assert_wait_queue:
1012 *
1013 * Return the global waitq for the specified event
1014 */
1015 struct waitq *
1016 assert_wait_queue(
1017 event_t event)
1018 {
1019 return global_eventq(event);
1020 }
1021
1022 wait_result_t
1023 assert_wait_timeout(
1024 event_t event,
1025 wait_interrupt_t interruptible,
1026 uint32_t interval,
1027 uint32_t scale_factor)
1028 {
1029 thread_t thread = current_thread();
1030 wait_result_t wresult;
1031 uint64_t deadline;
1032 spl_t s;
1033
1034 if (__improbable(event == NO_EVENT)) {
1035 panic("%s() called with NO_EVENT", __func__);
1036 }
1037
1038 struct waitq *waitq;
1039 waitq = global_eventq(event);
1040
1041 s = splsched();
1042 waitq_lock(waitq);
1043
1044 clock_interval_to_deadline(interval, scale_factor, &deadline);
1045
1046 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1047 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1048 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1049
1050 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1051 interruptible,
1052 TIMEOUT_URGENCY_SYS_NORMAL,
1053 deadline, TIMEOUT_NO_LEEWAY,
1054 thread);
1055
1056 waitq_unlock(waitq);
1057 splx(s);
1058 return wresult;
1059 }
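/*
 * Illustrative timed-wait sketch (hypothetical event and function): after
 * the assert_wait_timeout()/thread_block() pair, the wait result
 * distinguishes a wakeup from a timeout.
 */
#if 0   /* example only */
static int example_flag;

static wait_result_t
example_timed_wait(void)
{
	wait_result_t wres;

	wres = assert_wait_timeout((event_t)&example_flag, THREAD_ABORTSAFE,
	    10, 1000 * NSEC_PER_USEC);          /* 10 ms timeout */
	if (wres == THREAD_WAITING) {
		wres = thread_block(THREAD_CONTINUE_NULL);
	}

	if (wres == THREAD_TIMED_OUT) {
		/* No thread_wakeup() arrived within 10 ms. */
		kprintf("example: wait timed out\n");
	}
	return wres;
}
#endif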
1060
1061 wait_result_t
1062 assert_wait_timeout_with_leeway(
1063 event_t event,
1064 wait_interrupt_t interruptible,
1065 wait_timeout_urgency_t urgency,
1066 uint32_t interval,
1067 uint32_t leeway,
1068 uint32_t scale_factor)
1069 {
1070 thread_t thread = current_thread();
1071 wait_result_t wresult;
1072 uint64_t deadline;
1073 uint64_t abstime;
1074 uint64_t slop;
1075 uint64_t now;
1076 spl_t s;
1077
1078 if (__improbable(event == NO_EVENT)) {
1079 panic("%s() called with NO_EVENT", __func__);
1080 }
1081
1082 now = mach_absolute_time();
1083 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1084 deadline = now + abstime;
1085
1086 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1087
1088 struct waitq *waitq;
1089 waitq = global_eventq(event);
1090
1091 s = splsched();
1092 waitq_lock(waitq);
1093
1094 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1095 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1096 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1097
1098 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1099 interruptible,
1100 urgency, deadline, slop,
1101 thread);
1102
1103 waitq_unlock(waitq);
1104 splx(s);
1105 return wresult;
1106 }
1107
1108 wait_result_t
1109 assert_wait_deadline(
1110 event_t event,
1111 wait_interrupt_t interruptible,
1112 uint64_t deadline)
1113 {
1114 thread_t thread = current_thread();
1115 wait_result_t wresult;
1116 spl_t s;
1117
1118 if (__improbable(event == NO_EVENT)) {
1119 panic("%s() called with NO_EVENT", __func__);
1120 }
1121
1122 struct waitq *waitq;
1123 waitq = global_eventq(event);
1124
1125 s = splsched();
1126 waitq_lock(waitq);
1127
1128 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1129 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1130 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1131
1132 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1133 interruptible,
1134 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1135 TIMEOUT_NO_LEEWAY, thread);
1136 waitq_unlock(waitq);
1137 splx(s);
1138 return wresult;
1139 }
1140
1141 wait_result_t
1142 assert_wait_deadline_with_leeway(
1143 event_t event,
1144 wait_interrupt_t interruptible,
1145 wait_timeout_urgency_t urgency,
1146 uint64_t deadline,
1147 uint64_t leeway)
1148 {
1149 thread_t thread = current_thread();
1150 wait_result_t wresult;
1151 spl_t s;
1152
1153 if (__improbable(event == NO_EVENT)) {
1154 panic("%s() called with NO_EVENT", __func__);
1155 }
1156
1157 struct waitq *waitq;
1158 waitq = global_eventq(event);
1159
1160 s = splsched();
1161 waitq_lock(waitq);
1162
1163 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1164 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1165 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1166
1167 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1168 interruptible,
1169 urgency, deadline, leeway,
1170 thread);
1171 waitq_unlock(waitq);
1172 splx(s);
1173 return wresult;
1174 }
1175
1176 /*
1177 * thread_isoncpu:
1178 *
1179 * Return TRUE if a thread is running on a processor such that an AST
1180 * is needed to pull it out of userspace execution, or if executing in
1181 * the kernel, bring to a context switch boundary that would cause
1182 * thread state to be serialized in the thread PCB.
1183 *
1184 * Thread locked, returns the same way. While locked, fields
1185 * like "state" cannot change. "runq" can change only from set to unset.
1186 */
1187 static inline boolean_t
1188 thread_isoncpu(thread_t thread)
1189 {
1190 /* Not running or runnable */
1191 if (!(thread->state & TH_RUN)) {
1192 return FALSE;
1193 }
1194
1195 /* Waiting on a runqueue, not currently running */
1196 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1197 if (thread->runq != PROCESSOR_NULL) {
1198 return FALSE;
1199 }
1200
1201 /*
1202 * Thread does not have a stack yet
1203 * It could be on the stack alloc queue or preparing to be invoked
1204 */
1205 if (!thread->kernel_stack) {
1206 return FALSE;
1207 }
1208
1209 /*
1210 * Thread must be running on a processor, or
1211 * about to run, or just did run. In all these
1212 * cases, an AST to the processor is needed
1213 * to guarantee that the thread is kicked out
1214 * of userspace and the processor has
1215 * context switched (and saved register state).
1216 */
1217 return TRUE;
1218 }
1219
1220 /*
1221 * thread_stop:
1222 *
1223 * Force a preemption point for a thread and wait
1224 * for it to stop running on a CPU. If a stronger
1225 * guarantee is requested, wait until no longer
1226 * runnable. Arbitrates access among
1227 * multiple stop requests. (released by unstop)
1228 *
1229 * The thread must enter a wait state and stop via a
1230 * separate means.
1231 *
1232 * Returns FALSE if interrupted.
1233 */
1234 boolean_t
1235 thread_stop(
1236 thread_t thread,
1237 boolean_t until_not_runnable)
1238 {
1239 wait_result_t wresult;
1240 spl_t s = splsched();
1241 boolean_t oncpu;
1242
1243 wake_lock(thread);
1244 thread_lock(thread);
1245
1246 while (thread->state & TH_SUSP) {
1247 thread->wake_active = TRUE;
1248 thread_unlock(thread);
1249
1250 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1251 wake_unlock(thread);
1252 splx(s);
1253
1254 if (wresult == THREAD_WAITING) {
1255 wresult = thread_block(THREAD_CONTINUE_NULL);
1256 }
1257
1258 if (wresult != THREAD_AWAKENED) {
1259 return FALSE;
1260 }
1261
1262 s = splsched();
1263 wake_lock(thread);
1264 thread_lock(thread);
1265 }
1266
1267 thread->state |= TH_SUSP;
1268
1269 while ((oncpu = thread_isoncpu(thread)) ||
1270 (until_not_runnable && (thread->state & TH_RUN))) {
1271 processor_t processor;
1272
1273 if (oncpu) {
1274 assert(thread->state & TH_RUN);
1275 processor = thread->chosen_processor;
1276 cause_ast_check(processor);
1277 }
1278
1279 thread->wake_active = TRUE;
1280 thread_unlock(thread);
1281
1282 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1283 wake_unlock(thread);
1284 splx(s);
1285
1286 if (wresult == THREAD_WAITING) {
1287 wresult = thread_block(THREAD_CONTINUE_NULL);
1288 }
1289
1290 if (wresult != THREAD_AWAKENED) {
1291 thread_unstop(thread);
1292 return FALSE;
1293 }
1294
1295 s = splsched();
1296 wake_lock(thread);
1297 thread_lock(thread);
1298 }
1299
1300 thread_unlock(thread);
1301 wake_unlock(thread);
1302 splx(s);
1303
1304 /*
1305 * We return with the thread unlocked. To prevent it from
1306 * transitioning to a runnable state (or from TH_RUN to
1307 * being on the CPU), the caller must ensure the thread
1308 * is stopped via an external means (such as an AST)
1309 */
1310
1311 return TRUE;
1312 }
1313
1314 /*
1315 * thread_unstop:
1316 *
1317 * Release a previous stop request and set
1318 * the thread running if appropriate.
1319 *
1320 * Use only after a successful stop operation.
1321 */
1322 void
1323 thread_unstop(
1324 thread_t thread)
1325 {
1326 spl_t s = splsched();
1327
1328 wake_lock(thread);
1329 thread_lock(thread);
1330
1331 assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1332
1333 if (thread->state & TH_SUSP) {
1334 thread->state &= ~TH_SUSP;
1335
1336 if (thread->wake_active) {
1337 thread->wake_active = FALSE;
1338 thread_unlock(thread);
1339
1340 thread_wakeup(&thread->wake_active);
1341 wake_unlock(thread);
1342 splx(s);
1343
1344 return;
1345 }
1346 }
1347
1348 thread_unlock(thread);
1349 wake_unlock(thread);
1350 splx(s);
1351 }
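/*
 * Illustrative pairing sketch (hypothetical caller): a successful
 * thread_stop() must be balanced by thread_unstop() once the caller is done
 * with the target. See the comment in thread_stop() about keeping the target
 * stopped by external means while it is examined.
 */
#if 0   /* example only */
static boolean_t
example_inspect_stopped_thread(thread_t target)
{
	if (!thread_stop(target, TRUE)) {
		/* Interrupted while waiting for the target to stop running. */
		return FALSE;
	}

	/* The target has been observed off-CPU and not runnable here. */

	thread_unstop(target);
	return TRUE;
}
#endif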
1352
1353 /*
1354 * thread_wait:
1355 *
1356 * Wait for a thread to stop running. (non-interruptible)
1357 *
1358 */
1359 void
1360 thread_wait(
1361 thread_t thread,
1362 boolean_t until_not_runnable)
1363 {
1364 wait_result_t wresult;
1365 boolean_t oncpu;
1366 processor_t processor;
1367 spl_t s = splsched();
1368
1369 wake_lock(thread);
1370 thread_lock(thread);
1371
1372 /*
1373 * Wait until not running on a CPU. If stronger requirement
1374 * desired, wait until not runnable. Assumption: if thread is
1375 * on CPU, then TH_RUN is set, so we're not waiting in any case
1376 * where the original, pure "TH_RUN" check would have let us
1377 * finish.
1378 */
1379 while ((oncpu = thread_isoncpu(thread)) ||
1380 (until_not_runnable && (thread->state & TH_RUN))) {
1381 if (oncpu) {
1382 assert(thread->state & TH_RUN);
1383 processor = thread->chosen_processor;
1384 cause_ast_check(processor);
1385 }
1386
1387 thread->wake_active = TRUE;
1388 thread_unlock(thread);
1389
1390 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1391 wake_unlock(thread);
1392 splx(s);
1393
1394 if (wresult == THREAD_WAITING) {
1395 thread_block(THREAD_CONTINUE_NULL);
1396 }
1397
1398 s = splsched();
1399 wake_lock(thread);
1400 thread_lock(thread);
1401 }
1402
1403 thread_unlock(thread);
1404 wake_unlock(thread);
1405 splx(s);
1406 }
1407
1408 /*
1409 * Routine: clear_wait_internal
1410 *
1411 * Clear the wait condition for the specified thread.
1412 * Start the thread executing if that is appropriate.
1413 * Arguments:
1414 * thread thread to awaken
1415 * result Wakeup result the thread should see
1416 * Conditions:
1417 * At splsched
1418 * the thread is locked.
1419 * Returns:
1420 * KERN_SUCCESS thread was rousted out a wait
1421 * KERN_FAILURE thread was waiting but could not be rousted
1422 * KERN_NOT_WAITING thread was not waiting
1423 */
1424 __private_extern__ kern_return_t
1425 clear_wait_internal(
1426 thread_t thread,
1427 wait_result_t wresult)
1428 {
1429 uint32_t i = LockTimeOutUsec;
1430 struct waitq *waitq = thread->waitq;
1431
1432 do {
1433 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1434 return KERN_FAILURE;
1435 }
1436
1437 if (waitq != NULL) {
1438 if (!waitq_pull_thread_locked(waitq, thread)) {
1439 thread_unlock(thread);
1440 delay(1);
1441 if (i > 0 && !machine_timeout_suspended()) {
1442 i--;
1443 }
1444 thread_lock(thread);
1445 if (waitq != thread->waitq) {
1446 return KERN_NOT_WAITING;
1447 }
1448 continue;
1449 }
1450 }
1451
1452 /* TODO: Can we instead assert TH_TERMINATE is not set? */
1453 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
1454 return thread_go(thread, wresult);
1455 } else {
1456 return KERN_NOT_WAITING;
1457 }
1458 } while (i > 0);
1459
1460 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1461 thread, waitq, cpu_number());
1462
1463 return KERN_FAILURE;
1464 }
1465
1466
1467 /*
1468 * clear_wait:
1469 *
1470 * Clear the wait condition for the specified thread. Start the thread
1471 * executing if that is appropriate.
1472 *
1473 * parameters:
1474 * thread thread to awaken
1475 * result Wakeup result the thread should see
1476 */
1477 kern_return_t
1478 clear_wait(
1479 thread_t thread,
1480 wait_result_t result)
1481 {
1482 kern_return_t ret;
1483 spl_t s;
1484
1485 s = splsched();
1486 thread_lock(thread);
1487 ret = clear_wait_internal(thread, result);
1488 thread_unlock(thread);
1489 splx(s);
1490 return ret;
1491 }
1492
1493
1494 /*
1495 * thread_wakeup_prim:
1496 *
1497 * Common routine for thread_wakeup, thread_wakeup_with_result,
1498 * and thread_wakeup_one.
1499 *
1500 */
1501 kern_return_t
1502 thread_wakeup_prim(
1503 event_t event,
1504 boolean_t one_thread,
1505 wait_result_t result)
1506 {
1507 if (__improbable(event == NO_EVENT)) {
1508 panic("%s() called with NO_EVENT", __func__);
1509 }
1510
1511 struct waitq *wq = global_eventq(event);
1512
1513 if (one_thread) {
1514 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1515 } else {
1516 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1517 }
1518 }
1519
1520 /*
1521 * Wakeup a specified thread if and only if it's waiting for this event
1522 */
1523 kern_return_t
1524 thread_wakeup_thread(
1525 event_t event,
1526 thread_t thread)
1527 {
1528 if (__improbable(event == NO_EVENT)) {
1529 panic("%s() called with NO_EVENT", __func__);
1530 }
1531
1532 if (__improbable(thread == THREAD_NULL)) {
1533 panic("%s() called with THREAD_NULL", __func__);
1534 }
1535
1536 struct waitq *wq = global_eventq(event);
1537
1538 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1539 }
1540
1541 /*
1542 * Wakeup a thread waiting on an event and promote it to a priority.
1543 *
1544 * Requires woken thread to un-promote itself when done.
1545 */
1546 kern_return_t
1547 thread_wakeup_one_with_pri(
1548 event_t event,
1549 int priority)
1550 {
1551 if (__improbable(event == NO_EVENT)) {
1552 panic("%s() called with NO_EVENT", __func__);
1553 }
1554
1555 struct waitq *wq = global_eventq(event);
1556
1557 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1558 }
1559
1560 /*
1561 * Wakeup a thread waiting on an event,
1562 * promote it to a priority,
1563 * and return a reference to the woken thread.
1564 *
1565 * Requires woken thread to un-promote itself when done.
1566 */
1567 thread_t
1568 thread_wakeup_identify(event_t event,
1569 int priority)
1570 {
1571 if (__improbable(event == NO_EVENT)) {
1572 panic("%s() called with NO_EVENT", __func__);
1573 }
1574
1575 struct waitq *wq = global_eventq(event);
1576
1577 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1578 }
1579
1580 /*
1581 * thread_bind:
1582 *
1583 * Force the current thread to execute on the specified processor.
1584 * Takes effect after the next thread_block().
1585 *
1586 * Returns the previous binding. PROCESSOR_NULL means
1587 * not bound.
1588 *
1589 * XXX - DO NOT export this to users - XXX
1590 */
1591 processor_t
1592 thread_bind(
1593 processor_t processor)
1594 {
1595 thread_t self = current_thread();
1596 processor_t prev;
1597 spl_t s;
1598
1599 s = splsched();
1600 thread_lock(self);
1601
1602 prev = thread_bind_internal(self, processor);
1603
1604 thread_unlock(self);
1605 splx(s);
1606
1607 return prev;
1608 }
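/*
 * Illustrative usage sketch (hypothetical caller), mirroring the pattern
 * used by thread_vm_bind_group_add() below: bind, block to migrate to the
 * target processor, do the per-processor work, then restore the previous
 * binding.
 */
#if 0   /* example only */
static void
example_run_on(processor_t target)
{
	processor_t prev = thread_bind(target);

	/* The binding takes effect at the next blocking point. */
	thread_block(THREAD_CONTINUE_NULL);

	/* ... work that must run on 'target' goes here ... */

	thread_bind(prev);
	thread_block(THREAD_CONTINUE_NULL);     /* allow migration off 'target' */
}
#endif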
1609
1610 /*
1611 * thread_bind_internal:
1612 *
1613 * If the specified thread is not the current thread, and it is currently
1614 * running on another CPU, a remote AST must be sent to that CPU to cause
1615 * the thread to migrate to its bound processor. Otherwise, the migration
1616 * will occur at the next quantum expiration or blocking point.
1617 *
1618 * When the thread is the current thread, an explicit thread_block() should
1619 * be used to force the current processor to context switch away and
1620 * let the thread migrate to the bound processor.
1621 *
1622 * Thread must be locked, and at splsched.
1623 */
1624
1625 static processor_t
1626 thread_bind_internal(
1627 thread_t thread,
1628 processor_t processor)
1629 {
1630 processor_t prev;
1631
1632 /* <rdar://problem/15102234> */
1633 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1634 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1635 assert(thread->runq == PROCESSOR_NULL);
1636
1637 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1638
1639 prev = thread->bound_processor;
1640 thread->bound_processor = processor;
1641
1642 return prev;
1643 }
1644
1645 /*
1646 * thread_vm_bind_group_add:
1647 *
1648 * The "VM bind group" is a special mechanism to mark a collection
1649 * of threads from the VM subsystem that, in general, should be scheduled
1650 * with only one CPU of parallelism. To accomplish this, we initially
1651 * bind all the threads to the master processor, which has the effect
1652 * that only one of the threads in the group can execute at once, including
1653 * preempting threads in the group that are a lower priority. Future
1654 * mechanisms may use more dynamic mechanisms to prevent the collection
1655 * of VM threads from using more CPU time than desired.
1656 *
1657 * The current implementation can result in priority inversions where
1658 * compute-bound priority 95 or realtime threads that happen to have
1659 * landed on the master processor prevent the VM threads from running.
1660 * When this situation is detected, we unbind the threads for one
1661 * scheduler tick to allow the scheduler to run the threads on
1662 * additional CPUs, before restoring the binding (assuming high latency
1663 * is no longer a problem).
1664 */
1665
1666 /*
1667 * The current max is provisioned for:
1668 * vm_compressor_swap_trigger_thread (92)
1669 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1670 * vm_pageout_continue (92)
1671 * memorystatus_thread (95)
1672 */
1673 #define MAX_VM_BIND_GROUP_COUNT (5)
1674 decl_simple_lock_data(static, sched_vm_group_list_lock);
1675 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1676 static int sched_vm_group_thread_count;
1677 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1678
1679 void
1680 thread_vm_bind_group_add(void)
1681 {
1682 thread_t self = current_thread();
1683
1684 thread_reference_internal(self);
1685 self->options |= TH_OPT_SCHED_VM_GROUP;
1686
1687 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1688 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1689 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1690 simple_unlock(&sched_vm_group_list_lock);
1691
1692 thread_bind(master_processor);
1693
1694 /* Switch to bound processor if not already there */
1695 thread_block(THREAD_CONTINUE_NULL);
1696 }
1697
1698 static void
1699 sched_vm_group_maintenance(void)
1700 {
1701 uint64_t ctime = mach_absolute_time();
1702 uint64_t longtime = ctime - sched_tick_interval;
1703 int i;
1704 spl_t s;
1705 boolean_t high_latency_observed = FALSE;
1706 boolean_t runnable_and_not_on_runq_observed = FALSE;
1707 boolean_t bind_target_changed = FALSE;
1708 processor_t bind_target = PROCESSOR_NULL;
1709
1710 /* Make sure nobody attempts to add new threads while we are enumerating them */
1711 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1712
1713 s = splsched();
1714
1715 for (i = 0; i < sched_vm_group_thread_count; i++) {
1716 thread_t thread = sched_vm_group_thread_list[i];
1717 assert(thread != THREAD_NULL);
1718 thread_lock(thread);
1719 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
1720 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1721 high_latency_observed = TRUE;
1722 } else if (thread->runq == PROCESSOR_NULL) {
1723 /* There are some cases where a thread may be transitioning that also fall into this case */
1724 runnable_and_not_on_runq_observed = TRUE;
1725 }
1726 }
1727 thread_unlock(thread);
1728
1729 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1730 /* All the things we are looking for are true, stop looking */
1731 break;
1732 }
1733 }
1734
1735 splx(s);
1736
1737 if (sched_vm_group_temporarily_unbound) {
1738 /* If we turned off binding, make sure everything is OK before rebinding */
1739 if (!high_latency_observed) {
1740 /* rebind */
1741 bind_target_changed = TRUE;
1742 bind_target = master_processor;
1743 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1744 }
1745 } else {
1746 /*
1747 * Check if we're in a bad state, which is defined by high
1748 * latency with no core currently executing a thread. If a
1749 * single thread is making progress on a CPU, that means the
1750 * binding concept to reduce parallelism is working as
1751 * designed.
1752 */
1753 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1754 /* unbind */
1755 bind_target_changed = TRUE;
1756 bind_target = PROCESSOR_NULL;
1757 sched_vm_group_temporarily_unbound = TRUE;
1758 }
1759 }
1760
1761 if (bind_target_changed) {
1762 s = splsched();
1763 for (i = 0; i < sched_vm_group_thread_count; i++) {
1764 thread_t thread = sched_vm_group_thread_list[i];
1765 boolean_t removed;
1766 assert(thread != THREAD_NULL);
1767
1768 thread_lock(thread);
1769 removed = thread_run_queue_remove(thread);
1770 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1771 thread_bind_internal(thread, bind_target);
1772 } else {
1773 /*
1774 * Thread was in the middle of being context-switched-to,
1775 * or was in the process of blocking. To avoid switching the bind
1776 * state out mid-flight, defer the change if possible.
1777 */
1778 if (bind_target == PROCESSOR_NULL) {
1779 thread_bind_internal(thread, bind_target);
1780 } else {
1781 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1782 }
1783 }
1784
1785 if (removed) {
1786 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1787 }
1788 thread_unlock(thread);
1789 }
1790 splx(s);
1791 }
1792
1793 simple_unlock(&sched_vm_group_list_lock);
1794 }
1795
1796 /* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
1797 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1798 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1799 * - IPI thrash if this core does not remain idle following the load balancing ASTs
1800 * - Idle "thrash", when IPI issue is followed by idle entry/core power down
1801 * followed by a wakeup shortly thereafter.
1802 */
1803
1804 #if (DEVELOPMENT || DEBUG)
1805 int sched_smt_balance = 1;
1806 #endif
1807
1808 #if __SMP__
1809 /* Invoked with pset locked, returns with pset unlocked */
1810 void
1811 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
1812 {
1813 processor_t ast_processor = NULL;
1814
1815 #if (DEVELOPMENT || DEBUG)
1816 if (__improbable(sched_smt_balance == 0)) {
1817 goto smt_balance_exit;
1818 }
1819 #endif
1820
1821 assert(cprocessor == current_processor());
1822 if (cprocessor->is_SMT == FALSE) {
1823 goto smt_balance_exit;
1824 }
1825
1826 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1827
1828 /* Determine if both this processor and its sibling are idle,
1829 * indicating an SMT rebalancing opportunity.
1830 */
1831 if (sib_processor->state != PROCESSOR_IDLE) {
1832 goto smt_balance_exit;
1833 }
1834
1835 processor_t sprocessor;
1836
1837 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
1838 uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
1839 ~cpset->primary_map);
1840 for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
1841 sprocessor = processor_array[cpuid];
1842 if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
1843 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
1844 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1845 if (ipi_type != SCHED_IPI_NONE) {
1846 assert(sprocessor != cprocessor);
1847 ast_processor = sprocessor;
1848 break;
1849 }
1850 }
1851 }
1852
1853 smt_balance_exit:
1854 pset_unlock(cpset);
1855
1856 if (ast_processor) {
1857 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
1858 sched_ipi_perform(ast_processor, ipi_type);
1859 }
1860 }
1861 #else
1862 /* Invoked with pset locked, returns with pset unlocked */
1863 void
1864 sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
1865 {
1866 pset_unlock(cpset);
1867 }
1868 #endif /* __SMP__ */
1869
1870 /*
1871 * Called with pset locked, on a processor that is committing to run a new thread
1872 * Will transition an idle or dispatching processor to running as it picks up
1873 * the first new thread from the idle thread.
1874 */
1875 static void
1876 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
1877 {
1878 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
1879 assert(current_thread() == processor->idle_thread);
1880
1881 /*
1882 * Dispatching processor is now committed to running new_thread,
1883 * so change its state to PROCESSOR_RUNNING.
1884 */
1885 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
1886 } else {
1887 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
1888 }
1889
1890 processor_state_update_from_thread(processor, new_thread);
1891 }
1892
1893 static processor_t choose_processor_for_realtime_thread(processor_set_t pset);
1894 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
1895 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map);
1896 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor);
1897 int sched_allow_rt_smt = 1;
1898 int sched_avoid_cpu0 = 1;
1899
1900 /*
1901 * thread_select:
1902 *
1903 * Select a new thread for the current processor to execute.
1904 *
1905 * May select the current thread, which must be locked.
1906 */
1907 static thread_t
1908 thread_select(thread_t thread,
1909 processor_t processor,
1910 ast_t *reason)
1911 {
1912 processor_set_t pset = processor->processor_set;
1913 thread_t new_thread = THREAD_NULL;
1914
1915 assert(processor == current_processor());
1916 assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
1917
1918 do {
1919 /*
1920 * Update the priority.
1921 */
1922 if (SCHED(can_update_priority)(thread)) {
1923 SCHED(update_priority)(thread);
1924 }
1925
1926 pset_lock(pset);
1927
1928 processor_state_update_from_thread(processor, thread);
1929
1930 restart:
1931 /* Acknowledge any pending IPIs here with pset lock held */
1932 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
1933 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
1934
1935 #if defined(CONFIG_SCHED_DEFERRED_AST)
1936 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
1937 #endif
1938
1939 bool secondary_can_only_run_realtime_thread = false;
1940
1941 assert(processor->state != PROCESSOR_OFF_LINE);
1942
1943 if (!processor->is_recommended) {
1944 /*
1945 * The performance controller has provided a hint to not dispatch more threads,
1946 * unless they are bound to us (and thus we are the only option).
1947 */
1948 if (!SCHED(processor_bound_count)(processor)) {
1949 goto idle;
1950 }
1951 } else if (processor->processor_primary != processor) {
1952 /*
1953 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1954 * we should look for work only under the same conditions that choose_processor()
1955 * would have assigned work, which is when all primary processors have been assigned work.
1956 *
1957 * An exception is that bound threads are dispatched to a processor without going through
1958 * choose_processor(), so in those cases we should continue trying to dequeue work.
1959 */
1960 if (!SCHED(processor_bound_count)(processor)) {
1961 if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
1962 goto idle;
1963 }
1964
1965 /*
1966 * TODO: What if a secondary core beat an idle primary to waking up from an IPI?
1967 * Should it dequeue immediately, or spin waiting for the primary to wake up?
1968 */
1969
1970 /* There are no idle primaries */
1971
1972 if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) {
1973 bool secondary_can_run_realtime_thread = sched_allow_rt_smt && rt_runq_count(pset) && all_available_primaries_are_running_realtime_threads(pset);
1974 if (!secondary_can_run_realtime_thread) {
1975 goto idle;
1976 }
1977 secondary_can_only_run_realtime_thread = true;
1978 }
1979 }
1980 }
1981
1982 /*
1983 * Test to see if the current thread should continue
1984 * to run on this processor. Must not be attempting to wait, and not
1985 * bound to a different processor, nor be in the wrong
1986 * processor set, nor be forced to context switch by TH_SUSP.
1987 *
1988 * Note that there are never any RT threads in the regular runqueue.
1989 *
1990 * This code is extremely tricky.
1991 */
1992
1993 /* i.e. not waiting, not TH_SUSP'ed */
1994 bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
1995
1996 /*
1997 * Threads running on secondary SMT processors are forced to context switch so they can be rebalanced to a primary. Don't rebalance realtime threads.
1998 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
1999 * <rdar://problem/47907700>
2000 *
2001 * A yielding thread shouldn't be forced to context switch.
2002 */
2003
2004 bool is_yielding = (*reason & AST_YIELD) == AST_YIELD;
2005
2006 bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
2007
2008 bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
2009
2010 bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2011
2012 bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread);
2013
2014 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
2015 /*
2016 * This thread is eligible to keep running on this processor.
2017 *
2018 * RT threads with un-expired quantum stay on processor,
2019 * unless there's a valid RT thread with an earlier deadline.
2020 */
2021 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
2022 if (rt_runq_count(pset) > 0) {
2023 rt_lock_lock(pset);
2024
2025 if (rt_runq_count(pset) > 0) {
2026 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2027
2028 if (next_rt->realtime.deadline < processor->deadline &&
2029 (next_rt->bound_processor == PROCESSOR_NULL ||
2030 next_rt->bound_processor == processor)) {
2031 /* The next RT thread is better, so pick it off the runqueue. */
2032 goto pick_new_rt_thread;
2033 }
2034 }
2035
2036 rt_lock_unlock(pset);
2037 }
2038
2039 /* This is still the best RT thread to run. */
2040 processor->deadline = thread->realtime.deadline;
2041
2042 sched_update_pset_load_average(pset);
2043
2044 processor_t next_rt_processor = PROCESSOR_NULL;
2045 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2046
2047 if (rt_runq_count(pset) > 0) {
2048 next_rt_processor = choose_processor_for_realtime_thread(pset);
2049 if (next_rt_processor) {
2050 if (next_rt_processor->state == PROCESSOR_IDLE) {
2051 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
2052 }
2053 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
2054 }
2055 }
2056 pset_unlock(pset);
2057
2058 if (next_rt_processor) {
2059 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2060 }
2061
2062 return thread;
2063 }
2064
2065 if ((rt_runq_count(pset) == 0) &&
2066 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
2067 /* This thread is still the highest priority runnable (non-idle) thread */
2068 processor->deadline = UINT64_MAX;
2069
2070 sched_update_pset_load_average(pset);
2071 pset_unlock(pset);
2072
2073 return thread;
2074 }
2075 } else {
2076 /*
2077 * This processor must context switch.
2078 * If it's due to a rebalance, we should aggressively find this thread a new home.
2079 */
2080 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
2081 *reason |= AST_REBALANCE;
2082 }
2083 }
2084
2085 /* OK, so we're not going to run the current thread. Look at the RT queue. */
2086 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor);
2087 if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
2088 rt_lock_lock(pset);
2089
2090 if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
2091 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2092
2093 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
2094 (next_rt->bound_processor == processor)))) {
2095 pick_new_rt_thread:
2096 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2097
2098 new_thread->runq = PROCESSOR_NULL;
2099 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
2100 rt_runq_count_decr(pset);
2101
2102 processor->deadline = new_thread->realtime.deadline;
2103
2104 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2105
2106 rt_lock_unlock(pset);
2107 sched_update_pset_load_average(pset);
2108
2109 processor_t ast_processor = PROCESSOR_NULL;
2110 processor_t next_rt_processor = PROCESSOR_NULL;
2111 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2112 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2113
2114 if (processor->processor_secondary != NULL) {
2115 processor_t sprocessor = processor->processor_secondary;
2116 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2117 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2118 ast_processor = sprocessor;
2119 }
2120 }
2121 if (rt_runq_count(pset) > 0) {
2122 next_rt_processor = choose_processor_for_realtime_thread(pset);
2123 if (next_rt_processor) {
2124 if (next_rt_processor->state == PROCESSOR_IDLE) {
2125 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
2126 }
2127 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
2128 }
2129 }
2130 pset_unlock(pset);
2131
2132 if (ast_processor) {
2133 sched_ipi_perform(ast_processor, ipi_type);
2134 }
2135
2136 if (next_rt_processor) {
2137 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2138 }
2139
2140 return new_thread;
2141 }
2142 }
2143
2144 rt_lock_unlock(pset);
2145 }
2146 if (secondary_can_only_run_realtime_thread) {
2147 goto idle;
2148 }
2149
2150 processor->deadline = UINT64_MAX;
2151
2152 /* No RT threads, so let's look at the regular threads. */
2153 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
2154 sched_update_pset_load_average(pset);
2155
2156 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2157
2158 processor_t ast_processor = PROCESSOR_NULL;
2159 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2160
2161 processor_t sprocessor = processor->processor_secondary;
2162 if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) {
2163 if (thread_no_smt(new_thread)) {
2164 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2165 ast_processor = sprocessor;
2166 }
2167 }
2168 pset_unlock(pset);
2169
2170 if (ast_processor) {
2171 sched_ipi_perform(ast_processor, ipi_type);
2172 }
2173 return new_thread;
2174 }
2175
2176 if (processor->must_idle) {
2177 processor->must_idle = false;
2178 goto idle;
2179 }
2180
2181 #if __SMP__
2182 if (SCHED(steal_thread_enabled)(pset)) {
2183 /*
2184 * No runnable threads, attempt to steal
2185 * from other processors. Returns with pset lock dropped.
2186 */
2187
2188 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
2189 /*
2190 * Avoid taking the pset_lock unless it is necessary to change state.
2191 * It's safe to read processor->state here, as only the current processor can change state
2192 * from this point (interrupts are disabled and this processor is committed to run new_thread).
2193 */
2194 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2195 pset_lock(pset);
2196 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2197 pset_unlock(pset);
2198 } else {
2199 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
2200 processor_state_update_from_thread(processor, new_thread);
2201 }
2202
2203 return new_thread;
2204 }
2205
2206 /*
2207 * If other threads have appeared, shortcut
2208 * around again.
2209 */
2210 if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) {
2211 continue;
2212 }
2213
2214 pset_lock(pset);
2215
2216 /* Someone selected this processor while we had dropped the lock */
2217 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2218 goto restart;
2219 }
2220 }
2221 #endif
2222
2223 idle:
2224 /*
2225 * Nothing is runnable, so set this processor idle if it
2226 * was running.
2227 */
2228 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
2229 pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
2230 processor_state_update_idle(processor);
2231 }
2232
2233 #if __SMP__
2234 /* Invoked with pset locked, returns with pset unlocked */
2235 SCHED(processor_balance)(processor, pset);
2236 #else
2237 pset_unlock(pset);
2238 #endif
2239
2240 #if CONFIG_SCHED_IDLE_IN_PLACE
2241 /*
2242 * Choose idle thread if fast idle is not possible.
2243 */
2244 if (processor->processor_primary != processor) {
2245 return processor->idle_thread;
2246 }
2247
2248 if ((thread->state & (TH_IDLE | TH_TERMINATE | TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES) {
2249 return processor->idle_thread;
2250 }
2251
2252 /*
2253 * Perform idling activities directly without a
2254 * context switch. Return dispatched thread,
2255 * else check again for a runnable thread.
2256 */
2257 new_thread = thread_select_idle(thread, processor);
2258
2259 #else /* !CONFIG_SCHED_IDLE_IN_PLACE */
2260
2261 /*
2262 * Do a full context switch to idle so that the current
2263 * thread can start running on another processor without
2264 * waiting for the fast-idled processor to wake up.
2265 */
2266 new_thread = processor->idle_thread;
2267
2268 #endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
2269 } while (new_thread == THREAD_NULL);
2270
2271 return new_thread;
2272 }
2273
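/*
 * thread_select() above walks a fixed preference order. A compressed sketch
 * of that order, with locking, SMT constraints and IPI handling omitted;
 * the example_ helpers are hypothetical stand-ins for the inline logic:
 */
#if 0	/* illustrative sketch only -- not compiled */
static thread_t
example_select_order(thread_t cur, processor_t processor, processor_set_t pset)
{
	/* 1. keep the current thread if nothing forces it off this processor */
	if (example_can_keep_running(cur, processor)) {
		return cur;
	}
	/* 2. earliest-deadline realtime thread, if this processor may run one */
	if (sched_ok_to_run_realtime_thread(pset, processor) && rt_runq_count(pset) > 0) {
		return example_dequeue_earliest_deadline_rt(pset);
	}
	/* 3. highest-priority thread from the regular runqueue */
	thread_t t = SCHED(choose_thread)(processor, MINPRI, AST_NONE);
	if (t != THREAD_NULL) {
		return t;
	}
	/* 4. try to steal from another processor in the pset */
	if (SCHED(steal_thread_enabled)(pset) &&
	    (t = SCHED(steal_thread)(pset)) != THREAD_NULL) {
		return t;
	}
	/* 5. nothing runnable: go idle */
	return processor->idle_thread;
}
#endif
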
2274 #if CONFIG_SCHED_IDLE_IN_PLACE
2275 /*
2276 * thread_select_idle:
2277 *
2278 * Idle the processor using the current thread context.
2279 *
2280 * Called with thread locked, then dropped and relocked.
2281 */
2282 static thread_t
2283 thread_select_idle(
2284 thread_t thread,
2285 processor_t processor)
2286 {
2287 thread_t new_thread;
2288 uint64_t arg1, arg2;
2289 int urgency;
2290
2291 sched_run_decr(thread);
2292
2293 thread->state |= TH_IDLE;
2294 processor_state_update_idle(processor);
2295
2296 /* Reload precise timing global policy to thread-local policy */
2297 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2298
2299 thread_unlock(thread);
2300
2301 /*
2302 * Switch execution timing to processor idle thread.
2303 */
2304 processor->last_dispatch = mach_absolute_time();
2305
2306 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2307 commpage_update_mach_approximate_time(processor->last_dispatch);
2308 #endif
2309
2310 thread->last_run_time = processor->last_dispatch;
2311 processor_timer_switch_thread(processor->last_dispatch,
2312 &processor->idle_thread->system_timer);
2313 PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
2314
2315
2316 /*
2317 * Cancel the quantum timer while idling.
2318 */
2319 timer_call_quantum_timer_cancel(&processor->quantum_timer);
2320 processor->first_timeslice = FALSE;
2321
2322 if (thread->sched_call) {
2323 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2324 }
2325
2326 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);
2327
2328 /*
2329 * Enable interrupts and perform idling activities. No
2330 * preemption due to TH_IDLE being set.
2331 */
2332 spllo(); new_thread = processor_idle(thread, processor);
2333
2334 /*
2335 * Return at splsched.
2336 */
2337 if (thread->sched_call) {
2338 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
2339 }
2340
2341 thread_lock(thread);
2342
2343 /*
2344 * If awakened, switch to thread timer and start a new quantum.
2345 * Otherwise skip; we will context switch to another thread or return here.
2346 */
2347 if (!(thread->state & TH_WAIT)) {
2348 uint64_t time_now = processor->last_dispatch = mach_absolute_time();
2349 processor_timer_switch_thread(time_now, &thread->system_timer);
2350 timer_update(&thread->runnable_timer, time_now);
2351 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2352 thread_quantum_init(thread);
2353 processor->quantum_end = time_now + thread->quantum_remaining;
2354 timer_call_quantum_timer_enter(&processor->quantum_timer,
2355 thread, processor->quantum_end, time_now);
2356 processor->first_timeslice = TRUE;
2357
2358 thread->computation_epoch = time_now;
2359 }
2360
2361 thread->state &= ~TH_IDLE;
2362
2363 urgency = thread_get_urgency(thread, &arg1, &arg2);
2364
2365 thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);
2366
2367 sched_run_incr(thread);
2368
2369 return new_thread;
2370 }
2371 #endif /* CONFIG_SCHED_IDLE_IN_PLACE */
2372
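/*
 * When the in-place idle above finds its thread runnable again, the quantum
 * is simply re-based at the current time. Sketch of that arithmetic, assuming
 * `now` was just read with mach_absolute_time() (not compiled):
 */
#if 0	/* illustrative sketch only -- not compiled */
	thread_quantum_init(thread);			/* refill quantum_remaining */
	processor->quantum_end = now + thread->quantum_remaining;
	timer_call_quantum_timer_enter(&processor->quantum_timer,
	    thread, processor->quantum_end, now);
	processor->first_timeslice = TRUE;
#endif
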
2373 /*
2374 * thread_invoke
2375 *
2376 * Called at splsched with neither thread locked.
2377 *
2378 * Perform a context switch and start executing the new thread.
2379 *
2380 * Returns FALSE when the context switch didn't happen.
2381 * The reference to the new thread is still consumed.
2382 *
2383 * "self" is what is currently running on the processor,
2384 * "thread" is the new thread to context switch to
2385 * (which may be the same thread in some cases)
2386 */
2387 static boolean_t
2388 thread_invoke(
2389 thread_t self,
2390 thread_t thread,
2391 ast_t reason)
2392 {
2393 if (__improbable(get_preemption_level() != 0)) {
2394 int pl = get_preemption_level();
2395 panic("thread_invoke: preemption_level %d, possible cause: %s",
2396 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2397 "blocking while holding a spinlock, or within interrupt context"));
2398 }
2399
2400 thread_continue_t continuation = self->continuation;
2401 void *parameter = self->parameter;
2402 processor_t processor;
2403
2404 uint64_t ctime = mach_absolute_time();
2405
2406 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2407 commpage_update_mach_approximate_time(ctime);
2408 #endif
2409
2410 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2411 if ((thread->state & TH_IDLE) == 0) {
2412 sched_timeshare_consider_maintenance(ctime);
2413 }
2414 #endif
2415
2416 #if MONOTONIC
2417 mt_sched_update(self);
2418 #endif /* MONOTONIC */
2419
2420 assert_thread_magic(self);
2421 assert(self == current_thread());
2422 assert(self->runq == PROCESSOR_NULL);
2423 assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2424
2425 thread_lock(thread);
2426
2427 assert_thread_magic(thread);
2428 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
2429 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2430 assert(thread->runq == PROCESSOR_NULL);
2431
2432 /* Reload precise timing global policy to thread-local policy */
2433 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2434
2435 /* Update SFI class based on other factors */
2436 thread->sfi_class = sfi_thread_classify(thread);
2437
2438 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2439 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2440 /*
2441 * In case a base_pri update happened between the timestamp and
2442 * taking the thread lock.
2443 */
2444 if (ctime <= thread->last_basepri_change_time) {
2445 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2446 }
2447
2448 /* Allow realtime threads to hang onto a stack. */
2449 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
2450 self->reserved_stack = self->kernel_stack;
2451 }
2452
2453 /* Prepare for spin debugging */
2454 #if INTERRUPT_MASKED_DEBUG
2455 ml_spin_debug_clear(thread);
2456 #endif
2457
2458 if (continuation != NULL) {
2459 if (!thread->kernel_stack) {
2460 /*
2461 * If we are using a privileged stack,
2462 * check to see whether we can exchange it with
2463 * that of the other thread.
2464 */
2465 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
2466 goto need_stack;
2467 }
2468
2469 /*
2470 * Context switch by performing a stack handoff.
2471 */
2472 continuation = thread->continuation;
2473 parameter = thread->parameter;
2474
2475 processor = current_processor();
2476 processor->active_thread = thread;
2477 processor_state_update_from_thread(processor, thread);
2478
2479 if (thread->last_processor != processor && thread->last_processor != NULL) {
2480 if (thread->last_processor->processor_set != processor->processor_set) {
2481 thread->ps_switch++;
2482 }
2483 thread->p_switch++;
2484 }
2485 thread->last_processor = processor;
2486 thread->c_switch++;
2487 ast_context(thread);
2488
2489 thread_unlock(thread);
2490
2491 self->reason = reason;
2492
2493 processor->last_dispatch = ctime;
2494 self->last_run_time = ctime;
2495 processor_timer_switch_thread(ctime, &thread->system_timer);
2496 timer_update(&thread->runnable_timer, ctime);
2497 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2498
2499 /*
2500 * Since non-precise user/kernel time doesn't update the state timer
2501 * during privilege transitions, synthesize an event now.
2502 */
2503 if (!thread->precise_user_kernel_time) {
2504 timer_update(PROCESSOR_DATA(processor, current_state), ctime);
2505 }
2506
2507 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2508 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
2509 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2510
2511 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2512 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2513 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2514 }
2515
2516 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2517
2518 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2519
2520 #if KPERF
2521 kperf_off_cpu(self);
2522 #endif /* KPERF */
2523
2524 TLOG(1, "thread_invoke: calling stack_handoff\n");
2525 stack_handoff(self, thread);
2526
2527 /* 'self' is now off core */
2528 assert(thread == current_thread_volatile());
2529
2530 DTRACE_SCHED(on__cpu);
2531
2532 #if KPERF
2533 kperf_on_cpu(thread, continuation, NULL);
2534 #endif /* KPERF */
2535
2536 thread_dispatch(self, thread);
2537
2538 #if KASAN
2539 /* Old thread's stack has been moved to the new thread, so explicitly
2540 * unpoison it. */
2541 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2542 #endif
2543
2544 thread->continuation = thread->parameter = NULL;
2545
2546 counter(c_thread_invoke_hits++);
2547
2548 assert(continuation);
2549 call_continuation(continuation, parameter, thread->wait_result, TRUE);
2550 /*NOTREACHED*/
2551 } else if (thread == self) {
2552 /* same thread but with continuation */
2553 ast_context(self);
2554 counter(++c_thread_invoke_same);
2555
2556 thread_unlock(self);
2557
2558 #if KPERF
2559 kperf_on_cpu(thread, continuation, NULL);
2560 #endif /* KPERF */
2561
2562 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2563 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2564 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2565
2566 #if KASAN
2567 /* stack handoff to self - no thread_dispatch(), so clear the stack
2568 * and free the fakestack directly */
2569 kasan_fakestack_drop(self);
2570 kasan_fakestack_gc(self);
2571 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2572 #endif
2573
2574 self->continuation = self->parameter = NULL;
2575
2576 call_continuation(continuation, parameter, self->wait_result, TRUE);
2577 /*NOTREACHED*/
2578 }
2579 } else {
2580 /*
2581 * Check that the other thread has a stack
2582 */
2583 if (!thread->kernel_stack) {
2584 need_stack:
2585 if (!stack_alloc_try(thread)) {
2586 counter(c_thread_invoke_misses++);
2587 thread_unlock(thread);
2588 thread_stack_enqueue(thread);
2589 return FALSE;
2590 }
2591 } else if (thread == self) {
2592 ast_context(self);
2593 counter(++c_thread_invoke_same);
2594 thread_unlock(self);
2595
2596 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2597 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2598 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2599
2600 return TRUE;
2601 }
2602 }
2603
2604 /*
2605 * Context switch by full context save.
2606 */
2607 processor = current_processor();
2608 processor->active_thread = thread;
2609 processor_state_update_from_thread(processor, thread);
2610
2611 if (thread->last_processor != processor && thread->last_processor != NULL) {
2612 if (thread->last_processor->processor_set != processor->processor_set) {
2613 thread->ps_switch++;
2614 }
2615 thread->p_switch++;
2616 }
2617 thread->last_processor = processor;
2618 thread->c_switch++;
2619 ast_context(thread);
2620
2621 thread_unlock(thread);
2622
2623 counter(c_thread_invoke_csw++);
2624
2625 self->reason = reason;
2626
2627 processor->last_dispatch = ctime;
2628 self->last_run_time = ctime;
2629 processor_timer_switch_thread(ctime, &thread->system_timer);
2630 timer_update(&thread->runnable_timer, ctime);
2631 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2632
2633 /*
2634 * Since non-precise user/kernel time doesn't update the state timer
2635 * during privilege transitions, synthesize an event now.
2636 */
2637 if (!thread->precise_user_kernel_time) {
2638 timer_update(PROCESSOR_DATA(processor, current_state), ctime);
2639 }
2640
2641 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2642 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2643 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2644
2645 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
2646 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2647 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2648 }
2649
2650 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2651
2652 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2653
2654 #if KPERF
2655 kperf_off_cpu(self);
2656 #endif /* KPERF */
2657
2658 /*
2659 * This is where we actually switch register context,
2660 * and address space if required. We will next run
2661 * as a result of a subsequent context switch.
2662 *
2663 * Once registers are switched and the processor is running "thread",
2664 * the stack variables and non-volatile registers will contain whatever
2665 * was there the last time that thread blocked. No local variables should
2666 * be used after this point, except for the special case of "thread", which
2667 * the platform layer returns as the previous thread running on the processor
2668 * via the function call ABI as a return register, and "self", which may have
2669 * been stored on the stack or in a non-volatile register; any stale idea of
2670 * what was on the CPU becomes accurate again, because that thread is once
2671 * more running on the CPU.
2672 */
2673 assert(continuation == self->continuation);
2674 thread = machine_switch_context(self, continuation, thread);
2675 assert(self == current_thread_volatile());
2676 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2677
2678 DTRACE_SCHED(on__cpu);
2679
2680 #if KPERF
2681 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2682 #endif /* KPERF */
2683
2684 /*
2685 * We have been resumed and are set to run.
2686 */
2687 thread_dispatch(thread, self);
2688
2689 if (continuation) {
2690 self->continuation = self->parameter = NULL;
2691
2692 call_continuation(continuation, parameter, self->wait_result, TRUE);
2693 /*NOTREACHED*/
2694 }
2695
2696 return TRUE;
2697 }
2698
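/*
 * thread_invoke() above chooses between two switch mechanisms. A condensed
 * sketch of that choice, ignoring reserved stacks, stack-allocation retry and
 * the thread == self shortcut (hypothetical example_ helper, not compiled):
 */
#if 0	/* illustrative sketch only -- not compiled */
static void
example_switch_mechanism(thread_t self, thread_t next)
{
	if (self->continuation != NULL && next->kernel_stack == 0) {
		/*
		 * Neither side needs the current stack preserved: hand it to
		 * `next` and resume `self` later on its continuation function
		 * with a freshly allocated stack.
		 */
		stack_handoff(self, next);
	} else {
		/*
		 * Otherwise save full register context; `self` resumes exactly
		 * where machine_switch_context() was called.
		 */
		(void)machine_switch_context(self, self->continuation, next);
	}
}
#endif
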
2699 #if defined(CONFIG_SCHED_DEFERRED_AST)
2700 /*
2701 * pset_cancel_deferred_dispatch:
2702 *
2703 * Cancels all ASTs that we can cancel for the given processor set
2704 * if the current processor is running the last runnable thread in the
2705 * system.
2706 *
2707 * This function assumes the current thread is runnable. This must
2708 * be called with the pset unlocked.
2709 */
2710 static void
2711 pset_cancel_deferred_dispatch(
2712 processor_set_t pset,
2713 processor_t processor)
2714 {
2715 processor_t active_processor = NULL;
2716 uint32_t sampled_sched_run_count;
2717
2718 pset_lock(pset);
2719 sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN];
2720
2721 /*
2722 * If we have emptied the run queue, and our current thread is runnable, we
2723 * should tell any processors that are still DISPATCHING that they will
2724 * probably not have any work to do. In the event that there are no
2725 * pending signals that we can cancel, this is also uninteresting.
2726 *
2727 * In the unlikely event that another thread becomes runnable while we are
2728 * doing this (sched_run_count is atomically updated, not guarded), the
2729 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2730 * in order to dispatch it to a processor in our pset. So, the other
2731 * codepath will wait while we squash all cancelable ASTs, get the pset
2732 * lock, and then dispatch the freshly runnable thread. So this should be
2733 * correct (we won't accidentally have a runnable thread that hasn't been
2734 * dispatched to an idle processor), if not ideal (we may be restarting the
2735 * dispatch process, which could have some overhead).
2736 */
2737
2738 if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
2739 uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
2740 pset->pending_deferred_AST_cpu_mask &
2741 ~pset->pending_AST_URGENT_cpu_mask);
2742 for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
2743 active_processor = processor_array[cpuid];
2744 /*
2745 * If a processor is DISPATCHING, it could be because of
2746 * a cancelable signal.
2747 *
2748 * IF the processor is not our
2749 * current processor (the current processor should not
2750 * be DISPATCHING, so this is a bit paranoid), AND there
2751 * is a cancelable signal pending on the processor, AND
2752 * there is no non-cancelable signal pending (as there is
2753 * no point trying to backtrack on bringing the processor
2754 * up if a signal we cannot cancel is outstanding), THEN
2755 * it should make sense to roll back the processor state
2756 * to the IDLE state.
2757 *
2758 * If the racy nature of this approach (as the signal
2759 * will be arbitrated by hardware, and can fire as we
2760 * roll back state) results in the core responding
2761 * despite being pushed back to the IDLE state, it
2762 * should be no different than if the core took some
2763 * interrupt while IDLE.
2764 */
2765 if (active_processor != processor) {
2766 /*
2767 * Squash all of the processor state back to some
2768 * reasonable facsimile of PROCESSOR_IDLE.
2769 */
2770
2771 assert(active_processor->next_thread == THREAD_NULL);
2772 processor_state_update_idle(active_processor);
2773 active_processor->deadline = UINT64_MAX;
2774 pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
2775 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
2776 machine_signal_idle_cancel(active_processor);
2777 }
2778 }
2779 }
2780
2781 pset_unlock(pset);
2782 }
2783 #else
2784 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
2785 #endif
2786
2787 static void
2788 thread_csw_callout(
2789 thread_t old,
2790 thread_t new,
2791 uint64_t timestamp)
2792 {
2793 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2794 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
2795 machine_switch_perfcontrol_context(event, timestamp, 0,
2796 same_pri_latency, old, new);
2797 }
2798
2799
2800 /*
2801 * thread_dispatch:
2802 *
2803 * Handle threads at context switch. Re-dispatch other thread
2804 * if still running, otherwise update run state and perform
2805 * special actions. Update quantum for other thread and begin
2806 * the quantum for ourselves.
2807 *
2808 * "thread" is the old thread that we have switched away from.
2809 * "self" is the new current thread that we have context switched to
2810 *
2811 * Called at splsched.
2812 */
2813 void
2814 thread_dispatch(
2815 thread_t thread,
2816 thread_t self)
2817 {
2818 processor_t processor = self->last_processor;
2819
2820 assert(processor == current_processor());
2821 assert(self == current_thread_volatile());
2822 assert(thread != self);
2823
2824 if (thread != THREAD_NULL) {
2825 /*
2826 * Do the perfcontrol callout for context switch.
2827 * The reason we do this here is:
2828 * - thread_dispatch() is called from various places that are not
2829 * the direct context switch path for eg. processor shutdown etc.
2830 * So adding the callout here covers all those cases.
2831 * - We want this callout as early as possible to be close
2832 * to the timestamp taken in thread_invoke()
2833 * - We want to avoid holding the thread lock while doing the
2834 * callout
2835 * - We do not want to callout if "thread" is NULL.
2836 */
2837 thread_csw_callout(thread, self, processor->last_dispatch);
2838
2839 #if KASAN
2840 if (thread->continuation != NULL) {
2841 /*
2842 * Thread has a continuation and the normal stack is going away.
2843 * Unpoison the stack and mark all fakestack objects as unused.
2844 */
2845 kasan_fakestack_drop(thread);
2846 if (thread->kernel_stack) {
2847 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2848 }
2849 }
2850
2851 /*
2852 * Free all unused fakestack objects.
2853 */
2854 kasan_fakestack_gc(thread);
2855 #endif
2856
2857 /*
2858 * If blocked at a continuation, discard
2859 * the stack.
2860 */
2861 if (thread->continuation != NULL && thread->kernel_stack != 0) {
2862 stack_free(thread);
2863 }
2864
2865 if (thread->state & TH_IDLE) {
2866 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2867 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2868 (uintptr_t)thread_tid(thread), 0, thread->state,
2869 sched_run_buckets[TH_BUCKET_RUN], 0);
2870 } else {
2871 int64_t consumed;
2872 int64_t remainder = 0;
2873
2874 if (processor->quantum_end > processor->last_dispatch) {
2875 remainder = processor->quantum_end -
2876 processor->last_dispatch;
2877 }
2878
2879 consumed = thread->quantum_remaining - remainder;
2880
2881 if ((thread->reason & AST_LEDGER) == 0) {
2882 /*
2883 * Bill CPU time to both the task and
2884 * the individual thread.
2885 */
2886 ledger_credit_thread(thread, thread->t_ledger,
2887 task_ledgers.cpu_time, consumed);
2888 ledger_credit_thread(thread, thread->t_threadledger,
2889 thread_ledgers.cpu_time, consumed);
2890 if (thread->t_bankledger) {
2891 ledger_credit_thread(thread, thread->t_bankledger,
2892 bank_ledgers.cpu_time,
2893 (consumed - thread->t_deduct_bank_ledger_time));
2894 }
2895 thread->t_deduct_bank_ledger_time = 0;
2896 }
2897
2898 wake_lock(thread);
2899 thread_lock(thread);
2900
2901 /*
2902 * Apply a priority floor if the thread holds a kernel resource
2903 * Do this before checking starting_pri to avoid overpenalizing
2904 * repeated rwlock blockers.
2905 */
2906 if (__improbable(thread->rwlock_count != 0)) {
2907 lck_rw_set_promotion_locked(thread);
2908 }
2909
2910 boolean_t keep_quantum = processor->first_timeslice;
2911
2912 /*
2913 * Treat a thread which has dropped priority since it got on core
2914 * as having expired its quantum.
2915 */
2916 if (processor->starting_pri > thread->sched_pri) {
2917 keep_quantum = FALSE;
2918 }
2919
2920 /* Compute remainder of current quantum. */
2921 if (keep_quantum &&
2922 processor->quantum_end > processor->last_dispatch) {
2923 thread->quantum_remaining = (uint32_t)remainder;
2924 } else {
2925 thread->quantum_remaining = 0;
2926 }
2927
2928 if (thread->sched_mode == TH_MODE_REALTIME) {
2929 /*
2930 * Cancel the deadline if the thread has
2931 * consumed the entire quantum.
2932 */
2933 if (thread->quantum_remaining == 0) {
2934 thread->realtime.deadline = UINT64_MAX;
2935 }
2936 } else {
2937 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2938 /*
2939 * For non-realtime threads treat a tiny
2940 * remaining quantum as an expired quantum
2941 * but include what's left next time.
2942 */
2943 if (thread->quantum_remaining < min_std_quantum) {
2944 thread->reason |= AST_QUANTUM;
2945 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2946 }
2947 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2948 }
2949
2950 /*
2951 * If we are doing a direct handoff then
2952 * take the remainder of the quantum.
2953 */
2954 if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
2955 self->quantum_remaining = thread->quantum_remaining;
2956 thread->reason |= AST_QUANTUM;
2957 thread->quantum_remaining = 0;
2958 } else {
2959 #if defined(CONFIG_SCHED_MULTIQ)
2960 if (SCHED(sched_groups_enabled) &&
2961 thread->sched_group == self->sched_group) {
2962 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2963 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
2964 self->reason, (uintptr_t)thread_tid(thread),
2965 self->quantum_remaining, thread->quantum_remaining, 0);
2966
2967 self->quantum_remaining = thread->quantum_remaining;
2968 thread->quantum_remaining = 0;
2969 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2970 }
2971 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2972 }
2973
2974 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2975
2976 if (!(thread->state & TH_WAIT)) {
2977 /*
2978 * Still runnable.
2979 */
2980 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
2981
2982 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
2983
2984 ast_t reason = thread->reason;
2985 sched_options_t options = SCHED_NONE;
2986
2987 if (reason & AST_REBALANCE) {
2988 options |= SCHED_REBALANCE;
2989 if (reason & AST_QUANTUM) {
2990 /*
2991 * Having gone to the trouble of forcing this thread off a less preferred core,
2992 * we should force the preferable core to reschedule immediately to give this
2993 * thread a chance to run instead of just sitting on the run queue where
2994 * it may just be stolen back by the idle core we just forced it off.
2995 * But only do this at the end of a quantum to prevent cascading effects.
2996 */
2997 options |= SCHED_PREEMPT;
2998 }
2999 }
3000
3001 if (reason & AST_QUANTUM) {
3002 options |= SCHED_TAILQ;
3003 } else if (reason & AST_PREEMPT) {
3004 options |= SCHED_HEADQ;
3005 } else {
3006 options |= (SCHED_PREEMPT | SCHED_TAILQ);
3007 }
3008
3009 thread_setrun(thread, options);
3010
3011 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3012 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3013 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
3014 sched_run_buckets[TH_BUCKET_RUN], 0);
3015
3016 if (thread->wake_active) {
3017 thread->wake_active = FALSE;
3018 thread_unlock(thread);
3019
3020 thread_wakeup(&thread->wake_active);
3021 } else {
3022 thread_unlock(thread);
3023 }
3024
3025 wake_unlock(thread);
3026 } else {
3027 /*
3028 * Waiting.
3029 */
3030 boolean_t should_terminate = FALSE;
3031 uint32_t new_run_count;
3032 int thread_state = thread->state;
3033
3034 /* Only the first call to thread_dispatch
3035 * after explicit termination should add
3036 * the thread to the termination queue.
3037 */
3038 if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
3039 should_terminate = TRUE;
3040 thread_state |= TH_TERMINATE2;
3041 }
3042
3043 timer_stop(&thread->runnable_timer, processor->last_dispatch);
3044
3045 thread_state &= ~TH_RUN;
3046 thread->state = thread_state;
3047
3048 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
3049 thread->chosen_processor = PROCESSOR_NULL;
3050
3051 new_run_count = sched_run_decr(thread);
3052
3053 #if CONFIG_SCHED_SFI
3054 if (thread->reason & AST_SFI) {
3055 thread->wait_sfi_begin_time = processor->last_dispatch;
3056 }
3057 #endif
3058 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
3059
3060 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3061 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
3062 (uintptr_t)thread_tid(thread), thread->reason, thread_state,
3063 new_run_count, 0);
3064
3065 if (thread_state & TH_WAIT_REPORT) {
3066 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
3067 }
3068
3069 if (thread->wake_active) {
3070 thread->wake_active = FALSE;
3071 thread_unlock(thread);
3072
3073 thread_wakeup(&thread->wake_active);
3074 } else {
3075 thread_unlock(thread);
3076 }
3077
3078 wake_unlock(thread);
3079
3080 if (should_terminate) {
3081 thread_terminate_enqueue(thread);
3082 }
3083 }
3084 }
3085 }
3086
3087 int urgency = THREAD_URGENCY_NONE;
3088 uint64_t latency = 0;
3089
3090 /* Update (new) current thread and reprogram quantum timer */
3091 thread_lock(self);
3092
3093 if (!(self->state & TH_IDLE)) {
3094 uint64_t arg1, arg2;
3095
3096 #if CONFIG_SCHED_SFI
3097 ast_t new_ast;
3098
3099 new_ast = sfi_thread_needs_ast(self, NULL);
3100
3101 if (new_ast != AST_NONE) {
3102 ast_on(new_ast);
3103 }
3104 #endif
3105
3106 assertf(processor->last_dispatch >= self->last_made_runnable_time,
3107 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
3108 processor->last_dispatch, self->last_made_runnable_time);
3109
3110 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
3111
3112 latency = processor->last_dispatch - self->last_made_runnable_time;
3113 assert(latency >= self->same_pri_latency);
3114
3115 urgency = thread_get_urgency(self, &arg1, &arg2);
3116
3117 thread_tell_urgency(urgency, arg1, arg2, latency, self);
3118
3119 /*
3120 * Get a new quantum if none remaining.
3121 */
3122 if (self->quantum_remaining == 0) {
3123 thread_quantum_init(self);
3124 }
3125
3126 /*
3127 * Set up quantum timer and timeslice.
3128 */
3129 processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
3130 timer_call_quantum_timer_enter(&processor->quantum_timer, self,
3131 processor->quantum_end, processor->last_dispatch);
3132
3133 processor->first_timeslice = TRUE;
3134 } else {
3135 timer_call_quantum_timer_cancel(&processor->quantum_timer);
3136 processor->first_timeslice = FALSE;
3137
3138 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
3139 }
3140
3141 assert(self->block_hint == kThreadWaitNone);
3142 self->computation_epoch = processor->last_dispatch;
3143 self->reason = AST_NONE;
3144 processor->starting_pri = self->sched_pri;
3145
3146 thread_unlock(self);
3147
3148 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
3149 processor->last_dispatch);
3150
3151 #if defined(CONFIG_SCHED_DEFERRED_AST)
3152 /*
3153 * TODO: Can we state that redispatching our old thread is also
3154 * uninteresting?
3155 */
3156 if ((((volatile uint32_t)sched_run_buckets[TH_BUCKET_RUN]) == 1) &&
3157 !(self->state & TH_IDLE)) {
3158 pset_cancel_deferred_dispatch(processor->processor_set, processor);
3159 }
3160 #endif
3161 }
3162
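/*
 * The quantum bookkeeping in thread_dispatch() above reduces to a little
 * arithmetic on absolute-time values. A worked sketch with made-up numbers
 * (not compiled):
 *
 *	quantum_end   = 1,000,000	(when the quantum timer would have fired)
 *	last_dispatch =   940,000	(when the switch actually happened)
 *
 *	remainder = quantum_end - last_dispatch = 60,000	(unused portion)
 *	consumed  = quantum_remaining - remainder		(billed to the ledgers)
 *
 * The old thread keeps `remainder` as its quantum only if first_timeslice is
 * set and it has not dropped priority since going on core; on a direct
 * handoff (AST_HANDOFF without AST_QUANTUM) the new thread inherits it.
 */
#if 0	/* illustrative sketch only -- not compiled */
	uint64_t remainder = 0;
	if (processor->quantum_end > processor->last_dispatch) {
		remainder = processor->quantum_end - processor->last_dispatch;
	}
	int64_t consumed = thread->quantum_remaining - remainder;
#endif
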
3163 /*
3164 * thread_block_reason:
3165 *
3166 * Forces a reschedule, blocking the caller if a wait
3167 * has been asserted.
3168 *
3169 * If a continuation is specified, then thread_invoke will
3170 * attempt to discard the thread's kernel stack. When the
3171 * thread resumes, it will execute the continuation function
3172 * on a new kernel stack.
3173 */
3174 counter(mach_counter_t c_thread_block_calls = 0; )
3175
3176 wait_result_t
3177 thread_block_reason(
3178 thread_continue_t continuation,
3179 void *parameter,
3180 ast_t reason)
3181 {
3182 thread_t self = current_thread();
3183 processor_t processor;
3184 thread_t new_thread;
3185 spl_t s;
3186
3187 counter(++c_thread_block_calls);
3188
3189 s = splsched();
3190
3191 processor = current_processor();
3192
3193 /* If we're explicitly yielding, force a subsequent quantum */
3194 if (reason & AST_YIELD) {
3195 processor->first_timeslice = FALSE;
3196 }
3197
3198 /* We're handling all scheduling ASTs */
3199 ast_off(AST_SCHEDULING);
3200
3201 #if PROC_REF_DEBUG
3202 if ((continuation != NULL) && (self->task != kernel_task)) {
3203 if (uthread_get_proc_refcount(self->uthread) != 0) {
3204 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
3205 }
3206 }
3207 #endif
3208
3209 self->continuation = continuation;
3210 self->parameter = parameter;
3211
3212 if (self->state & ~(TH_RUN | TH_IDLE)) {
3213 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3214 MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
3215 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
3216 }
3217
3218 do {
3219 thread_lock(self);
3220 new_thread = thread_select(self, processor, &reason);
3221 thread_unlock(self);
3222 } while (!thread_invoke(self, new_thread, reason));
3223
3224 splx(s);
3225
3226 return self->wait_result;
3227 }
3228
3229 /*
3230 * thread_block:
3231 *
3232 * Block the current thread if a wait has been asserted.
3233 */
3234 wait_result_t
3235 thread_block(
3236 thread_continue_t continuation)
3237 {
3238 return thread_block_reason(continuation, NULL, AST_NONE);
3239 }
3240
3241 wait_result_t
3242 thread_block_parameter(
3243 thread_continue_t continuation,
3244 void *parameter)
3245 {
3246 return thread_block_reason(continuation, parameter, AST_NONE);
3247 }
3248
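/*
 * A typical caller pairs a wait assertion with thread_block(), passing a
 * continuation when it can afford to give up its kernel stack. Minimal sketch
 * of that pattern; the event and continuation names are hypothetical and the
 * block is not compiled:
 */
#if 0	/* illustrative sketch only -- not compiled */
static void example_wait_done(void *param, wait_result_t wr);	/* hypothetical */

static void
example_wait_for_event(void *event)
{
	assert_wait((event_t)event, THREAD_UNINT);

	/*
	 * Blocks if the wait was asserted. When woken, the thread does not
	 * return here: it restarts in example_wait_done() on a fresh kernel
	 * stack, so no locals may be carried across the block.
	 */
	thread_block_parameter(example_wait_done, event);
	/*NOTREACHED*/
}
#endif
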
3249 /*
3250 * thread_run:
3251 *
3252 * Switch directly from the current thread to the
3253 * new thread, handing off our quantum if appropriate.
3254 *
3255 * New thread must be runnable, and not on a run queue.
3256 *
3257 * Called at splsched.
3258 */
3259 int
3260 thread_run(
3261 thread_t self,
3262 thread_continue_t continuation,
3263 void *parameter,
3264 thread_t new_thread)
3265 {
3266 ast_t reason = AST_NONE;
3267
3268 if ((self->state & TH_IDLE) == 0) {
3269 reason = AST_HANDOFF;
3270 }
3271
3272 self->continuation = continuation;
3273 self->parameter = parameter;
3274
3275 while (!thread_invoke(self, new_thread, reason)) {
3276 /* the handoff failed, so we have to fall back to the normal block path */
3277 processor_t processor = current_processor();
3278
3279 reason = AST_NONE;
3280
3281 thread_lock(self);
3282 new_thread = thread_select(self, processor, &reason);
3283 thread_unlock(self);
3284 }
3285
3286 return self->wait_result;
3287 }
3288
3289 /*
3290 * thread_continue:
3291 *
3292 * Called at splsched when a thread first receives
3293 * a new stack after a continuation.
3294 */
3295 void
3296 thread_continue(
3297 thread_t thread)
3298 {
3299 thread_t self = current_thread();
3300 thread_continue_t continuation;
3301 void *parameter;
3302
3303 DTRACE_SCHED(on__cpu);
3304
3305 continuation = self->continuation;
3306 parameter = self->parameter;
3307
3308 #if KPERF
3309 kperf_on_cpu(self, continuation, NULL);
3310 #endif
3311
3312 thread_dispatch(thread, self);
3313
3314 self->continuation = self->parameter = NULL;
3315
3316 #if INTERRUPT_MASKED_DEBUG
3317 /* Reset interrupt-masked spin debugging timeout */
3318 ml_spin_debug_clear(self);
3319 #endif
3320
3321 TLOG(1, "thread_continue: calling call_continuation\n");
3322
3323 boolean_t enable_interrupts = thread != THREAD_NULL;
3324 call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
3325 /*NOTREACHED*/
3326 }
3327
3328 void
3329 thread_quantum_init(thread_t thread)
3330 {
3331 if (thread->sched_mode == TH_MODE_REALTIME) {
3332 thread->quantum_remaining = thread->realtime.computation;
3333 } else {
3334 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
3335 }
3336 }
3337
3338 uint32_t
3339 sched_timeshare_initial_quantum_size(thread_t thread)
3340 {
3341 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
3342 return bg_quantum;
3343 } else {
3344 return std_quantum;
3345 }
3346 }
3347
3348 /*
3349 * run_queue_init:
3350 *
3351 * Initialize a run queue before first use.
3352 */
3353 void
3354 run_queue_init(
3355 run_queue_t rq)
3356 {
3357 rq->highq = NOPRI;
3358 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
3359 rq->bitmap[i] = 0;
3360 }
3361 rq->urgency = rq->count = 0;
3362 for (int i = 0; i < NRQS; i++) {
3363 queue_init(&rq->queues[i]);
3364 }
3365 }
3366
3367 /*
3368 * run_queue_dequeue:
3369 *
3370 * Perform a dequeue operation on a run queue,
3371 * and return the resulting thread.
3372 *
3373 * The run queue must be locked (see thread_run_queue_remove()
3374 * for more info), and not empty.
3375 */
3376 thread_t
3377 run_queue_dequeue(
3378 run_queue_t rq,
3379 integer_t options)
3380 {
3381 thread_t thread;
3382 queue_t queue = &rq->queues[rq->highq];
3383
3384 if (options & SCHED_PEEK) {
3385 if (options & SCHED_HEADQ) {
3386 thread = qe_queue_first(queue, struct thread, runq_links);
3387 } else {
3388 thread = qe_queue_last(queue, struct thread, runq_links);
3389 }
3390 return thread;
3391 }
3392
3393 if (options & SCHED_HEADQ) {
3394 thread = qe_dequeue_head(queue, struct thread, runq_links);
3395 } else {
3396 thread = qe_dequeue_tail(queue, struct thread, runq_links);
3397 }
3398
3399 assert(thread != THREAD_NULL);
3400 assert_thread_magic(thread);
3401
3402 thread->runq = PROCESSOR_NULL;
3403 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3404 rq->count--;
3405 if (SCHED(priority_is_urgent)(rq->highq)) {
3406 rq->urgency--; assert(rq->urgency >= 0);
3407 }
3408 if (queue_empty(queue)) {
3409 bitmap_clear(rq->bitmap, rq->highq);
3410 rq->highq = bitmap_first(rq->bitmap, NRQS);
3411 }
3412
3413 return thread;
3414 }
3415
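/*
 * run_queue_dequeue() above keeps `highq` and the priority bitmap in sync:
 * one bit per non-empty priority level, with highq always the highest set
 * bit. Sketch of how the next level is recovered when a queue drains, e.g.
 * if levels 31 and 4 are populated and 31 empties, bitmap_first() yields 4
 * (`rq` and `pri` as in the function above; not compiled):
 */
#if 0	/* illustrative sketch only -- not compiled */
	if (queue_empty(&rq->queues[pri])) {
		bitmap_clear(rq->bitmap, pri);			/* level `pri` is now empty */
		rq->highq = bitmap_first(rq->bitmap, NRQS);	/* next non-empty level, or NOPRI */
	}
#endif
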
3416 /*
3417 * run_queue_enqueue:
3418 *
3419 * Perform an enqueue operation on a run queue.
3420 *
3421 * The run queue must be locked (see thread_run_queue_remove()
3422 * for more info).
3423 */
3424 boolean_t
3425 run_queue_enqueue(
3426 run_queue_t rq,
3427 thread_t thread,
3428 integer_t options)
3429 {
3430 queue_t queue = &rq->queues[thread->sched_pri];
3431 boolean_t result = FALSE;
3432
3433 assert_thread_magic(thread);
3434
3435 if (queue_empty(queue)) {
3436 enqueue_tail(queue, &thread->runq_links);
3437
3438 rq_bitmap_set(rq->bitmap, thread->sched_pri);
3439 if (thread->sched_pri > rq->highq) {
3440 rq->highq = thread->sched_pri;
3441 result = TRUE;
3442 }
3443 } else {
3444 if (options & SCHED_TAILQ) {
3445 enqueue_tail(queue, &thread->runq_links);
3446 } else {
3447 enqueue_head(queue, &thread->runq_links);
3448 }
3449 }
3450 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3451 rq->urgency++;
3452 }
3453 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3454 rq->count++;
3455
3456 return result;
3457 }
3458
3459 /*
3460 * run_queue_remove:
3461 *
3462 * Remove a specific thread from a runqueue.
3463 *
3464 * The run queue must be locked.
3465 */
3466 void
3467 run_queue_remove(
3468 run_queue_t rq,
3469 thread_t thread)
3470 {
3471 assert(thread->runq != PROCESSOR_NULL);
3472 assert_thread_magic(thread);
3473
3474 remqueue(&thread->runq_links);
3475 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3476 rq->count--;
3477 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3478 rq->urgency--; assert(rq->urgency >= 0);
3479 }
3480
3481 if (queue_empty(&rq->queues[thread->sched_pri])) {
3482 /* update run queue status */
3483 bitmap_clear(rq->bitmap, thread->sched_pri);
3484 rq->highq = bitmap_first(rq->bitmap, NRQS);
3485 }
3486
3487 thread->runq = PROCESSOR_NULL;
3488 }
3489
3490 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3491 void
3492 sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
3493 {
3494 spl_t s;
3495 thread_t thread;
3496
3497 processor_set_t pset = &pset0;
3498
3499 s = splsched();
3500 rt_lock_lock(pset);
3501
3502 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3503 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3504 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3505 }
3506 }
3507
3508 rt_lock_unlock(pset);
3509 splx(s);
3510 }
3511
3512 int64_t
3513 sched_rtglobal_runq_count_sum(void)
3514 {
3515 return pset0.rt_runq.runq_stats.count_sum;
3516 }
3517
3518 /*
3519 * realtime_queue_insert:
3520 *
3521 * Enqueue a thread for realtime execution.
3522 */
3523 static boolean_t
3524 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
3525 {
3526 queue_t queue = &SCHED(rt_runq)(pset)->queue;
3527 uint64_t deadline = thread->realtime.deadline;
3528 boolean_t preempt = FALSE;
3529
3530 rt_lock_lock(pset);
3531
3532 if (queue_empty(queue)) {
3533 enqueue_tail(queue, &thread->runq_links);
3534 preempt = TRUE;
3535 } else {
3536 /* Insert into rt_runq in thread deadline order */
3537 queue_entry_t iter;
3538 qe_foreach(iter, queue) {
3539 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3540 assert_thread_magic(iter_thread);
3541
3542 if (deadline < iter_thread->realtime.deadline) {
3543 if (iter == queue_first(queue)) {
3544 preempt = TRUE;
3545 }
3546 insque(&thread->runq_links, queue_prev(iter));
3547 break;
3548 } else if (iter == queue_last(queue)) {
3549 enqueue_tail(queue, &thread->runq_links);
3550 break;
3551 }
3552 }
3553 }
3554
3555 thread->runq = processor;
3556 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3557 rt_runq_count_incr(pset);
3558
3559 rt_lock_unlock(pset);
3560
3561 return preempt;
3562 }
3563
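/*
 * realtime_queue_insert() above keeps the RT runqueue sorted by deadline,
 * earliest first, so the queue head is always the next realtime thread to
 * run. The same earliest-deadline-first insertion over a plain circular
 * doubly linked list with a sentinel head; the example_rt_node type is
 * hypothetical and the block is not compiled:
 */
#if 0	/* illustrative sketch only -- not compiled */
struct example_rt_node {
	struct example_rt_node	*prev, *next;
	uint64_t		deadline;
};

/*
 * Insert `n` before the first node with a later deadline. Returns true when
 * `n` became the new head, i.e. the caller should consider preemption.
 */
static bool
example_edf_insert(struct example_rt_node *head, struct example_rt_node *n)
{
	struct example_rt_node *it = head->next;	/* head is a sentinel */

	while (it != head && it->deadline <= n->deadline) {
		it = it->next;
	}
	/* link n immediately before it */
	n->prev = it->prev;
	n->next = it;
	it->prev->next = n;
	it->prev = n;

	return head->next == n;
}
#endif
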
3564 /*
3565 * realtime_setrun:
3566 *
3567 * Dispatch a thread for realtime execution.
3568 *
3569 * Thread must be locked. Associated pset must
3570 * be locked, and is returned unlocked.
3571 */
3572 static void
3573 realtime_setrun(
3574 processor_t processor,
3575 thread_t thread)
3576 {
3577 processor_set_t pset = processor->processor_set;
3578 pset_assert_locked(pset);
3579 ast_t preempt;
3580
3581 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3582
3583 thread->chosen_processor = processor;
3584
3585 /* <rdar://problem/15102234> */
3586 assert(thread->bound_processor == PROCESSOR_NULL);
3587
3588 /*
3589 * Dispatch directly onto idle processor.
3590 */
3591 if ((thread->bound_processor == processor)
3592 && processor->state == PROCESSOR_IDLE) {
3593 processor->next_thread = thread;
3594 processor_state_update_from_thread(processor, thread);
3595 processor->deadline = thread->realtime.deadline;
3596 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3597
3598 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
3599 pset_unlock(pset);
3600 sched_ipi_perform(processor, ipi_type);
3601 return;
3602 }
3603
3604 if (processor->current_pri < BASEPRI_RTQUEUES) {
3605 preempt = (AST_PREEMPT | AST_URGENT);
3606 } else if (thread->realtime.deadline < processor->deadline) {
3607 preempt = (AST_PREEMPT | AST_URGENT);
3608 } else {
3609 preempt = AST_NONE;
3610 }
3611
3612 realtime_queue_insert(processor, pset, thread);
3613
3614 ipi_type = SCHED_IPI_NONE;
3615 if (preempt != AST_NONE) {
3616 if (processor->state == PROCESSOR_IDLE) {
3617 processor->next_thread = THREAD_NULL;
3618 processor_state_update_from_thread(processor, thread);
3619 processor->deadline = thread->realtime.deadline;
3620 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3621 if (processor == current_processor()) {
3622 ast_on(preempt);
3623 } else {
3624 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
3625 }
3626 } else if (processor->state == PROCESSOR_DISPATCHING) {
3627 if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
3628 processor_state_update_from_thread(processor, thread);
3629 processor->deadline = thread->realtime.deadline;
3630 }
3631 } else {
3632 if (processor == current_processor()) {
3633 ast_on(preempt);
3634
3635 if ((preempt & AST_URGENT) == AST_URGENT) {
3636 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3637 }
3638
3639 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3640 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3641 }
3642 } else {
3643 ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
3644 }
3645 }
3646 } else {
3647 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
3648 }
3649
3650 pset_unlock(pset);
3651 sched_ipi_perform(processor, ipi_type);
3652 }
3653
3654
3655 sched_ipi_type_t
3656 sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3657 __unused sched_ipi_event_t event)
3658 {
3659 #if defined(CONFIG_SCHED_DEFERRED_AST)
3660 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3661 return SCHED_IPI_DEFERRED;
3662 }
3663 #else /* CONFIG_SCHED_DEFERRED_AST */
3664 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
3665 #endif /* CONFIG_SCHED_DEFERRED_AST */
3666 return SCHED_IPI_NONE;
3667 }
3668
3669 sched_ipi_type_t
3670 sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3671 {
3672 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3673 assert(dst != NULL);
3674
3675 processor_set_t pset = dst->processor_set;
3676 if (current_processor() == dst) {
3677 return SCHED_IPI_NONE;
3678 }
3679
3680 if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
3681 return SCHED_IPI_NONE;
3682 }
3683
3684 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3685 switch (ipi_type) {
3686 case SCHED_IPI_NONE:
3687 return SCHED_IPI_NONE;
3688 #if defined(CONFIG_SCHED_DEFERRED_AST)
3689 case SCHED_IPI_DEFERRED:
3690 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3691 break;
3692 #endif /* CONFIG_SCHED_DEFERRED_AST */
3693 default:
3694 bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id);
3695 bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
3696 break;
3697 }
3698 return ipi_type;
3699 }
3700
3701 sched_ipi_type_t
3702 sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3703 {
3704 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3705 boolean_t deferred_ipi_supported = false;
3706 processor_set_t pset = dst->processor_set;
3707
3708 #if defined(CONFIG_SCHED_DEFERRED_AST)
3709 deferred_ipi_supported = true;
3710 #endif /* CONFIG_SCHED_DEFERRED_AST */
3711
3712 switch (event) {
3713 case SCHED_IPI_EVENT_SPILL:
3714 case SCHED_IPI_EVENT_SMT_REBAL:
3715 case SCHED_IPI_EVENT_REBALANCE:
3716 case SCHED_IPI_EVENT_BOUND_THR:
3717 /*
3718 * The spill, SMT rebalance, rebalance, and bound-thread
3719 * scenarios always use immediate IPIs.
3720 */
3721 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3722 break;
3723 case SCHED_IPI_EVENT_PREEMPT:
3724 /* In the preemption case, use immediate IPIs for RT threads */
3725 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3726 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3727 break;
3728 }
3729
3730 /*
3731 * For non-RT thread preemption:
3732 * if the core is active, use an immediate IPI;
3733 * if the core is idle, use a deferred IPI if supported, otherwise an immediate IPI.
3734 */
3735 if (deferred_ipi_supported && dst_idle) {
3736 return sched_ipi_deferred_policy(pset, dst, event);
3737 }
3738 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3739 break;
3740 default:
3741 panic("Unrecognized scheduler IPI event type %d", event);
3742 }
3743 assert(ipi_type != SCHED_IPI_NONE);
3744 return ipi_type;
3745 }
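/*
 * Informal summary of the default policy above (per-scheduler
 * ipi_policy implementations may differ):
 *
 *   SPILL / SMT_REBAL / REBALANCE / BOUND_THR events:
 *       always immediate -- SCHED_IPI_IDLE if the target is idle,
 *       SCHED_IPI_IMMEDIATE otherwise.
 *   PREEMPT, realtime thread:
 *       same immediate treatment.
 *   PREEMPT, non-RT thread, idle target:
 *       SCHED_IPI_DEFERRED via sched_ipi_deferred_policy() when
 *       CONFIG_SCHED_DEFERRED_AST is built in (which may resolve to
 *       SCHED_IPI_NONE if a deferred AST is already pending);
 *       SCHED_IPI_IDLE otherwise.
 *   PREEMPT, non-RT thread, active target:
 *       SCHED_IPI_IMMEDIATE.
 */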
3746
3747 void
3748 sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
3749 {
3750 switch (ipi) {
3751 case SCHED_IPI_NONE:
3752 break;
3753 case SCHED_IPI_IDLE:
3754 machine_signal_idle(dst);
3755 break;
3756 case SCHED_IPI_IMMEDIATE:
3757 cause_ast_check(dst);
3758 break;
3759 case SCHED_IPI_DEFERRED:
3760 machine_signal_idle_deferred(dst);
3761 break;
3762 default:
3763 panic("Unrecognized scheduler IPI type: %d", ipi);
3764 }
3765 }
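/*
 * Typical caller pattern for the two-step IPI API above, as used by
 * realtime_setrun() and processor_setrun() in this file: decide on the
 * IPI while the pset lock is held, then actually send it only after
 * dropping the lock.
 *
 *     ipi_type = sched_ipi_action(processor, thread, dst_idle, event);
 *     pset_unlock(pset);
 *     sched_ipi_perform(processor, ipi_type);
 */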
3766
3767 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3768
3769 boolean_t
3770 priority_is_urgent(int priority)
3771 {
3772 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
3773 }
3774
3775 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3776
3777 /*
3778 * processor_setrun:
3779 *
3780 * Dispatch a thread for execution on a
3781 * processor.
3782 *
3783 * Thread must be locked. Associated pset must
3784 * be locked, and is returned unlocked.
3785 */
3786 static void
3787 processor_setrun(
3788 processor_t processor,
3789 thread_t thread,
3790 integer_t options)
3791 {
3792 processor_set_t pset = processor->processor_set;
3793 pset_assert_locked(pset);
3794 ast_t preempt;
3795 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
3796
3797 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3798
3799 thread->chosen_processor = processor;
3800
3801 /*
3802 * Set preemption mode.
3803 */
3804 #if defined(CONFIG_SCHED_DEFERRED_AST)
3805 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3806 #endif
3807 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
3808 preempt = (AST_PREEMPT | AST_URGENT);
3809 } else if (processor->active_thread && thread_eager_preemption(processor->active_thread)) {
3810 preempt = (AST_PREEMPT | AST_URGENT);
3811 } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3812 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
3813 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3814 } else {
3815 preempt = AST_NONE;
3816 }
3817 } else {
3818 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3819 }
3820
3821 if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
3822 /*
3823 * Having gone to the trouble of forcing this thread off a less preferred core,
3824 * we should force the preferable core to reschedule immediately to give this
3825 * thread a chance to run instead of just sitting on the run queue where
3826 * it may just be stolen back by the idle core we just forced it off.
3827 */
3828 preempt |= AST_PREEMPT;
3829 }
3830
3831 SCHED(processor_enqueue)(processor, thread, options);
3832 sched_update_pset_load_average(pset);
3833
3834 if (preempt != AST_NONE) {
3835 if (processor->state == PROCESSOR_IDLE) {
3836 processor->next_thread = THREAD_NULL;
3837 processor_state_update_from_thread(processor, thread);
3838 processor->deadline = UINT64_MAX;
3839 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3840 ipi_action = eExitIdle;
3841 } else if (processor->state == PROCESSOR_DISPATCHING) {
3842 if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
3843 processor_state_update_from_thread(processor, thread);
3844 processor->deadline = UINT64_MAX;
3845 }
3846 } else if ((processor->state == PROCESSOR_RUNNING ||
3847 processor->state == PROCESSOR_SHUTDOWN) &&
3848 (thread->sched_pri >= processor->current_pri)) {
3849 ipi_action = eInterruptRunning;
3850 }
3851 } else {
3852 /*
3853 * The new thread is not important enough to preempt what is running, but
3854 * special processor states may need special handling.
3855 */
3856 if (processor->state == PROCESSOR_SHUTDOWN &&
3857 thread->sched_pri >= processor->current_pri) {
3858 ipi_action = eInterruptRunning;
3859 } else if (processor->state == PROCESSOR_IDLE) {
3860 processor->next_thread = THREAD_NULL;
3861 processor_state_update_from_thread(processor, thread);
3862 processor->deadline = UINT64_MAX;
3863 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3864
3865 ipi_action = eExitIdle;
3866 }
3867 }
3868
3869 if (ipi_action != eDoNothing) {
3870 if (processor == current_processor()) {
3871 if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
3872 ast_on(preempt);
3873 }
3874
3875 if ((preempt & AST_URGENT) == AST_URGENT) {
3876 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3877 } else {
3878 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3879 }
3880
3881 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3882 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3883 } else {
3884 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3885 }
3886 } else {
3887 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3888 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3889 }
3890 }
3891 pset_unlock(pset);
3892 sched_ipi_perform(processor, ipi_type);
3893 }
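/*
 * The ipi_action states used above map to the following behavior:
 *   eExitIdle          - the target was idle; it has been moved to
 *                        PROCESSOR_DISPATCHING and must be woken.
 *   eInterruptRunning  - the target is running (or shutting down) at a
 *                        priority no higher than the new thread and
 *                        needs an AST check.
 *   eDoNothing         - the thread was merely enqueued.
 * For the local processor the AST is set directly via
 * csw_check_locked()/ast_on(); remote processors are signalled through
 * sched_ipi_action()/sched_ipi_perform().
 */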
3894
3895 /*
3896 * choose_next_pset:
3897 *
3898 * Return the next sibling pset containing
3899 * available processors.
3900 *
3901 * Returns the original pset if none other is
3902 * suitable.
3903 */
3904 static processor_set_t
3905 choose_next_pset(
3906 processor_set_t pset)
3907 {
3908 processor_set_t nset = pset;
3909
3910 do {
3911 nset = next_pset(nset);
3912 } while (nset->online_processor_count < 1 && nset != pset);
3913
3914 return nset;
3915 }
3916
3917 /*
3918 * choose_processor:
3919 *
3920 * Choose a processor for the thread, beginning at
3921 * the pset. Accepts an optional processor hint in
3922 * the pset.
3923 *
3924 * Returns a processor, possibly from a different pset.
3925 *
3926 * The thread must be locked. The pset must be locked,
3927 * and the resulting pset is locked on return.
3928 */
3929 processor_t
3930 choose_processor(
3931 processor_set_t starting_pset,
3932 processor_t processor,
3933 thread_t thread)
3934 {
3935 processor_set_t pset = starting_pset;
3936 processor_set_t nset;
3937
3938 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
3939
3940 /*
3941 * Prefer the hinted processor, when appropriate.
3942 */
3943
3944 /* Fold last processor hint from secondary processor to its primary */
3945 if (processor != PROCESSOR_NULL) {
3946 processor = processor->processor_primary;
3947 }
3948
3949 /*
3950 * Only consult platform layer if pset is active, which
3951 * it may not be in some cases when a multi-set system
3952 * is going to sleep.
3953 */
3954 if (pset->online_processor_count) {
3955 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
3956 processor_t mc_processor = machine_choose_processor(pset, processor);
3957 if (mc_processor != PROCESSOR_NULL) {
3958 processor = mc_processor->processor_primary;
3959 }
3960 }
3961 }
3962
3963 /*
3964 * At this point, we may have a processor hint, and we may have
3965 * an initial starting pset. If the hint is not in the pset, or
3966 * if the hint is for a processor in an invalid state, discard
3967 * the hint.
3968 */
3969 if (processor != PROCESSOR_NULL) {
3970 if (processor->processor_set != pset) {
3971 processor = PROCESSOR_NULL;
3972 } else if (!processor->is_recommended) {
3973 processor = PROCESSOR_NULL;
3974 } else if ((thread->sched_pri >= BASEPRI_RTQUEUES) && !sched_ok_to_run_realtime_thread(pset, processor)) {
3975 processor = PROCESSOR_NULL;
3976 } else {
3977 switch (processor->state) {
3978 case PROCESSOR_START:
3979 case PROCESSOR_SHUTDOWN:
3980 case PROCESSOR_OFF_LINE:
3981 /*
3982 * Hint is for a processor that cannot support running new threads.
3983 */
3984 processor = PROCESSOR_NULL;
3985 break;
3986 case PROCESSOR_IDLE:
3987 /*
3988 * Hint is for an idle processor. Assume it is no worse than any other
3989 * idle processor. The platform layer had an opportunity to provide
3990 * the "least cost idle" processor above.
3991 */
3992 return processor;
3993 case PROCESSOR_RUNNING:
3994 case PROCESSOR_DISPATCHING:
3995 /*
3996 * Hint is for an active CPU. This fast-path allows
3997 * realtime threads to preempt non-realtime threads
3998 * to regain their previous executing processor.
3999 */
4000 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
4001 (processor->current_pri < BASEPRI_RTQUEUES)) {
4002 return processor;
4003 }
4004
4005 /* Otherwise, use hint as part of search below */
4006 break;
4007 default:
4008 processor = PROCESSOR_NULL;
4009 break;
4010 }
4011 }
4012 }
4013
4014 /*
4015 * Iterate through the processor sets to locate
4016 * an appropriate processor. Seed results with
4017 * a last-processor hint, if available, so that
4018 * a search must find something strictly better
4019 * to replace it.
4020 *
4021 * A primary/secondary pair of SMT processors is
4022 * "unpaired" if the primary is busy but its
4023 * corresponding secondary is idle (so the physical
4024 * core has full use of its resources).
4025 */
4026
4027 integer_t lowest_priority = MAXPRI + 1;
4028 integer_t lowest_secondary_priority = MAXPRI + 1;
4029 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
4030 integer_t lowest_idle_secondary_priority = MAXPRI + 1;
4031 integer_t lowest_count = INT_MAX;
4032 uint64_t furthest_deadline = 1;
4033 processor_t lp_processor = PROCESSOR_NULL;
4034 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
4035 processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
4036 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
4037 processor_t lc_processor = PROCESSOR_NULL;
4038 processor_t fd_processor = PROCESSOR_NULL;
4039
4040 if (processor != PROCESSOR_NULL) {
4041 /* All other states should be enumerated above. */
4042 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
4043
4044 lowest_priority = processor->current_pri;
4045 lp_processor = processor;
4046
4047 if (processor->current_pri >= BASEPRI_RTQUEUES) {
4048 furthest_deadline = processor->deadline;
4049 fd_processor = processor;
4050 }
4051
4052 lowest_count = SCHED(processor_runq_count)(processor);
4053 lc_processor = processor;
4054 }
4055
4056 do {
4057 int cpuid;
4058
4059 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4060 processor = choose_processor_for_realtime_thread(pset);
4061 if (processor) {
4062 return processor;
4063 }
4064 } else {
4065 /*
4066 * Choose an idle processor, in pset traversal order
4067 */
4068
4069 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
4070 pset->primary_map &
4071 pset->recommended_bitmask);
4072
4073 /* there shouldn't be a pending AST if the processor is idle */
4074 assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4075
4076 cpuid = lsb_first(idle_primary_map);
4077 if (cpuid >= 0) {
4078 processor = processor_array[cpuid];
4079 return processor;
4080 }
4081 }
4082
4083 /*
4084 * Otherwise, enumerate active and idle processors to find primary candidates
4085 * with lower priority/etc.
4086 */
4087
4088 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
4089 pset->recommended_bitmask &
4090 ~pset->pending_AST_URGENT_cpu_mask);
4091
4092 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
4093 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
4094 }
4095
4096 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
4097 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
4098 cpuid = ((rotid + pset->last_chosen + 1) & 63);
4099 processor = processor_array[cpuid];
4100
4101 integer_t cpri = processor->current_pri;
4102 processor_t primary = processor->processor_primary;
4103 if (primary != processor) {
4104 /* If primary is running a NO_SMT thread, don't choose its secondary */
4105 if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
4106 if (cpri < lowest_secondary_priority) {
4107 lowest_secondary_priority = cpri;
4108 lp_paired_secondary_processor = processor;
4109 }
4110 }
4111 } else {
4112 if (cpri < lowest_priority) {
4113 lowest_priority = cpri;
4114 lp_processor = processor;
4115 }
4116 }
4117
4118 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
4119 furthest_deadline = processor->deadline;
4120 fd_processor = processor;
4121 }
4122
4123 integer_t ccount = SCHED(processor_runq_count)(processor);
4124 if (ccount < lowest_count) {
4125 lowest_count = ccount;
4126 lc_processor = processor;
4127 }
4128 }
4129
4130 /*
4131 * For SMT configs, these idle secondary processors must have an active primary. Otherwise,
4132 * the idle primary would have short-circuited the loop above.
4133 */
4134 uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
4135 ~pset->primary_map &
4136 pset->recommended_bitmask);
4137
4138 /* there shouldn't be a pending AST if the processor is idle */
4139 assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4140 assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
4141
4142 for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
4143 processor = processor_array[cpuid];
4144
4145 processor_t cprimary = processor->processor_primary;
4146
4147 integer_t primary_pri = cprimary->current_pri;
4148
4149 /*
4150 * TODO: This should also make the same decisions
4151 * as secondary_can_run_realtime_thread
4152 *
4153 * TODO: Keep track of the pending preemption priority
4154 * of the primary to make this more accurate.
4155 */
4156
4157 /* If the primary is running a no-smt thread, then don't choose its secondary */
4158 if (cprimary->state == PROCESSOR_RUNNING &&
4159 processor_active_thread_no_smt(cprimary)) {
4160 continue;
4161 }
4162
4163 /*
4164 * Find the idle secondary processor with the lowest priority primary
4165 *
4166 * We will choose this processor as a fallback if we find no better
4167 * primary to preempt.
4168 */
4169 if (primary_pri < lowest_idle_secondary_priority) {
4170 lp_idle_secondary_processor = processor;
4171 lowest_idle_secondary_priority = primary_pri;
4172 }
4173
4174 /* Find the lowest priority active primary with an idle secondary */
4175 if (primary_pri < lowest_unpaired_primary_priority) {
4176 /* If the primary processor is offline or starting up, it's not a candidate for this path */
4177 if (cprimary->state != PROCESSOR_RUNNING &&
4178 cprimary->state != PROCESSOR_DISPATCHING) {
4179 continue;
4180 }
4181
4182 if (!cprimary->is_recommended) {
4183 continue;
4184 }
4185
4186 /* if the primary is pending preemption, don't try to re-preempt it */
4187 if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
4188 continue;
4189 }
4190
4191 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
4192 bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
4193 continue;
4194 }
4195
4196 lowest_unpaired_primary_priority = primary_pri;
4197 lp_unpaired_primary_processor = cprimary;
4198 }
4199 }
4200
4201 /*
4202 * We prefer preempting a primary processor over waking up its secondary.
4203 * The secondary will then be woken up by the preempted thread.
4204 */
4205 if (thread->sched_pri > lowest_unpaired_primary_priority) {
4206 pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
4207 return lp_unpaired_primary_processor;
4208 }
4209
4210 /*
4211 * We prefer preempting a lower priority active processor over directly
4212 * waking up an idle secondary.
4213 * The preempted thread will then find the idle secondary.
4214 */
4215 if (thread->sched_pri > lowest_priority) {
4216 pset->last_chosen = lp_processor->cpu_id;
4217 return lp_processor;
4218 }
4219
4220 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4221 /*
4222 * For realtime threads, the most important aspect is
4223 * scheduling latency, so we will pick an active
4224 * secondary processor in this pset, or preempt
4225 * another RT thread with a further deadline before
4226 * going to the next pset.
4227 */
4228
4229 if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) {
4230 pset->last_chosen = lp_paired_secondary_processor->cpu_id;
4231 return lp_paired_secondary_processor;
4232 }
4233
4234 if (thread->realtime.deadline < furthest_deadline) {
4235 return fd_processor;
4236 }
4237 }
4238
4239 /*
4240 * If all primary processors in this pset are running a higher
4241 * priority thread, move on to the next pset. Only when we have
4242 * exhausted the search for primary processors do we
4243 * fall back to secondaries.
4244 */
4245 nset = next_pset(pset);
4246
4247 if (nset != starting_pset) {
4248 pset_unlock(pset);
4249
4250 pset = nset;
4251 pset_lock(pset);
4252 }
4253 } while (nset != starting_pset);
4254
4255 /*
4256 * Make sure that we pick a running processor,
4257 * and that the correct processor set is locked.
4258 * Since we may have unlocked the candidate processor's
4259 * pset, it may have changed state.
4260 *
4261 * All primary processors are running a higher priority
4262 * thread, so the only options left are enqueuing on
4263 * the secondary processor that would perturb the least priority
4264 * primary, or the least busy primary.
4265 */
4266 do {
4267 /* lowest_priority is evaluated in the main loops above */
4268 if (lp_idle_secondary_processor != PROCESSOR_NULL) {
4269 processor = lp_idle_secondary_processor;
4270 lp_idle_secondary_processor = PROCESSOR_NULL;
4271 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
4272 processor = lp_paired_secondary_processor;
4273 lp_paired_secondary_processor = PROCESSOR_NULL;
4274 } else if (lc_processor != PROCESSOR_NULL) {
4275 processor = lc_processor;
4276 lc_processor = PROCESSOR_NULL;
4277 } else {
4278 /*
4279 * All processors are executing higher
4280 * priority threads, and the lowest_count
4281 * candidate was not usable, so we pick a processor
4282 * to give this thread somewhere to be enqueued.
4283 *
4284 * TODO: Need tracepoint or something to show when this happens
4285 * TODO: Prefer a processor in the original pset
4286 */
4287 processor = master_processor;
4288 }
4289
4290 /*
4291 * Check that the correct processor set is
4292 * returned locked.
4293 */
4294 if (pset != processor->processor_set) {
4295 pset_unlock(pset);
4296 pset = processor->processor_set;
4297 pset_lock(pset);
4298 }
4299
4300 /*
4301 * We must verify that the chosen processor is still available.
4302 * master_processor is an exception, since we may need to preempt
4303 * a running thread on it during processor shutdown (for sleep),
4304 * and that thread needs to be enqueued on its runqueue to run
4305 * when the processor is restarted.
4306 */
4307 if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) {
4308 processor = PROCESSOR_NULL;
4309 }
4310 } while (processor == PROCESSOR_NULL);
4311
4312 pset->last_chosen = processor->cpu_id;
4313 return processor;
4314 }
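/*
 * Rough order of preference encoded in choose_processor() above, for a
 * non-RT thread (RT threads take the choose_processor_for_realtime_thread()
 * and furthest-deadline paths instead):
 *
 *   1. A usable idle primary in the current pset.
 *   2. An active "unpaired" primary (idle secondary) running at lower
 *      priority than the incoming thread.
 *   3. Any active primary running at lower priority.
 *   4. The same search repeated in each sibling pset.
 *   5. Fallbacks once every pset is exhausted: the idle secondary whose
 *      primary has the lowest priority, then the lowest-priority paired
 *      secondary, then the least-loaded processor, then master_processor.
 */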
4315
4316 /*
4317 * thread_setrun:
4318 *
4319 * Dispatch thread for execution, onto an idle
4320 * processor or run queue, and signal a preemption
4321 * as appropriate.
4322 *
4323 * Thread must be locked.
4324 */
4325 void
4326 thread_setrun(
4327 thread_t thread,
4328 integer_t options)
4329 {
4330 processor_t processor;
4331 processor_set_t pset;
4332
4333 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
4334 assert(thread->runq == PROCESSOR_NULL);
4335
4336 /*
4337 * Update priority if needed.
4338 */
4339 if (SCHED(can_update_priority)(thread)) {
4340 SCHED(update_priority)(thread);
4341 }
4342
4343 thread->sfi_class = sfi_thread_classify(thread);
4344
4345 assert(thread->runq == PROCESSOR_NULL);
4346
4347 #if __SMP__
4348 if (thread->bound_processor == PROCESSOR_NULL) {
4349 /*
4350 * Unbound case.
4351 */
4352 if (thread->affinity_set != AFFINITY_SET_NULL) {
4353 /*
4354 * Use affinity set policy hint.
4355 */
4356 pset = thread->affinity_set->aset_pset;
4357 pset_lock(pset);
4358
4359 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
4360 pset = processor->processor_set;
4361
4362 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4363 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4364 } else if (thread->last_processor != PROCESSOR_NULL) {
4365 /*
4366 * Simple (last processor) affinity case.
4367 */
4368 processor = thread->last_processor;
4369 pset = processor->processor_set;
4370 pset_lock(pset);
4371 processor = SCHED(choose_processor)(pset, processor, thread);
4372 pset = processor->processor_set;
4373
4374 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4375 (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
4376 } else {
4377 /*
4378 * No Affinity case:
4379 *
4380 * Utilize a per-task hint to spread threads
4381 * among the available processor sets.
4382 */
4383 task_t task = thread->task;
4384
4385 pset = task->pset_hint;
4386 if (pset == PROCESSOR_SET_NULL) {
4387 pset = current_processor()->processor_set;
4388 }
4389
4390 pset = choose_next_pset(pset);
4391 pset_lock(pset);
4392
4393 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
4394 pset = processor->processor_set;
4395 task->pset_hint = pset;
4396
4397 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4398 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4399 }
4400 } else {
4401 /*
4402 * Bound case:
4403 *
4404 * Unconditionally dispatch on the processor.
4405 */
4406 processor = thread->bound_processor;
4407 pset = processor->processor_set;
4408 pset_lock(pset);
4409
4410 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4411 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
4412 }
4413 #else /* !__SMP__ */
4414 /* Only one processor to choose */
4415 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
4416 processor = master_processor;
4417 pset = processor->processor_set;
4418 pset_lock(pset);
4419 #endif /* !__SMP__ */
4420
4421 /*
4422 * Dispatch the thread on the chosen processor.
4423 * TODO: This should be based on sched_mode, not sched_pri
4424 */
4425 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4426 realtime_setrun(processor, thread);
4427 } else {
4428 processor_setrun(processor, thread, options);
4429 }
4430 /* pset is now unlocked */
4431 if (thread->bound_processor == PROCESSOR_NULL) {
4432 SCHED(check_spill)(pset, thread);
4433 }
4434 }
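/*
 * Summary of the pset/processor selection above (a reading aid):
 *
 *   bound thread          -> dispatch unconditionally on
 *                            thread->bound_processor's pset
 *   affinity set          -> SCHED(choose_processor) seeded from the
 *                            affinity set's pset
 *   has a last processor  -> SCHED(choose_processor) seeded with that
 *                            processor as a hint
 *   otherwise             -> rotate the task's pset_hint via
 *                            choose_next_pset() and choose from there
 *
 * RT threads then go through realtime_setrun(), everything else
 * through processor_setrun().
 */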
4435
4436 processor_set_t
4437 task_choose_pset(
4438 task_t task)
4439 {
4440 processor_set_t pset = task->pset_hint;
4441
4442 if (pset != PROCESSOR_SET_NULL) {
4443 pset = choose_next_pset(pset);
4444 }
4445
4446 return pset;
4447 }
4448
4449 /*
4450 * Check for a preemption point in
4451 * the current context.
4452 *
4453 * Called at splsched with thread locked.
4454 */
4455 ast_t
4456 csw_check(
4457 thread_t thread,
4458 processor_t processor,
4459 ast_t check_reason)
4460 {
4461 processor_set_t pset = processor->processor_set;
4462
4463 assert(thread == processor->active_thread);
4464
4465 pset_lock(pset);
4466
4467 processor_state_update_from_thread(processor, thread);
4468
4469 ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
4470
4471 /* Acknowledge the IPI if we decided not to preempt */
4472
4473 if ((preempt & AST_URGENT) == 0) {
4474 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
4475 }
4476
4477 if ((preempt & AST_PREEMPT) == 0) {
4478 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4479 }
4480
4481 pset_unlock(pset);
4482
4483 return preempt;
4484 }
4485
4486 /*
4487 * Check for preemption at splsched with
4488 * pset and thread locked
4489 */
4490 ast_t
4491 csw_check_locked(
4492 thread_t thread,
4493 processor_t processor,
4494 processor_set_t pset,
4495 ast_t check_reason)
4496 {
4497 ast_t result;
4498
4499 if (processor->first_timeslice) {
4500 if (rt_runq_count(pset) > 0) {
4501 return check_reason | AST_PREEMPT | AST_URGENT;
4502 }
4503 } else {
4504 if (rt_runq_count(pset) > 0) {
4505 if (BASEPRI_RTQUEUES > processor->current_pri) {
4506 return check_reason | AST_PREEMPT | AST_URGENT;
4507 } else {
4508 return check_reason | AST_PREEMPT;
4509 }
4510 }
4511 }
4512
4513 #if __SMP__
4514 /*
4515 * If the current thread is running on a processor that is no longer recommended,
4516 * urgently preempt it, at which point thread_select() should
4517 * try to idle the processor and re-dispatch the thread to a recommended processor.
4518 */
4519 if (!processor->is_recommended) {
4520 return check_reason | AST_PREEMPT | AST_URGENT;
4521 }
4522 #endif
4523
4524 result = SCHED(processor_csw_check)(processor);
4525 if (result != AST_NONE) {
4526 return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE);
4527 }
4528
4529 #if __SMP__
4530 /*
4531 * Same for avoid-processor
4532 *
4533 * TODO: Should these set AST_REBALANCE?
4534 */
4535 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
4536 return check_reason | AST_PREEMPT;
4537 }
4538
4539 /*
4540 * Even though we could continue executing on this processor, a
4541 * secondary SMT core should try to shed load to another primary core.
4542 *
4543 * TODO: Should this do the same check that thread_select does? i.e.
4544 * if no bound threads target this processor, and idle primaries exist, preempt
4545 * The case of RT threads existing is already taken care of above
4546 */
4547
4548 if (processor->current_pri < BASEPRI_RTQUEUES &&
4549 processor->processor_primary != processor) {
4550 return check_reason | AST_PREEMPT;
4551 }
4552 #endif
4553
4554 if (thread->state & TH_SUSP) {
4555 return check_reason | AST_PREEMPT;
4556 }
4557
4558 #if CONFIG_SCHED_SFI
4559 /*
4560 * Current thread may not need to be preempted, but maybe needs
4561 * an SFI wait?
4562 */
4563 result = sfi_thread_needs_ast(thread, NULL);
4564 if (result != AST_NONE) {
4565 return check_reason | result;
4566 }
4567 #endif
4568
4569 return AST_NONE;
4570 }
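/*
 * Order of the preemption triggers tested above (a reading aid):
 *   1. Runnable realtime threads in this pset -- urgent, unless the
 *      processor has exhausted its first timeslice and is already
 *      running an RT thread, in which case a plain AST_PREEMPT.
 *   2. The processor is no longer recommended -- urgent.
 *   3. The per-scheduler SCHED(processor_csw_check) verdict, made
 *      urgent if the current thread has eager preemption set.
 *   4. SCHED(thread_avoid_processor) wants the thread moved.
 *   5. A secondary SMT core running non-RT work sheds load.
 *   6. The current thread is suspended (TH_SUSP).
 *   7. SFI requires the thread to wait.
 */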
4571
4572 /*
4573 * Handle preemption IPI or IPI in response to setting an AST flag
4574 * Triggered by cause_ast_check
4575 * Called at splsched
4576 */
4577 void
4578 ast_check(processor_t processor)
4579 {
4580 if (processor->state != PROCESSOR_RUNNING &&
4581 processor->state != PROCESSOR_SHUTDOWN) {
4582 return;
4583 }
4584
4585 thread_t thread = processor->active_thread;
4586
4587 assert(thread == current_thread());
4588
4589 thread_lock(thread);
4590
4591 /*
4592 * Propagate thread ast to processor.
4593 * (handles IPI in response to setting AST flag)
4594 */
4595 ast_propagate(thread);
4596
4597 /*
4598 * Stash the old urgency and perfctl values to find out if
4599 * csw_check updates them.
4600 */
4601 thread_urgency_t old_urgency = processor->current_urgency;
4602 perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
4603
4604 ast_t preempt;
4605
4606 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4607 ast_on(preempt);
4608 }
4609
4610 if (old_urgency != processor->current_urgency) {
4611 /*
4612 * Urgency updates happen with the thread lock held (ugh).
4613 * TODO: This doesn't notice QoS changes...
4614 */
4615 uint64_t urgency_param1, urgency_param2;
4616
4617 thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4618 thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
4619 }
4620
4621 thread_unlock(thread);
4622
4623 if (old_perfctl_class != processor->current_perfctl_class) {
4624 /*
4625 * We updated the perfctl class of this thread from another core.
4626 * Let CLPC know that the currently running thread has a new
4627 * class.
4628 */
4629
4630 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
4631 mach_approximate_time(), 0, thread);
4632 }
4633 }
4634
4635
4636 /*
4637 * set_sched_pri:
4638 *
4639 * Set the scheduled priority of the specified thread.
4640 *
4641 * This may cause the thread to change queues.
4642 *
4643 * Thread must be locked.
4644 */
4645 void
4646 set_sched_pri(
4647 thread_t thread,
4648 int new_priority,
4649 set_sched_pri_options_t options)
4650 {
4651 bool is_current_thread = (thread == current_thread());
4652 bool removed_from_runq = false;
4653 bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
4654
4655 int old_priority = thread->sched_pri;
4656
4657 /* If we're already at this priority, no need to mess with the runqueue */
4658 if (new_priority == old_priority) {
4659 return;
4660 }
4661
4662 if (is_current_thread) {
4663 assert(thread->state & TH_RUN);
4664 assert(thread->runq == PROCESSOR_NULL);
4665 } else {
4666 removed_from_runq = thread_run_queue_remove(thread);
4667 }
4668
4669 thread->sched_pri = new_priority;
4670
4671 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
4672 (uintptr_t)thread_tid(thread),
4673 thread->base_pri,
4674 thread->sched_pri,
4675 thread->sched_usage,
4676 0);
4677
4678 if (removed_from_runq) {
4679 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4680 } else if (is_current_thread) {
4681 processor_t processor = thread->last_processor;
4682 assert(processor == current_processor());
4683
4684 thread_urgency_t old_urgency = processor->current_urgency;
4685
4686 /*
4687 * When dropping in priority, check if the thread no longer belongs on core.
4688 * If a thread raises its own priority, don't aggressively rebalance it.
4689 * <rdar://problem/31699165>
4690 *
4691 * csw_check does a processor_state_update_from_thread, but
4692 * we should do our own if we're being lazy.
4693 */
4694 if (!lazy_update && new_priority < old_priority) {
4695 ast_t preempt;
4696
4697 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4698 ast_on(preempt);
4699 }
4700 } else {
4701 processor_state_update_from_thread(processor, thread);
4702 }
4703
4704 /*
4705 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4706 * class alterations from user space to occur relatively infrequently, hence
4707 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4708 * inheritance is expected to involve priority changes.
4709 */
4710 if (processor->current_urgency != old_urgency) {
4711 uint64_t urgency_param1, urgency_param2;
4712
4713 thread_urgency_t new_urgency = thread_get_urgency(thread,
4714 &urgency_param1, &urgency_param2);
4715
4716 thread_tell_urgency(new_urgency, urgency_param1,
4717 urgency_param2, 0, thread);
4718 }
4719
4720 /* TODO: only call this if current_perfctl_class changed */
4721 uint64_t ctime = mach_approximate_time();
4722 machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
4723 } else if (thread->state & TH_RUN) {
4724 processor_t processor = thread->last_processor;
4725
4726 if (!lazy_update &&
4727 processor != PROCESSOR_NULL &&
4728 processor != current_processor() &&
4729 processor->active_thread == thread) {
4730 cause_ast_check(processor);
4731 }
4732 }
4733 }
4734
4735 /*
4736 * thread_run_queue_remove_for_handoff
4737 *
4738 * Pull a thread or its (recursive) push target out of the runqueue
4739 * so that it is ready for thread_run()
4740 *
4741 * Called at splsched
4742 *
4743 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4744 * This may be different than the thread that was passed in.
4745 */
4746 thread_t
4747 thread_run_queue_remove_for_handoff(thread_t thread)
4748 {
4749 thread_t pulled_thread = THREAD_NULL;
4750
4751 thread_lock(thread);
4752
4753 /*
4754 * Check that the thread is not bound
4755 * to a different processor, and that realtime
4756 * is not involved.
4757 *
4758 * Next, pull it off its run queue. If it
4759 * doesn't come, it's not eligible.
4760 */
4761
4762 processor_t processor = current_processor();
4763 if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
4764 (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
4765 if (thread_run_queue_remove(thread)) {
4766 pulled_thread = thread;
4767 }
4768 }
4769
4770 thread_unlock(thread);
4771
4772 return pulled_thread;
4773 }
4774
4775 /*
4776 * thread_run_queue_remove:
4777 *
4778 * Remove a thread from its current run queue and
4779 * return TRUE if successful.
4780 *
4781 * Thread must be locked.
4782 *
4783 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4784 * run queues because the caller locked the thread. Otherwise
4785 * the thread is on a run queue, but could be chosen for dispatch
4786 * and removed by another processor under a different lock, which
4787 * will set thread->runq to PROCESSOR_NULL.
4788 *
4789 * Hence the thread select path must not rely on anything that could
4790 * be changed under the thread lock after calling this function,
4791 * most importantly thread->sched_pri.
4792 */
4793 boolean_t
4794 thread_run_queue_remove(
4795 thread_t thread)
4796 {
4797 boolean_t removed = FALSE;
4798 processor_t processor = thread->runq;
4799
4800 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
4801 /* Thread isn't runnable */
4802 assert(thread->runq == PROCESSOR_NULL);
4803 return FALSE;
4804 }
4805
4806 if (processor == PROCESSOR_NULL) {
4807 /*
4808 * The thread is either not on the runq,
4809 * or is in the midst of being removed from the runq.
4810 *
4811 * runq is set to NULL under the pset lock, not the thread
4812 * lock, so the thread may still be in the process of being dequeued
4813 * from the runq. It will wait in invoke for the thread lock to be
4814 * dropped.
4815 */
4816
4817 return FALSE;
4818 }
4819
4820 if (thread->sched_pri < BASEPRI_RTQUEUES) {
4821 return SCHED(processor_queue_remove)(processor, thread);
4822 }
4823
4824 processor_set_t pset = processor->processor_set;
4825
4826 rt_lock_lock(pset);
4827
4828 if (thread->runq != PROCESSOR_NULL) {
4829 /*
4830 * Thread is on the RT run queue and we have a lock on
4831 * that run queue.
4832 */
4833
4834 remqueue(&thread->runq_links);
4835 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
4836 rt_runq_count_decr(pset);
4837
4838 thread->runq = PROCESSOR_NULL;
4839
4840 removed = TRUE;
4841 }
4842
4843 rt_lock_unlock(pset);
4844
4845 return removed;
4846 }
4847
4848 /*
4849 * Put the thread back where it goes after a thread_run_queue_remove
4850 *
4851 * Thread must have been removed under the same thread lock hold
4852 *
4853 * thread locked, at splsched
4854 */
4855 void
4856 thread_run_queue_reinsert(thread_t thread, integer_t options)
4857 {
4858 assert(thread->runq == PROCESSOR_NULL);
4859 assert(thread->state & (TH_RUN));
4860
4861 thread_setrun(thread, options);
4862 }
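/*
 * The remove/reinsert pair above supports the usual
 * pull-off-the-runq, mutate, put-back idiom, e.g. as in
 * set_sched_pri() earlier in this file:
 *
 *     removed = thread_run_queue_remove(thread);
 *     thread->sched_pri = new_priority;
 *     if (removed) {
 *         thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
 *     }
 */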
4863
4864 void
4865 sys_override_cpu_throttle(boolean_t enable_override)
4866 {
4867 if (enable_override) {
4868 cpu_throttle_enabled = 0;
4869 } else {
4870 cpu_throttle_enabled = 1;
4871 }
4872 }
4873
4874 thread_urgency_t
4875 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
4876 {
4877 uint64_t urgency_param1 = 0, urgency_param2 = 0;
4878
4879 thread_urgency_t urgency;
4880
4881 if (thread == NULL || (thread->state & TH_IDLE)) {
4882 urgency_param1 = 0;
4883 urgency_param2 = 0;
4884
4885 urgency = THREAD_URGENCY_NONE;
4886 } else if (thread->sched_mode == TH_MODE_REALTIME) {
4887 urgency_param1 = thread->realtime.period;
4888 urgency_param2 = thread->realtime.deadline;
4889
4890 urgency = THREAD_URGENCY_REAL_TIME;
4891 } else if (cpu_throttle_enabled &&
4892 (thread->sched_pri <= MAXPRI_THROTTLE) &&
4893 (thread->base_pri <= MAXPRI_THROTTLE)) {
4894 /*
4895 * Threads that are running at low priority but are not
4896 * tagged with a specific QoS are separated out from
4897 * the "background" urgency. Performance management
4898 * subsystem can decide to either treat these threads
4899 * as normal threads or look at other signals like thermal
4900 * levels for optimal power/perf tradeoffs for a platform.
4901 */
4902 boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
4903 boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1);
4904
4905 /*
4906 * Background urgency is applied when the thread priority is
4907 * MAXPRI_THROTTLE or lower, the thread is not promoted,
4908 * and the thread has a QoS specified.
4909 */
4910 urgency_param1 = thread->sched_pri;
4911 urgency_param2 = thread->base_pri;
4912
4913 if (thread_lacks_qos && !task_is_suppressed) {
4914 urgency = THREAD_URGENCY_LOWPRI;
4915 } else {
4916 urgency = THREAD_URGENCY_BACKGROUND;
4917 }
4918 } else {
4919 /* For otherwise unclassified threads, report throughput QoS parameters */
4920 urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
4921 urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
4922 urgency = THREAD_URGENCY_NORMAL;
4923 }
4924
4925 if (arg1 != NULL) {
4926 *arg1 = urgency_param1;
4927 }
4928 if (arg2 != NULL) {
4929 *arg2 = urgency_param2;
4930 }
4931
4932 return urgency;
4933 }
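/*
 * Callers in this file (ast_check(), set_sched_pri()) use this to
 * re-derive urgency after a priority or urgency change and forward it
 * to the platform layer:
 *
 *     urgency = thread_get_urgency(thread, &arg1, &arg2);
 *     thread_tell_urgency(urgency, arg1, arg2, 0, thread);
 */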
4934
4935 perfcontrol_class_t
4936 thread_get_perfcontrol_class(thread_t thread)
4937 {
4938 /* Special case handling */
4939 if (thread->state & TH_IDLE) {
4940 return PERFCONTROL_CLASS_IDLE;
4941 }
4942 if (thread->task == kernel_task) {
4943 return PERFCONTROL_CLASS_KERNEL;
4944 }
4945 if (thread->sched_mode == TH_MODE_REALTIME) {
4946 return PERFCONTROL_CLASS_REALTIME;
4947 }
4948
4949 /* perfcontrol_class based on base_pri */
4950 if (thread->base_pri <= MAXPRI_THROTTLE) {
4951 return PERFCONTROL_CLASS_BACKGROUND;
4952 } else if (thread->base_pri <= BASEPRI_UTILITY) {
4953 return PERFCONTROL_CLASS_UTILITY;
4954 } else if (thread->base_pri <= BASEPRI_DEFAULT) {
4955 return PERFCONTROL_CLASS_NONUI;
4956 } else if (thread->base_pri <= BASEPRI_FOREGROUND) {
4957 return PERFCONTROL_CLASS_UI;
4958 } else {
4959 return PERFCONTROL_CLASS_ABOVEUI;
4960 }
4961 }
4962
4963 /*
4964 * This is the processor idle loop, which just looks for other threads
4965 * to execute. Processor idle threads invoke this without supplying a
4966 * current thread, in order to idle without an asserted wait state.
4967 *
4968 * Returns the next thread to execute if dispatched directly.
4969 */
4970
4971 #if 0
4972 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
4973 #else
4974 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
4975 #endif
4976
4977 thread_t
4978 processor_idle(
4979 thread_t thread,
4980 processor_t processor)
4981 {
4982 processor_set_t pset = processor->processor_set;
4983
4984 (void)splsched();
4985
4986 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4987 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
4988 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
4989
4990 SCHED_STATS_CPU_IDLE_START(processor);
4991
4992 uint64_t ctime = mach_absolute_time();
4993
4994 timer_switch(&PROCESSOR_DATA(processor, system_state), ctime, &PROCESSOR_DATA(processor, idle_state));
4995 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
4996
4997 cpu_quiescent_counter_leave(ctime);
4998
4999 while (1) {
5000 /*
5001 * Ensure that updates to my processor and pset state,
5002 * made by the IPI source processor before sending the IPI,
5003 * are visible on this processor now (even though we don't
5004 * take the pset lock yet).
5005 */
5006 atomic_thread_fence(memory_order_acquire);
5007
5008 if (processor->state != PROCESSOR_IDLE) {
5009 break;
5010 }
5011 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
5012 break;
5013 }
5014 #if defined(CONFIG_SCHED_DEFERRED_AST)
5015 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
5016 break;
5017 }
5018 #endif
5019 if (processor->is_recommended && (processor->processor_primary == processor)) {
5020 if (rt_runq_count(pset)) {
5021 break;
5022 }
5023 } else {
5024 if (SCHED(processor_bound_count)(processor)) {
5025 break;
5026 }
5027 }
5028
5029 #if CONFIG_SCHED_IDLE_IN_PLACE
5030 if (thread != THREAD_NULL) {
5031 /* Did the idle-in-place thread wake up? */
5032 if ((thread->state & (TH_WAIT | TH_SUSP)) != TH_WAIT || thread->wake_active) {
5033 break;
5034 }
5035 }
5036 #endif
5037
5038 IDLE_KERNEL_DEBUG_CONSTANT(
5039 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
5040
5041 machine_track_platform_idle(TRUE);
5042
5043 machine_idle();
5044
5045 machine_track_platform_idle(FALSE);
5046
5047 (void)splsched();
5048
5049 /*
5050 * Check if we should call sched_timeshare_consider_maintenance() here.
5051 * The CPU was woken out of idle due to an interrupt and we should do the
5052 * call only if the processor is still idle. If the processor is non-idle,
5053 * the threads running on the processor would do the call as part of
5054 * context switching.
5055 */
5056 if (processor->state == PROCESSOR_IDLE) {
5057 sched_timeshare_consider_maintenance(mach_absolute_time());
5058 }
5059
5060 IDLE_KERNEL_DEBUG_CONSTANT(
5061 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
5062
5063 if (!SCHED(processor_queue_empty)(processor)) {
5064 /* Secondary SMT processors respond to directed wakeups
5065 * exclusively. Some platforms induce 'spurious' SMT wakeups.
5066 */
5067 if (processor->processor_primary == processor) {
5068 break;
5069 }
5070 }
5071 }
5072
5073 ctime = mach_absolute_time();
5074
5075 timer_switch(&PROCESSOR_DATA(processor, idle_state), ctime, &PROCESSOR_DATA(processor, system_state));
5076 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
5077
5078 cpu_quiescent_counter_join(ctime);
5079
5080 assert(processor->next_thread == NULL);
5081
5082 ast_t reason = AST_NONE;
5083
5084 /* We're handling all scheduling AST's */
5085 ast_off(AST_SCHEDULING);
5086
5087 /*
5088 * thread_select will move the processor from dispatching to running,
5089 * or put it in idle if there's nothing to do.
5090 */
5091 thread_t current_thread = current_thread();
5092
5093 thread_lock(current_thread);
5094 thread_t new_thread = thread_select(current_thread, processor, &reason);
5095 thread_unlock(current_thread);
5096
5097 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5098 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
5099 (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
5100
5101 return new_thread;
5102 }
5103
5104 /*
5105 * Each processor has a dedicated thread which
5106 * executes the idle loop when there is no suitable
5107 * previous context.
5108 */
5109 void
5110 idle_thread(void)
5111 {
5112 processor_t processor = current_processor();
5113 thread_t new_thread;
5114
5115 new_thread = processor_idle(THREAD_NULL, processor);
5116 if (new_thread != THREAD_NULL) {
5117 thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
5118 /*NOTREACHED*/
5119 }
5120
5121 thread_block((thread_continue_t)idle_thread);
5122 /*NOTREACHED*/
5123 }
5124
5125 kern_return_t
5126 idle_thread_create(
5127 processor_t processor)
5128 {
5129 kern_return_t result;
5130 thread_t thread;
5131 spl_t s;
5132 char name[MAXTHREADNAMESIZE];
5133
5134 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
5135 if (result != KERN_SUCCESS) {
5136 return result;
5137 }
5138
5139 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
5140 thread_set_thread_name(thread, name);
5141
5142 s = splsched();
5143 thread_lock(thread);
5144 thread->bound_processor = processor;
5145 processor->idle_thread = thread;
5146 thread->sched_pri = thread->base_pri = IDLEPRI;
5147 thread->state = (TH_RUN | TH_IDLE);
5148 thread->options |= TH_OPT_IDLE_THREAD;
5149 thread_unlock(thread);
5150 splx(s);
5151
5152 thread_deallocate(thread);
5153
5154 return KERN_SUCCESS;
5155 }
5156
5157 /*
5158 * sched_startup:
5159 *
5160 * Kicks off scheduler services.
5161 *
5162 * Called at splsched.
5163 */
5164 void
5165 sched_startup(void)
5166 {
5167 kern_return_t result;
5168 thread_t thread;
5169
5170 simple_lock_init(&sched_vm_group_list_lock, 0);
5171
5172 #if __arm__ || __arm64__
5173 simple_lock_init(&sched_recommended_cores_lock, 0);
5174 #endif /* __arm__ || __arm64__ */
5175
5176 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
5177 (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
5178 if (result != KERN_SUCCESS) {
5179 panic("sched_startup");
5180 }
5181
5182 thread_deallocate(thread);
5183
5184 assert_thread_magic(thread);
5185
5186 /*
5187 * Yield to the sched_init_thread once, to
5188 * initialize our own thread after being switched
5189 * back to.
5190 *
5191 * The current thread is the only other thread
5192 * active at this point.
5193 */
5194 thread_block(THREAD_CONTINUE_NULL);
5195 }
5196
5197 #if __arm64__
5198 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
5199 #endif /* __arm64__ */
5200
5201
5202 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5203
5204 static volatile uint64_t sched_maintenance_deadline;
5205 static uint64_t sched_tick_last_abstime;
5206 static uint64_t sched_tick_delta;
5207 uint64_t sched_tick_max_delta;
5208
5209
5210 /*
5211 * sched_timeshare_maintenance_continue:
5212 *
5213 * Perform periodic bookkeeping functions about ten
5214 * times per second.
5215 */
5216 void
5217 sched_timeshare_maintenance_continue(void)
5218 {
5219 uint64_t sched_tick_ctime, late_time;
5220
5221 struct sched_update_scan_context scan_context = {
5222 .earliest_bg_make_runnable_time = UINT64_MAX,
5223 .earliest_normal_make_runnable_time = UINT64_MAX,
5224 .earliest_rt_make_runnable_time = UINT64_MAX
5225 };
5226
5227 sched_tick_ctime = mach_absolute_time();
5228
5229 if (__improbable(sched_tick_last_abstime == 0)) {
5230 sched_tick_last_abstime = sched_tick_ctime;
5231 late_time = 0;
5232 sched_tick_delta = 1;
5233 } else {
5234 late_time = sched_tick_ctime - sched_tick_last_abstime;
5235 sched_tick_delta = late_time / sched_tick_interval;
5236 /* Ensure a delta of at least 1, since the interval could be slightly
5237 * smaller than the sched_tick_interval due to dispatch
5238 * latencies.
5239 */
5240 sched_tick_delta = MAX(sched_tick_delta, 1);
5241
5242 /* In the event that interrupt latencies or platform
5243 * idle events that advanced the timebase resulted
5244 * in periods where no threads were dispatched,
5245 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
5246 * iterations.
5247 */
5248 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
5249
5250 sched_tick_last_abstime = sched_tick_ctime;
5251 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
5252 }
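/*
 * Worked example: if the maintenance thread last ran 2.5
 * sched_tick_intervals ago, late_time / sched_tick_interval truncates
 * to 2, which then stays within the [1, SCHED_TICK_MAX_DELTA] clamp
 * applied above.
 */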
5253
5254 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
5255 sched_tick_delta, late_time, 0, 0, 0);
5256
5257 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
5258 * This could be greater than 1 if substantial intervals where
5259 * all processors are idle occur, which is rare in practice.
5260 */
5261
5262 sched_tick += sched_tick_delta;
5263
5264 update_vm_info();
5265
5266 /*
5267 * Compute various averages.
5268 */
5269 compute_averages(sched_tick_delta);
5270
5271 /*
5272 * Scan the run queues for threads which
5273 * may need to be updated, and find the earliest runnable thread on the runqueue
5274 * to report its latency.
5275 */
5276 SCHED(thread_update_scan)(&scan_context);
5277
5278 SCHED(rt_runq_scan)(&scan_context);
5279
5280 uint64_t ctime = mach_absolute_time();
5281
5282 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
5283 ctime - scan_context.earliest_bg_make_runnable_time : 0;
5284
5285 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
5286 ctime - scan_context.earliest_normal_make_runnable_time : 0;
5287
5288 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
5289 ctime - scan_context.earliest_rt_make_runnable_time : 0;
5290
5291 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
5292
5293 /*
5294 * Check to see if the special sched VM group needs attention.
5295 */
5296 sched_vm_group_maintenance();
5297
5298 #if __arm__ || __arm64__
5299 /* Check to see if the recommended cores failsafe is active */
5300 sched_recommended_cores_maintenance();
5301 #endif /* __arm__ || __arm64__ */
5302
5303
5304 #if DEBUG || DEVELOPMENT
5305 #if __x86_64__
5306 #include <i386/misc_protos.h>
5307 /* Check for long-duration interrupts */
5308 mp_interrupt_watchdog();
5309 #endif /* __x86_64__ */
5310 #endif /* DEBUG || DEVELOPMENT */
5311
5312 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
5313 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
5314 sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
5315
5316 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
5317 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
5318 /*NOTREACHED*/
5319 }
5320
5321 static uint64_t sched_maintenance_wakeups;
5322
5323 /*
5324 * Determine if the set of routines formerly driven by a maintenance timer
5325 * must be invoked, based on a deadline comparison. Signals the scheduler
5326 * maintenance thread on deadline expiration. Must be invoked at an interval
5327 * lower than the "sched_tick_interval", currently accomplished by
5328 * invocation via the quantum expiration timer and at context switch time.
5329 * Performance matters: this routine reuses a timestamp approximating the
5330 * current absolute time received from the caller, and should perform
5331 * no more than a comparison against the deadline in the common case.
5332 */
5333 void
5334 sched_timeshare_consider_maintenance(uint64_t ctime)
5335 {
5336 cpu_quiescent_counter_checkin(ctime);
5337
5338 uint64_t deadline = sched_maintenance_deadline;
5339
5340 if (__improbable(ctime >= deadline)) {
5341 if (__improbable(current_thread() == sched_maintenance_thread)) {
5342 return;
5343 }
5344 OSMemoryBarrier();
5345
5346 uint64_t ndeadline = ctime + sched_tick_interval;
5347
5348 if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
5349 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
5350 sched_maintenance_wakeups++;
5351 }
5352 }
5353
5354 uint64_t load_compute_deadline = __c11_atomic_load(&sched_load_compute_deadline, memory_order_relaxed);
5355
5356 if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
5357 uint64_t new_deadline = 0;
5358 if (__c11_atomic_compare_exchange_strong(&sched_load_compute_deadline, &load_compute_deadline, new_deadline,
5359 memory_order_relaxed, memory_order_relaxed)) {
5360 compute_sched_load();
5361 new_deadline = ctime + sched_load_compute_interval_abs;
5362 __c11_atomic_store(&sched_load_compute_deadline, new_deadline, memory_order_relaxed);
5363 }
5364 }
5365
5366 #if __arm64__
5367 uint64_t perf_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline, memory_order_relaxed);
5368
5369 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
5370 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
5371 if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline, &perf_deadline, 0,
5372 memory_order_relaxed, memory_order_relaxed)) {
5373 machine_perfcontrol_deadline_passed(perf_deadline);
5374 }
5375 }
5376 #endif /* __arm64__ */
5377 }
5378
5379 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5380
5381 void
5382 sched_init_thread(void (*continuation)(void))
5383 {
5384 thread_block(THREAD_CONTINUE_NULL);
5385
5386 thread_t thread = current_thread();
5387
5388 thread_set_thread_name(thread, "sched_maintenance_thread");
5389
5390 sched_maintenance_thread = thread;
5391
5392 continuation();
5393
5394 /*NOTREACHED*/
5395 }
5396
5397 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5398
5399 /*
5400 * thread_update_scan / runq_scan:
5401 *
5402 * Scan the run queues to account for timesharing threads
5403 * which need to be updated.
5404 *
5405 * Scanner runs in two passes. Pass one squirrels likely
5406 * threads away in an array, pass two does the update.
5407 *
5408 * This is necessary because the run queue is locked for
5409 * the candidate scan, but the thread is locked for the update.
5410 *
5411 * Array should be sized to make forward progress, without
5412 * disabling preemption for long periods.
5413 */
5414
5415 #define THREAD_UPDATE_SIZE 128
5416
5417 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
5418 static uint32_t thread_update_count = 0;
5419
5420 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
5421 boolean_t
5422 thread_update_add_thread(thread_t thread)
5423 {
5424 if (thread_update_count == THREAD_UPDATE_SIZE) {
5425 return FALSE;
5426 }
5427
5428 thread_update_array[thread_update_count++] = thread;
5429 thread_reference_internal(thread);
5430 return TRUE;
5431 }
5432
5433 void
5434 thread_update_process_threads(void)
5435 {
5436 assert(thread_update_count <= THREAD_UPDATE_SIZE);
5437
5438 for (uint32_t i = 0; i < thread_update_count; i++) {
5439 thread_t thread = thread_update_array[i];
5440 assert_thread_magic(thread);
5441 thread_update_array[i] = THREAD_NULL;
5442
5443 spl_t s = splsched();
5444 thread_lock(thread);
5445 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
5446 SCHED(update_priority)(thread);
5447 }
5448 thread_unlock(thread);
5449 splx(s);
5450
5451 thread_deallocate(thread);
5452 }
5453
5454 thread_update_count = 0;
5455 }
5456
5457 /*
5458 * Scan a runq for candidate threads.
5459 *
5460 * Returns TRUE if retry is needed.
5461 */
5462 boolean_t
5463 runq_scan(
5464 run_queue_t runq,
5465 sched_update_scan_context_t scan_context)
5466 {
5467 int count = runq->count;
5468 int queue_index;
5469
5470 assert(count >= 0);
5471
5472 if (count == 0) {
5473 return FALSE;
5474 }
5475
5476 for (queue_index = bitmap_first(runq->bitmap, NRQS);
5477 queue_index >= 0;
5478 queue_index = bitmap_next(runq->bitmap, queue_index)) {
5479 thread_t thread;
5480 queue_t queue = &runq->queues[queue_index];
5481
5482 qe_foreach_element(thread, queue, runq_links) {
5483 assert(count > 0);
5484 assert_thread_magic(thread);
5485
5486 if (thread->sched_stamp != sched_tick &&
5487 thread->sched_mode == TH_MODE_TIMESHARE) {
5488 if (thread_update_add_thread(thread) == FALSE) {
5489 return TRUE;
5490 }
5491 }
5492
5493 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5494 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5495 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5496 }
5497 } else {
5498 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5499 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5500 }
5501 }
5502 count--;
5503 }
5504 }
5505
5506 return FALSE;
5507 }
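
/*
 * Illustrative sketch (hypothetical, not one of the scheduler policies in
 * this tree): how a timesharing policy's thread_update_scan() pass might
 * drive runq_scan() and thread_update_process_threads().  Locking is
 * reduced to the essentials; a real caller also holds the lock protecting
 * the run queue being scanned.
 */
static __unused void
example_thread_update_scan(run_queue_t runq, sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed;

	do {
		/* Pass one: squirrel away candidates while the run queue is locked */
		spl_t s = splsched();
		restart_needed = runq_scan(runq, scan_context);
		splx(s);

		/*
		 * Pass two: update the collected threads, taking only the
		 * thread locks.  If the array filled up, runq_scan() asked
		 * for a retry, so go around again.
		 */
		thread_update_process_threads();
	} while (restart_needed);
}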
5508
5509 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5510
5511 boolean_t
5512 thread_eager_preemption(thread_t thread)
5513 {
5514 return (thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0;
5515 }
5516
5517 void
5518 thread_set_eager_preempt(thread_t thread)
5519 {
5520 spl_t x;
5521 processor_t p;
5522 ast_t ast = AST_NONE;
5523
5524 x = splsched();
5525 p = current_processor();
5526
5527 thread_lock(thread);
5528 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5529
5530 if (thread == current_thread()) {
5531 ast = csw_check(thread, p, AST_NONE);
5532 thread_unlock(thread);
5533 if (ast != AST_NONE) {
5534 (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
5535 }
5536 } else {
5537 p = thread->last_processor;
5538
5539 if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
5540 p->active_thread == thread) {
5541 cause_ast_check(p);
5542 }
5543
5544 thread_unlock(thread);
5545 }
5546
5547 splx(x);
5548 }
5549
5550 void
5551 thread_clear_eager_preempt(thread_t thread)
5552 {
5553 spl_t x;
5554
5555 x = splsched();
5556 thread_lock(thread);
5557
5558 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5559
5560 thread_unlock(thread);
5561 splx(x);
5562 }
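
/*
 * Illustrative usage (hypothetical caller): bracketing a stretch of work
 * during which the current thread should yield the CPU eagerly whenever
 * other work becomes runnable.
 */
static __unused void
example_eager_preempt_window(void)
{
	thread_t self = current_thread();

	thread_set_eager_preempt(self);
	/* ... work that should not hold the CPU against other runnable threads ... */
	thread_clear_eager_preempt(self);
}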
5563
5564 /*
5565 * Scheduling statistics
5566 */
5567 void
5568 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5569 {
5570 struct processor_sched_statistics *stats;
5571 boolean_t to_realtime = FALSE;
5572
5573 stats = &processor->processor_data.sched_stats;
5574 stats->csw_count++;
5575
5576 if (otherpri >= BASEPRI_REALTIME) {
5577 stats->rt_sched_count++;
5578 to_realtime = TRUE;
5579 }
5580
5581 if ((reasons & AST_PREEMPT) != 0) {
5582 stats->preempt_count++;
5583
5584 if (selfpri >= BASEPRI_REALTIME) {
5585 stats->preempted_rt_count++;
5586 }
5587
5588 if (to_realtime) {
5589 stats->preempted_by_rt_count++;
5590 }
5591 }
5592 }
5593
5594 void
5595 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
5596 {
5597 uint64_t timestamp = mach_absolute_time();
5598
5599 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5600 stats->last_change_timestamp = timestamp;
5601 }
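
/*
 * Worked example (hypothetical helper): count_sum integrates "run-queue
 * depth x time", so the time-weighted average depth over an interval is
 * the growth of count_sum divided by the interval length, both measured
 * in mach_absolute_time() units.
 */
static __unused uint64_t
example_average_runq_depth(uint64_t count_sum_begin, uint64_t count_sum_end, uint64_t interval)
{
	if (interval == 0) {
		return 0;
	}
	return (count_sum_end - count_sum_begin) / interval;
}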
5602
5603 /*
5604 * For calls from assembly code
5605 */
5606 #undef thread_wakeup
5607 void
5608 thread_wakeup(
5609 event_t x);
5610
5611 void
5612 thread_wakeup(
5613 event_t x)
5614 {
5615 thread_wakeup_with_result(x, THREAD_AWAKENED);
5616 }
5617
5618 boolean_t
5619 preemption_enabled(void)
5620 {
5621 return get_preemption_level() == 0 && ml_get_interrupts_enabled();
5622 }
5623
5624 static void
5625 sched_timer_deadline_tracking_init(void)
5626 {
5627 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5628 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5629 }
5630
5631 #if __arm__ || __arm64__
5632
5633 uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5634 uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
5635 bool perfcontrol_failsafe_active = false;
5636 bool perfcontrol_sleep_override = false;
5637
5638 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5639 uint64_t perfcontrol_failsafe_activation_time;
5640 uint64_t perfcontrol_failsafe_deactivation_time;
5641
5642 /* data covering which thread likely triggered the failsafe and how long it ran */
5643 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5644 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
5645 int perfcontrol_failsafe_pid;
5646 uint64_t perfcontrol_failsafe_tid;
5647 uint64_t perfcontrol_failsafe_thread_timer_at_start;
5648 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
5649 uint32_t perfcontrol_failsafe_recommended_at_trigger;
5650
5651 /*
5652 * Perf controller calls here to update the recommended core bitmask.
5653 * If the failsafe is active, we don't immediately apply the new value.
5654 * Instead, we store the new request and use it after the failsafe deactivates.
5655 *
5656 * If the failsafe is not active, immediately apply the update.
5657 *
5658 * No scheduler locks are held, no other locks that the scheduler might depend on are held,
5659 * and interrupts are enabled.
5660 *
5661 * The prototype currently lives in osfmk/arm/machine_routines.h.
5662 */
5663 void
5664 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
5665 {
5666 assert(preemption_enabled());
5667
5668 spl_t s = splsched();
5669 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5670
5671 perfcontrol_requested_recommended_cores = recommended_cores;
5672 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
5673
5674 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
5675 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5676 } else {
5677 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5678 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5679 perfcontrol_requested_recommended_cores,
5680 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5681 }
5682
5683 simple_unlock(&sched_recommended_cores_lock);
5684 splx(s);
5685 }
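
/*
 * Illustrative usage (hypothetical perf-controller call): recommending only
 * CPUs 0-3.  Preemption must be enabled, per the assert above.  If the
 * starvation failsafe or the sleep override is active, the request is
 * latched and applied once normal operation resumes.
 */
static __unused void
example_recommend_first_four_cores(void)
{
	uint32_t mask = 0x0F;   /* bit N set => cpu_id N is recommended */

	sched_perfcontrol_update_recommended_cores(mask);
}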
5686
5687 void
5688 sched_override_recommended_cores_for_sleep(void)
5689 {
5690 spl_t s = splsched();
5691 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5692
5693 if (perfcontrol_sleep_override == false) {
5694 perfcontrol_sleep_override = true;
5695 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5696 }
5697
5698 simple_unlock(&sched_recommended_cores_lock);
5699 splx(s);
5700 }
5701
5702 void
5703 sched_restore_recommended_cores_after_sleep(void)
5704 {
5705 spl_t s = splsched();
5706 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5707
5708 if (perfcontrol_sleep_override == true) {
5709 perfcontrol_sleep_override = false;
5710 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5711 }
5712
5713 simple_unlock(&sched_recommended_cores_lock);
5714 splx(s);
5715 }
5716
5717 /*
5718 * Consider whether we need to activate the recommended cores failsafe
5719 *
5720 * Called from quantum timer interrupt context of a realtime thread
5721 * No scheduler locks are held, interrupts are disabled
5722 */
5723 void
5724 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
5725 {
5726 /*
5727 * Check whether a realtime thread is starving the system,
5728 * and whether bringing up non-recommended cores would help.
5729 *
5730 * TODO: Is this the correct check for recommended == possible cores?
5731 * TODO: Validate that the checks done without the relevant lock are OK.
5732 */
5733
5734 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
5735 /* keep track of how long the responsible thread runs */
5736
5737 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5738
5739 if (perfcontrol_failsafe_active == TRUE &&
5740 cur_thread->thread_id == perfcontrol_failsafe_tid) {
5741 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
5742 timer_grab(&cur_thread->system_timer);
5743 }
5744
5745 simple_unlock(&sched_recommended_cores_lock);
5746
5747 /* we're already trying to solve the problem, so bail */
5748 return;
5749 }
5750
5751 /* The failsafe won't help if there are no more processors to enable */
5752 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
5753 return;
5754 }
5755
5756 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
5757
5758 /* Use the maintenance thread as our canary in the coal mine */
5759 thread_t m_thread = sched_maintenance_thread;
5760
5761 /* If it doesn't look bad, nothing to see here */
5762 if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
5763 return;
5764 }
5765
5766 /* It looks bad, take the lock to be sure */
5767 thread_lock(m_thread);
5768
5769 if (m_thread->runq == PROCESSOR_NULL ||
5770 (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
5771 m_thread->last_made_runnable_time >= too_long_ago) {
5772 /*
5773 * Maintenance thread is either on cpu or blocked, and
5774 * therefore wouldn't benefit from more cores
5775 */
5776 thread_unlock(m_thread);
5777 return;
5778 }
5779
5780 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
5781
5782 thread_unlock(m_thread);
5783
5784 /*
5785 * There are cores disabled at perfcontrol's recommendation, but the
5786 * system is so overloaded that the maintenance thread can't run.
5787 * That likely means that perfcontrol can't run either, so it can't fix
5788 * the recommendation. We have to kick in a failsafe to keep from starving.
5789 *
5790 * When the maintenance thread has been starved for too long,
5791 * ignore the recommendation from perfcontrol and light up all the cores.
5792 *
5793 * TODO: Consider weird states like boot, sleep, or debugger
5794 */
5795
5796 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5797
5798 if (perfcontrol_failsafe_active == TRUE) {
5799 simple_unlock(&sched_recommended_cores_lock);
5800 return;
5801 }
5802
5803 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5804 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
5805 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
5806
5807 perfcontrol_failsafe_active = TRUE;
5808 perfcontrol_failsafe_activation_time = mach_absolute_time();
5809 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
5810 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
5811
5812 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
5813 task_t task = cur_thread->task;
5814 perfcontrol_failsafe_pid = task_pid(task);
5815 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
5816
5817 perfcontrol_failsafe_tid = cur_thread->thread_id;
5818
5819 /* Blame the thread for time it has run recently */
5820 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
5821
5822 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
5823
5824 /* Compute the start time of the bad behavior in terms of the thread's on core time */
5825 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
5826 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
5827
5828 /* Ignore the previously recommended core configuration */
5829 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5830
5831 simple_unlock(&sched_recommended_cores_lock);
5832 }
5833
5834 /*
5835 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
5836 *
5837 * Runs in the context of the maintenance thread, no locks held
5838 */
5839 static void
5840 sched_recommended_cores_maintenance(void)
5841 {
5842 /* Common case - no failsafe, nothing to be done here */
5843 if (__probable(perfcontrol_failsafe_active == FALSE)) {
5844 return;
5845 }
5846
5847 uint64_t ctime = mach_absolute_time();
5848
5849 boolean_t print_diagnostic = FALSE;
5850 char p_name[FAILSAFE_NAME_LEN] = "";
5851
5852 spl_t s = splsched();
5853 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5854
5855 /* Check again, under the lock, to avoid races */
5856 if (perfcontrol_failsafe_active == FALSE) {
5857 goto out;
5858 }
5859
5860 /*
5861 * Ensure that the other cores get another few ticks to run some threads.
5862 * Without this hysteresis, the maintenance thread would be the first
5863 * to run, and it would immediately turn the other cores back off.
5864 */
5865 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
5866 goto out;
5867 }
5868
5869 /* Capture some diagnostic state under the lock so we can print it out later */
5870
5871 int pid = perfcontrol_failsafe_pid;
5872 uint64_t tid = perfcontrol_failsafe_tid;
5873
5874 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
5875 perfcontrol_failsafe_thread_timer_at_start;
5876 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
5877 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
5878 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
5879 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
5880
5881 print_diagnostic = TRUE;
5882
5883 /* Deactivate the failsafe and reinstate the requested recommendation settings */
5884
5885 perfcontrol_failsafe_deactivation_time = ctime;
5886 perfcontrol_failsafe_active = FALSE;
5887
5888 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5889 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
5890 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
5891
5892 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5893
5894 out:
5895 simple_unlock(&sched_recommended_cores_lock);
5896 splx(s);
5897
5898 if (print_diagnostic) {
5899 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
5900
5901 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
5902 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
5903
5904 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
5905 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
5906
5907 printf("recommended core failsafe kicked in for %lld ms "
5908 "likely due to %s[%d] thread 0x%llx spending "
5909 "%lld ms on cpu at realtime priority - "
5910 "new recommendation: 0x%x -> 0x%x\n",
5911 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
5912 rec_cores_before, rec_cores_after);
5913 }
5914 }
5915
5916 #endif /* __arm__ || __arm64__ */
5917
5918 kern_return_t
5919 sched_processor_enable(processor_t processor, boolean_t enable)
5920 {
5921 assert(preemption_enabled());
5922
5923 spl_t s = splsched();
5924 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5925
5926 if (enable) {
5927 bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
5928 } else {
5929 bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
5930 }
5931
5932 #if __arm__ || __arm64__
5933 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
5934 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5935 } else {
5936 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5937 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5938 perfcontrol_requested_recommended_cores,
5939 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5940 }
5941 #else /* __arm__ || __arm64__ */
5942 sched_update_recommended_cores(usercontrol_requested_recommended_cores);
5943 #endif /* __arm__ || __arm64__ */
5944
5945 simple_unlock(&sched_recommended_cores_lock);
5946 splx(s);
5947
5948 return KERN_SUCCESS;
5949 }
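
/*
 * Illustrative usage (hypothetical caller, e.g. a user-facing CPU control):
 * removing a processor from the user-recommended set and later restoring
 * it.  The effective recommendation is always the intersection of the
 * perfcontrol request and the user-control request.
 */
static __unused void
example_toggle_processor(processor_t processor)
{
	(void)sched_processor_enable(processor, FALSE);  /* stop scheduling work here */
	(void)sched_processor_enable(processor, TRUE);   /* make it available again */
}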
5950
5951
5952 /*
5953 * Apply a new recommended cores mask to the processors it affects
5954 * Runs after considering failsafes and such
5955 *
5956 * Iterate over processors and update their ->is_recommended field.
5957 * If a processor is running, we let it drain out at its next
5958 * quantum expiration or blocking point. If a processor is idle, there
5959 * may be more work for it to do, so IPI it.
5960 *
5961 * interrupts disabled, sched_recommended_cores_lock is held
5962 */
5963 static void
5964 sched_update_recommended_cores(uint64_t recommended_cores)
5965 {
5966 processor_set_t pset, nset;
5967 processor_t processor;
5968 uint64_t needs_exit_idle_mask = 0x0;
5969 uint32_t avail_count;
5970
5971 processor = processor_list;
5972 pset = processor->processor_set;
5973
5974 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
5975 recommended_cores,
5976 #if __arm__ || __arm64__
5977 perfcontrol_failsafe_active, 0, 0);
5978 #else /* __arm__ || __arm64__ */
5979 0, 0, 0);
5980 #endif /* __arm__ || __arm64__ */
5981
5982 if (__builtin_popcountll(recommended_cores) == 0) {
5983 bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
5984 }
5985
5986 /* First set recommended cores */
5987 pset_lock(pset);
5988 avail_count = 0;
5989 do {
5990 nset = processor->processor_set;
5991 if (nset != pset) {
5992 pset_unlock(pset);
5993 pset = nset;
5994 pset_lock(pset);
5995 }
5996
5997 if (bit_test(recommended_cores, processor->cpu_id)) {
5998 processor->is_recommended = TRUE;
5999 bit_set(pset->recommended_bitmask, processor->cpu_id);
6000
6001 if (processor->state == PROCESSOR_IDLE) {
6002 if (processor != current_processor()) {
6003 bit_set(needs_exit_idle_mask, processor->cpu_id);
6004 }
6005 }
6006 if (processor->state != PROCESSOR_OFF_LINE) {
6007 avail_count++;
6008 }
6009 }
6010 } while ((processor = processor->processor_list) != NULL);
6011 pset_unlock(pset);
6012
6013 /* Now shut down the non-recommended cores */
6014 processor = processor_list;
6015 pset = processor->processor_set;
6016
6017 pset_lock(pset);
6018 do {
6019 nset = processor->processor_set;
6020 if (nset != pset) {
6021 pset_unlock(pset);
6022 pset = nset;
6023 pset_lock(pset);
6024 }
6025
6026 if (!bit_test(recommended_cores, processor->cpu_id)) {
6027 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
6028
6029 processor->is_recommended = FALSE;
6030 bit_clear(pset->recommended_bitmask, processor->cpu_id);
6031
6032 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
6033 ipi_type = SCHED_IPI_IMMEDIATE;
6034 }
6035 SCHED(processor_queue_shutdown)(processor);
6036 /* pset unlocked */
6037
6038 SCHED(rt_queue_shutdown)(processor);
6039
6040 if (ipi_type != SCHED_IPI_NONE) {
6041 if (processor == current_processor()) {
6042 ast_on(AST_PREEMPT);
6043 } else {
6044 sched_ipi_perform(processor, ipi_type);
6045 }
6046 }
6047
6048 pset_lock(pset);
6049 }
6050 } while ((processor = processor->processor_list) != NULL);
6051
6052 processor_avail_count_user = avail_count;
6053 #if defined(__x86_64__)
6054 commpage_update_active_cpus();
6055 #endif
6056
6057 pset_unlock(pset);
6058
6059 /* Issue all pending IPIs now that the pset lock has been dropped */
6060 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
6061 processor = processor_array[cpuid];
6062 machine_signal_idle(processor);
6063 }
6064
6065 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
6066 needs_exit_idle_mask, 0, 0, 0);
6067 }
6068
6069 void
6070 thread_set_options(uint32_t thopt)
6071 {
6072 spl_t x;
6073 thread_t t = current_thread();
6074
6075 x = splsched();
6076 thread_lock(t);
6077
6078 t->options |= thopt;
6079
6080 thread_unlock(t);
6081 splx(x);
6082 }
6083
6084 void
6085 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
6086 {
6087 thread->pending_block_hint = block_hint;
6088 }
6089
6090 uint32_t
6091 qos_max_parallelism(int qos, uint64_t options)
6092 {
6093 return SCHED(qos_max_parallelism)(qos, options);
6094 }
6095
6096 uint32_t
6097 sched_qos_max_parallelism(__unused int qos, uint64_t options)
6098 {
6099 host_basic_info_data_t hinfo;
6100 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
6101 /* Query the machine layer for core information */
6102 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
6103 (host_info_t)&hinfo, &count);
6104 assert(kret == KERN_SUCCESS);
6105
6106 /* We would not want multiple realtime threads running on the
6107 * same physical core, even on SMT-capable machines.
6108 */
6109 if (options & QOS_PARALLELISM_REALTIME) {
6110 return hinfo.physical_cpu;
6111 }
6112
6113 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
6114 return hinfo.logical_cpu;
6115 } else {
6116 return hinfo.physical_cpu;
6117 }
6118 }
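
/*
 * Illustrative usage (hypothetical caller): sizing a pool of realtime
 * worker threads so that no two need to share a physical core.  The QoS
 * argument is only an example value; this implementation ignores it.
 */
static __unused uint32_t
example_realtime_worker_count(void)
{
	return qos_max_parallelism(THREAD_QOS_USER_INTERACTIVE, QOS_PARALLELISM_REALTIME);
}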
6119
6120 int sched_allow_NO_SMT_threads = 1;
6121 bool
6122 thread_no_smt(thread_t thread)
6123 {
6124 #if DEBUG || DEVELOPMENT
6125 return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT));
6126 #else
6127 return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && (thread->sched_flags & TH_SFLAG_NO_SMT);
6128 #endif
6129 }
6130
6131 bool
6132 processor_active_thread_no_smt(processor_t processor)
6133 {
6134 return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
6135 }
6136
6137 #if __arm64__
6138
6139 /*
6140 * Set up a new callback deadline, or replace the old one.
6141 *
6142 * Returns TRUE if an old deadline was cancelled, FALSE if there was none pending.
6143 */
6144 boolean_t
6145 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
6146 {
6147 /*
6148 * Exchange the old deadline for the new one.  If the old deadline was nonzero,
6149 * the pending callback was cancelled; otherwise there was nothing to cancel.
6150 */
6151
6152 uint64_t old_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline,
6153 memory_order_relaxed);
6154
6155
6156 while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline,
6157 &old_deadline, new_deadline,
6158 memory_order_relaxed, memory_order_relaxed)) {
6159 ;
6160 }
6161
6162
6163 /* old_deadline now holds the value that was actually replaced, which may differ from the initial load if the exchange raced */
6164
6165 return (old_deadline != 0) ? TRUE : FALSE;
6166 }
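
/*
 * Illustrative usage (hypothetical caller): arming a one-shot perfcontrol
 * deadline roughly 10 ms out, then cancelling it.  A deadline of 0 means
 * "no callback pending"; the return value reports whether an earlier
 * deadline was still armed.
 */
static __unused void
example_arm_and_cancel_perfcontrol_deadline(void)
{
	uint64_t delta;

	nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &delta);
	(void)sched_perfcontrol_update_callback_deadline(mach_absolute_time() + delta);

	/* Later: cancel.  TRUE means the callback had not fired yet. */
	boolean_t was_armed = sched_perfcontrol_update_callback_deadline(0);
	(void)was_armed;
}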
6167
6168 #endif /* __arm64__ */
6169
6170 void
6171 sched_update_pset_load_average(processor_set_t pset)
6172 {
6173 int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
6174 int new_load_average = (pset->load_average + load) >> 1;
6175
6176 pset->load_average = new_load_average;
6177
6178 #if (DEVELOPMENT || DEBUG)
6179 #endif
6180 }
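
/*
 * Illustrative sketch of the update rule above: the pset load average is an
 * exponentially weighted moving average with a decay factor of 1/2 per call,
 * carried in fixed point with PSET_LOAD_NUMERATOR_SHIFT fractional bits.
 * For example, 2 running CPUs plus 3 queued timeshare threads plus 1 queued
 * realtime thread produce a sample of 6 << PSET_LOAD_NUMERATOR_SHIFT.
 */
static __unused int
example_pset_load_ewma_step(int old_average, int new_sample)
{
	/* new = old/2 + sample/2, matching (old + sample) >> 1 above */
	return (old_average + new_sample) >> 1;
}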
6181
6182 /* pset is locked */
6183 static processor_t
6184 choose_processor_for_realtime_thread(processor_set_t pset)
6185 {
6186 #if defined(__x86_64__)
6187 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
6188 #else
6189 const bool avoid_cpu0 = false;
6190 #endif
6191
6192 uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_URGENT_cpu_mask);
6193 if (avoid_cpu0) {
6194 cpu_map = bit_ror64(cpu_map, 1);
6195 }
6196
6197 for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) {
6198 int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
6199
6200 processor_t processor = processor_array[cpuid];
6201
6202 if (processor->processor_primary != processor) {
6203 continue;
6204 }
6205
6206 if (processor->state == PROCESSOR_IDLE) {
6207 return processor;
6208 }
6209
6210 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
6211 continue;
6212 }
6213
6214 if (processor->current_pri >= BASEPRI_RTQUEUES) {
6215 continue;
6216 }
6217
6218 return processor;
6219 }
6220
6221 if (!sched_allow_rt_smt) {
6222 return PROCESSOR_NULL;
6223 }
6224
6225 /* Consider secondary processors */
6226 if (avoid_cpu0) {
6227 /* Also avoid cpu1 */
6228 cpu_map = bit_ror64(cpu_map, 1);
6229 }
6230 for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) {
6231 int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;
6232
6233 processor_t processor = processor_array[cpuid];
6234
6235 if (processor->processor_primary == processor) {
6236 continue;
6237 }
6238
6239 if (processor->state == PROCESSOR_IDLE) {
6240 return processor;
6241 }
6242
6243 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
6244 continue;
6245 }
6246
6247 if (processor->current_pri >= BASEPRI_RTQUEUES) {
6248 continue;
6249 }
6250
6251 return processor;
6252 }
6253
6254 return PROCESSOR_NULL;
6255 }
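
/*
 * Worked example of the avoid_cpu0 rotation above (illustrative): with
 * cpu_map = 0b1011 (CPUs 0, 1 and 3 eligible), bit_ror64(cpu_map, 1) moves
 * bit 0 to bit 63, so the lsb_first()/lsb_next() walk visits CPUs 1 and 3
 * first and CPU 0 last, and (rotid + 1) & 63 maps each rotated index back
 * to the real cpu_id.  The secondary pass rotates once more and uses
 * (rotid + 2) & 63, which pushes both CPU 0 and CPU 1 to the end of the walk.
 */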
6256
6257 /* pset is locked */
6258 static bool
6259 all_available_primaries_are_running_realtime_threads(processor_set_t pset)
6260 {
6261 return these_processors_are_running_realtime_threads(pset, pset->primary_map);
6262 }
6263
6264 /* pset is locked */
6265 static bool
6266 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map)
6267 {
6268 uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask) & these_map;
6269
6270 for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
6271 processor_t processor = processor_array[cpuid];
6272
6273 if (processor->state == PROCESSOR_IDLE) {
6274 return false;
6275 }
6276
6277 if (processor->state == PROCESSOR_DISPATCHING) {
6278 return false;
6279 }
6280
6281 if (processor->state != PROCESSOR_RUNNING) {
6282 /*
6283 * All other processor states are considered unavailable to run
6284 * realtime threads. In particular, we prefer an available secondary
6285 * processor over the risk of leaving a realtime thread on the run queue
6286 * while waiting for a processor in PROCESSOR_START state,
6287 * which should anyway be a rare case.
6288 */
6289 continue;
6290 }
6291
6292 if (processor->current_pri < BASEPRI_RTQUEUES) {
6293 return false;
6294 }
6295 }
6296
6297 return true;
6298 }
6299
6300 static bool
6301 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor)
6302 {
6303 bool ok_to_run_realtime_thread = true;
6304 #if defined(__x86_64__)
6305 if (sched_avoid_cpu0 && processor->cpu_id == 0) {
6306 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1);
6307 } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
6308 ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2);
6309 } else if (processor->processor_primary != processor) {
6310 ok_to_run_realtime_thread = sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset);
6311 }
6312 #else
6313 (void)pset;
6314 (void)processor;
6315 #endif
6316 return ok_to_run_realtime_thread;
6317 }
6318
6319 void
6320 thread_set_no_smt(bool set)
6321 {
6322 thread_t thread = current_thread();
6323
6324 spl_t s = splsched();
6325 thread_lock(thread);
6326 if (set) {
6327 thread->sched_flags |= TH_SFLAG_NO_SMT;
6328 } else {
6329 thread->sched_flags &= ~TH_SFLAG_NO_SMT;
6330 }
6331 thread_unlock(thread);
6332 splx(s);
6333 }
6334
6335 bool
6336 thread_get_no_smt(void)
6337 {
6338 return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
6339 }
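
/*
 * Illustrative usage (hypothetical caller): opting the current thread out of
 * sharing a physical core with an SMT sibling, then reading the flag back.
 */
static __unused void
example_opt_out_of_smt(void)
{
	thread_set_no_smt(true);
	assert(thread_get_no_smt());
}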
6340
6341 #if DEBUG || DEVELOPMENT
6342 extern void sysctl_task_set_no_smt(char no_smt);
6343 void
6344 sysctl_task_set_no_smt(char no_smt)
6345 {
6346 thread_t thread = current_thread();
6347 task_t task = thread->task;
6348
6349 if (no_smt == '1') {
6350 task->t_flags |= TF_NO_SMT;
6351 } else {
6352 task->t_flags &= ~TF_NO_SMT;
6353 }
6354 }
6355
6356 extern char sysctl_task_get_no_smt(void);
6357 char
6358 sysctl_task_get_no_smt(void)
6359 {
6360 thread_t thread = current_thread();
6361 task_t task = thread->task;
6362
6363 if (task->t_flags & TF_NO_SMT) {
6364 return '1';
6365 }
6366 return '0';
6367 }
6368 #endif