1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/limits.h>
79 #include <machine/atomic.h>
80
81 #include <machine/commpage.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/backtrace.h>
85 #include <kern/clock.h>
86 #include <kern/counters.h>
87 #include <kern/cpu_number.h>
88 #include <kern/cpu_data.h>
89 #include <kern/smp.h>
90 #include <kern/debug.h>
91 #include <kern/macro_help.h>
92 #include <kern/machine.h>
93 #include <kern/misc_protos.h>
94 #if MONOTONIC
95 #include <kern/monotonic.h>
96 #endif /* MONOTONIC */
97 #include <kern/processor.h>
98 #include <kern/queue.h>
99 #include <kern/sched.h>
100 #include <kern/sched_prim.h>
101 #include <kern/sfi.h>
102 #include <kern/syscall_subr.h>
103 #include <kern/task.h>
104 #include <kern/thread.h>
105 #include <kern/ledger.h>
106 #include <kern/timer_queue.h>
107 #include <kern/waitq.h>
108 #include <kern/policy_internal.h>
109 #include <kern/cpu_quiesce.h>
110
111 #include <vm/pmap.h>
112 #include <vm/vm_kern.h>
113 #include <vm/vm_map.h>
114 #include <vm/vm_pageout.h>
115
116 #include <mach/sdt.h>
117 #include <mach/mach_host.h>
118 #include <mach/host_info.h>
119
120 #include <sys/kdebug.h>
121 #include <kperf/kperf.h>
122 #include <kern/kpc.h>
123 #include <san/kasan.h>
124 #include <kern/pms.h>
125 #include <kern/host.h>
126 #include <stdatomic.h>
127
128 int
129 rt_runq_count(processor_set_t pset)
130 {
131 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
132 }
133
134 void
135 rt_runq_count_incr(processor_set_t pset)
136 {
137 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
138 }
139
140 void
141 rt_runq_count_decr(processor_set_t pset)
142 {
143 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
144 }
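
/*
 * The RT run-queue count above is maintained with relaxed atomics because
 * it is read locklessly as an advisory hint (see sched_thread_should_yield()
 * below); callers that need an exact view of the queue re-check the count
 * after taking the pset's RT lock, as thread_select() does.
 */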
145
146 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
147 int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
148
149 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
150 int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
151
152 #define MAX_UNSAFE_QUANTA 800
153 int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
154
155 #define MAX_POLL_QUANTA 2
156 int max_poll_quanta = MAX_POLL_QUANTA;
157
158 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
159 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
160
161 uint64_t max_poll_computation;
162
163 uint64_t max_unsafe_computation;
164 uint64_t sched_safe_duration;
165
166 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
167
168 uint32_t std_quantum;
169 uint32_t min_std_quantum;
170 uint32_t bg_quantum;
171
172 uint32_t std_quantum_us;
173 uint32_t bg_quantum_us;
174
175 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
176
177 uint32_t thread_depress_time;
178 uint32_t default_timeshare_computation;
179 uint32_t default_timeshare_constraint;
180
181 uint32_t max_rt_quantum;
182 uint32_t min_rt_quantum;
183
184 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
185
186 unsigned sched_tick;
187 uint32_t sched_tick_interval;
188
189 /* Timeshare load calculation interval (15ms) */
190 uint32_t sched_load_compute_interval_us = 15000;
191 uint64_t sched_load_compute_interval_abs;
192 static _Atomic uint64_t sched_load_compute_deadline;
193
194 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
195 uint32_t sched_fixed_shift;
196
197 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
198
199 /* Allow foreground to decay past default to resolve inversions */
200 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
201 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
202
203 /* Defaults for timer deadline profiling */
204 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
205 * 2ms */
206 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
207 * <= 5ms */
208
209 uint64_t timer_deadline_tracking_bin_1;
210 uint64_t timer_deadline_tracking_bin_2;
211
212 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
213
214 thread_t sched_maintenance_thread;
215
216 /* interrupts disabled lock to guard recommended cores state */
217 decl_simple_lock_data(static, sched_recommended_cores_lock);
218 static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
219 static void sched_update_recommended_cores(uint64_t recommended_cores);
220
221 #if __arm__ || __arm64__
222 static void sched_recommended_cores_maintenance(void);
223 uint64_t perfcontrol_failsafe_starvation_threshold;
224 extern char *proc_name_address(struct proc *p);
225 #endif /* __arm__ || __arm64__ */
226
227 uint64_t sched_one_second_interval;
228
229 /* Forwards */
230
231 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
232
233 static void load_shift_init(void);
234 static void preempt_pri_init(void);
235
236 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
237
238 thread_t processor_idle(
239 thread_t thread,
240 processor_t processor);
241
242 static ast_t
243 csw_check_locked(
244 thread_t thread,
245 processor_t processor,
246 processor_set_t pset,
247 ast_t check_reason);
248
249 static void processor_setrun(
250 processor_t processor,
251 thread_t thread,
252 integer_t options);
253
254 static void
255 sched_realtime_timebase_init(void);
256
257 static void
258 sched_timer_deadline_tracking_init(void);
259
260 #if DEBUG
261 extern int debug_task;
262 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
263 #else
264 #define TLOG(a, fmt, args...) do {} while (0)
265 #endif
266
267 static processor_t
268 thread_bind_internal(
269 thread_t thread,
270 processor_t processor);
271
272 static void
273 sched_vm_group_maintenance(void);
274
275 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
276 int8_t sched_load_shifts[NRQS];
277 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
278 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
279
280 /*
281 * Statically allocate a buffer to hold the longest possible
282 * scheduler description string, as currently implemented.
283 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
284 * to export to userspace via sysctl(3). If either version
285 * changes, update the other.
286 *
287 * Note that in addition to being an upper bound on the strings
288 * in the kernel, it's also an exact parameter to PE_get_default(),
289 * which interrogates the device tree on some platforms. That
290 * API requires that the caller know the exact size of the device tree
291 * property, so we need both a legacy size (32) and the current size
292 * (48) to deal with old and new device trees. The device tree property
293 * is similarly padded to a fixed size so that the same kernel image
294 * can run on multiple devices with different schedulers configured
295 * in the device tree.
296 */
297 char sched_string[SCHED_STRING_MAX_LENGTH];
298
299 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
300
301 /* Global flag which indicates whether Background Stepper Context is enabled */
302 static int cpu_throttle_enabled = 1;
303
304 void
305 sched_init(void)
306 {
307 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
308
309 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
310 /* No boot-args, check in device tree */
311 if (!PE_get_default("kern.sched_pri_decay_limit",
312 &sched_pri_decay_band_limit,
313 sizeof(sched_pri_decay_band_limit))) {
314 /* Allow decay all the way to normal limits */
315 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
316 }
317 }
318
319 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
320
321 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
322 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
323 }
324 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
325
326 cpu_quiescent_counter_init();
327
328 SCHED(init)();
329 SCHED(rt_init)(&pset0);
330 sched_timer_deadline_tracking_init();
331
332 SCHED(pset_init)(&pset0);
333 SCHED(processor_init)(master_processor);
334 }
335
336 void
337 sched_timebase_init(void)
338 {
339 uint64_t abstime;
340
341 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
342 sched_one_second_interval = abstime;
343
344 SCHED(timebase_init)();
345 sched_realtime_timebase_init();
346 }
347
348 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
349
350 void
351 sched_timeshare_init(void)
352 {
353 /*
354 * Calculate the timeslicing quantum
355 * in us.
356 */
357 if (default_preemption_rate < 1) {
358 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
359 }
360 std_quantum_us = (1000 * 1000) / default_preemption_rate;
361
362 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
363
364 if (default_bg_preemption_rate < 1) {
365 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
366 }
367 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
368
369 printf("standard background quantum is %d us\n", bg_quantum_us);
370
371 load_shift_init();
372 preempt_pri_init();
373 sched_tick = 0;
374 }
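
/*
 * With the default preemption rates defined above (100/s and 400/s), the
 * arithmetic works out to a 10 ms standard quantum and a 2.5 ms background
 * quantum:
 *
 *	std_quantum_us = 1000000 / 100 = 10000 us (10 ms)
 *	bg_quantum_us  = 1000000 / 400 =  2500 us (2.5 ms)
 */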
375
376 void
377 sched_timeshare_timebase_init(void)
378 {
379 uint64_t abstime;
380 uint32_t shift;
381
382 /* standard timeslicing quantum */
383 clock_interval_to_absolutetime_interval(
384 std_quantum_us, NSEC_PER_USEC, &abstime);
385 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
386 std_quantum = (uint32_t)abstime;
387
388 /* smallest remaining quantum (250 us) */
389 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
390 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
391 min_std_quantum = (uint32_t)abstime;
392
393 /* quantum for background tasks */
394 clock_interval_to_absolutetime_interval(
395 bg_quantum_us, NSEC_PER_USEC, &abstime);
396 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
397 bg_quantum = (uint32_t)abstime;
398
399 /* scheduler tick interval */
400 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
401 NSEC_PER_USEC, &abstime);
402 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
403 sched_tick_interval = (uint32_t)abstime;
404
405 /* timeshare load calculation interval & deadline initialization */
406 clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
407 sched_load_compute_deadline = sched_load_compute_interval_abs;
408
409 /*
410 * Compute conversion factor from usage to
411 * timesharing priorities with 5/8 ** n aging.
412 */
413 abstime = (abstime * 5) / 3;
414 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
415 abstime >>= 1;
416 }
417 sched_fixed_shift = shift;
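	/*
	 * The loop above takes sched_tick_interval scaled by 5/3 and halves it
	 * until it is no larger than BASEPRI_DEFAULT; the number of halvings
	 * becomes sched_fixed_shift.  Shifting a thread's accumulated usage
	 * right by roughly this amount (adjusted for load via the
	 * sched_load_shifts[] table) is how CPU usage is converted into
	 * timeshare priority penalties.
	 */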
418
419 for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
420 sched_pri_shifts[i] = INT8_MAX;
421 }
422
423 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
424 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
425
426 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
427 thread_depress_time = 1 * std_quantum;
428 default_timeshare_computation = std_quantum / 2;
429 default_timeshare_constraint = std_quantum;
430
431 #if __arm__ || __arm64__
432 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
433 #endif /* __arm__ || __arm64__ */
434 }
435
436 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
437
438 void
439 pset_rt_init(processor_set_t pset)
440 {
441 rt_lock_init(pset);
442
443 os_atomic_init(&pset->rt_runq.count, 0);
444 queue_init(&pset->rt_runq.queue);
445 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
446 }
447
448 rt_queue_t
449 sched_rtglobal_runq(processor_set_t pset)
450 {
451 (void)pset;
452
453 return &pset0.rt_runq;
454 }
455
456 void
457 sched_rtglobal_init(processor_set_t pset)
458 {
459 if (pset == &pset0) {
460 return pset_rt_init(pset);
461 }
462
463 /* Only pset0 rt_runq is used, so make it easy to detect
464 * buggy accesses to others.
465 */
466 memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
467 }
468
469 void
470 sched_rtglobal_queue_shutdown(processor_t processor)
471 {
472 (void)processor;
473 }
474
475 static void
476 sched_realtime_timebase_init(void)
477 {
478 uint64_t abstime;
479
480 /* smallest rt computation (50 us) */
481 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
482 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
483 min_rt_quantum = (uint32_t)abstime;
484
485 /* maximum rt computation (50 ms) */
486 clock_interval_to_absolutetime_interval(
487 50, 1000 * NSEC_PER_USEC, &abstime);
488 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
489 max_rt_quantum = (uint32_t)abstime;
490 }
491
492 void
493 sched_check_spill(processor_set_t pset, thread_t thread)
494 {
495 (void)pset;
496 (void)thread;
497
498 return;
499 }
500
501 bool
502 sched_thread_should_yield(processor_t processor, thread_t thread)
503 {
504 (void)thread;
505
506 return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
507 }
508
509 /* Default implementations of .steal_thread_enabled */
510 bool
511 sched_steal_thread_DISABLED(processor_set_t pset)
512 {
513 (void)pset;
514 return false;
515 }
516
517 bool
518 sched_steal_thread_enabled(processor_set_t pset)
519 {
520 return pset->node->pset_count > 1;
521 }
522
523 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
524
525 /*
526 * Set up values for timeshare
527 * loading factors.
528 */
529 static void
530 load_shift_init(void)
531 {
532 int8_t k, *p = sched_load_shifts;
533 uint32_t i, j;
534
535 uint32_t sched_decay_penalty = 1;
536
537 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
538 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
539 }
540
541 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
542 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
543 }
544
545 if (sched_decay_penalty == 0) {
546 /*
547 * There is no penalty for timeshare threads for using too much
548 * CPU, so set all load shifts to INT8_MIN. Even under high load,
549 * sched_pri_shift will be >INT8_MAX, and there will be no
550 * penalty applied to threads (nor will sched_usage be updated per
551 * thread).
552 */
553 for (i = 0; i < NRQS; i++) {
554 sched_load_shifts[i] = INT8_MIN;
555 }
556
557 return;
558 }
559
560 *p++ = INT8_MIN; *p++ = 0;
561
562 /*
563 * For a given system load "i", the per-thread priority
564 * penalty per quantum of CPU usage is ~2^k priority
565 * levels. "sched_decay_penalty" can cause more
566 * array entries to be filled with smaller "k" values
567 */
568 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
569 for (j <<= 1; (i < j) && (i < NRQS); ++i) {
570 *p++ = k;
571 }
572 }
573 }
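
/*
 * For example, with the default sched_decay_penalty of 1, the table built
 * above is approximately floor(log2(load)):
 *
 *	sched_load_shifts[0]     = INT8_MIN  (no load, no penalty)
 *	sched_load_shifts[1]     = 0
 *	sched_load_shifts[2..3]  = 1
 *	sched_load_shifts[4..7]  = 2
 *	sched_load_shifts[8..15] = 3
 *	...
 *
 * Larger values of sched_decay_penalty fill more entries with smaller "k",
 * so a given load produces a smaller per-quantum penalty.
 */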
574
575 static void
576 preempt_pri_init(void)
577 {
578 bitmap_t *p = sched_preempt_pri;
579
580 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
581 bitmap_set(p, i);
582 }
583
584 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
585 bitmap_set(p, i);
586 }
587 }
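
/*
 * sched_preempt_pri now has bits set for priorities from BASEPRI_FOREGROUND
 * up to (but not including) MINPRI_KERNEL, and for BASEPRI_PREEMPT through
 * MAXPRI.  Roughly speaking, a thread made runnable at one of these
 * priorities is treated as urgent and triggers an immediate preemption
 * check on its target processor instead of waiting for the running
 * thread's quantum to expire.
 */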
588
589 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
590
591 /*
592 * Thread wait timer expiration.
593 */
594 void
595 thread_timer_expire(
596 void *p0,
597 __unused void *p1)
598 {
599 thread_t thread = p0;
600 spl_t s;
601
602 assert_thread_magic(thread);
603
604 s = splsched();
605 thread_lock(thread);
606 if (--thread->wait_timer_active == 0) {
607 if (thread->wait_timer_is_set) {
608 thread->wait_timer_is_set = FALSE;
609 clear_wait_internal(thread, THREAD_TIMED_OUT);
610 }
611 }
612 thread_unlock(thread);
613 splx(s);
614 }
615
616 /*
617 * thread_unblock:
618 *
619 * Unblock thread on wake up.
620 *
621 * Returns TRUE if the thread should now be placed on the runqueue.
622 *
623 * Thread must be locked.
624 *
625 * Called at splsched().
626 */
627 boolean_t
628 thread_unblock(
629 thread_t thread,
630 wait_result_t wresult)
631 {
632 boolean_t ready_for_runq = FALSE;
633 thread_t cthread = current_thread();
634 uint32_t new_run_count;
635 int old_thread_state;
636
637 /*
638 * Set wait_result.
639 */
640 thread->wait_result = wresult;
641
642 /*
643 * Cancel pending wait timer.
644 */
645 if (thread->wait_timer_is_set) {
646 if (timer_call_cancel(&thread->wait_timer)) {
647 thread->wait_timer_active--;
648 }
649 thread->wait_timer_is_set = FALSE;
650 }
651
652 /*
653 * Update scheduling state: not waiting,
654 * set running.
655 */
656 old_thread_state = thread->state;
657 thread->state = (old_thread_state | TH_RUN) &
658 ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT);
659
660 if ((old_thread_state & TH_RUN) == 0) {
661 uint64_t ctime = mach_approximate_time();
662 thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
663 timer_start(&thread->runnable_timer, ctime);
664
665 ready_for_runq = TRUE;
666
667 if (old_thread_state & TH_WAIT_REPORT) {
668 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
669 }
670
671 /* Update the runnable thread count */
672 new_run_count = SCHED(run_count_incr)(thread);
673 } else {
674 /*
675 * Either the thread is idling in place on another processor,
676 * or it hasn't finished context switching yet.
677 */
678 assert((thread->state & TH_IDLE) == 0);
679 /*
680 * The run count is only dropped after the context switch completes
681 * and the thread is still waiting, so we should not run_incr here
682 */
683 new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
684 }
685
686
687 /*
688 * Calculate deadline for real-time threads.
689 */
690 if (thread->sched_mode == TH_MODE_REALTIME) {
691 uint64_t ctime;
692
693 ctime = mach_absolute_time();
694 thread->realtime.deadline = thread->realtime.constraint + ctime;
695 }
696
697 /*
698 * Clear old quantum, fail-safe computation, etc.
699 */
700 thread->quantum_remaining = 0;
701 thread->computation_metered = 0;
702 thread->reason = AST_NONE;
703 thread->block_hint = kThreadWaitNone;
704
705 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
706 * We also account for "double hop" thread signaling via
707 * the thread callout infrastructure.
708 * DRK: consider removing the callout wakeup counters in the future;
709 * they're present for verification at the moment.
710 */
711 boolean_t aticontext, pidle;
712 ml_get_power_state(&aticontext, &pidle);
713
714 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
715 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
716
717 uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);
718
719 if (ttd) {
720 if (ttd <= timer_deadline_tracking_bin_1) {
721 thread->thread_timer_wakeups_bin_1++;
722 } else if (ttd <= timer_deadline_tracking_bin_2) {
723 thread->thread_timer_wakeups_bin_2++;
724 }
725 }
726
727 ledger_credit_thread(thread, thread->t_ledger,
728 task_ledgers.interrupt_wakeups, 1);
729 if (pidle) {
730 ledger_credit_thread(thread, thread->t_ledger,
731 task_ledgers.platform_idle_wakeups, 1);
732 }
733 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
734 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
735 if (cthread->callout_woken_from_icontext) {
736 ledger_credit_thread(thread, thread->t_ledger,
737 task_ledgers.interrupt_wakeups, 1);
738 thread->thread_callout_interrupt_wakeups++;
739
740 if (cthread->callout_woken_from_platform_idle) {
741 ledger_credit_thread(thread, thread->t_ledger,
742 task_ledgers.platform_idle_wakeups, 1);
743 thread->thread_callout_platform_idle_wakeups++;
744 }
745
746 cthread->callout_woke_thread = TRUE;
747 }
748 }
749
750 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
751 thread->callout_woken_from_icontext = aticontext;
752 thread->callout_woken_from_platform_idle = pidle;
753 thread->callout_woke_thread = FALSE;
754 }
755
756 #if KPERF
757 if (ready_for_runq) {
758 kperf_make_runnable(thread, aticontext);
759 }
760 #endif /* KPERF */
761
762 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
763 MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
764 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
765 sched_run_buckets[TH_BUCKET_RUN], 0);
766
767 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
768
769 return ready_for_runq;
770 }
771
772 /*
773 * Routine: thread_go
774 * Purpose:
775 * Unblock and dispatch thread.
776 * Conditions:
777 * thread lock held, IPC locks may be held.
778 * thread must have been pulled from wait queue under same lock hold.
779 * thread must have been waiting
780 * Returns:
781 * KERN_SUCCESS - Thread was set running
782 *
783 * TODO: This should return void
784 */
785 kern_return_t
786 thread_go(
787 thread_t thread,
788 wait_result_t wresult)
789 {
790 assert_thread_magic(thread);
791
792 assert(thread->at_safe_point == FALSE);
793 assert(thread->wait_event == NO_EVENT64);
794 assert(thread->waitq == NULL);
795
796 assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
797 assert(thread->state & TH_WAIT);
798
799
800 if (thread_unblock(thread, wresult)) {
801 #if SCHED_TRACE_THREAD_WAKEUPS
802 backtrace(&thread->thread_wakeup_bt[0],
803 (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL);
804 #endif
805 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
806 }
807
808 return KERN_SUCCESS;
809 }
810
811 /*
812 * Routine: thread_mark_wait_locked
813 * Purpose:
814 * Mark a thread as waiting. If, given the circumstances,
815 * it doesn't want to wait (i.e. already aborted), then
816 * indicate that in the return value.
817 * Conditions:
818 * at splsched() and thread is locked.
819 */
820 __private_extern__
821 wait_result_t
822 thread_mark_wait_locked(
823 thread_t thread,
824 wait_interrupt_t interruptible_orig)
825 {
826 boolean_t at_safe_point;
827 wait_interrupt_t interruptible = interruptible_orig;
828
829 if (thread->state & TH_IDLE) {
830 panic("Invalid attempt to wait while running the idle thread");
831 }
832
833 assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
834
835 /*
836 * The thread may have certain types of interrupts/aborts masked
837 * off. Even if the wait location says these types of interrupts
838 * are OK, we have to honor mask settings (outer-scoped code may
839 * not be able to handle aborts at the moment).
840 */
841 interruptible &= TH_OPT_INTMASK;
842 if (interruptible > (thread->options & TH_OPT_INTMASK)) {
843 interruptible = thread->options & TH_OPT_INTMASK;
844 }
845
846 at_safe_point = (interruptible == THREAD_ABORTSAFE);
847
848 if (interruptible == THREAD_UNINT ||
849 !(thread->sched_flags & TH_SFLAG_ABORT) ||
850 (!at_safe_point &&
851 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
852 if (!(thread->state & TH_TERMINATE)) {
853 DTRACE_SCHED(sleep);
854 }
855
856 int state_bits = TH_WAIT;
857 if (!interruptible) {
858 state_bits |= TH_UNINT;
859 }
860 if (thread->sched_call) {
861 wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
862 if (is_kerneltask(thread->task)) {
863 mask = THREAD_WAIT_NOREPORT_KERNEL;
864 }
865 if ((interruptible_orig & mask) == 0) {
866 state_bits |= TH_WAIT_REPORT;
867 }
868 }
869 thread->state |= state_bits;
870 thread->at_safe_point = at_safe_point;
871
872 /* TODO: pass this through assert_wait instead, have
873 * assert_wait just take a struct as an argument */
874 assert(!thread->block_hint);
875 thread->block_hint = thread->pending_block_hint;
876 thread->pending_block_hint = kThreadWaitNone;
877
878 return thread->wait_result = THREAD_WAITING;
879 } else {
880 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
881 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
882 }
883 }
884 thread->pending_block_hint = kThreadWaitNone;
885
886 return thread->wait_result = THREAD_INTERRUPTED;
887 }
888
889 /*
890 * Routine: thread_interrupt_level
891 * Purpose:
892 * Set the maximum interruptible state for the
893 * current thread. The effective value of any
894 * interruptible flag passed into assert_wait
895 * will never exceed this.
896 *
897 * Useful for code that must not be interrupted,
898 * but which calls code that doesn't know that.
899 * Returns:
900 * The old interrupt level for the thread.
901 */
902 __private_extern__
903 wait_interrupt_t
904 thread_interrupt_level(
905 wait_interrupt_t new_level)
906 {
907 thread_t thread = current_thread();
908 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
909
910 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
911
912 return result;
913 }
914
915 /*
916 * assert_wait:
917 *
918 * Assert that the current thread is about to go to
919 * sleep until the specified event occurs.
920 */
921 wait_result_t
922 assert_wait(
923 event_t event,
924 wait_interrupt_t interruptible)
925 {
926 if (__improbable(event == NO_EVENT)) {
927 panic("%s() called with NO_EVENT", __func__);
928 }
929
930 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
931 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
932 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
933
934 struct waitq *waitq;
935 waitq = global_eventq(event);
936 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
937 }
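
/*
 * A minimal sketch of how these primitives pair up (the object, field and
 * event names below are illustrative only):
 *
 *	// Waiter: publish interest in the event before testing the condition,
 *	// so a wakeup between the test and the block is not lost.
 *	assert_wait((event_t)&object->flag, THREAD_UNINT);
 *	if (object->flag) {
 *		clear_wait(current_thread(), THREAD_AWAKENED);
 *	} else {
 *		thread_block(THREAD_CONTINUE_NULL);
 *	}
 *
 *	// Waker: set the condition, then wake anyone asserted on the event.
 *	object->flag = TRUE;
 *	thread_wakeup((event_t)&object->flag);
 */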
938
939 /*
940 * assert_wait_queue:
941 *
942 * Return the global waitq for the specified event
943 */
944 struct waitq *
945 assert_wait_queue(
946 event_t event)
947 {
948 return global_eventq(event);
949 }
950
951 wait_result_t
952 assert_wait_timeout(
953 event_t event,
954 wait_interrupt_t interruptible,
955 uint32_t interval,
956 uint32_t scale_factor)
957 {
958 thread_t thread = current_thread();
959 wait_result_t wresult;
960 uint64_t deadline;
961 spl_t s;
962
963 if (__improbable(event == NO_EVENT)) {
964 panic("%s() called with NO_EVENT", __func__);
965 }
966
967 struct waitq *waitq;
968 waitq = global_eventq(event);
969
970 s = splsched();
971 waitq_lock(waitq);
972
973 clock_interval_to_deadline(interval, scale_factor, &deadline);
974
975 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
976 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
977 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
978
979 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
980 interruptible,
981 TIMEOUT_URGENCY_SYS_NORMAL,
982 deadline, TIMEOUT_NO_LEEWAY,
983 thread);
984
985 waitq_unlock(waitq);
986 splx(s);
987 return wresult;
988 }
989
990 wait_result_t
991 assert_wait_timeout_with_leeway(
992 event_t event,
993 wait_interrupt_t interruptible,
994 wait_timeout_urgency_t urgency,
995 uint32_t interval,
996 uint32_t leeway,
997 uint32_t scale_factor)
998 {
999 thread_t thread = current_thread();
1000 wait_result_t wresult;
1001 uint64_t deadline;
1002 uint64_t abstime;
1003 uint64_t slop;
1004 uint64_t now;
1005 spl_t s;
1006
1007 if (__improbable(event == NO_EVENT)) {
1008 panic("%s() called with NO_EVENT", __func__);
1009 }
1010
1011 now = mach_absolute_time();
1012 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1013 deadline = now + abstime;
1014
1015 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1016
1017 struct waitq *waitq;
1018 waitq = global_eventq(event);
1019
1020 s = splsched();
1021 waitq_lock(waitq);
1022
1023 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1024 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1025 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1026
1027 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1028 interruptible,
1029 urgency, deadline, slop,
1030 thread);
1031
1032 waitq_unlock(waitq);
1033 splx(s);
1034 return wresult;
1035 }
1036
1037 wait_result_t
1038 assert_wait_deadline(
1039 event_t event,
1040 wait_interrupt_t interruptible,
1041 uint64_t deadline)
1042 {
1043 thread_t thread = current_thread();
1044 wait_result_t wresult;
1045 spl_t s;
1046
1047 if (__improbable(event == NO_EVENT)) {
1048 panic("%s() called with NO_EVENT", __func__);
1049 }
1050
1051 struct waitq *waitq;
1052 waitq = global_eventq(event);
1053
1054 s = splsched();
1055 waitq_lock(waitq);
1056
1057 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1058 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1059 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1060
1061 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1062 interruptible,
1063 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1064 TIMEOUT_NO_LEEWAY, thread);
1065 waitq_unlock(waitq);
1066 splx(s);
1067 return wresult;
1068 }
1069
1070 wait_result_t
1071 assert_wait_deadline_with_leeway(
1072 event_t event,
1073 wait_interrupt_t interruptible,
1074 wait_timeout_urgency_t urgency,
1075 uint64_t deadline,
1076 uint64_t leeway)
1077 {
1078 thread_t thread = current_thread();
1079 wait_result_t wresult;
1080 spl_t s;
1081
1082 if (__improbable(event == NO_EVENT)) {
1083 panic("%s() called with NO_EVENT", __func__);
1084 }
1085
1086 struct waitq *waitq;
1087 waitq = global_eventq(event);
1088
1089 s = splsched();
1090 waitq_lock(waitq);
1091
1092 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1093 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1094 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1095
1096 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1097 interruptible,
1098 urgency, deadline, leeway,
1099 thread);
1100 waitq_unlock(waitq);
1101 splx(s);
1102 return wresult;
1103 }
1104
1105 /*
1106 * thread_isoncpu:
1107 *
1108 * Return TRUE if a thread is running on a processor such that an AST
1109 * is needed to pull it out of userspace execution, or if executing in
1110 * the kernel, bring to a context switch boundary that would cause
1111 * thread state to be serialized in the thread PCB.
1112 *
1113 * Thread locked, returns the same way. While locked, fields
1114 * like "state" cannot change. "runq" can change only from set to unset.
1115 */
1116 static inline boolean_t
1117 thread_isoncpu(thread_t thread)
1118 {
1119 /* Not running or runnable */
1120 if (!(thread->state & TH_RUN)) {
1121 return FALSE;
1122 }
1123
1124 /* Waiting on a runqueue, not currently running */
1125 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1126 if (thread->runq != PROCESSOR_NULL) {
1127 return FALSE;
1128 }
1129
1130 /*
1131 * Thread does not have a stack yet
1132 * It could be on the stack alloc queue or preparing to be invoked
1133 */
1134 if (!thread->kernel_stack) {
1135 return FALSE;
1136 }
1137
1138 /*
1139 * Thread must be running on a processor, or
1140 * about to run, or just did run. In all these
1141 * cases, an AST to the processor is needed
1142 * to guarantee that the thread is kicked out
1143 * of userspace and the processor has
1144 * context switched (and saved register state).
1145 */
1146 return TRUE;
1147 }
1148
1149 /*
1150 * thread_stop:
1151 *
1152 * Force a preemption point for a thread and wait
1153 * for it to stop running on a CPU. If a stronger
1154 * guarantee is requested, wait until no longer
1155 * runnable. Arbitrates access among
1156 * multiple stop requests. (released by unstop)
1157 *
1158 * The thread must enter a wait state and stop via a
1159 * separate means.
1160 *
1161 * Returns FALSE if interrupted.
1162 */
1163 boolean_t
1164 thread_stop(
1165 thread_t thread,
1166 boolean_t until_not_runnable)
1167 {
1168 wait_result_t wresult;
1169 spl_t s = splsched();
1170 boolean_t oncpu;
1171
1172 wake_lock(thread);
1173 thread_lock(thread);
1174
1175 while (thread->state & TH_SUSP) {
1176 thread->wake_active = TRUE;
1177 thread_unlock(thread);
1178
1179 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1180 wake_unlock(thread);
1181 splx(s);
1182
1183 if (wresult == THREAD_WAITING) {
1184 wresult = thread_block(THREAD_CONTINUE_NULL);
1185 }
1186
1187 if (wresult != THREAD_AWAKENED) {
1188 return FALSE;
1189 }
1190
1191 s = splsched();
1192 wake_lock(thread);
1193 thread_lock(thread);
1194 }
1195
1196 thread->state |= TH_SUSP;
1197
1198 while ((oncpu = thread_isoncpu(thread)) ||
1199 (until_not_runnable && (thread->state & TH_RUN))) {
1200 processor_t processor;
1201
1202 if (oncpu) {
1203 assert(thread->state & TH_RUN);
1204 processor = thread->chosen_processor;
1205 cause_ast_check(processor);
1206 }
1207
1208 thread->wake_active = TRUE;
1209 thread_unlock(thread);
1210
1211 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1212 wake_unlock(thread);
1213 splx(s);
1214
1215 if (wresult == THREAD_WAITING) {
1216 wresult = thread_block(THREAD_CONTINUE_NULL);
1217 }
1218
1219 if (wresult != THREAD_AWAKENED) {
1220 thread_unstop(thread);
1221 return FALSE;
1222 }
1223
1224 s = splsched();
1225 wake_lock(thread);
1226 thread_lock(thread);
1227 }
1228
1229 thread_unlock(thread);
1230 wake_unlock(thread);
1231 splx(s);
1232
1233 /*
1234 * We return with the thread unlocked. To prevent it from
1235 * transitioning to a runnable state (or from TH_RUN to
1236 * being on the CPU), the caller must ensure the thread
1237 * is stopped via an external means (such as an AST)
1238 */
1239
1240 return TRUE;
1241 }
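
/*
 * Sketch of the intended pairing (callers and checks are illustrative):
 *
 *	if (thread_stop(thread, TRUE)) {
 *		// The thread is not runnable; it stays stopped only while the
 *		// caller keeps it suspended or waiting by some external means.
 *		...inspect or manipulate the thread...
 *		thread_unstop(thread);
 *	}
 *	// On FALSE the wait was interrupted and no matching thread_unstop()
 *	// is required.
 */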
1242
1243 /*
1244 * thread_unstop:
1245 *
1246 * Release a previous stop request and set
1247 * the thread running if appropriate.
1248 *
1249 * Use only after a successful stop operation.
1250 */
1251 void
1252 thread_unstop(
1253 thread_t thread)
1254 {
1255 spl_t s = splsched();
1256
1257 wake_lock(thread);
1258 thread_lock(thread);
1259
1260 assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
1261
1262 if (thread->state & TH_SUSP) {
1263 thread->state &= ~TH_SUSP;
1264
1265 if (thread->wake_active) {
1266 thread->wake_active = FALSE;
1267 thread_unlock(thread);
1268
1269 thread_wakeup(&thread->wake_active);
1270 wake_unlock(thread);
1271 splx(s);
1272
1273 return;
1274 }
1275 }
1276
1277 thread_unlock(thread);
1278 wake_unlock(thread);
1279 splx(s);
1280 }
1281
1282 /*
1283 * thread_wait:
1284 *
1285 * Wait for a thread to stop running. (non-interruptible)
1286 *
1287 */
1288 void
1289 thread_wait(
1290 thread_t thread,
1291 boolean_t until_not_runnable)
1292 {
1293 wait_result_t wresult;
1294 boolean_t oncpu;
1295 processor_t processor;
1296 spl_t s = splsched();
1297
1298 wake_lock(thread);
1299 thread_lock(thread);
1300
1301 /*
1302 * Wait until not running on a CPU. If stronger requirement
1303 * desired, wait until not runnable. Assumption: if thread is
1304 * on CPU, then TH_RUN is set, so we're not waiting in any case
1305 * where the original, pure "TH_RUN" check would have let us
1306 * finish.
1307 */
1308 while ((oncpu = thread_isoncpu(thread)) ||
1309 (until_not_runnable && (thread->state & TH_RUN))) {
1310 if (oncpu) {
1311 assert(thread->state & TH_RUN);
1312 processor = thread->chosen_processor;
1313 cause_ast_check(processor);
1314 }
1315
1316 thread->wake_active = TRUE;
1317 thread_unlock(thread);
1318
1319 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1320 wake_unlock(thread);
1321 splx(s);
1322
1323 if (wresult == THREAD_WAITING) {
1324 thread_block(THREAD_CONTINUE_NULL);
1325 }
1326
1327 s = splsched();
1328 wake_lock(thread);
1329 thread_lock(thread);
1330 }
1331
1332 thread_unlock(thread);
1333 wake_unlock(thread);
1334 splx(s);
1335 }
1336
1337 /*
1338 * Routine: clear_wait_internal
1339 *
1340 * Clear the wait condition for the specified thread.
1341 * Start the thread executing if that is appropriate.
1342 * Arguments:
1343 * thread thread to awaken
1344 * result Wakeup result the thread should see
1345 * Conditions:
1346 * At splsched
1347 * the thread is locked.
1348 * Returns:
1349 * KERN_SUCCESS thread was rousted out a wait
1350 * KERN_FAILURE thread was waiting but could not be rousted
1351 * KERN_NOT_WAITING thread was not waiting
1352 */
1353 __private_extern__ kern_return_t
1354 clear_wait_internal(
1355 thread_t thread,
1356 wait_result_t wresult)
1357 {
1358 uint32_t i = LockTimeOutUsec;
1359 struct waitq *waitq = thread->waitq;
1360
1361 do {
1362 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1363 return KERN_FAILURE;
1364 }
1365
1366 if (waitq != NULL) {
1367 if (!waitq_pull_thread_locked(waitq, thread)) {
1368 thread_unlock(thread);
1369 delay(1);
1370 if (i > 0 && !machine_timeout_suspended()) {
1371 i--;
1372 }
1373 thread_lock(thread);
1374 if (waitq != thread->waitq) {
1375 return KERN_NOT_WAITING;
1376 }
1377 continue;
1378 }
1379 }
1380
1381 /* TODO: Can we instead assert TH_TERMINATE is not set? */
1382 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
1383 return thread_go(thread, wresult);
1384 } else {
1385 return KERN_NOT_WAITING;
1386 }
1387 } while (i > 0);
1388
1389 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1390 thread, waitq, cpu_number());
1391
1392 return KERN_FAILURE;
1393 }
1394
1395
1396 /*
1397 * clear_wait:
1398 *
1399 * Clear the wait condition for the specified thread. Start the thread
1400 * executing if that is appropriate.
1401 *
1402 * parameters:
1403 * thread thread to awaken
1404 * result Wakeup result the thread should see
1405 */
1406 kern_return_t
1407 clear_wait(
1408 thread_t thread,
1409 wait_result_t result)
1410 {
1411 kern_return_t ret;
1412 spl_t s;
1413
1414 s = splsched();
1415 thread_lock(thread);
1416 ret = clear_wait_internal(thread, result);
1417 thread_unlock(thread);
1418 splx(s);
1419 return ret;
1420 }
1421
1422
1423 /*
1424 * thread_wakeup_prim:
1425 *
1426 * Common routine for thread_wakeup, thread_wakeup_with_result,
1427 * and thread_wakeup_one.
1428 *
1429 */
1430 kern_return_t
1431 thread_wakeup_prim(
1432 event_t event,
1433 boolean_t one_thread,
1434 wait_result_t result)
1435 {
1436 if (__improbable(event == NO_EVENT)) {
1437 panic("%s() called with NO_EVENT", __func__);
1438 }
1439
1440 struct waitq *wq = global_eventq(event);
1441
1442 if (one_thread) {
1443 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1444 } else {
1445 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1446 }
1447 }
1448
1449 /*
1450 * Wakeup a specified thread if and only if it's waiting for this event
1451 */
1452 kern_return_t
1453 thread_wakeup_thread(
1454 event_t event,
1455 thread_t thread)
1456 {
1457 if (__improbable(event == NO_EVENT)) {
1458 panic("%s() called with NO_EVENT", __func__);
1459 }
1460
1461 if (__improbable(thread == THREAD_NULL)) {
1462 panic("%s() called with THREAD_NULL", __func__);
1463 }
1464
1465 struct waitq *wq = global_eventq(event);
1466
1467 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1468 }
1469
1470 /*
1471 * Wakeup a thread waiting on an event and promote it to a priority.
1472 *
1473 * Requires woken thread to un-promote itself when done.
1474 */
1475 kern_return_t
1476 thread_wakeup_one_with_pri(
1477 event_t event,
1478 int priority)
1479 {
1480 if (__improbable(event == NO_EVENT)) {
1481 panic("%s() called with NO_EVENT", __func__);
1482 }
1483
1484 struct waitq *wq = global_eventq(event);
1485
1486 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1487 }
1488
1489 /*
1490 * Wakeup a thread waiting on an event,
1491 * promote it to a priority,
1492 * and return a reference to the woken thread.
1493 *
1494 * Requires woken thread to un-promote itself when done.
1495 */
1496 thread_t
1497 thread_wakeup_identify(event_t event,
1498 int priority)
1499 {
1500 if (__improbable(event == NO_EVENT)) {
1501 panic("%s() called with NO_EVENT", __func__);
1502 }
1503
1504 struct waitq *wq = global_eventq(event);
1505
1506 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1507 }
1508
1509 /*
1510 * thread_bind:
1511 *
1512 * Force the current thread to execute on the specified processor.
1513 * Takes effect after the next thread_block().
1514 *
1515 * Returns the previous binding. PROCESSOR_NULL means
1516 * not bound.
1517 *
1518 * XXX - DO NOT export this to users - XXX
1519 */
1520 processor_t
1521 thread_bind(
1522 processor_t processor)
1523 {
1524 thread_t self = current_thread();
1525 processor_t prev;
1526 spl_t s;
1527
1528 s = splsched();
1529 thread_lock(self);
1530
1531 prev = thread_bind_internal(self, processor);
1532
1533 thread_unlock(self);
1534 splx(s);
1535
1536 return prev;
1537 }
1538
1539 /*
1540 * thread_bind_internal:
1541 *
1542 * If the specified thread is not the current thread, and it is currently
1543 * running on another CPU, a remote AST must be sent to that CPU to cause
1544 * the thread to migrate to its bound processor. Otherwise, the migration
1545 * will occur at the next quantum expiration or blocking point.
1546 *
1547 * When the thread is the current thread, an explicit thread_block() should
1548 * be used to force the current processor to context switch away and
1549 * let the thread migrate to the bound processor.
1550 *
1551 * Thread must be locked, and at splsched.
1552 */
1553
1554 static processor_t
1555 thread_bind_internal(
1556 thread_t thread,
1557 processor_t processor)
1558 {
1559 processor_t prev;
1560
1561 /* <rdar://problem/15102234> */
1562 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1563 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1564 assert(thread->runq == PROCESSOR_NULL);
1565
1566 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1567
1568 prev = thread->bound_processor;
1569 thread->bound_processor = processor;
1570
1571 return prev;
1572 }
1573
1574 /*
1575 * thread_vm_bind_group_add:
1576 *
1577 * The "VM bind group" is a special mechanism to mark a collection
1578 * of threads from the VM subsystem that, in general, should be scheduled
1579 * with only one CPU of parallelism. To accomplish this, we initially
1580 * bind all the threads to the master processor, which has the effect
1581 * that only one of the threads in the group can execute at once, including
1582 * preempting threads in the group that are at a lower priority. Future
1583 * mechanisms may use more dynamic mechanisms to prevent the collection
1584 * of VM threads from using more CPU time than desired.
1585 *
1586 * The current implementation can result in priority inversions where
1587 * compute-bound priority 95 or realtime threads that happen to have
1588 * landed on the master processor prevent the VM threads from running.
1589 * When this situation is detected, we unbind the threads for one
1590 * scheduler tick to allow the scheduler to run the threads on
1591 * additional CPUs, before restoring the binding (assuming high latency
1592 * is no longer a problem).
1593 */
1594
1595 /*
1596 * The current max is provisioned for:
1597 * vm_compressor_swap_trigger_thread (92)
1598 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1599 * vm_pageout_continue (92)
1600 * memorystatus_thread (95)
1601 */
1602 #define MAX_VM_BIND_GROUP_COUNT (5)
1603 decl_simple_lock_data(static, sched_vm_group_list_lock);
1604 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1605 static int sched_vm_group_thread_count;
1606 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1607
1608 void
1609 thread_vm_bind_group_add(void)
1610 {
1611 thread_t self = current_thread();
1612
1613 thread_reference_internal(self);
1614 self->options |= TH_OPT_SCHED_VM_GROUP;
1615
1616 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1617 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1618 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1619 simple_unlock(&sched_vm_group_list_lock);
1620
1621 thread_bind(master_processor);
1622
1623 /* Switch to bound processor if not already there */
1624 thread_block(THREAD_CONTINUE_NULL);
1625 }
1626
1627 static void
1628 sched_vm_group_maintenance(void)
1629 {
1630 uint64_t ctime = mach_absolute_time();
1631 uint64_t longtime = ctime - sched_tick_interval;
1632 int i;
1633 spl_t s;
1634 boolean_t high_latency_observed = FALSE;
1635 boolean_t runnable_and_not_on_runq_observed = FALSE;
1636 boolean_t bind_target_changed = FALSE;
1637 processor_t bind_target = PROCESSOR_NULL;
1638
1639 /* Make sure nobody attempts to add new threads while we are enumerating them */
1640 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1641
1642 s = splsched();
1643
1644 for (i = 0; i < sched_vm_group_thread_count; i++) {
1645 thread_t thread = sched_vm_group_thread_list[i];
1646 assert(thread != THREAD_NULL);
1647 thread_lock(thread);
1648 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
1649 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1650 high_latency_observed = TRUE;
1651 } else if (thread->runq == PROCESSOR_NULL) {
1652 /* There are some cases where a thread may be transitioning that also fall into this case */
1653 runnable_and_not_on_runq_observed = TRUE;
1654 }
1655 }
1656 thread_unlock(thread);
1657
1658 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1659 /* All the things we are looking for are true, stop looking */
1660 break;
1661 }
1662 }
1663
1664 splx(s);
1665
1666 if (sched_vm_group_temporarily_unbound) {
1667 /* If we turned off binding, make sure everything is OK before rebinding */
1668 if (!high_latency_observed) {
1669 /* rebind */
1670 bind_target_changed = TRUE;
1671 bind_target = master_processor;
1672 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1673 }
1674 } else {
1675 /*
1676 * Check if we're in a bad state, which is defined by high
1677 * latency with no core currently executing a thread. If a
1678 * single thread is making progress on a CPU, that means the
1679 * binding concept to reduce parallelism is working as
1680 * designed.
1681 */
1682 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1683 /* unbind */
1684 bind_target_changed = TRUE;
1685 bind_target = PROCESSOR_NULL;
1686 sched_vm_group_temporarily_unbound = TRUE;
1687 }
1688 }
1689
1690 if (bind_target_changed) {
1691 s = splsched();
1692 for (i = 0; i < sched_vm_group_thread_count; i++) {
1693 thread_t thread = sched_vm_group_thread_list[i];
1694 boolean_t removed;
1695 assert(thread != THREAD_NULL);
1696
1697 thread_lock(thread);
1698 removed = thread_run_queue_remove(thread);
1699 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1700 thread_bind_internal(thread, bind_target);
1701 } else {
1702 /*
1703 * Thread was in the middle of being context-switched-to,
1704 * or was in the process of blocking. To avoid switching the bind
1705 * state out mid-flight, defer the change if possible.
1706 */
1707 if (bind_target == PROCESSOR_NULL) {
1708 thread_bind_internal(thread, bind_target);
1709 } else {
1710 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1711 }
1712 }
1713
1714 if (removed) {
1715 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1716 }
1717 thread_unlock(thread);
1718 }
1719 splx(s);
1720 }
1721
1722 simple_unlock(&sched_vm_group_list_lock);
1723 }
1724
1725 /* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
1726 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1727 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1728 * IPI thrash if this core does not remain idle following the load balancing ASTs
1729 * Idle "thrash", when IPI issue is followed by idle entry/core power down
1730 * followed by a wakeup shortly thereafter.
1731 */
1732
1733 #if (DEVELOPMENT || DEBUG)
1734 int sched_smt_balance = 1;
1735 #endif
1736
1737 #if __SMP__
1738 /* Invoked with pset locked, returns with pset unlocked */
1739 void
1740 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
1741 {
1742 processor_t ast_processor = NULL;
1743
1744 #if (DEVELOPMENT || DEBUG)
1745 if (__improbable(sched_smt_balance == 0)) {
1746 goto smt_balance_exit;
1747 }
1748 #endif
1749
1750 assert(cprocessor == current_processor());
1751 if (cprocessor->is_SMT == FALSE) {
1752 goto smt_balance_exit;
1753 }
1754
1755 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1756
1757 /* Determine if both this processor and its sibling are idle,
1758 * indicating an SMT rebalancing opportunity.
1759 */
1760 if (sib_processor->state != PROCESSOR_IDLE) {
1761 goto smt_balance_exit;
1762 }
1763
1764 processor_t sprocessor;
1765
1766 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
1767 uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
1768 ~cpset->primary_map);
1769 for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
1770 sprocessor = processor_array[cpuid];
1771 if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
1772 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
1773 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1774 if (ipi_type != SCHED_IPI_NONE) {
1775 assert(sprocessor != cprocessor);
1776 ast_processor = sprocessor;
1777 break;
1778 }
1779 }
1780 }
1781
1782 smt_balance_exit:
1783 pset_unlock(cpset);
1784
1785 if (ast_processor) {
1786 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
1787 sched_ipi_perform(ast_processor, ipi_type);
1788 }
1789 }
1790 #else
1791 /* Invoked with pset locked, returns with pset unlocked */
1792 void
1793 sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
1794 {
1795 pset_unlock(cpset);
1796 }
1797 #endif /* __SMP__ */
1798
1799 /*
1800 * Called with pset locked, on a processor that is committing to run a new thread
1801 * Will transition an idle or dispatching processor to running as it picks up
1802 * the first new thread from the idle thread.
1803 */
1804 static void
1805 pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
1806 {
1807 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
1808 assert(current_thread() == processor->idle_thread);
1809
1810 /*
1811 * Dispatching processor is now committed to running new_thread,
1812 * so change its state to PROCESSOR_RUNNING.
1813 */
1814 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
1815 } else {
1816 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
1817 }
1818
1819 processor_state_update_from_thread(processor, new_thread);
1820 }
1821
1822 static processor_t choose_processor_for_realtime_thread(processor_set_t pset);
1823 static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
1824 static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map);
1825 static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor);
1826 int sched_allow_rt_smt = 1;
1827 int sched_avoid_cpu0 = 1;
1828
1829 /*
1830 * thread_select:
1831 *
1832 * Select a new thread for the current processor to execute.
1833 *
1834 * May select the current thread, which must be locked.
1835 */
1836 static thread_t
1837 thread_select(thread_t thread,
1838 processor_t processor,
1839 ast_t *reason)
1840 {
1841 processor_set_t pset = processor->processor_set;
1842 thread_t new_thread = THREAD_NULL;
1843
1844 assert(processor == current_processor());
1845 assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
1846
1847 do {
1848 /*
1849 * Update the priority.
1850 */
1851 if (SCHED(can_update_priority)(thread)) {
1852 SCHED(update_priority)(thread);
1853 }
1854
1855 pset_lock(pset);
1856
1857 processor_state_update_from_thread(processor, thread);
1858
1859 restart:
1860 /* Acknowledge any pending IPIs here with pset lock held */
1861 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
1862 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
1863
1864 #if defined(CONFIG_SCHED_DEFERRED_AST)
1865 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
1866 #endif
1867
1868 bool secondary_can_only_run_realtime_thread = false;
1869
1870 assert(processor->state != PROCESSOR_OFF_LINE);
1871
1872 if (!processor->is_recommended) {
1873 /*
1874 * The performance controller has provided a hint to not dispatch more threads,
1875 * unless they are bound to us (and thus we are the only option).
1876 */
1877 if (!SCHED(processor_bound_count)(processor)) {
1878 goto idle;
1879 }
1880 } else if (processor->processor_primary != processor) {
1881 /*
1882 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1883 * we should look for work only under the same conditions that choose_processor()
1884 * would have assigned work, which is when all primary processors have been assigned work.
1885 *
1886 * An exception is that bound threads are dispatched to a processor without going through
1887 * choose_processor(), so in those cases we should continue trying to dequeue work.
1888 */
1889 if (!SCHED(processor_bound_count)(processor)) {
1890 if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
1891 goto idle;
1892 }
1893
1894 /*
1895 * TODO: What if a secondary core beat an idle primary to waking up from an IPI?
1896 * Should it dequeue immediately, or spin waiting for the primary to wake up?
1897 */
1898
1899 /* There are no idle primaries */
1900
1901 if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) {
1902 bool secondary_can_run_realtime_thread = sched_allow_rt_smt && rt_runq_count(pset) && all_available_primaries_are_running_realtime_threads(pset);
1903 if (!secondary_can_run_realtime_thread) {
1904 goto idle;
1905 }
1906 secondary_can_only_run_realtime_thread = true;
1907 }
1908 }
1909 }
1910
1911 /*
1912 * Test to see if the current thread should continue
1913 * to run on this processor. It must not be attempting to wait, must not be
1914 * bound to a different processor, must not be in the wrong
1915 * processor set, and must not be forced to context switch by TH_SUSP.
1916 *
1917 * Note that there are never any RT threads in the regular runqueue.
1918 *
1919 * This code is insanely tricky.
1920 */
1921
1922 /* i.e. not waiting, not TH_SUSP'ed */
1923 bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
1924
1925 /*
1926 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
1927 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
1928 * <rdar://problem/47907700>
1929 *
1930 * A yielding thread shouldn't be forced to context switch.
1931 */
1932
1933 bool is_yielding = (*reason & AST_YIELD) == AST_YIELD;
1934
1935 bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
1936
1937 bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
1938
1939 bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
1940
1941 bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread);
1942
1943 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
1944 /*
1945 * This thread is eligible to keep running on this processor.
1946 *
1947 * RT threads with un-expired quantum stay on processor,
1948 * unless there's a valid RT thread with an earlier deadline.
1949 */
1950 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
1951 if (rt_runq_count(pset) > 0) {
1952 rt_lock_lock(pset);
1953
1954 if (rt_runq_count(pset) > 0) {
1955 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1956
1957 if (next_rt->realtime.deadline < processor->deadline &&
1958 (next_rt->bound_processor == PROCESSOR_NULL ||
1959 next_rt->bound_processor == processor)) {
1960 /* The next RT thread is better, so pick it off the runqueue. */
1961 goto pick_new_rt_thread;
1962 }
1963 }
1964
1965 rt_lock_unlock(pset);
1966 }
1967
1968 /* This is still the best RT thread to run. */
1969 processor->deadline = thread->realtime.deadline;
1970
1971 sched_update_pset_load_average(pset);
1972
1973 processor_t next_rt_processor = PROCESSOR_NULL;
1974 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
1975
1976 if (rt_runq_count(pset) > 0) {
1977 next_rt_processor = choose_processor_for_realtime_thread(pset);
1978 if (next_rt_processor) {
1979 if (next_rt_processor->state == PROCESSOR_IDLE) {
1980 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
1981 }
1982 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
1983 }
1984 }
1985 pset_unlock(pset);
1986
1987 if (next_rt_processor) {
1988 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
1989 }
1990
1991 return thread;
1992 }
1993
1994 if ((rt_runq_count(pset) == 0) &&
1995 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
1996 /* This thread is still the highest priority runnable (non-idle) thread */
1997 processor->deadline = UINT64_MAX;
1998
1999 sched_update_pset_load_average(pset);
2000 pset_unlock(pset);
2001
2002 return thread;
2003 }
2004 } else {
2005 /*
2006 * This processor must context switch.
2007 * If it's due to a rebalance, we should aggressively find this thread a new home.
2008 */
2009 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
2010 *reason |= AST_REBALANCE;
2011 }
2012 }
2013
2014 /* OK, so we're not going to run the current thread. Look at the RT queue. */
2015 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor);
2016 if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
2017 rt_lock_lock(pset);
2018
2019 if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
2020 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2021
2022 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
2023 (next_rt->bound_processor == processor)))) {
2024 pick_new_rt_thread:
2025 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
2026
2027 new_thread->runq = PROCESSOR_NULL;
2028 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
2029 rt_runq_count_decr(pset);
2030
2031 processor->deadline = new_thread->realtime.deadline;
2032
2033 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2034
2035 rt_lock_unlock(pset);
2036 sched_update_pset_load_average(pset);
2037
2038 processor_t ast_processor = PROCESSOR_NULL;
2039 processor_t next_rt_processor = PROCESSOR_NULL;
2040 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2041 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2042
2043 if (processor->processor_secondary != NULL) {
2044 processor_t sprocessor = processor->processor_secondary;
2045 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2046 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2047 ast_processor = sprocessor;
2048 }
2049 }
2050 if (rt_runq_count(pset) > 0) {
2051 next_rt_processor = choose_processor_for_realtime_thread(pset);
2052 if (next_rt_processor) {
2053 if (next_rt_processor->state == PROCESSOR_IDLE) {
2054 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
2055 }
2056 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
2057 }
2058 }
2059 pset_unlock(pset);
2060
2061 if (ast_processor) {
2062 sched_ipi_perform(ast_processor, ipi_type);
2063 }
2064
2065 if (next_rt_processor) {
2066 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2067 }
2068
2069 return new_thread;
2070 }
2071 }
2072
2073 rt_lock_unlock(pset);
2074 }
2075 if (secondary_can_only_run_realtime_thread) {
2076 goto idle;
2077 }
2078
2079 processor->deadline = UINT64_MAX;
2080
2081 /* No RT threads, so let's look at the regular threads. */
2082 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
2083 sched_update_pset_load_average(pset);
2084
2085 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2086
2087 processor_t ast_processor = PROCESSOR_NULL;
2088 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2089
2090 processor_t sprocessor = processor->processor_secondary;
2091 if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) {
2092 if (thread_no_smt(new_thread)) {
2093 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2094 ast_processor = sprocessor;
2095 }
2096 }
2097 pset_unlock(pset);
2098
2099 if (ast_processor) {
2100 sched_ipi_perform(ast_processor, ipi_type);
2101 }
2102 return new_thread;
2103 }
2104
2105 if (processor->must_idle) {
2106 processor->must_idle = false;
2107 goto idle;
2108 }
2109
2110 #if __SMP__
2111 if (SCHED(steal_thread_enabled)(pset)) {
2112 /*
2113 * No runnable threads, attempt to steal
2114 * from other processors. Returns with pset lock dropped.
2115 */
2116
2117 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
2118 /*
2119 * Avoid taking the pset_lock unless it is necessary to change state.
2120 * It's safe to read processor->state here, as only the current processor can change state
2121 * from this point (interrupts are disabled and this processor is committed to run new_thread).
2122 */
2123 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2124 pset_lock(pset);
2125 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2126 pset_unlock(pset);
2127 } else {
2128 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
2129 processor_state_update_from_thread(processor, new_thread);
2130 }
2131
2132 return new_thread;
2133 }
2134
2135 /*
2136 * If other threads have appeared, shortcut
2137 * around again.
2138 */
2139 if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) {
2140 continue;
2141 }
2142
2143 pset_lock(pset);
2144
2145 /* Someone selected this processor while we had dropped the lock */
2146 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2147 goto restart;
2148 }
2149 }
2150 #endif
2151
2152 idle:
2153 /*
2154 * Nothing is runnable, so set this processor idle if it
2155 * was running.
2156 */
2157 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
2158 pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
2159 processor_state_update_idle(processor);
2160 }
2161
2162 #if __SMP__
2163 /* Invoked with pset locked, returns with pset unlocked */
2164 SCHED(processor_balance)(processor, pset);
2165 #else
2166 pset_unlock(pset);
2167 #endif
2168
2169 new_thread = processor->idle_thread;
2170 } while (new_thread == THREAD_NULL);
2171
2172 return new_thread;
2173 }
2174
2175 /*
2176 * thread_invoke
2177 *
2178 * Called at splsched with neither thread locked.
2179 *
2180 * Perform a context switch and start executing the new thread.
2181 *
2182 * Returns FALSE when the context switch didn't happen.
2183 * The reference to the new thread is still consumed.
2184 *
2185 * "self" is what is currently running on the processor,
2186 * "thread" is the new thread to context switch to
2187 * (which may be the same thread in some cases)
2188 */
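/*
 * Two switch mechanisms follow. If the current thread parked itself in
 * a continuation and the target has no kernel stack, the stack is
 * handed off directly via stack_handoff() and the continuation is
 * called on it. Otherwise a full register save/restore is performed
 * via machine_switch_context().
 */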
2189 static boolean_t
2190 thread_invoke(
2191 thread_t self,
2192 thread_t thread,
2193 ast_t reason)
2194 {
2195 if (__improbable(get_preemption_level() != 0)) {
2196 int pl = get_preemption_level();
2197 panic("thread_invoke: preemption_level %d, possible cause: %s",
2198 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2199 "blocking while holding a spinlock, or within interrupt context"));
2200 }
2201
2202 thread_continue_t continuation = self->continuation;
2203 void *parameter = self->parameter;
2204 processor_t processor;
2205
2206 uint64_t ctime = mach_absolute_time();
2207
2208 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2209 commpage_update_mach_approximate_time(ctime);
2210 #endif
2211
2212 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2213 if ((thread->state & TH_IDLE) == 0) {
2214 sched_timeshare_consider_maintenance(ctime);
2215 }
2216 #endif
2217
2218 #if MONOTONIC
2219 mt_sched_update(self);
2220 #endif /* MONOTONIC */
2221
2222 assert_thread_magic(self);
2223 assert(self == current_thread());
2224 assert(self->runq == PROCESSOR_NULL);
2225 assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
2226
2227 thread_lock(thread);
2228
2229 assert_thread_magic(thread);
2230 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
2231 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2232 assert(thread->runq == PROCESSOR_NULL);
2233
2234 /* Reload precise timing global policy to thread-local policy */
2235 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2236
2237 /* Update SFI class based on other factors */
2238 thread->sfi_class = sfi_thread_classify(thread);
2239
2240 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2241 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2242 /*
2243 * In case a base_pri update happened between the timestamp and
2244 * taking the thread lock
2245 */
2246 if (ctime <= thread->last_basepri_change_time) {
2247 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2248 }
2249
2250 /* Allow realtime threads to hang onto a stack. */
2251 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
2252 self->reserved_stack = self->kernel_stack;
2253 }
2254
2255 /* Prepare for spin debugging */
2256 #if INTERRUPT_MASKED_DEBUG
2257 ml_spin_debug_clear(thread);
2258 #endif
2259
2260 if (continuation != NULL) {
2261 if (!thread->kernel_stack) {
2262 /*
2263 * If we are using a privileged stack,
2264 * check to see whether we can exchange it with
2265 * that of the other thread.
2266 */
2267 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
2268 goto need_stack;
2269 }
2270
2271 /*
2272 * Context switch by performing a stack handoff.
2273 * Requires both threads to be parked in a continuation.
2274 */
2275 continuation = thread->continuation;
2276 parameter = thread->parameter;
2277
2278 processor = current_processor();
2279 processor->active_thread = thread;
2280 processor_state_update_from_thread(processor, thread);
2281
2282 if (thread->last_processor != processor && thread->last_processor != NULL) {
2283 if (thread->last_processor->processor_set != processor->processor_set) {
2284 thread->ps_switch++;
2285 }
2286 thread->p_switch++;
2287 }
2288 thread->last_processor = processor;
2289 thread->c_switch++;
2290 ast_context(thread);
2291
2292 thread_unlock(thread);
2293
2294 self->reason = reason;
2295
2296 processor->last_dispatch = ctime;
2297 self->last_run_time = ctime;
2298 processor_timer_switch_thread(ctime, &thread->system_timer);
2299 timer_update(&thread->runnable_timer, ctime);
2300 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2301
2302 /*
2303 * Since non-precise user/kernel time doesn't update the state timer
2304 * during privilege transitions, synthesize an event now.
2305 */
2306 if (!thread->precise_user_kernel_time) {
2307 timer_update(PROCESSOR_DATA(processor, current_state), ctime);
2308 }
2309
2310 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2311 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
2312 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2313
2314 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2315 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2316 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2317 }
2318
2319 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2320
2321 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2322
2323 #if KPERF
2324 kperf_off_cpu(self);
2325 #endif /* KPERF */
2326
2327 /*
2328 * This is where we actually switch thread identity,
2329 * and address space if required. However, register
2330 * state is not switched - this routine leaves the
2331 * stack and register state active on the current CPU.
2332 */
2333 TLOG(1, "thread_invoke: calling stack_handoff\n");
2334 stack_handoff(self, thread);
2335
2336 /* 'self' is now off core */
2337 assert(thread == current_thread_volatile());
2338
2339 DTRACE_SCHED(on__cpu);
2340
2341 #if KPERF
2342 kperf_on_cpu(thread, continuation, NULL);
2343 #endif /* KPERF */
2344
2345 thread_dispatch(self, thread);
2346
2347 #if KASAN
2348 /* Old thread's stack has been moved to the new thread, so explicitly
2349 * unpoison it. */
2350 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2351 #endif
2352
2353 thread->continuation = thread->parameter = NULL;
2354
2355 counter(c_thread_invoke_hits++);
2356
2357 boolean_t enable_interrupts = TRUE;
2358
2359 /* idle thread needs to stay interrupts-disabled */
2360 if ((thread->state & TH_IDLE)) {
2361 enable_interrupts = FALSE;
2362 }
2363
2364 assert(continuation);
2365 call_continuation(continuation, parameter,
2366 thread->wait_result, enable_interrupts);
2367 /*NOTREACHED*/
2368 } else if (thread == self) {
2369 /* same thread but with continuation */
2370 ast_context(self);
2371 counter(++c_thread_invoke_same);
2372
2373 thread_unlock(self);
2374
2375 #if KPERF
2376 kperf_on_cpu(thread, continuation, NULL);
2377 #endif /* KPERF */
2378
2379 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2380 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2381 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2382
2383 #if KASAN
2384 /* stack handoff to self - no thread_dispatch(), so clear the stack
2385 * and free the fakestack directly */
2386 kasan_fakestack_drop(self);
2387 kasan_fakestack_gc(self);
2388 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2389 #endif
2390
2391 self->continuation = self->parameter = NULL;
2392
2393 boolean_t enable_interrupts = TRUE;
2394
2395 /* idle thread needs to stay interrupts-disabled */
2396 if ((self->state & TH_IDLE)) {
2397 enable_interrupts = FALSE;
2398 }
2399
2400 call_continuation(continuation, parameter,
2401 self->wait_result, enable_interrupts);
2402 /*NOTREACHED*/
2403 }
2404 } else {
2405 /*
2406 * Check that the other thread has a stack
2407 */
2408 if (!thread->kernel_stack) {
2409 need_stack:
2410 if (!stack_alloc_try(thread)) {
2411 counter(c_thread_invoke_misses++);
2412 thread_unlock(thread);
2413 thread_stack_enqueue(thread);
2414 return FALSE;
2415 }
2416 } else if (thread == self) {
2417 ast_context(self);
2418 counter(++c_thread_invoke_same);
2419 thread_unlock(self);
2420
2421 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2422 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2423 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2424
2425 return TRUE;
2426 }
2427 }
2428
2429 /*
2430 * Context switch by full context save.
2431 */
2432 processor = current_processor();
2433 processor->active_thread = thread;
2434 processor_state_update_from_thread(processor, thread);
2435
2436 if (thread->last_processor != processor && thread->last_processor != NULL) {
2437 if (thread->last_processor->processor_set != processor->processor_set) {
2438 thread->ps_switch++;
2439 }
2440 thread->p_switch++;
2441 }
2442 thread->last_processor = processor;
2443 thread->c_switch++;
2444 ast_context(thread);
2445
2446 thread_unlock(thread);
2447
2448 counter(c_thread_invoke_csw++);
2449
2450 self->reason = reason;
2451
2452 processor->last_dispatch = ctime;
2453 self->last_run_time = ctime;
2454 processor_timer_switch_thread(ctime, &thread->system_timer);
2455 timer_update(&thread->runnable_timer, ctime);
2456 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2457
2458 /*
2459 * Since non-precise user/kernel time doesn't update the state timer
2460 * during privilege transitions, synthesize an event now.
2461 */
2462 if (!thread->precise_user_kernel_time) {
2463 timer_update(PROCESSOR_DATA(processor, current_state), ctime);
2464 }
2465
2466 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2467 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2468 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2469
2470 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
2471 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2472 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2473 }
2474
2475 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2476
2477 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2478
2479 #if KPERF
2480 kperf_off_cpu(self);
2481 #endif /* KPERF */
2482
2483 /*
2484 * This is where we actually switch register context,
2485 * and address space if required. We will next run
2486 * as a result of a subsequent context switch.
2487 *
2488 * Once registers are switched and the processor is running "thread",
2489 * the stack variables and non-volatile registers will contain whatever
2490 * was there the last time that thread blocked. No local variables should
2491 * be used after this point, except for the special case of "thread", which
2492 * the platform layer returns as the previous thread running on the processor
2493 * via the function call ABI as a return register, and "self", which may have
2494 * been stored on the stack or in a non-volatile register; its stale idea of
2495 * what was on the CPU becomes accurate again because that thread is once
2496 * more running on the CPU.
2497 *
2498 * If one of the threads is using a continuation, thread_continue
2499 * is used to stitch up its context.
2500 *
2501 * If we are invoking a thread which is resuming from a continuation,
2502 * the CPU will invoke thread_continue next.
2503 *
2504 * If the current thread is parking in a continuation, then its state
2505 * won't be saved and the stack will be discarded. When the stack is
2506 * re-allocated, it will be configured to resume from thread_continue.
2507 */
2508 assert(continuation == self->continuation);
2509 thread = machine_switch_context(self, continuation, thread);
2510 assert(self == current_thread_volatile());
2511 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2512
2513 assert(continuation == NULL && self->continuation == NULL);
2514
2515 DTRACE_SCHED(on__cpu);
2516
2517 #if KPERF
2518 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2519 #endif /* KPERF */
2520
2521 /* We have been resumed and are set to run. */
2522 thread_dispatch(thread, self);
2523
2524 return TRUE;
2525 }
2526
2527 #if defined(CONFIG_SCHED_DEFERRED_AST)
2528 /*
2529 * pset_cancel_deferred_dispatch:
2530 *
2531 * Cancels all ASTs that we can cancel for the given processor set
2532 * if the current processor is running the last runnable thread in the
2533 * system.
2534 *
2535 * This function assumes the current thread is runnable. This must
2536 * be called with the pset unlocked.
2537 */
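/*
 * In effect the set of CPUs considered below is:
 *
 *	cpu_state_map[PROCESSOR_DISPATCHING]
 *	    & pending_deferred_AST_cpu_mask
 *	    & ~pending_AST_URGENT_cpu_mask
 *
 * i.e. cores that were woken only by a cancelable (deferred) signal
 * and have nothing urgent outstanding.
 */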
2538 static void
2539 pset_cancel_deferred_dispatch(
2540 processor_set_t pset,
2541 processor_t processor)
2542 {
2543 processor_t active_processor = NULL;
2544 uint32_t sampled_sched_run_count;
2545
2546 pset_lock(pset);
2547 sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
2548
2549 /*
2550 * If we have emptied the run queue, and our current thread is runnable, we
2551 * should tell any processors that are still DISPATCHING that they will
2552 * probably not have any work to do. In the event that there are no
2553 * pending signals that we can cancel, this is also uninteresting.
2554 *
2555 * In the unlikely event that another thread becomes runnable while we are
2556 * doing this (sched_run_count is atomically updated, not guarded), the
2557 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2558 * in order to dispatch it to a processor in our pset. So, the other
2559 * codepath will wait while we squash all cancelable ASTs, get the pset
2560 * lock, and then dispatch the freshly runnable thread. So this should be
2561 * correct (we won't accidentally have a runnable thread that hasn't been
2562 * dispatched to an idle processor), if not ideal (we may be restarting the
2563 * dispatch process, which could have some overhead).
2564 */
2565
2566 if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
2567 uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
2568 pset->pending_deferred_AST_cpu_mask &
2569 ~pset->pending_AST_URGENT_cpu_mask);
2570 for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
2571 active_processor = processor_array[cpuid];
2572 /*
2573 * If a processor is DISPATCHING, it could be because of
2574 * a cancelable signal.
2575 *
2576 * IF the processor is not our
2577 * current processor (the current processor should not
2578 * be DISPATCHING, so this is a bit paranoid), AND there
2579 * is a cancelable signal pending on the processor, AND
2580 * there is no non-cancelable signal pending (as there is
2581 * no point trying to backtrack on bringing the processor
2582 * up if a signal we cannot cancel is outstanding), THEN
2583 * it should make sense to roll back the processor state
2584 * to the IDLE state.
2585 *
2586 * If the racy nature of this approach (as the signal
2587 * will be arbitrated by hardware, and can fire as we
2588 * roll back state) results in the core responding
2589 * despite being pushed back to the IDLE state, it
2590 * should be no different than if the core took some
2591 * interrupt while IDLE.
2592 */
2593 if (active_processor != processor) {
2594 /*
2595 * Squash all of the processor state back to some
2596 * reasonable facsimile of PROCESSOR_IDLE.
2597 */
2598
2599 processor_state_update_idle(active_processor);
2600 active_processor->deadline = UINT64_MAX;
2601 pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
2602 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
2603 machine_signal_idle_cancel(active_processor);
2604 }
2605 }
2606 }
2607
2608 pset_unlock(pset);
2609 }
2610 #else
2611 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
2612 #endif
2613
2614 static void
2615 thread_csw_callout(
2616 thread_t old,
2617 thread_t new,
2618 uint64_t timestamp)
2619 {
2620 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2621 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
2622 machine_switch_perfcontrol_context(event, timestamp, 0,
2623 same_pri_latency, old, new);
2624 }
2625
2626
2627 /*
2628 * thread_dispatch:
2629 *
2630 * Handle threads at context switch. Re-dispatch other thread
2631 * if still running, otherwise update run state and perform
2632 * special actions. Update quantum for other thread and begin
2633 * the quantum for ourselves.
2634 *
2635 * "thread" is the old thread that we have switched away from.
2636 * "self" is the new current thread that we have context switched to
2637 *
2638 * Called at splsched.
2639 *
2640 */
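/*
 * For the non-idle outgoing thread, the quantum accounting performed
 * below boils down to (a sketch of the arithmetic, in the same terms
 * the code uses):
 *
 *	remainder = (quantum_end > last_dispatch)
 *	            ? quantum_end - last_dispatch : 0;
 *	consumed  = thread->quantum_remaining - remainder;
 *
 * "consumed" is credited to the task, thread and bank CPU-time
 * ledgers; "remainder" is what the thread keeps if its quantum has
 * not expired.
 */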
2641 void
2642 thread_dispatch(
2643 thread_t thread,
2644 thread_t self)
2645 {
2646 processor_t processor = self->last_processor;
2647
2648 assert(processor == current_processor());
2649 assert(self == current_thread_volatile());
2650 assert(thread != self);
2651
2652 if (thread != THREAD_NULL) {
2653 /*
2654 * Do the perfcontrol callout for context switch.
2655 * The reason we do this here is:
2656 * - thread_dispatch() is called from various places that are not
2657 * the direct context switch path, e.g. processor shutdown.
2658 * Adding the callout here covers all those cases.
2659 * - We want this callout as early as possible to be close
2660 * to the timestamp taken in thread_invoke()
2661 * - We want to avoid holding the thread lock while doing the
2662 * callout
2663 * - We do not want to make the callout if "thread" is NULL.
2664 */
2665 thread_csw_callout(thread, self, processor->last_dispatch);
2666
2667 #if KASAN
2668 if (thread->continuation != NULL) {
2669 /*
2670 * Thread has a continuation and the normal stack is going away.
2671 * Unpoison the stack and mark all fakestack objects as unused.
2672 */
2673 kasan_fakestack_drop(thread);
2674 if (thread->kernel_stack) {
2675 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2676 }
2677 }
2678
2679 /*
2680 * Free all unused fakestack objects.
2681 */
2682 kasan_fakestack_gc(thread);
2683 #endif
2684
2685 /*
2686 * If blocked at a continuation, discard
2687 * the stack.
2688 */
2689 if (thread->continuation != NULL && thread->kernel_stack != 0) {
2690 stack_free(thread);
2691 }
2692
2693 if (thread->state & TH_IDLE) {
2694 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2695 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2696 (uintptr_t)thread_tid(thread), 0, thread->state,
2697 sched_run_buckets[TH_BUCKET_RUN], 0);
2698 } else {
2699 int64_t consumed;
2700 int64_t remainder = 0;
2701
2702 if (processor->quantum_end > processor->last_dispatch) {
2703 remainder = processor->quantum_end -
2704 processor->last_dispatch;
2705 }
2706
2707 consumed = thread->quantum_remaining - remainder;
2708
2709 if ((thread->reason & AST_LEDGER) == 0) {
2710 /*
2711 * Bill CPU time to both the task and
2712 * the individual thread.
2713 */
2714 ledger_credit_thread(thread, thread->t_ledger,
2715 task_ledgers.cpu_time, consumed);
2716 ledger_credit_thread(thread, thread->t_threadledger,
2717 thread_ledgers.cpu_time, consumed);
2718 if (thread->t_bankledger) {
2719 ledger_credit_thread(thread, thread->t_bankledger,
2720 bank_ledgers.cpu_time,
2721 (consumed - thread->t_deduct_bank_ledger_time));
2722 }
2723 thread->t_deduct_bank_ledger_time = 0;
2724 }
2725
2726 wake_lock(thread);
2727 thread_lock(thread);
2728
2729 /*
2730 * Apply a priority floor if the thread holds a kernel resource.
2731 * Do this before checking starting_pri to avoid overpenalizing
2732 * repeated rwlock blockers.
2733 */
2734 if (__improbable(thread->rwlock_count != 0)) {
2735 lck_rw_set_promotion_locked(thread);
2736 }
2737
2738 boolean_t keep_quantum = processor->first_timeslice;
2739
2740 /*
2741 * Treat a thread which has dropped priority since it got on core
2742 * as having expired its quantum.
2743 */
2744 if (processor->starting_pri > thread->sched_pri) {
2745 keep_quantum = FALSE;
2746 }
2747
2748 /* Compute remainder of current quantum. */
2749 if (keep_quantum &&
2750 processor->quantum_end > processor->last_dispatch) {
2751 thread->quantum_remaining = (uint32_t)remainder;
2752 } else {
2753 thread->quantum_remaining = 0;
2754 }
2755
2756 if (thread->sched_mode == TH_MODE_REALTIME) {
2757 /*
2758 * Cancel the deadline if the thread has
2759 * consumed the entire quantum.
2760 */
2761 if (thread->quantum_remaining == 0) {
2762 thread->realtime.deadline = UINT64_MAX;
2763 }
2764 } else {
2765 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2766 /*
2767 * For non-realtime threads treat a tiny
2768 * remaining quantum as an expired quantum
2769 * but include what's left next time.
2770 */
2771 if (thread->quantum_remaining < min_std_quantum) {
2772 thread->reason |= AST_QUANTUM;
2773 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2774 }
2775 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2776 }
2777
2778 /*
2779 * If we are doing a direct handoff then
2780 * take the remainder of the quantum.
2781 */
2782 if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
2783 self->quantum_remaining = thread->quantum_remaining;
2784 thread->reason |= AST_QUANTUM;
2785 thread->quantum_remaining = 0;
2786 } else {
2787 #if defined(CONFIG_SCHED_MULTIQ)
2788 if (SCHED(sched_groups_enabled) &&
2789 thread->sched_group == self->sched_group) {
2790 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2791 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
2792 self->reason, (uintptr_t)thread_tid(thread),
2793 self->quantum_remaining, thread->quantum_remaining, 0);
2794
2795 self->quantum_remaining = thread->quantum_remaining;
2796 thread->quantum_remaining = 0;
2797 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2798 }
2799 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2800 }
2801
2802 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2803
2804 if (!(thread->state & TH_WAIT)) {
2805 /*
2806 * Still runnable.
2807 */
2808 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
2809
2810 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
2811
2812 ast_t reason = thread->reason;
2813 sched_options_t options = SCHED_NONE;
2814
2815 if (reason & AST_REBALANCE) {
2816 options |= SCHED_REBALANCE;
2817 if (reason & AST_QUANTUM) {
2818 /*
2819 * Having gone to the trouble of forcing this thread off a less preferred core,
2820 * we should force the preferable core to reschedule immediately to give this
2821 * thread a chance to run instead of just sitting on the run queue where
2822 * it may just be stolen back by the idle core we just forced it off.
2823 * But only do this at the end of a quantum to prevent cascading effects.
2824 */
2825 options |= SCHED_PREEMPT;
2826 }
2827 }
2828
2829 if (reason & AST_QUANTUM) {
2830 options |= SCHED_TAILQ;
2831 } else if (reason & AST_PREEMPT) {
2832 options |= SCHED_HEADQ;
2833 } else {
2834 options |= (SCHED_PREEMPT | SCHED_TAILQ);
2835 }
2836
2837 thread_setrun(thread, options);
2838
2839 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2840 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2841 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2842 sched_run_buckets[TH_BUCKET_RUN], 0);
2843
2844 if (thread->wake_active) {
2845 thread->wake_active = FALSE;
2846 thread_unlock(thread);
2847
2848 thread_wakeup(&thread->wake_active);
2849 } else {
2850 thread_unlock(thread);
2851 }
2852
2853 wake_unlock(thread);
2854 } else {
2855 /*
2856 * Waiting.
2857 */
2858 boolean_t should_terminate = FALSE;
2859 uint32_t new_run_count;
2860 int thread_state = thread->state;
2861
2862 /* Only the first call to thread_dispatch
2863 * after explicit termination should add
2864 * the thread to the termination queue
2865 */
2866 if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
2867 should_terminate = TRUE;
2868 thread_state |= TH_TERMINATE2;
2869 }
2870
2871 timer_stop(&thread->runnable_timer, processor->last_dispatch);
2872
2873 thread_state &= ~TH_RUN;
2874 thread->state = thread_state;
2875
2876 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
2877 thread->chosen_processor = PROCESSOR_NULL;
2878
2879 new_run_count = SCHED(run_count_decr)(thread);
2880
2881 #if CONFIG_SCHED_SFI
2882 if (thread->reason & AST_SFI) {
2883 thread->wait_sfi_begin_time = processor->last_dispatch;
2884 }
2885 #endif
2886 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
2887
2888 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2889 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2890 (uintptr_t)thread_tid(thread), thread->reason, thread_state,
2891 new_run_count, 0);
2892
2893 if (thread_state & TH_WAIT_REPORT) {
2894 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2895 }
2896
2897 if (thread->wake_active) {
2898 thread->wake_active = FALSE;
2899 thread_unlock(thread);
2900
2901 thread_wakeup(&thread->wake_active);
2902 } else {
2903 thread_unlock(thread);
2904 }
2905
2906 wake_unlock(thread);
2907
2908 if (should_terminate) {
2909 thread_terminate_enqueue(thread);
2910 }
2911 }
2912 }
2913 }
2914
2915 int urgency = THREAD_URGENCY_NONE;
2916 uint64_t latency = 0;
2917
2918 /* Update (new) current thread and reprogram quantum timer */
2919 thread_lock(self);
2920
2921 if (!(self->state & TH_IDLE)) {
2922 uint64_t arg1, arg2;
2923
2924 #if CONFIG_SCHED_SFI
2925 ast_t new_ast;
2926
2927 new_ast = sfi_thread_needs_ast(self, NULL);
2928
2929 if (new_ast != AST_NONE) {
2930 ast_on(new_ast);
2931 }
2932 #endif
2933
2934 assertf(processor->last_dispatch >= self->last_made_runnable_time,
2935 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
2936 processor->last_dispatch, self->last_made_runnable_time);
2937
2938 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
2939
2940 latency = processor->last_dispatch - self->last_made_runnable_time;
2941 assert(latency >= self->same_pri_latency);
2942
2943 urgency = thread_get_urgency(self, &arg1, &arg2);
2944
2945 thread_tell_urgency(urgency, arg1, arg2, latency, self);
2946
2947 /*
2948 * Get a new quantum if none remaining.
2949 */
2950 if (self->quantum_remaining == 0) {
2951 thread_quantum_init(self);
2952 }
2953
2954 /*
2955 * Set up quantum timer and timeslice.
2956 */
2957 processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
2958 timer_call_quantum_timer_enter(&processor->quantum_timer, self,
2959 processor->quantum_end, processor->last_dispatch);
2960
2961 processor->first_timeslice = TRUE;
2962 } else {
2963 timer_call_quantum_timer_cancel(&processor->quantum_timer);
2964 processor->first_timeslice = FALSE;
2965
2966 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
2967 }
2968
2969 assert(self->block_hint == kThreadWaitNone);
2970 self->computation_epoch = processor->last_dispatch;
2971 self->reason = AST_NONE;
2972 processor->starting_pri = self->sched_pri;
2973
2974 thread_unlock(self);
2975
2976 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
2977 processor->last_dispatch);
2978
2979 #if defined(CONFIG_SCHED_DEFERRED_AST)
2980 /*
2981 * TODO: Can we state that redispatching our old thread is also
2982 * uninteresting?
2983 */
2984 if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
2985 pset_cancel_deferred_dispatch(processor->processor_set, processor);
2986 }
2987 #endif
2988 }
2989
2990 /*
2991 * thread_block_reason:
2992 *
2993 * Forces a reschedule, blocking the caller if a wait
2994 * has been asserted.
2995 *
2996 * If a continuation is specified, then thread_invoke will
2997 * attempt to discard the thread's kernel stack. When the
2998 * thread resumes, it will execute the continuation function
2999 * on a new kernel stack.
3000 */
3001 counter(mach_counter_t c_thread_block_calls = 0; )
3002
3003 wait_result_t
3004 thread_block_reason(
3005 thread_continue_t continuation,
3006 void *parameter,
3007 ast_t reason)
3008 {
3009 thread_t self = current_thread();
3010 processor_t processor;
3011 thread_t new_thread;
3012 spl_t s;
3013
3014 counter(++c_thread_block_calls);
3015
3016 s = splsched();
3017
3018 processor = current_processor();
3019
3020 /* If we're explicitly yielding, force a subsequent quantum */
3021 if (reason & AST_YIELD) {
3022 processor->first_timeslice = FALSE;
3023 }
3024
3025 /* We're handling all scheduling ASTs */
3026 ast_off(AST_SCHEDULING);
3027
3028 #if PROC_REF_DEBUG
3029 if ((continuation != NULL) && (self->task != kernel_task)) {
3030 if (uthread_get_proc_refcount(self->uthread) != 0) {
3031 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
3032 }
3033 }
3034 #endif
3035
3036 self->continuation = continuation;
3037 self->parameter = parameter;
3038
3039 if (self->state & ~(TH_RUN | TH_IDLE)) {
3040 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3041 MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
3042 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
3043 }
3044
3045 do {
3046 thread_lock(self);
3047 new_thread = thread_select(self, processor, &reason);
3048 thread_unlock(self);
3049 } while (!thread_invoke(self, new_thread, reason));
3050
3051 splx(s);
3052
3053 return self->wait_result;
3054 }
3055
3056 /*
3057 * thread_block:
3058 *
3059 * Block the current thread if a wait has been asserted.
3060 */
3061 wait_result_t
3062 thread_block(
3063 thread_continue_t continuation)
3064 {
3065 return thread_block_reason(continuation, NULL, AST_NONE);
3066 }
3067
3068 wait_result_t
3069 thread_block_parameter(
3070 thread_continue_t continuation,
3071 void *parameter)
3072 {
3073 return thread_block_reason(continuation, parameter, AST_NONE);
3074 }
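
/*
 * Typical caller pattern (an illustrative sketch only):
 *
 *	wait_result_t wres = assert_wait(event, THREAD_UNINT);
 *	if (wres == THREAD_WAITING) {
 *		wres = thread_block(THREAD_CONTINUE_NULL);
 *	}
 *
 * i.e. the wait is asserted first, and thread_block() then performs
 * the actual block if the wait is still in force.
 */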
3075
3076 /*
3077 * thread_run:
3078 *
3079 * Switch directly from the current thread to the
3080 * new thread, handing off our quantum if appropriate.
3081 *
3082 * New thread must be runnable, and not on a run queue.
3083 *
3084 * Called at splsched.
3085 */
3086 int
3087 thread_run(
3088 thread_t self,
3089 thread_continue_t continuation,
3090 void *parameter,
3091 thread_t new_thread)
3092 {
3093 ast_t reason = AST_NONE;
3094
3095 if ((self->state & TH_IDLE) == 0) {
3096 reason = AST_HANDOFF;
3097 }
3098
3099 self->continuation = continuation;
3100 self->parameter = parameter;
3101
3102 while (!thread_invoke(self, new_thread, reason)) {
3103 /* the handoff failed, so we have to fall back to the normal block path */
3104 processor_t processor = current_processor();
3105
3106 reason = AST_NONE;
3107
3108 thread_lock(self);
3109 new_thread = thread_select(self, processor, &reason);
3110 thread_unlock(self);
3111 }
3112
3113 return self->wait_result;
3114 }
3115
3116 /*
3117 * thread_continue:
3118 *
3119 * Called at splsched when a thread first receives
3120 * a new stack after a continuation.
3121 *
3122 * Called with THREAD_NULL as the old thread when
3123 * invoked by machine_load_context.
3124 */
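/*
 * Note that the thread we switched away from has not been dispatched
 * yet on this path, which is why thread_dispatch() is called below
 * before the continuation itself is invoked.
 */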
3125 void
3126 thread_continue(
3127 thread_t thread)
3128 {
3129 thread_t self = current_thread();
3130 thread_continue_t continuation;
3131 void *parameter;
3132
3133 DTRACE_SCHED(on__cpu);
3134
3135 continuation = self->continuation;
3136 parameter = self->parameter;
3137
3138 assert(continuation != NULL);
3139
3140 #if KPERF
3141 kperf_on_cpu(self, continuation, NULL);
3142 #endif
3143
3144 thread_dispatch(thread, self);
3145
3146 self->continuation = self->parameter = NULL;
3147
3148 #if INTERRUPT_MASKED_DEBUG
3149 /* Reset interrupt-masked spin debugging timeout */
3150 ml_spin_debug_clear(self);
3151 #endif
3152
3153 TLOG(1, "thread_continue: calling call_continuation\n");
3154
3155 boolean_t enable_interrupts = TRUE;
3156
3157 /* bootstrap thread, idle thread need to stay interrupts-disabled */
3158 if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
3159 enable_interrupts = FALSE;
3160 }
3161
3162 call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
3163 /*NOTREACHED*/
3164 }
3165
3166 void
3167 thread_quantum_init(thread_t thread)
3168 {
3169 if (thread->sched_mode == TH_MODE_REALTIME) {
3170 thread->quantum_remaining = thread->realtime.computation;
3171 } else {
3172 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
3173 }
3174 }
3175
3176 uint32_t
3177 sched_timeshare_initial_quantum_size(thread_t thread)
3178 {
3179 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
3180 return bg_quantum;
3181 } else {
3182 return std_quantum;
3183 }
3184 }
3185
3186 /*
3187 * run_queue_init:
3188 *
3189 * Initialize a run queue before first use.
3190 */
3191 void
3192 run_queue_init(
3193 run_queue_t rq)
3194 {
3195 rq->highq = NOPRI;
3196 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
3197 rq->bitmap[i] = 0;
3198 }
3199 rq->urgency = rq->count = 0;
3200 for (int i = 0; i < NRQS; i++) {
3201 circle_queue_init(&rq->queues[i]);
3202 }
3203 }
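
/*
 * Invariant maintained by the enqueue/dequeue/remove operations below:
 * bit i of rq->bitmap is set iff rq->queues[i] is non-empty, and
 * rq->highq tracks the highest set bit (NOPRI when the run queue is
 * empty).
 */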
3204
3205 /*
3206 * run_queue_dequeue:
3207 *
3208 * Perform a dequeue operation on a run queue,
3209 * and return the resulting thread.
3210 *
3211 * The run queue must be locked (see thread_run_queue_remove()
3212 * for more info), and not empty.
3213 */
3214 thread_t
3215 run_queue_dequeue(
3216 run_queue_t rq,
3217 sched_options_t options)
3218 {
3219 thread_t thread;
3220 circle_queue_t queue = &rq->queues[rq->highq];
3221
3222 if (options & SCHED_HEADQ) {
3223 thread = cqe_dequeue_head(queue, struct thread, runq_links);
3224 } else {
3225 thread = cqe_dequeue_tail(queue, struct thread, runq_links);
3226 }
3227
3228 assert(thread != THREAD_NULL);
3229 assert_thread_magic(thread);
3230
3231 thread->runq = PROCESSOR_NULL;
3232 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3233 rq->count--;
3234 if (SCHED(priority_is_urgent)(rq->highq)) {
3235 rq->urgency--; assert(rq->urgency >= 0);
3236 }
3237 if (circle_queue_empty(queue)) {
3238 bitmap_clear(rq->bitmap, rq->highq);
3239 rq->highq = bitmap_first(rq->bitmap, NRQS);
3240 }
3241
3242 return thread;
3243 }
3244
3245 /*
3246 * run_queue_enqueue:
3247 *
3248 * Perform an enqueue operation on a run queue.
3249 *
3250 * The run queue must be locked (see thread_run_queue_remove()
3251 * for more info).
3252 */
3253 boolean_t
3254 run_queue_enqueue(
3255 run_queue_t rq,
3256 thread_t thread,
3257 sched_options_t options)
3258 {
3259 circle_queue_t queue = &rq->queues[thread->sched_pri];
3260 boolean_t result = FALSE;
3261
3262 assert_thread_magic(thread);
3263
3264 if (circle_queue_empty(queue)) {
3265 circle_enqueue_tail(queue, &thread->runq_links);
3266
3267 rq_bitmap_set(rq->bitmap, thread->sched_pri);
3268 if (thread->sched_pri > rq->highq) {
3269 rq->highq = thread->sched_pri;
3270 result = TRUE;
3271 }
3272 } else {
3273 if (options & SCHED_TAILQ) {
3274 circle_enqueue_tail(queue, &thread->runq_links);
3275 } else {
3276 circle_enqueue_head(queue, &thread->runq_links);
3277 }
3278 }
3279 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3280 rq->urgency++;
3281 }
3282 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3283 rq->count++;
3284
3285 return result;
3286 }
3287
3288 /*
3289 * run_queue_remove:
3290 *
3291 * Remove a specific thread from a runqueue.
3292 *
3293 * The run queue must be locked.
3294 */
3295 void
3296 run_queue_remove(
3297 run_queue_t rq,
3298 thread_t thread)
3299 {
3300 circle_queue_t queue = &rq->queues[thread->sched_pri];
3301
3302 assert(thread->runq != PROCESSOR_NULL);
3303 assert_thread_magic(thread);
3304
3305 circle_dequeue(queue, &thread->runq_links);
3306 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3307 rq->count--;
3308 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3309 rq->urgency--; assert(rq->urgency >= 0);
3310 }
3311
3312 if (circle_queue_empty(queue)) {
3313 /* update run queue status */
3314 bitmap_clear(rq->bitmap, thread->sched_pri);
3315 rq->highq = bitmap_first(rq->bitmap, NRQS);
3316 }
3317
3318 thread->runq = PROCESSOR_NULL;
3319 }
3320
3321 /*
3322 * run_queue_peek
3323 *
3324 * Peek at the runq and return the highest
3325 * priority thread from the runq.
3326 *
3327 * The run queue must be locked.
3328 */
3329 thread_t
3330 run_queue_peek(
3331 run_queue_t rq)
3332 {
3333 if (rq->count > 0) {
3334 circle_queue_t queue = &rq->queues[rq->highq];
3335 thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
3336 assert_thread_magic(thread);
3337 return thread;
3338 } else {
3339 return THREAD_NULL;
3340 }
3341 }
3342
3343 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3344 void
3345 sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
3346 {
3347 spl_t s;
3348 thread_t thread;
3349
3350 processor_set_t pset = &pset0;
3351
3352 s = splsched();
3353 rt_lock_lock(pset);
3354
3355 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3356 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3357 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3358 }
3359 }
3360
3361 rt_lock_unlock(pset);
3362 splx(s);
3363 }
3364
3365 int64_t
3366 sched_rtglobal_runq_count_sum(void)
3367 {
3368 return pset0.rt_runq.runq_stats.count_sum;
3369 }
3370
3371 /*
3372 * realtime_queue_insert:
3373 *
3374 * Enqueue a thread for realtime execution.
3375 */
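/*
 * The realtime runqueue is kept sorted by deadline, earliest first.
 * The function returns TRUE only when the new thread becomes the head
 * of the queue, i.e. when the pset's best pending deadline changed and
 * a preemption check is warranted.
 */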
3376 static boolean_t
3377 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
3378 {
3379 queue_t queue = &SCHED(rt_runq)(pset)->queue;
3380 uint64_t deadline = thread->realtime.deadline;
3381 boolean_t preempt = FALSE;
3382
3383 rt_lock_lock(pset);
3384
3385 if (queue_empty(queue)) {
3386 enqueue_tail(queue, &thread->runq_links);
3387 preempt = TRUE;
3388 } else {
3389 /* Insert into rt_runq in thread deadline order */
3390 queue_entry_t iter;
3391 qe_foreach(iter, queue) {
3392 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3393 assert_thread_magic(iter_thread);
3394
3395 if (deadline < iter_thread->realtime.deadline) {
3396 if (iter == queue_first(queue)) {
3397 preempt = TRUE;
3398 }
3399 insque(&thread->runq_links, queue_prev(iter));
3400 break;
3401 } else if (iter == queue_last(queue)) {
3402 enqueue_tail(queue, &thread->runq_links);
3403 break;
3404 }
3405 }
3406 }
3407
3408 thread->runq = processor;
3409 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3410 rt_runq_count_incr(pset);
3411
3412 rt_lock_unlock(pset);
3413
3414 return preempt;
3415 }
3416
3417 /*
3418 * realtime_setrun:
3419 *
3420 * Dispatch a thread for realtime execution.
3421 *
3422 * Thread must be locked. Associated pset must
3423 * be locked, and is returned unlocked.
3424 */
3425 static void
3426 realtime_setrun(
3427 processor_t processor,
3428 thread_t thread)
3429 {
3430 processor_set_t pset = processor->processor_set;
3431 pset_assert_locked(pset);
3432 ast_t preempt;
3433
3434 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3435
3436 thread->chosen_processor = processor;
3437
3438 /* <rdar://problem/15102234> */
3439 assert(thread->bound_processor == PROCESSOR_NULL);
3440
3441 if (processor->current_pri < BASEPRI_RTQUEUES) {
3442 preempt = (AST_PREEMPT | AST_URGENT);
3443 } else if (thread->realtime.deadline < processor->deadline) {
3444 preempt = (AST_PREEMPT | AST_URGENT);
3445 } else {
3446 preempt = AST_NONE;
3447 }
3448
3449 realtime_queue_insert(processor, pset, thread);
3450
3451 ipi_type = SCHED_IPI_NONE;
3452 if (preempt != AST_NONE) {
3453 if (processor->state == PROCESSOR_IDLE) {
3454 processor_state_update_from_thread(processor, thread);
3455 processor->deadline = thread->realtime.deadline;
3456 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3457 if (processor == current_processor()) {
3458 ast_on(preempt);
3459 } else {
3460 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
3461 }
3462 } else if (processor->state == PROCESSOR_DISPATCHING) {
3463 if ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline)) {
3464 processor_state_update_from_thread(processor, thread);
3465 processor->deadline = thread->realtime.deadline;
3466 }
3467 } else {
3468 if (processor == current_processor()) {
3469 ast_on(preempt);
3470
3471 if ((preempt & AST_URGENT) == AST_URGENT) {
3472 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3473 }
3474
3475 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3476 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3477 }
3478 } else {
3479 ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
3480 }
3481 }
3482 } else {
3483 /* Selected processor was too busy; just keep the thread enqueued and let other processors drain it naturally. */
3484 }
3485
3486 pset_unlock(pset);
3487 sched_ipi_perform(processor, ipi_type);
3488 }
3489
3490
3491 sched_ipi_type_t
3492 sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3493 __unused sched_ipi_event_t event)
3494 {
3495 #if defined(CONFIG_SCHED_DEFERRED_AST)
3496 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3497 return SCHED_IPI_DEFERRED;
3498 }
3499 #else /* CONFIG_SCHED_DEFERRED_AST */
3500 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
3501 #endif /* CONFIG_SCHED_DEFERRED_AST */
3502 return SCHED_IPI_NONE;
3503 }
3504
3505 sched_ipi_type_t
3506 sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3507 {
3508 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3509 assert(dst != NULL);
3510
3511 processor_set_t pset = dst->processor_set;
3512 if (current_processor() == dst) {
3513 return SCHED_IPI_NONE;
3514 }
3515
3516 if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
3517 return SCHED_IPI_NONE;
3518 }
3519
3520 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3521 switch (ipi_type) {
3522 case SCHED_IPI_NONE:
3523 return SCHED_IPI_NONE;
3524 #if defined(CONFIG_SCHED_DEFERRED_AST)
3525 case SCHED_IPI_DEFERRED:
3526 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3527 break;
3528 #endif /* CONFIG_SCHED_DEFERRED_AST */
3529 default:
3530 bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id);
3531 bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
3532 break;
3533 }
3534 return ipi_type;
3535 }
3536
3537 sched_ipi_type_t
3538 sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3539 {
3540 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3541 boolean_t deferred_ipi_supported = false;
3542 processor_set_t pset = dst->processor_set;
3543
3544 #if defined(CONFIG_SCHED_DEFERRED_AST)
3545 deferred_ipi_supported = true;
3546 #endif /* CONFIG_SCHED_DEFERRED_AST */
3547
3548 switch (event) {
3549 case SCHED_IPI_EVENT_SPILL:
3550 case SCHED_IPI_EVENT_SMT_REBAL:
3551 case SCHED_IPI_EVENT_REBALANCE:
3552 case SCHED_IPI_EVENT_BOUND_THR:
3553 /*
3554 * The spill, SMT rebalance, rebalance, and bound-thread
3555 * scenarios always use immediate IPIs.
3556 */
3557 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3558 break;
3559 case SCHED_IPI_EVENT_PREEMPT:
3560 /* In the preemption case, use immediate IPIs for RT threads */
3561 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3562 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3563 break;
3564 }
3565
3566 /*
3567 * For non-RT thread preemption:
3568 * if the core is active, use an immediate IPI;
3569 * if the core is idle, use a deferred IPI if supported, otherwise an immediate IPI.
3570 */
3571 if (deferred_ipi_supported && dst_idle) {
3572 return sched_ipi_deferred_policy(pset, dst, event);
3573 }
3574 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3575 break;
3576 default:
3577 panic("Unrecognized scheduler IPI event type %d", event);
3578 }
3579 assert(ipi_type != SCHED_IPI_NONE);
3580 return ipi_type;
3581 }
3582
3583 void
3584 sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
3585 {
3586 switch (ipi) {
3587 case SCHED_IPI_NONE:
3588 break;
3589 case SCHED_IPI_IDLE:
3590 machine_signal_idle(dst);
3591 break;
3592 case SCHED_IPI_IMMEDIATE:
3593 cause_ast_check(dst);
3594 break;
3595 case SCHED_IPI_DEFERRED:
3596 machine_signal_idle_deferred(dst);
3597 break;
3598 default:
3599 panic("Unrecognized scheduler IPI type: %d", ipi);
3600 }
3601 }
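
/*
 * The intended pairing, as used throughout this file (illustrative):
 *
 *	pset_lock(pset);
 *	...
 *	ipi_type = sched_ipi_action(processor, thread, false,
 *	    SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(processor, ipi_type);
 *
 * The decision (and the pending-AST bookkeeping) happens with the pset
 * lock held; the actual signal is sent only after the lock is dropped.
 */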
3602
3603 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3604
3605 boolean_t
3606 priority_is_urgent(int priority)
3607 {
3608 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
3609 }
3610
3611 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3612
3613 /*
3614 * processor_setrun:
3615 *
3616 * Dispatch a thread for execution on a
3617 * processor.
3618 *
3619 * Thread must be locked. Associated pset must
3620 * be locked, and is returned unlocked.
3621 */
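/*
 * Preemption mode, as computed below (summary only): an urgent
 * preemption is requested when the new thread is at an urgent priority
 * above the running one, or when the running thread asked for eager
 * preemption; a depressed timeshare thread normally does not preempt;
 * otherwise preemption is requested only when the caller passed
 * SCHED_PREEMPT.
 */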
3622 static void
3623 processor_setrun(
3624 processor_t processor,
3625 thread_t thread,
3626 integer_t options)
3627 {
3628 processor_set_t pset = processor->processor_set;
3629 pset_assert_locked(pset);
3630 ast_t preempt;
3631 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
3632
3633 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3634
3635 thread->chosen_processor = processor;
3636
3637 /*
3638 * Set preemption mode.
3639 */
3640 #if defined(CONFIG_SCHED_DEFERRED_AST)
3641 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3642 #endif
3643 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
3644 preempt = (AST_PREEMPT | AST_URGENT);
3645 } else if (processor->active_thread && thread_eager_preemption(processor->active_thread)) {
3646 preempt = (AST_PREEMPT | AST_URGENT);
3647 } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3648 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
3649 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3650 } else {
3651 preempt = AST_NONE;
3652 }
3653 } else {
3654 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3655 }
3656
3657 if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
3658 /*
3659 * Having gone to the trouble of forcing this thread off a less preferred core,
3660 * we should force the preferable core to reschedule immediately to give this
3661 * thread a chance to run instead of just sitting on the run queue where
3662 * it may just be stolen back by the idle core we just forced it off.
3663 */
3664 preempt |= AST_PREEMPT;
3665 }
3666
3667 SCHED(processor_enqueue)(processor, thread, options);
3668 sched_update_pset_load_average(pset);
3669
3670 if (preempt != AST_NONE) {
3671 if (processor->state == PROCESSOR_IDLE) {
3672 processor_state_update_from_thread(processor, thread);
3673 processor->deadline = UINT64_MAX;
3674 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3675 ipi_action = eExitIdle;
3676 } else if (processor->state == PROCESSOR_DISPATCHING) {
3677 if (processor->current_pri < thread->sched_pri) {
3678 processor_state_update_from_thread(processor, thread);
3679 processor->deadline = UINT64_MAX;
3680 }
3681 } else if ((processor->state == PROCESSOR_RUNNING ||
3682 processor->state == PROCESSOR_SHUTDOWN) &&
3683 (thread->sched_pri >= processor->current_pri)) {
3684 ipi_action = eInterruptRunning;
3685 }
3686 } else {
3687 /*
3688 * New thread is not important enough to preempt what is running, but
3689 * special processor states may need special handling
3690 */
3691 if (processor->state == PROCESSOR_SHUTDOWN &&
3692 thread->sched_pri >= processor->current_pri) {
3693 ipi_action = eInterruptRunning;
3694 } else if (processor->state == PROCESSOR_IDLE) {
3695 processor_state_update_from_thread(processor, thread);
3696 processor->deadline = UINT64_MAX;
3697 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3698
3699 ipi_action = eExitIdle;
3700 }
3701 }
3702
3703 if (ipi_action != eDoNothing) {
3704 if (processor == current_processor()) {
3705 if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
3706 ast_on(preempt);
3707 }
3708
3709 if ((preempt & AST_URGENT) == AST_URGENT) {
3710 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3711 } else {
3712 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3713 }
3714
3715 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3716 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3717 } else {
3718 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3719 }
3720 } else {
3721 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3722 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3723 }
3724 }
3725 pset_unlock(pset);
3726 sched_ipi_perform(processor, ipi_type);
3727 }
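/*
 * processor_setrun always enqueues the thread and updates the pset load
 * average; the rest of the function only decides whether the target
 * processor needs to be poked, either by latching an AST on the local
 * processor or by sending an IPI chosen by sched_ipi_action() once the
 * pset lock has been dropped.
 */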
3728
3729 /*
3730 * choose_next_pset:
3731 *
3732 * Return the next sibling pset containing
3733 * available processors.
3734 *
3735 * Returns the original pset if none other is
3736 * suitable.
3737 */
3738 static processor_set_t
3739 choose_next_pset(
3740 processor_set_t pset)
3741 {
3742 processor_set_t nset = pset;
3743
3744 do {
3745 nset = next_pset(nset);
3746 } while (nset->online_processor_count < 1 && nset != pset);
3747
3748 return nset;
3749 }
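/*
 * Example: with psets ordered P0 -> P1 -> P2 -> P0, if P1 has no online
 * processors then choose_next_pset(P0) skips it and returns P2; if every
 * other pset is offline, the original pset P0 is returned.
 */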
3750
3751 /*
3752 * choose_processor:
3753 *
3754 * Choose a processor for the thread, beginning at
3755 * the pset. Accepts an optional processor hint in
3756 * the pset.
3757 *
3758 * Returns a processor, possibly from a different pset.
3759 *
3760 * The thread must be locked. The pset must be locked,
3761 * and the resulting pset is locked on return.
3762 */
3763 processor_t
3764 choose_processor(
3765 processor_set_t starting_pset,
3766 processor_t processor,
3767 thread_t thread)
3768 {
3769 processor_set_t pset = starting_pset;
3770 processor_set_t nset;
3771
3772 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
3773
3774 /*
3775 * Prefer the hinted processor, when appropriate.
3776 */
3777
3778 /* Fold last processor hint from secondary processor to its primary */
3779 if (processor != PROCESSOR_NULL) {
3780 processor = processor->processor_primary;
3781 }
3782
3783 /*
3784 * Only consult platform layer if pset is active, which
3785 * it may not be in some cases when a multi-set system
3786 * is going to sleep.
3787 */
3788 if (pset->online_processor_count) {
3789 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
3790 processor_t mc_processor = machine_choose_processor(pset, processor);
3791 if (mc_processor != PROCESSOR_NULL) {
3792 processor = mc_processor->processor_primary;
3793 }
3794 }
3795 }
3796
3797 /*
3798 * At this point, we may have a processor hint, and we may have
3799 * an initial starting pset. If the hint is not in the pset, or
3800 * if the hint is for a processor in an invalid state, discard
3801 * the hint.
3802 */
3803 if (processor != PROCESSOR_NULL) {
3804 if (processor->processor_set != pset) {
3805 processor = PROCESSOR_NULL;
3806 } else if (!processor->is_recommended) {
3807 processor = PROCESSOR_NULL;
3808 } else if ((thread->sched_pri >= BASEPRI_RTQUEUES) && !sched_ok_to_run_realtime_thread(pset, processor)) {
3809 processor = PROCESSOR_NULL;
3810 } else {
3811 switch (processor->state) {
3812 case PROCESSOR_START:
3813 case PROCESSOR_SHUTDOWN:
3814 case PROCESSOR_OFF_LINE:
3815 /*
3816 * Hint is for a processor that cannot support running new threads.
3817 */
3818 processor = PROCESSOR_NULL;
3819 break;
3820 case PROCESSOR_IDLE:
3821 /*
3822 * Hint is for an idle processor. Assume it is no worse than any other
3823 * idle processor. The platform layer had an opportunity to provide
3824 * the "least cost idle" processor above.
3825 */
3826 return processor;
3827 case PROCESSOR_RUNNING:
3828 case PROCESSOR_DISPATCHING:
3829 /*
3830 * Hint is for an active CPU. This fast-path allows
3831 * realtime threads to preempt non-realtime threads
3832 * to regain their previous executing processor.
3833 */
3834 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
3835 (processor->current_pri < BASEPRI_RTQUEUES)) {
3836 return processor;
3837 }
3838
3839 /* Otherwise, use hint as part of search below */
3840 break;
3841 default:
3842 processor = PROCESSOR_NULL;
3843 break;
3844 }
3845 }
3846 }
3847
3848 /*
3849 * Iterate through the processor sets to locate
3850 * an appropriate processor. Seed results with
3851 * a last-processor hint, if available, so that
3852 * a search must find something strictly better
3853 * to replace it.
3854 *
3855 * A primary/secondary pair of SMT processors are
3856 * "unpaired" if the primary is busy but its
3857 * corresponding secondary is idle (so the physical
3858 * core has full use of its resources).
3859 */
3860
3861 integer_t lowest_priority = MAXPRI + 1;
3862 integer_t lowest_secondary_priority = MAXPRI + 1;
3863 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
3864 integer_t lowest_idle_secondary_priority = MAXPRI + 1;
3865 integer_t lowest_count = INT_MAX;
3866 uint64_t furthest_deadline = 1;
3867 processor_t lp_processor = PROCESSOR_NULL;
3868 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
3869 processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
3870 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
3871 processor_t lc_processor = PROCESSOR_NULL;
3872 processor_t fd_processor = PROCESSOR_NULL;
3873
3874 if (processor != PROCESSOR_NULL) {
3875 /* All other states should be enumerated above. */
3876 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
3877
3878 lowest_priority = processor->current_pri;
3879 lp_processor = processor;
3880
3881 if (processor->current_pri >= BASEPRI_RTQUEUES) {
3882 furthest_deadline = processor->deadline;
3883 fd_processor = processor;
3884 }
3885
3886 lowest_count = SCHED(processor_runq_count)(processor);
3887 lc_processor = processor;
3888 }
3889
3890 do {
3891 int cpuid;
3892
3893 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3894 processor = choose_processor_for_realtime_thread(pset);
3895 if (processor) {
3896 return processor;
3897 }
3898 } else {
3899 /*
3900 * Choose an idle processor, in pset traversal order
3901 */
3902
3903 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
3904 pset->primary_map &
3905 pset->recommended_bitmask);
3906
3907 /* there shouldn't be a pending AST if the processor is idle */
3908 assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
3909
3910 cpuid = lsb_first(idle_primary_map);
3911 if (cpuid >= 0) {
3912 processor = processor_array[cpuid];
3913 return processor;
3914 }
3915 }
3916
3917 /*
3918 * Otherwise, enumerate active and idle processors to find primary candidates
3919 * with lower priority/etc.
3920 */
3921
3922 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
3923 pset->recommended_bitmask &
3924 ~pset->pending_AST_URGENT_cpu_mask);
3925
3926 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
3927 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
3928 }
3929
3930 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
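/*
 * Rotating the map makes the scan start just after the last chosen CPU:
 * e.g. if last_chosen is 5, rotating by 6 moves CPU 6's bit to position 0,
 * so cpuid below is recovered as (rotid + 6) & 63 and the set bits are
 * visited in the order 6, 7, ..., 63, 0, ..., 5.
 */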
3931 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
3932 cpuid = ((rotid + pset->last_chosen + 1) & 63);
3933 processor = processor_array[cpuid];
3934
3935 integer_t cpri = processor->current_pri;
3936 processor_t primary = processor->processor_primary;
3937 if (primary != processor) {
3938 /* If primary is running a NO_SMT thread, don't choose its secondary */
3939 if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
3940 if (cpri < lowest_secondary_priority) {
3941 lowest_secondary_priority = cpri;
3942 lp_paired_secondary_processor = processor;
3943 }
3944 }
3945 } else {
3946 if (cpri < lowest_priority) {
3947 lowest_priority = cpri;
3948 lp_processor = processor;
3949 }
3950 }
3951
3952 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
3953 furthest_deadline = processor->deadline;
3954 fd_processor = processor;
3955 }
3956
3957 integer_t ccount = SCHED(processor_runq_count)(processor);
3958 if (ccount < lowest_count) {
3959 lowest_count = ccount;
3960 lc_processor = processor;
3961 }
3962 }
3963
3964 /*
3965 * For SMT configs, these idle secondary processors must have an active primary. Otherwise
3966 * the idle primary would have short-circuited the loop above.
3967 */
3968 uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
3969 ~pset->primary_map &
3970 pset->recommended_bitmask);
3971
3972 /* there shouldn't be a pending AST if the processor is idle */
3973 assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
3974 assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
3975
3976 for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
3977 processor = processor_array[cpuid];
3978
3979 processor_t cprimary = processor->processor_primary;
3980
3981 integer_t primary_pri = cprimary->current_pri;
3982
3983 /*
3984 * TODO: This should also make the same decisions
3985 * as secondary_can_run_realtime_thread
3986 *
3987 * TODO: Keep track of the pending preemption priority
3988 * of the primary to make this more accurate.
3989 */
3990
3991 /* If the primary is running a no-smt thread, then don't choose its secondary */
3992 if (cprimary->state == PROCESSOR_RUNNING &&
3993 processor_active_thread_no_smt(cprimary)) {
3994 continue;
3995 }
3996
3997 /*
3998 * Find the idle secondary processor with the lowest priority primary
3999 *
4000 * We will choose this processor as a fallback if we find no better
4001 * primary to preempt.
4002 */
4003 if (primary_pri < lowest_idle_secondary_priority) {
4004 lp_idle_secondary_processor = processor;
4005 lowest_idle_secondary_priority = primary_pri;
4006 }
4007
4008 /* Find the lowest priority active primary with an idle secondary */
4009 if (primary_pri < lowest_unpaired_primary_priority) {
4010 /* If the primary processor is offline or starting up, it's not a candidate for this path */
4011 if (cprimary->state != PROCESSOR_RUNNING &&
4012 cprimary->state != PROCESSOR_DISPATCHING) {
4013 continue;
4014 }
4015
4016 if (!cprimary->is_recommended) {
4017 continue;
4018 }
4019
4020 /* if the primary is pending preemption, don't try to re-preempt it */
4021 if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
4022 continue;
4023 }
4024
4025 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
4026 bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
4027 continue;
4028 }
4029
4030 lowest_unpaired_primary_priority = primary_pri;
4031 lp_unpaired_primary_processor = cprimary;
4032 }
4033 }
4034
4035 /*
4036 * We prefer preempting a primary processor over waking up its secondary.
4037 * The secondary will then be woken up by the preempted thread.
4038 */
4039 if (thread->sched_pri > lowest_unpaired_primary_priority) {
4040 pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
4041 return lp_unpaired_primary_processor;
4042 }
4043
4044 /*
4045 * We prefer preempting a lower priority active processor over directly
4046 * waking up an idle secondary.
4047 * The preempted thread will then find the idle secondary.
4048 */
4049 if (thread->sched_pri > lowest_priority) {
4050 pset->last_chosen = lp_processor->cpu_id;
4051 return lp_processor;
4052 }
4053
4054 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4055 /*
4056 * For realtime threads, the most important aspect is
4057 * scheduling latency, so we will pick an active
4058 * secondary processor in this pset, or preempt
4059 * another RT thread with a further deadline before
4060 * going to the next pset.
4061 */
4062
4063 if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) {
4064 pset->last_chosen = lp_paired_secondary_processor->cpu_id;
4065 return lp_paired_secondary_processor;
4066 }
4067
4068 if (thread->realtime.deadline < furthest_deadline) {
4069 return fd_processor;
4070 }
4071 }
4072
4073 /*
4074 * lc_processor is used to indicate the best processor set run queue
4075 * on which to enqueue a thread when all available CPUs are busy with
4076 * higher priority threads, so try to make sure it is initialized.
4077 */
4078 if (lc_processor == PROCESSOR_NULL) {
4079 cpumap_t available_map = ((pset->cpu_state_map[PROCESSOR_IDLE] |
4080 pset->cpu_state_map[PROCESSOR_RUNNING] |
4081 pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
4082 pset->recommended_bitmask);
4083 cpuid = lsb_first(available_map);
4084 if (cpuid >= 0) {
4085 lc_processor = processor_array[cpuid];
4086 lowest_count = SCHED(processor_runq_count)(lc_processor);
4087 }
4088 }
4089
4090 /*
4091 * Move onto the next processor set.
4092 *
4093 * If all primary processors in this pset are running a higher
4094 * priority thread, move on to next pset. Only when we have
4095 * exhausted the search for primary processors do we
4096 * fall back to secondaries.
4097 */
4098 nset = next_pset(pset);
4099
4100 if (nset != starting_pset) {
4101 pset_unlock(pset);
4102
4103 pset = nset;
4104 pset_lock(pset);
4105 }
4106 } while (nset != starting_pset);
4107
4108 /*
4109 * Make sure that we pick a running processor,
4110 * and that the correct processor set is locked.
4111 * Since we may have unlocked the candidate processor's
4112 * pset, it may have changed state.
4113 *
4114 * All primary processors are running a higher priority
4115 * thread, so the only options left are enqueuing on
4116 * the secondary processor that would perturb the lowest-priority
4117 * primary, or the least busy primary.
4118 */
4119 boolean_t fallback_processor = false;
4120 do {
4121 /* lowest_priority is evaluated in the main loops above */
4122 if (lp_idle_secondary_processor != PROCESSOR_NULL) {
4123 processor = lp_idle_secondary_processor;
4124 lp_idle_secondary_processor = PROCESSOR_NULL;
4125 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
4126 processor = lp_paired_secondary_processor;
4127 lp_paired_secondary_processor = PROCESSOR_NULL;
4128 } else if (lc_processor != PROCESSOR_NULL) {
4129 processor = lc_processor;
4130 lc_processor = PROCESSOR_NULL;
4131 } else {
4132 /*
4133 * All processors are executing higher priority threads, and
4134 * the lowest_count candidate was not usable.
4135 *
4136 * For AMP platforms running the clutch scheduler, always
4137 * return a processor from the requested pset to allow the
4138 * thread to be enqueued in the correct runq. For non-AMP
4139 * platforms, simply return the master_processor.
4140 */
4141 fallback_processor = true;
4142 #if CONFIG_SCHED_CLUTCH && __AMP__
4143 processor = processor_array[lsb_first(starting_pset->primary_map)];
4144 #else /* CONFIG_SCHED_CLUTCH && __AMP__ */
4145 processor = master_processor;
4146 #endif /* CONFIG_SCHED_CLUTCH && __AMP__ */
4147 }
4148
4149 /*
4150 * Check that the correct processor set is
4151 * returned locked.
4152 */
4153 if (pset != processor->processor_set) {
4154 pset_unlock(pset);
4155 pset = processor->processor_set;
4156 pset_lock(pset);
4157 }
4158
4159 /*
4160 * We must verify that the chosen processor is still available.
4161 * The cases where we pick the master_processor or the fallback
4162 * processor are exceptions, since we may need to enqueue a thread
4163 * on its runqueue if this is the last remaining processor
4164 * during pset shutdown.
4165 *
4166 * <rdar://problem/47559304> would really help here since it
4167 * gets rid of the weird last processor SHUTDOWN case where
4168 * the pset is still schedulable.
4169 */
4170 if (processor != master_processor && (fallback_processor == false) && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) {
4171 processor = PROCESSOR_NULL;
4172 }
4173 } while (processor == PROCESSOR_NULL);
4174
4175 pset->last_chosen = processor->cpu_id;
4176 return processor;
4177 }
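/*
 * Overall preference order implemented above: a usable hint; an idle
 * primary in the current pset; an active primary whose priority the new
 * thread beats (unpaired primaries first); for realtime threads, a paired
 * secondary or preemption of the furthest-deadline RT thread; and finally
 * an idle secondary, the least-loaded CPU, or the fallback processor.
 */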
4178
4179 /*
4180 * thread_setrun:
4181 *
4182 * Dispatch thread for execution, onto an idle
4183 * processor or run queue, and signal a preemption
4184 * as appropriate.
4185 *
4186 * Thread must be locked.
4187 */
4188 void
4189 thread_setrun(
4190 thread_t thread,
4191 sched_options_t options)
4192 {
4193 processor_t processor;
4194 processor_set_t pset;
4195
4196 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
4197 assert(thread->runq == PROCESSOR_NULL);
4198
4199 /*
4200 * Update priority if needed.
4201 */
4202 if (SCHED(can_update_priority)(thread)) {
4203 SCHED(update_priority)(thread);
4204 }
4205
4206 thread->sfi_class = sfi_thread_classify(thread);
4207
4208 assert(thread->runq == PROCESSOR_NULL);
4209
4210 #if __SMP__
4211 if (thread->bound_processor == PROCESSOR_NULL) {
4212 /*
4213 * Unbound case.
4214 */
4215 if (thread->affinity_set != AFFINITY_SET_NULL) {
4216 /*
4217 * Use affinity set policy hint.
4218 */
4219 pset = thread->affinity_set->aset_pset;
4220 pset_lock(pset);
4221
4222 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
4223 pset = processor->processor_set;
4224
4225 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4226 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4227 } else if (thread->last_processor != PROCESSOR_NULL) {
4228 /*
4229 * Simple (last processor) affinity case.
4230 */
4231 processor = thread->last_processor;
4232 pset = processor->processor_set;
4233 pset_lock(pset);
4234 processor = SCHED(choose_processor)(pset, processor, thread);
4235 pset = processor->processor_set;
4236
4237 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4238 (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
4239 } else {
4240 /*
4241 * No Affinity case:
4242 *
4243 * Utilize a per-task hint to spread threads
4244 * among the available processor sets.
4245 */
4246 task_t task = thread->task;
4247
4248 pset = task->pset_hint;
4249 if (pset == PROCESSOR_SET_NULL) {
4250 pset = current_processor()->processor_set;
4251 }
4252
4253 pset = choose_next_pset(pset);
4254 pset_lock(pset);
4255
4256 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
4257 pset = processor->processor_set;
4258 task->pset_hint = pset;
4259
4260 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4261 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4262 }
4263 } else {
4264 /*
4265 * Bound case:
4266 *
4267 * Unconditionally dispatch on the processor.
4268 */
4269 processor = thread->bound_processor;
4270 pset = processor->processor_set;
4271 pset_lock(pset);
4272
4273 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4274 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
4275 }
4276 #else /* !__SMP__ */
4277 /* Only one processor to choose */
4278 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
4279 processor = master_processor;
4280 pset = processor->processor_set;
4281 pset_lock(pset);
4282 #endif /* !__SMP__ */
4283
4284 /*
4285 * Dispatch the thread on the chosen processor.
4286 * TODO: This should be based on sched_mode, not sched_pri
4287 */
4288 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4289 realtime_setrun(processor, thread);
4290 } else {
4291 processor_setrun(processor, thread, options);
4292 }
4293 /* pset is now unlocked */
4294 if (thread->bound_processor == PROCESSOR_NULL) {
4295 SCHED(check_spill)(pset, thread);
4296 }
4297 }
4298
4299 processor_set_t
4300 task_choose_pset(
4301 task_t task)
4302 {
4303 processor_set_t pset = task->pset_hint;
4304
4305 if (pset != PROCESSOR_SET_NULL) {
4306 pset = choose_next_pset(pset);
4307 }
4308
4309 return pset;
4310 }
4311
4312 /*
4313 * Check for a preemption point in
4314 * the current context.
4315 *
4316 * Called at splsched with thread locked.
4317 */
4318 ast_t
4319 csw_check(
4320 thread_t thread,
4321 processor_t processor,
4322 ast_t check_reason)
4323 {
4324 processor_set_t pset = processor->processor_set;
4325
4326 assert(thread == processor->active_thread);
4327
4328 pset_lock(pset);
4329
4330 processor_state_update_from_thread(processor, thread);
4331
4332 ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
4333
4334 /* Acknowledge the IPI if we decided not to preempt */
4335
4336 if ((preempt & AST_URGENT) == 0) {
4337 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
4338 }
4339
4340 if ((preempt & AST_PREEMPT) == 0) {
4341 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4342 }
4343
4344 pset_unlock(pset);
4345
4346 return preempt;
4347 }
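/*
 * Illustrative caller pattern (as in ast_check below): run the check and
 * latch any resulting AST on the local processor:
 *
 *	if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
 *		ast_on(preempt);
 *	}
 */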
4348
4349 /*
4350 * Check for preemption at splsched with
4351 * pset and thread locked
4352 */
4353 ast_t
4354 csw_check_locked(
4355 thread_t thread,
4356 processor_t processor,
4357 processor_set_t pset,
4358 ast_t check_reason)
4359 {
4360 ast_t result;
4361
4362 if (processor->first_timeslice) {
4363 if (rt_runq_count(pset) > 0) {
4364 return check_reason | AST_PREEMPT | AST_URGENT;
4365 }
4366 } else {
4367 if (rt_runq_count(pset) > 0) {
4368 if (BASEPRI_RTQUEUES > processor->current_pri) {
4369 return check_reason | AST_PREEMPT | AST_URGENT;
4370 } else {
4371 return check_reason | AST_PREEMPT;
4372 }
4373 }
4374 }
4375
4376 #if __SMP__
4377 /*
4378 * If the current thread is running on a processor that is no longer recommended,
4379 * urgently preempt it, at which point thread_select() should
4380 * try to idle the processor and re-dispatch the thread to a recommended processor.
4381 */
4382 if (!processor->is_recommended) {
4383 return check_reason | AST_PREEMPT | AST_URGENT;
4384 }
4385 #endif
4386
4387 result = SCHED(processor_csw_check)(processor);
4388 if (result != AST_NONE) {
4389 return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE);
4390 }
4391
4392 #if __SMP__
4393 /*
4394 * Same for avoid-processor
4395 *
4396 * TODO: Should these set AST_REBALANCE?
4397 */
4398 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
4399 return check_reason | AST_PREEMPT;
4400 }
4401
4402 /*
4403 * Even though we could continue executing on this processor, a
4404 * secondary SMT core should try to shed load to another primary core.
4405 *
4406 * TODO: Should this do the same check that thread_select does? i.e.
4407 * if no bound threads target this processor, and idle primaries exist, preempt
4408 * The case of RT threads existing is already taken care of above
4409 */
4410
4411 if (processor->current_pri < BASEPRI_RTQUEUES &&
4412 processor->processor_primary != processor) {
4413 return check_reason | AST_PREEMPT;
4414 }
4415 #endif
4416
4417 if (thread->state & TH_SUSP) {
4418 return check_reason | AST_PREEMPT;
4419 }
4420
4421 #if CONFIG_SCHED_SFI
4422 /*
4423 * Current thread may not need to be preempted, but may need
4424 * an SFI wait?
4425 */
4426 result = sfi_thread_needs_ast(thread, NULL);
4427 if (result != AST_NONE) {
4428 return check_reason | result;
4429 }
4430 #endif
4431
4432 return AST_NONE;
4433 }
4434
4435 /*
4436 * Handle preemption IPI or IPI in response to setting an AST flag
4437 * Triggered by cause_ast_check
4438 * Called at splsched
4439 */
4440 void
4441 ast_check(processor_t processor)
4442 {
4443 if (processor->state != PROCESSOR_RUNNING &&
4444 processor->state != PROCESSOR_SHUTDOWN) {
4445 return;
4446 }
4447
4448 thread_t thread = processor->active_thread;
4449
4450 assert(thread == current_thread());
4451
4452 thread_lock(thread);
4453
4454 /*
4455 * Propagate thread ast to processor.
4456 * (handles IPI in response to setting AST flag)
4457 */
4458 ast_propagate(thread);
4459
4460 /*
4461 * Stash the old urgency and perfctl values to find out if
4462 * csw_check updates them.
4463 */
4464 thread_urgency_t old_urgency = processor->current_urgency;
4465 perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
4466
4467 ast_t preempt;
4468
4469 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4470 ast_on(preempt);
4471 }
4472
4473 if (old_urgency != processor->current_urgency) {
4474 /*
4475 * Urgency updates happen with the thread lock held (ugh).
4476 * TODO: This doesn't notice QoS changes...
4477 */
4478 uint64_t urgency_param1, urgency_param2;
4479
4480 thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4481 thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
4482 }
4483
4484 thread_unlock(thread);
4485
4486 if (old_perfctl_class != processor->current_perfctl_class) {
4487 /*
4488 * We updated the perfctl class of this thread from another core.
4489 * Let CLPC know that the currently running thread has a new
4490 * class.
4491 */
4492
4493 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
4494 mach_approximate_time(), 0, thread);
4495 }
4496 }
4497
4498
4499 /*
4500 * set_sched_pri:
4501 *
4502 * Set the scheduled priority of the specified thread.
4503 *
4504 * This may cause the thread to change queues.
4505 *
4506 * Thread must be locked.
4507 */
4508 void
4509 set_sched_pri(
4510 thread_t thread,
4511 int new_priority,
4512 set_sched_pri_options_t options)
4513 {
4514 bool is_current_thread = (thread == current_thread());
4515 bool removed_from_runq = false;
4516 bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
4517
4518 int old_priority = thread->sched_pri;
4519
4520 /* If we're already at this priority, no need to mess with the runqueue */
4521 if (new_priority == old_priority) {
4522 #if CONFIG_SCHED_CLUTCH
4523 /* For the first thread in the system, the priority is correct but
4524 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
4525 * scheduler relies on the bucket being set for all threads, update
4526 * its bucket here.
4527 */
4528 if (thread->th_sched_bucket == TH_BUCKET_RUN) {
4529 assert(is_current_thread);
4530 SCHED(update_thread_bucket)(thread);
4531 }
4532 #endif /* CONFIG_SCHED_CLUTCH */
4533
4534 return;
4535 }
4536
4537 if (is_current_thread) {
4538 assert(thread->state & TH_RUN);
4539 assert(thread->runq == PROCESSOR_NULL);
4540 } else {
4541 removed_from_runq = thread_run_queue_remove(thread);
4542 }
4543
4544 thread->sched_pri = new_priority;
4545
4546 #if CONFIG_SCHED_CLUTCH
4547 /*
4548 * Since for the clutch scheduler, the thread's bucket determines its runq
4549 * in the hierarchy it is important to update the bucket when the thread
4550 * lock is held and the thread has been removed from the runq hierarchy.
4551 */
4552 SCHED(update_thread_bucket)(thread);
4553
4554 #endif /* CONFIG_SCHED_CLUTCH */
4555
4556 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
4557 (uintptr_t)thread_tid(thread),
4558 thread->base_pri,
4559 thread->sched_pri,
4560 thread->sched_usage,
4561 0);
4562
4563 if (removed_from_runq) {
4564 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4565 } else if (is_current_thread) {
4566 processor_t processor = thread->last_processor;
4567 assert(processor == current_processor());
4568
4569 thread_urgency_t old_urgency = processor->current_urgency;
4570
4571 /*
4572 * When dropping in priority, check if the thread no longer belongs on core.
4573 * If a thread raises its own priority, don't aggressively rebalance it.
4574 * <rdar://problem/31699165>
4575 *
4576 * csw_check does a processor_state_update_from_thread, but
4577 * we should do our own if we're being lazy.
4578 */
4579 if (!lazy_update && new_priority < old_priority) {
4580 ast_t preempt;
4581
4582 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4583 ast_on(preempt);
4584 }
4585 } else {
4586 processor_state_update_from_thread(processor, thread);
4587 }
4588
4589 /*
4590 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4591 * class alterations from user space to occur relatively infrequently, hence
4592 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4593 * inheritance is expected to involve priority changes.
4594 */
4595 if (processor->current_urgency != old_urgency) {
4596 uint64_t urgency_param1, urgency_param2;
4597
4598 thread_urgency_t new_urgency = thread_get_urgency(thread,
4599 &urgency_param1, &urgency_param2);
4600
4601 thread_tell_urgency(new_urgency, urgency_param1,
4602 urgency_param2, 0, thread);
4603 }
4604
4605 /* TODO: only call this if current_perfctl_class changed */
4606 uint64_t ctime = mach_approximate_time();
4607 machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
4608 } else if (thread->state & TH_RUN) {
4609 processor_t processor = thread->last_processor;
4610
4611 if (!lazy_update &&
4612 processor != PROCESSOR_NULL &&
4613 processor != current_processor() &&
4614 processor->active_thread == thread) {
4615 cause_ast_check(processor);
4616 }
4617 }
4618 }
4619
4620 /*
4621 * thread_run_queue_remove_for_handoff
4622 *
4623 * Pull a thread or its (recursive) push target out of the runqueue
4624 * so that it is ready for thread_run()
4625 *
4626 * Called at splsched
4627 *
4628 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4629 * This may be different than the thread that was passed in.
4630 */
4631 thread_t
4632 thread_run_queue_remove_for_handoff(thread_t thread)
4633 {
4634 thread_t pulled_thread = THREAD_NULL;
4635
4636 thread_lock(thread);
4637
4638 /*
4639 * Check that the thread is not bound
4640 * to a different processor, and that realtime
4641 * is not involved.
4642 *
4643 * Next, pull it off its run queue. If it
4644 * doesn't come, it's not eligible.
4645 */
4646
4647 processor_t processor = current_processor();
4648 if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
4649 (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
4650 if (thread_run_queue_remove(thread)) {
4651 pulled_thread = thread;
4652 }
4653 }
4654
4655 thread_unlock(thread);
4656
4657 return pulled_thread;
4658 }
4659
4660 /*
4661 * thread_run_queue_remove:
4662 *
4663 * Remove a thread from its current run queue and
4664 * return TRUE if successful.
4665 *
4666 * Thread must be locked.
4667 *
4668 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4669 * run queues because the caller locked the thread. Otherwise
4670 * the thread is on a run queue, but could be chosen for dispatch
4671 * and removed by another processor under a different lock, which
4672 * will set thread->runq to PROCESSOR_NULL.
4673 *
4674 * Hence the thread select path must not rely on anything that could
4675 * be changed under the thread lock after calling this function,
4676 * most importantly thread->sched_pri.
4677 */
4678 boolean_t
4679 thread_run_queue_remove(
4680 thread_t thread)
4681 {
4682 boolean_t removed = FALSE;
4683 processor_t processor = thread->runq;
4684
4685 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
4686 /* Thread isn't runnable */
4687 assert(thread->runq == PROCESSOR_NULL);
4688 return FALSE;
4689 }
4690
4691 if (processor == PROCESSOR_NULL) {
4692 /*
4693 * The thread is either not on the runq,
4694 * or is in the midst of being removed from the runq.
4695 *
4696 * runq is set to NULL under the pset lock, not the thread
4697 * lock, so the thread may still be in the process of being dequeued
4698 * from the runq. It will wait in invoke for the thread lock to be
4699 * dropped.
4700 */
4701
4702 return FALSE;
4703 }
4704
4705 if (thread->sched_pri < BASEPRI_RTQUEUES) {
4706 return SCHED(processor_queue_remove)(processor, thread);
4707 }
4708
4709 processor_set_t pset = processor->processor_set;
4710
4711 rt_lock_lock(pset);
4712
4713 if (thread->runq != PROCESSOR_NULL) {
4714 /*
4715 * Thread is on the RT run queue and we have a lock on
4716 * that run queue.
4717 */
4718
4719 remqueue(&thread->runq_links);
4720 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
4721 rt_runq_count_decr(pset);
4722
4723 thread->runq = PROCESSOR_NULL;
4724
4725 removed = TRUE;
4726 }
4727
4728 rt_lock_unlock(pset);
4729
4730 return removed;
4731 }
4732
4733 /*
4734 * Put the thread back where it goes after a thread_run_queue_remove
4735 *
4736 * Thread must have been removed under the same thread lock hold
4737 *
4738 * thread locked, at splsched
4739 */
4740 void
4741 thread_run_queue_reinsert(thread_t thread, sched_options_t options)
4742 {
4743 assert(thread->runq == PROCESSOR_NULL);
4744 assert(thread->state & (TH_RUN));
4745
4746 thread_setrun(thread, options);
4747 }
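/*
 * Illustrative pairing (as in set_sched_pri above): remove the thread,
 * adjust it, and reinsert it under the same thread lock hold:
 *
 *	removed_from_runq = thread_run_queue_remove(thread);
 *	thread->sched_pri = new_priority;
 *	if (removed_from_runq) {
 *		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
 *	}
 */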
4748
4749 void
4750 sys_override_cpu_throttle(boolean_t enable_override)
4751 {
4752 if (enable_override) {
4753 cpu_throttle_enabled = 0;
4754 } else {
4755 cpu_throttle_enabled = 1;
4756 }
4757 }
4758
4759 thread_urgency_t
4760 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
4761 {
4762 uint64_t urgency_param1 = 0, urgency_param2 = 0;
4763
4764 thread_urgency_t urgency;
4765
4766 if (thread == NULL || (thread->state & TH_IDLE)) {
4767 urgency_param1 = 0;
4768 urgency_param2 = 0;
4769
4770 urgency = THREAD_URGENCY_NONE;
4771 } else if (thread->sched_mode == TH_MODE_REALTIME) {
4772 urgency_param1 = thread->realtime.period;
4773 urgency_param2 = thread->realtime.deadline;
4774
4775 urgency = THREAD_URGENCY_REAL_TIME;
4776 } else if (cpu_throttle_enabled &&
4777 (thread->sched_pri <= MAXPRI_THROTTLE) &&
4778 (thread->base_pri <= MAXPRI_THROTTLE)) {
4779 /*
4780 * Threads that are running at low priority but are not
4781 * tagged with a specific QoS are separated out from
4782 * the "background" urgency. Performance management
4783 * subsystem can decide to either treat these threads
4784 * as normal threads or look at other signals like thermal
4785 * levels for optimal power/perf tradeoffs for a platform.
4786 */
4787 boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
4788 boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1);
4789
4790 /*
4791 * Background urgency applied when thread priority is
4792 * MAXPRI_THROTTLE or lower and thread is not promoted
4793 * and thread has a QoS specified
4794 */
4795 urgency_param1 = thread->sched_pri;
4796 urgency_param2 = thread->base_pri;
4797
4798 if (thread_lacks_qos && !task_is_suppressed) {
4799 urgency = THREAD_URGENCY_LOWPRI;
4800 } else {
4801 urgency = THREAD_URGENCY_BACKGROUND;
4802 }
4803 } else {
4804 /* For otherwise unclassified threads, report throughput QoS parameters */
4805 urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
4806 urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
4807 urgency = THREAD_URGENCY_NORMAL;
4808 }
4809
4810 if (arg1 != NULL) {
4811 *arg1 = urgency_param1;
4812 }
4813 if (arg2 != NULL) {
4814 *arg2 = urgency_param2;
4815 }
4816
4817 return urgency;
4818 }
4819
4820 perfcontrol_class_t
4821 thread_get_perfcontrol_class(thread_t thread)
4822 {
4823 /* Special case handling */
4824 if (thread->state & TH_IDLE) {
4825 return PERFCONTROL_CLASS_IDLE;
4826 }
4827 if (thread->task == kernel_task) {
4828 return PERFCONTROL_CLASS_KERNEL;
4829 }
4830 if (thread->sched_mode == TH_MODE_REALTIME) {
4831 return PERFCONTROL_CLASS_REALTIME;
4832 }
4833
4834 /* perfcontrol_class based on base_pri */
4835 if (thread->base_pri <= MAXPRI_THROTTLE) {
4836 return PERFCONTROL_CLASS_BACKGROUND;
4837 } else if (thread->base_pri <= BASEPRI_UTILITY) {
4838 return PERFCONTROL_CLASS_UTILITY;
4839 } else if (thread->base_pri <= BASEPRI_DEFAULT) {
4840 return PERFCONTROL_CLASS_NONUI;
4841 } else if (thread->base_pri <= BASEPRI_FOREGROUND) {
4842 return PERFCONTROL_CLASS_UI;
4843 } else {
4844 return PERFCONTROL_CLASS_ABOVEUI;
4845 }
4846 }
4847
4848 /*
4849 * This is the processor idle loop, which just looks for other threads
4850 * to execute. Processor idle threads invoke this without supplying a
4851 * current thread, in order to idle without an asserted wait state.
4852 *
4853 * Returns the next thread to execute if dispatched directly.
4854 */
4855
4856 #if 0
4857 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
4858 #else
4859 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
4860 #endif
4861
4862 thread_t
4863 processor_idle(
4864 thread_t thread,
4865 processor_t processor)
4866 {
4867 processor_set_t pset = processor->processor_set;
4868
4869 (void)splsched();
4870
4871 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4872 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
4873 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
4874
4875 SCHED_STATS_CPU_IDLE_START(processor);
4876
4877 uint64_t ctime = mach_absolute_time();
4878
4879 timer_switch(&PROCESSOR_DATA(processor, system_state), ctime, &PROCESSOR_DATA(processor, idle_state));
4880 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
4881
4882 cpu_quiescent_counter_leave(ctime);
4883
4884 while (1) {
4885 /*
4886 * Ensure that updates to my processor and pset state,
4887 * made by the IPI source processor before sending the IPI,
4888 * are visible on this processor now (even though we don't
4889 * take the pset lock yet).
4890 */
4891 atomic_thread_fence(memory_order_acquire);
4892
4893 if (processor->state != PROCESSOR_IDLE) {
4894 break;
4895 }
4896 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
4897 break;
4898 }
4899 #if defined(CONFIG_SCHED_DEFERRED_AST)
4900 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
4901 break;
4902 }
4903 #endif
4904 if (processor->is_recommended && (processor->processor_primary == processor)) {
4905 if (rt_runq_count(pset)) {
4906 break;
4907 }
4908 } else {
4909 if (SCHED(processor_bound_count)(processor)) {
4910 break;
4911 }
4912 }
4913
4914 IDLE_KERNEL_DEBUG_CONSTANT(
4915 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
4916
4917 machine_track_platform_idle(TRUE);
4918
4919 machine_idle();
4920 /* returns with interrupts enabled */
4921
4922 machine_track_platform_idle(FALSE);
4923
4924 (void)splsched();
4925
4926 /*
4927 * Check if we should call sched_timeshare_consider_maintenance() here.
4928 * The CPU was woken out of idle due to an interrupt and we should do the
4929 * call only if the processor is still idle. If the processor is non-idle,
4930 * the threads running on the processor would do the call as part of
4931 * context switching.
4932 */
4933 if (processor->state == PROCESSOR_IDLE) {
4934 sched_timeshare_consider_maintenance(mach_absolute_time());
4935 }
4936
4937 IDLE_KERNEL_DEBUG_CONSTANT(
4938 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
4939
4940 if (!SCHED(processor_queue_empty)(processor)) {
4941 /* Secondary SMT processors respond to directed wakeups
4942 * exclusively. Some platforms induce 'spurious' SMT wakeups.
4943 */
4944 if (processor->processor_primary == processor) {
4945 break;
4946 }
4947 }
4948 }
4949
4950 ctime = mach_absolute_time();
4951
4952 timer_switch(&PROCESSOR_DATA(processor, idle_state), ctime, &PROCESSOR_DATA(processor, system_state));
4953 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
4954
4955 cpu_quiescent_counter_join(ctime);
4956
4957 ast_t reason = AST_NONE;
4958
4959 /* We're handling all scheduling AST's */
4960 ast_off(AST_SCHEDULING);
4961
4962 /*
4963 * thread_select will move the processor from dispatching to running,
4964 * or put it in idle if there's nothing to do.
4965 */
4966 thread_t current_thread = current_thread();
4967
4968 thread_lock(current_thread);
4969 thread_t new_thread = thread_select(current_thread, processor, &reason);
4970 thread_unlock(current_thread);
4971
4972 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4973 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
4974 (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
4975
4976 return new_thread;
4977 }
4978
4979 /*
4980 * Each processor has a dedicated thread which
4981 * executes the idle loop when there is no suitable
4982 * previous context.
4983 *
4984 * This continuation is entered with interrupts disabled.
4985 */
4986 void
4987 idle_thread(__assert_only void* parameter,
4988 __unused wait_result_t result)
4989 {
4990 assert(ml_get_interrupts_enabled() == FALSE);
4991 assert(parameter == NULL);
4992
4993 processor_t processor = current_processor();
4994
4995 /*
4996 * Ensure that anything running in idle context triggers
4997 * preemption-disabled checks.
4998 */
4999 disable_preemption();
5000
5001 /*
5002 * Enable interrupts temporarily to handle any pending interrupts
5003 * or IPIs before deciding to sleep
5004 */
5005 spllo();
5006
5007 thread_t new_thread = processor_idle(THREAD_NULL, processor);
5008 /* returns with interrupts disabled */
5009
5010 enable_preemption();
5011
5012 if (new_thread != THREAD_NULL) {
5013 thread_run(processor->idle_thread,
5014 idle_thread, NULL, new_thread);
5015 /*NOTREACHED*/
5016 }
5017
5018 thread_block(idle_thread);
5019 /*NOTREACHED*/
5020 }
5021
5022 kern_return_t
5023 idle_thread_create(
5024 processor_t processor)
5025 {
5026 kern_return_t result;
5027 thread_t thread;
5028 spl_t s;
5029 char name[MAXTHREADNAMESIZE];
5030
5031 result = kernel_thread_create(idle_thread, NULL, MAXPRI_KERNEL, &thread);
5032 if (result != KERN_SUCCESS) {
5033 return result;
5034 }
5035
5036 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
5037 thread_set_thread_name(thread, name);
5038
5039 s = splsched();
5040 thread_lock(thread);
5041 thread->bound_processor = processor;
5042 processor->idle_thread = thread;
5043 thread->sched_pri = thread->base_pri = IDLEPRI;
5044 thread->state = (TH_RUN | TH_IDLE);
5045 thread->options |= TH_OPT_IDLE_THREAD;
5046 thread_unlock(thread);
5047 splx(s);
5048
5049 thread_deallocate(thread);
5050
5051 return KERN_SUCCESS;
5052 }
5053
5054 /*
5055 * sched_startup:
5056 *
5057 * Kicks off scheduler services.
5058 *
5059 * Called at splsched.
5060 */
5061 void
5062 sched_startup(void)
5063 {
5064 kern_return_t result;
5065 thread_t thread;
5066
5067 simple_lock_init(&sched_vm_group_list_lock, 0);
5068
5069 #if __arm__ || __arm64__
5070 simple_lock_init(&sched_recommended_cores_lock, 0);
5071 #endif /* __arm__ || __arm64__ */
5072
5073 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
5074 (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
5075 if (result != KERN_SUCCESS) {
5076 panic("sched_startup");
5077 }
5078
5079 thread_deallocate(thread);
5080
5081 assert_thread_magic(thread);
5082
5083 /*
5084 * Yield to the sched_init_thread once, to
5085 * initialize our own thread after being switched
5086 * back to.
5087 *
5088 * The current thread is the only other thread
5089 * active at this point.
5090 */
5091 thread_block(THREAD_CONTINUE_NULL);
5092 }
5093
5094 #if __arm64__
5095 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
5096 #endif /* __arm64__ */
5097
5098
5099 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5100
5101 static volatile uint64_t sched_maintenance_deadline;
5102 static uint64_t sched_tick_last_abstime;
5103 static uint64_t sched_tick_delta;
5104 uint64_t sched_tick_max_delta;
5105
5106
5107 /*
5108 * sched_init_thread:
5109 *
5110 * Perform periodic bookkeeping functions about ten
5111 * times per second.
5112 */
5113 void
5114 sched_timeshare_maintenance_continue(void)
5115 {
5116 uint64_t sched_tick_ctime, late_time;
5117
5118 struct sched_update_scan_context scan_context = {
5119 .earliest_bg_make_runnable_time = UINT64_MAX,
5120 .earliest_normal_make_runnable_time = UINT64_MAX,
5121 .earliest_rt_make_runnable_time = UINT64_MAX
5122 };
5123
5124 sched_tick_ctime = mach_absolute_time();
5125
5126 if (__improbable(sched_tick_last_abstime == 0)) {
5127 sched_tick_last_abstime = sched_tick_ctime;
5128 late_time = 0;
5129 sched_tick_delta = 1;
5130 } else {
5131 late_time = sched_tick_ctime - sched_tick_last_abstime;
5132 sched_tick_delta = late_time / sched_tick_interval;
5133 /* Ensure a delta of at least 1, since the interval could be slightly
5134 * smaller than the sched_tick_interval due to dispatch
5135 * latencies.
5136 */
5137 sched_tick_delta = MAX(sched_tick_delta, 1);
5138
5139 /* In the event interrupt latencies or platform
5140 * idle events that advanced the timebase resulted
5141 * in periods where no threads were dispatched,
5142 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
5143 * iterations.
5144 */
5145 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
5146
5147 sched_tick_last_abstime = sched_tick_ctime;
5148 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
5149 }
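/*
 * Worked example: if 3.7 sched_tick_intervals have elapsed since the last
 * pass, late_time / sched_tick_interval yields 3, which stays within the
 * [1, SCHED_TICK_MAX_DELTA] clamp, so sched_tick advances by 3 below.
 */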
5150
5151 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
5152 sched_tick_delta, late_time, 0, 0, 0);
5153
5154 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
5155 * This could be greater than 1 if there were substantial intervals during
5156 * which all processors were idle, which is rare in practice.
5157 */
5158
5159 sched_tick += sched_tick_delta;
5160
5161 update_vm_info();
5162
5163 /*
5164 * Compute various averages.
5165 */
5166 compute_averages(sched_tick_delta);
5167
5168 /*
5169 * Scan the run queues for threads which
5170 * may need to be updated, and find the earliest runnable thread on the runqueue
5171 * to report its latency.
5172 */
5173 SCHED(thread_update_scan)(&scan_context);
5174
5175 SCHED(rt_runq_scan)(&scan_context);
5176
5177 uint64_t ctime = mach_absolute_time();
5178
5179 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
5180 ctime - scan_context.earliest_bg_make_runnable_time : 0;
5181
5182 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
5183 ctime - scan_context.earliest_normal_make_runnable_time : 0;
5184
5185 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
5186 ctime - scan_context.earliest_rt_make_runnable_time : 0;
5187
5188 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
5189
5190 /*
5191 * Check to see if the special sched VM group needs attention.
5192 */
5193 sched_vm_group_maintenance();
5194
5195 #if __arm__ || __arm64__
5196 /* Check to see if the recommended cores failsafe is active */
5197 sched_recommended_cores_maintenance();
5198 #endif /* __arm__ || __arm64__ */
5199
5200
5201 #if DEBUG || DEVELOPMENT
5202 #if __x86_64__
5203 #include <i386/misc_protos.h>
5204 /* Check for long-duration interrupts */
5205 mp_interrupt_watchdog();
5206 #endif /* __x86_64__ */
5207 #endif /* DEBUG || DEVELOPMENT */
5208
5209 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
5210 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
5211 sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
5212
5213 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
5214 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
5215 /*NOTREACHED*/
5216 }
5217
5218 static uint64_t sched_maintenance_wakeups;
5219
5220 /*
5221 * Determine if the set of routines formerly driven by a maintenance timer
5222 * must be invoked, based on a deadline comparison. Signals the scheduler
5223 * maintenance thread on deadline expiration. Must be invoked at an interval
5224 * lower than the "sched_tick_interval", currently accomplished by
5225 * invocation via the quantum expiration timer and at context switch time.
5226 * Performance matters: this routine reuses a timestamp approximating the
5227 * current absolute time received from the caller, and should perform
5228 * no more than a comparison against the deadline in the common case.
5229 */
5230 void
5231 sched_timeshare_consider_maintenance(uint64_t ctime)
5232 {
5233 cpu_quiescent_counter_checkin(ctime);
5234
5235 uint64_t deadline = sched_maintenance_deadline;
5236
5237 if (__improbable(ctime >= deadline)) {
5238 if (__improbable(current_thread() == sched_maintenance_thread)) {
5239 return;
5240 }
5241 OSMemoryBarrier();
5242
5243 uint64_t ndeadline = ctime + sched_tick_interval;
5244
5245 if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
5246 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
5247 sched_maintenance_wakeups++;
5248 }
5249 }
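/*
 * Only the CPU that wins the compare-and-swap above advances the deadline
 * and wakes the maintenance thread; any other CPU racing past the same
 * expired deadline observes the new value and does nothing further here.
 */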
5250
5251 #if !CONFIG_SCHED_CLUTCH
5252 /*
5253 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
5254 * scheduler, the load is maintained at the thread group and bucket level.
5255 */
5256 uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
5257
5258 if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
5259 uint64_t new_deadline = 0;
5260 if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
5261 compute_sched_load();
5262 new_deadline = ctime + sched_load_compute_interval_abs;
5263 os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
5264 }
5265 }
5266 #endif /* CONFIG_SCHED_CLUTCH */
5267
5268 #if __arm64__
5269 uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
5270
5271 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
5272 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
5273 if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
5274 machine_perfcontrol_deadline_passed(perf_deadline);
5275 }
5276 }
5277 #endif /* __arm64__ */
5278 }
5279
5280 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5281
5282 void
5283 sched_init_thread(void (*continuation)(void))
5284 {
5285 thread_block(THREAD_CONTINUE_NULL);
5286
5287 thread_t thread = current_thread();
5288
5289 thread_set_thread_name(thread, "sched_maintenance_thread");
5290
5291 sched_maintenance_thread = thread;
5292
5293 continuation();
5294
5295 /*NOTREACHED*/
5296 }
5297
5298 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
5299
5300 /*
5301 * thread_update_scan / runq_scan:
5302 *
5303 * Scan the run queues to account for timesharing threads
5304 * which need to be updated.
5305 *
5306 * Scanner runs in two passes. Pass one squirrels likely
5307 * threads away in an array, pass two does the update.
5308 *
5309 * This is necessary because the run queue is locked for
5310 * the candidate scan, but the thread is locked for the update.
5311 *
5312 * Array should be sized to make forward progress, without
5313 * disabling preemption for long periods.
5314 */
5315
5316 #define THREAD_UPDATE_SIZE 128
5317
5318 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
5319 static uint32_t thread_update_count = 0;
5320
5321 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
5322 boolean_t
5323 thread_update_add_thread(thread_t thread)
5324 {
5325 if (thread_update_count == THREAD_UPDATE_SIZE) {
5326 return FALSE;
5327 }
5328
5329 thread_update_array[thread_update_count++] = thread;
5330 thread_reference_internal(thread);
5331 return TRUE;
5332 }
5333
5334 void
5335 thread_update_process_threads(void)
5336 {
5337 assert(thread_update_count <= THREAD_UPDATE_SIZE);
5338
5339 for (uint32_t i = 0; i < thread_update_count; i++) {
5340 thread_t thread = thread_update_array[i];
5341 assert_thread_magic(thread);
5342 thread_update_array[i] = THREAD_NULL;
5343
5344 spl_t s = splsched();
5345 thread_lock(thread);
5346 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
5347 SCHED(update_priority)(thread);
5348 }
5349 thread_unlock(thread);
5350 splx(s);
5351
5352 thread_deallocate(thread);
5353 }
5354
5355 thread_update_count = 0;
5356 }
5357
5358 /*
5359 * Scan a runq for candidate threads.
5360 *
5361 * Returns TRUE if retry is needed.
5362 */
5363 boolean_t
5364 runq_scan(
5365 run_queue_t runq,
5366 sched_update_scan_context_t scan_context)
5367 {
5368 int count = runq->count;
5369 int queue_index;
5370
5371 assert(count >= 0);
5372
5373 if (count == 0) {
5374 return FALSE;
5375 }
5376
5377 for (queue_index = bitmap_first(runq->bitmap, NRQS);
5378 queue_index >= 0;
5379 queue_index = bitmap_next(runq->bitmap, queue_index)) {
5380 thread_t thread;
5381 circle_queue_t queue = &runq->queues[queue_index];
5382
5383 cqe_foreach_element(thread, queue, runq_links) {
5384 assert(count > 0);
5385 assert_thread_magic(thread);
5386
5387 if (thread->sched_stamp != sched_tick &&
5388 thread->sched_mode == TH_MODE_TIMESHARE) {
5389 if (thread_update_add_thread(thread) == FALSE) {
5390 return TRUE;
5391 }
5392 }
5393
5394 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5395 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5396 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5397 }
5398 } else {
5399 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5400 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5401 }
5402 }
5403 count--;
5404 }
5405 }
5406
5407 return FALSE;
5408 }
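/*
 * runq_scan is pass one of the scheme described above: candidates are
 * stashed in thread_update_array under the run queue lock, and pass two
 * (thread_update_process_threads) later takes each thread lock to apply
 * SCHED(update_priority) outside of the run queue lock.
 */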
5409
5410 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5411
5412 boolean_t
5413 thread_eager_preemption(thread_t thread)
5414 {
5415 return (thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0;
5416 }
5417
5418 void
5419 thread_set_eager_preempt(thread_t thread)
5420 {
5421 spl_t x;
5422 processor_t p;
5423 ast_t ast = AST_NONE;
5424
5425 x = splsched();
5426 p = current_processor();
5427
5428 thread_lock(thread);
5429 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5430
5431 if (thread == current_thread()) {
5432 ast = csw_check(thread, p, AST_NONE);
5433 thread_unlock(thread);
5434 if (ast != AST_NONE) {
5435 (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
5436 }
5437 } else {
5438 p = thread->last_processor;
5439
5440 if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
5441 p->active_thread == thread) {
5442 cause_ast_check(p);
5443 }
5444
5445 thread_unlock(thread);
5446 }
5447
5448 splx(x);
5449 }
5450
5451 void
5452 thread_clear_eager_preempt(thread_t thread)
5453 {
5454 spl_t x;
5455
5456 x = splsched();
5457 thread_lock(thread);
5458
5459 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5460
5461 thread_unlock(thread);
5462 splx(x);
5463 }
5464
5465 /*
5466 * Scheduling statistics
5467 */
5468 void
5469 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5470 {
5471 struct processor_sched_statistics *stats;
5472 boolean_t to_realtime = FALSE;
5473
5474 stats = &processor->processor_data.sched_stats;
5475 stats->csw_count++;
5476
5477 if (otherpri >= BASEPRI_REALTIME) {
5478 stats->rt_sched_count++;
5479 to_realtime = TRUE;
5480 }
5481
5482 if ((reasons & AST_PREEMPT) != 0) {
5483 stats->preempt_count++;
5484
5485 if (selfpri >= BASEPRI_REALTIME) {
5486 stats->preempted_rt_count++;
5487 }
5488
5489 if (to_realtime) {
5490 stats->preempted_by_rt_count++;
5491 }
5492 }
5493 }
5494
5495 void
5496 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
5497 {
5498 uint64_t timestamp = mach_absolute_time();
5499
5500 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5501 stats->last_change_timestamp = timestamp;
5502 }
5503
5504 /*
5505 * For calls from assembly code
5506 */
5507 #undef thread_wakeup
5508 void
5509 thread_wakeup(
5510 event_t x);
5511
5512 void
5513 thread_wakeup(
5514 event_t x)
5515 {
5516 thread_wakeup_with_result(x, THREAD_AWAKENED);
5517 }
5518
5519 boolean_t
5520 preemption_enabled(void)
5521 {
5522 return get_preemption_level() == 0 && ml_get_interrupts_enabled();
5523 }
5524
5525 static void
5526 sched_timer_deadline_tracking_init(void)
5527 {
5528 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5529 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5530 }
5531
5532 #if __arm__ || __arm64__
5533
5534 uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5535 uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
5536 bool perfcontrol_failsafe_active = false;
5537 bool perfcontrol_sleep_override = false;
5538
5539 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5540 uint64_t perfcontrol_failsafe_activation_time;
5541 uint64_t perfcontrol_failsafe_deactivation_time;
5542
5543 /* data covering who likely caused it and how long they ran */
5544 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5545 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
5546 int perfcontrol_failsafe_pid;
5547 uint64_t perfcontrol_failsafe_tid;
5548 uint64_t perfcontrol_failsafe_thread_timer_at_start;
5549 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
5550 uint32_t perfcontrol_failsafe_recommended_at_trigger;
5551
5552 /*
5553 * Perf controller calls here to update the recommended core bitmask.
5554 * If the failsafe is active, we don't immediately apply the new value.
5555 * Instead, we store the new request and use it after the failsafe deactivates.
5556 *
5557 * If the failsafe is not active, immediately apply the update.
5558 *
5559 * No scheduler locks are held, no other locks that the scheduler might depend on are held,
5560 * and interrupts are enabled.
5561 *
5562 * The prototype currently lives in osfmk/arm/machine_routines.h.
5563 */
5564 void
5565 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
5566 {
5567 assert(preemption_enabled());
5568
5569 spl_t s = splsched();
5570 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5571
5572 perfcontrol_requested_recommended_cores = recommended_cores;
5573 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
5574
5575 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
5576 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5577 } else {
5578 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5579 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5580 perfcontrol_requested_recommended_cores,
5581 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5582 }
5583
5584 simple_unlock(&sched_recommended_cores_lock);
5585 splx(s);
5586 }
5587
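/*
 * Sleep override: while the system is heading into sleep, recommend all
 * cores; the combined perfcontrol/user request is re-applied by
 * sched_restore_recommended_cores_after_sleep() once the override is lifted.
 */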
5588 void
5589 sched_override_recommended_cores_for_sleep(void)
5590 {
5591 spl_t s = splsched();
5592 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5593
5594 if (perfcontrol_sleep_override == false) {
5595 perfcontrol_sleep_override = true;
5596 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5597 }
5598
5599 simple_unlock(&sched_recommended_cores_lock);
5600 splx(s);
5601 }
5602
5603 void
5604 sched_restore_recommended_cores_after_sleep(void)
5605 {
5606 spl_t s = splsched();
5607 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5608
5609 if (perfcontrol_sleep_override == true) {
5610 perfcontrol_sleep_override = false;
5611 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5612 }
5613
5614 simple_unlock(&sched_recommended_cores_lock);
5615 splx(s);
5616 }
5617
5618 /*
5619 * Consider whether we need to activate the recommended cores failsafe
5620 *
5621 * Called from the quantum timer interrupt context of a realtime thread.
5622 * No scheduler locks are held, and interrupts are disabled.
5623 */
5624 void
5625 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
5626 {
5627 /*
5628 * Check if a realtime thread is starving the system
5629 * and bringing up non-recommended cores would help
5630 *
5631 * TODO: Is this the correct check for recommended == possible cores?
5632 * TODO: Validate the checks without the relevant lock are OK.
5633 */
5634
5635 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
5636 /* keep track of how long the responsible thread runs */
5637
5638 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5639
5640 if (perfcontrol_failsafe_active == TRUE &&
5641 cur_thread->thread_id == perfcontrol_failsafe_tid) {
5642 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
5643 timer_grab(&cur_thread->system_timer);
5644 }
5645
5646 simple_unlock(&sched_recommended_cores_lock);
5647
5648 /* we're already trying to solve the problem, so bail */
5649 return;
5650 }
5651
5652 /* The failsafe won't help if there are no more processors to enable */
5653 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
5654 return;
5655 }
5656
5657 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
5658
5659 /* Use the maintenance thread as our canary in the coal mine */
5660 thread_t m_thread = sched_maintenance_thread;
5661
5662 /* If it doesn't look bad, nothing to see here */
5663 if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
5664 return;
5665 }
5666
5667 /* It looks bad, take the lock to be sure */
5668 thread_lock(m_thread);
5669
5670 if (m_thread->runq == PROCESSOR_NULL ||
5671 (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
5672 m_thread->last_made_runnable_time >= too_long_ago) {
5673 /*
5674 * Maintenance thread is either on cpu or blocked, and
5675 * therefore wouldn't benefit from more cores
5676 */
5677 thread_unlock(m_thread);
5678 return;
5679 }
5680
5681 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
5682
5683 thread_unlock(m_thread);
5684
5685 /*
5686 * There are cores disabled at perfcontrol's recommendation, but the
5687 * system is so overloaded that the maintenance thread can't run.
5688 * That likely means that perfcontrol can't run either, so it can't fix
5689 * the recommendation. We have to kick in a failsafe to keep from starving.
5690 *
5691 * When the maintenance thread has been starved for too long,
5692 * ignore the recommendation from perfcontrol and light up all the cores.
5693 *
5694 * TODO: Consider weird states like boot, sleep, or debugger
5695 */
5696
5697 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5698
5699 if (perfcontrol_failsafe_active == TRUE) {
5700 simple_unlock(&sched_recommended_cores_lock);
5701 return;
5702 }
5703
5704 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5705 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
5706 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
5707
5708 perfcontrol_failsafe_active = TRUE;
5709 perfcontrol_failsafe_activation_time = mach_absolute_time();
5710 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
5711 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
5712
5713 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
5714 task_t task = cur_thread->task;
5715 perfcontrol_failsafe_pid = task_pid(task);
5716 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
5717
5718 perfcontrol_failsafe_tid = cur_thread->thread_id;
5719
5720 /* Blame the thread for time it has run recently */
5721 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
5722
5723 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
5724
5725 /* Compute the start time of the bad behavior in terms of the thread's on core time */
5726 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
5727 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
5728
5729 /* Ignore the previously recommended core configuration */
5730 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5731
5732 simple_unlock(&sched_recommended_cores_lock);
5733 }
5734
5735 /*
5736 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
5737 *
5738 * Runs in the context of the maintenance thread, no locks held
5739 */
5740 static void
5741 sched_recommended_cores_maintenance(void)
5742 {
5743 /* Common case - no failsafe, nothing to be done here */
5744 if (__probable(perfcontrol_failsafe_active == FALSE)) {
5745 return;
5746 }
5747
5748 uint64_t ctime = mach_absolute_time();
5749
5750 boolean_t print_diagnostic = FALSE;
5751 char p_name[FAILSAFE_NAME_LEN] = "";
5752
5753 spl_t s = splsched();
5754 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5755
5756 /* Check again, under the lock, to avoid races */
5757 if (perfcontrol_failsafe_active == FALSE) {
5758 goto out;
5759 }
5760
5761 /*
5762 * Ensure that the other cores get another few ticks to run some threads.
5763 * Without this hysteresis, the maintenance thread would be the first to run,
5764 * and it would immediately turn the other cores back off.
5765 */
5766 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
5767 goto out;
5768 }
5769
5770 /* Capture some diagnostic state under the lock so we can print it out later */
5771
5772 int pid = perfcontrol_failsafe_pid;
5773 uint64_t tid = perfcontrol_failsafe_tid;
5774
5775 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
5776 perfcontrol_failsafe_thread_timer_at_start;
5777 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
5778 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
5779 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
5780 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
5781
5782 print_diagnostic = TRUE;
5783
5784 /* Deactivate the failsafe and reinstate the requested recommendation settings */
5785
5786 perfcontrol_failsafe_deactivation_time = ctime;
5787 perfcontrol_failsafe_active = FALSE;
5788
5789 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5790 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
5791 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
5792
5793 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5794
5795 out:
5796 simple_unlock(&sched_recommended_cores_lock);
5797 splx(s);
5798
5799 if (print_diagnostic) {
5800 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
5801
5802 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
5803 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
5804
5805 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
5806 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
5807
5808 printf("recommended core failsafe kicked in for %lld ms "
5809 "likely due to %s[%d] thread 0x%llx spending "
5810 "%lld ms on cpu at realtime priority - "
5811 "new recommendation: 0x%x -> 0x%x\n",
5812 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
5813 rec_cores_before, rec_cores_after);
5814 }
5815 }
5816
5817 #endif /* __arm__ || __arm64__ */
5818
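/*
 * Enable or disable a processor on behalf of user-space policy by setting
 * or clearing its bit in usercontrol_requested_recommended_cores.  On
 * platforms with a perfcontrol mask the effective recommendation is the
 * intersection of both masks; elsewhere the user mask is applied directly.
 * While a failsafe or sleep override is active, the update is deferred.
 */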
5819 kern_return_t
5820 sched_processor_enable(processor_t processor, boolean_t enable)
5821 {
5822 assert(preemption_enabled());
5823
5824 spl_t s = splsched();
5825 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5826
5827 if (enable) {
5828 bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
5829 } else {
5830 bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
5831 }
5832
5833 #if __arm__ || __arm64__
5834 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
5835 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5836 } else {
5837 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5838 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5839 perfcontrol_requested_recommended_cores,
5840 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5841 }
5842 #else /* __arm__ || __arm64__ */
5843 sched_update_recommended_cores(usercontrol_requested_recommended_cores);
5844 #endif /* __arm__ || __arm64__ */
5845
5846 simple_unlock(&sched_recommended_cores_lock);
5847 splx(s);
5848
5849 return KERN_SUCCESS;
5850 }
5851
5852
5853 /*
5854 * Apply a new recommended cores mask to the processors it affects
5855 * Runs after considering failsafes and such
5856 *
5857 * Iterate over processors and update their ->is_recommended field.
5858 * If a processor is running, we let it drain out at its next
5859 * quantum expiration or blocking point. If a processor is idle, there
5860 * may be more work for it to do, so IPI it.
5861 *
5862 * interrupts disabled, sched_recommended_cores_lock is held
5863 */
5864 static void
5865 sched_update_recommended_cores(uint64_t recommended_cores)
5866 {
5867 processor_set_t pset, nset;
5868 processor_t processor;
5869 uint64_t needs_exit_idle_mask = 0x0;
5870 uint32_t avail_count;
5871
5872 processor = processor_list;
5873 pset = processor->processor_set;
5874
5875 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
5876 recommended_cores,
5877 #if __arm__ || __arm64__
5878 perfcontrol_failsafe_active, 0, 0);
5879 #else /* __arm__ || __arm64__ */
5880 0, 0, 0);
5881 #endif /* __arm__ || __arm64__ */
5882
5883 if (__builtin_popcountll(recommended_cores) == 0) {
5884 bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
5885 }
5886
5887 boolean_t pset_newly_recommended = false;
5888
5889 /* First, set the recommended cores */
5890 pset_lock(pset);
5891 avail_count = 0;
5892 do {
5893 nset = processor->processor_set;
5894 if (nset != pset) {
5895 pset_unlock(pset);
5896 pset = nset;
5897 pset_newly_recommended = false;
5898 pset_lock(pset);
5899 }
5900
5901 if (bit_test(recommended_cores, processor->cpu_id)) {
5902 processor->is_recommended = TRUE;
5903 if (bit_first(pset->recommended_bitmask) == -1) {
5904 pset_newly_recommended = true;
5905 }
5906 bit_set(pset->recommended_bitmask, processor->cpu_id);
5907
5908 if (processor->state == PROCESSOR_IDLE) {
5909 if (processor != current_processor()) {
5910 bit_set(needs_exit_idle_mask, processor->cpu_id);
5911 }
5912 }
5913 if (processor->state != PROCESSOR_OFF_LINE) {
5914 avail_count++;
5915 }
5916 if (pset_newly_recommended) {
5917 SCHED(pset_made_schedulable)(processor, pset, false);
5918 }
5919 }
5920 } while ((processor = processor->processor_list) != NULL);
5921 pset_unlock(pset);
5922
5923 /* Now shut down the cores that are not recommended */
5924 processor = processor_list;
5925 pset = processor->processor_set;
5926
5927 pset_lock(pset);
5928 do {
5929 nset = processor->processor_set;
5930 if (nset != pset) {
5931 pset_unlock(pset);
5932 pset = nset;
5933 pset_lock(pset);
5934 }
5935
5936 if (!bit_test(recommended_cores, processor->cpu_id)) {
5937 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
5938
5939 processor->is_recommended = FALSE;
5940 bit_clear(pset->recommended_bitmask, processor->cpu_id);
5941
5942 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
5943 ipi_type = SCHED_IPI_IMMEDIATE;
5944 }
5945 SCHED(processor_queue_shutdown)(processor);
5946 /* pset unlocked */
5947
5948 SCHED(rt_queue_shutdown)(processor);
5949
5950 if (ipi_type != SCHED_IPI_NONE) {
5951 if (processor == current_processor()) {
5952 ast_on(AST_PREEMPT);
5953 } else {
5954 sched_ipi_perform(processor, ipi_type);
5955 }
5956 }
5957
5958 pset_lock(pset);
5959 }
5960 } while ((processor = processor->processor_list) != NULL);
5961
5962 processor_avail_count_user = avail_count;
5963 #if defined(__x86_64__)
5964 commpage_update_active_cpus();
5965 #endif
5966
5967 pset_unlock(pset);
5968
5969 /* Issue all pending IPIs now that the pset lock has been dropped */
5970 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
5971 processor = processor_array[cpuid];
5972 machine_signal_idle(processor);
5973 }
5974
5975 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
5976 needs_exit_idle_mask, 0, 0, 0);
5977 }
5978
5979 void
5980 thread_set_options(uint32_t thopt)
5981 {
5982 spl_t x;
5983 thread_t t = current_thread();
5984
5985 x = splsched();
5986 thread_lock(t);
5987
5988 t->options |= thopt;
5989
5990 thread_unlock(t);
5991 splx(x);
5992 }
5993
5994 void
5995 thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
5996 {
5997 thread->pending_block_hint = block_hint;
5998 }
5999
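/*
 * Report how many threads of the given QoS class can usefully run in
 * parallel.  The default implementation below ignores the QoS class and
 * returns the logical or physical CPU count, depending on whether
 * QOS_PARALLELISM_COUNT_LOGICAL is set in 'options'.
 */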
6000 uint32_t
6001 qos_max_parallelism(int qos, uint64_t options)
6002 {
6003 return SCHED(qos_max_parallelism)(qos, options);
6004 }
6005
6006 uint32_t
6007 sched_qos_max_parallelism(__unused int qos, uint64_t options)
6008 {
6009 host_basic_info_data_t hinfo;
6010 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
6011 /* Query the machine layer for core information */
6012 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
6013 (host_info_t)&hinfo, &count);
6014 assert(kret == KERN_SUCCESS);
6015
6016 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
6017 return hinfo.logical_cpu;
6018 } else {
6019 return hinfo.physical_cpu;
6020 }
6021 }
6022
6023 int sched_allow_NO_SMT_threads = 1;
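/*
 * A thread is treated as no-SMT when the feature is enabled, the thread is
 * not bound to a specific processor, and either the thread (or, on
 * DEBUG/DEVELOPMENT kernels, its task) has requested no-SMT.
 */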
6024 bool
6025 thread_no_smt(thread_t thread)
6026 {
6027 #if DEBUG || DEVELOPMENT
6028 return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT));
6029 #else
6030 return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && (thread->sched_flags & TH_SFLAG_NO_SMT);
6031 #endif
6032 }
6033
6034 bool
6035 processor_active_thread_no_smt(processor_t processor)
6036 {
6037 return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
6038 }
6039
6040 #if __arm64__
6041
6042 /*
6043 * Set up a new timer deadline, or replace the old deadline with the new one.
6044 *
6045 * Returns TRUE if an old timer was cancelled, FALSE if it was not.
6046 */
6047 boolean_t
6048 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
6049 {
6050 /*
6051 * Exchange the stored deadline for the new deadline. If the old deadline
6052 * was nonzero, the pending callback was cancelled; otherwise it was not.
6053 */
6054
6055 return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
6056 relaxed) != 0;
6057 }
6058
6059 #endif /* __arm64__ */
6060
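/*
 * Update the pset load average: the sample is the number of running CPUs
 * plus the non-realtime runnable count plus the realtime run-queue count,
 * scaled by PSET_LOAD_NUMERATOR_SHIFT, and it is folded into an
 * exponentially decaying average (new = (old + sample) / 2).
 */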
6061 void
6062 sched_update_pset_load_average(processor_set_t pset)
6063 {
6064 #if CONFIG_SCHED_CLUTCH
6065 int non_rt_load = sched_clutch_root_count(&pset->pset_clutch_root);
6066 #else /* CONFIG_SCHED_CLUTCH */
6067 int non_rt_load = pset->pset_runq.count;
6068 #endif /* CONFIG_SCHED_CLUTCH */
6069
6070 int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
6071 int new_load_average = (pset->load_average + load) >> 1;
6072
6073 pset->load_average = new_load_average;
6074
6075 #if (DEVELOPMENT || DEBUG)
6076 #if __AMP__
6077 if (pset->pset_cluster_type == PSET_AMP_P) {
6078 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
6079 }
6080 #endif
6081 #endif
6082 }
6083
6084 /* pset is locked */
6085 static processor_t
6086 choose_processor_for_realtime_thread(processor_set_t pset)
6087 {
6088 #if defined(__x86_64__)
6089 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
6090 #else
6091 const bool avoid_cpu0 = false;
6092 #endif
6093
6094 uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_URGENT_cpu_mask);
6095 if (avoid_cpu0) {
6096 cpu_map = bit_ror64(cpu_map, 1);
6097 }
6098
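/*
 * When avoiding cpu0 the candidate map has been rotated right by one, so
 * cpu0 sorts last; rotid is mapped back to the real cpu id by undoing the
 * rotation modulo 64.  The loop prefers an idle primary, then a running or
 * dispatching primary that is not already at realtime priority.
 */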
6099 for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) {
6100 int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
6101
6102 processor_t processor = processor_array[cpuid];
6103
6104 if (processor->processor_primary != processor) {
6105 continue;
6106 }
6107
6108 if (processor->state == PROCESSOR_IDLE) {
6109 return processor;
6110 }
6111
6112 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
6113 continue;
6114 }
6115
6116 if (processor->current_pri >= BASEPRI_RTQUEUES) {
6117 continue;
6118 }
6119
6120 return processor;
6121 }
6122
6123 if (!sched_allow_rt_smt) {
6124 return PROCESSOR_NULL;
6125 }
6126
6127 /* Consider secondary processors */
6128 if (avoid_cpu0) {
6129 /* Also avoid cpu1 */
6130 cpu_map = bit_ror64(cpu_map, 1);
6131 }
6132 for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) {
6133 int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;
6134
6135 processor_t processor = processor_array[cpuid];
6136
6137 if (processor->processor_primary == processor) {
6138 continue;
6139 }
6140
6141 if (processor->state == PROCESSOR_IDLE) {
6142 return processor;
6143 }
6144
6145 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
6146 continue;
6147 }
6148
6149 if (processor->current_pri >= BASEPRI_RTQUEUES) {
6150 continue;
6151 }
6152
6153 return processor;
6154 }
6155
6156 return PROCESSOR_NULL;
6157 }
6158
6159 /* pset is locked */
6160 static bool
6161 all_available_primaries_are_running_realtime_threads(processor_set_t pset)
6162 {
6163 return these_processors_are_running_realtime_threads(pset, pset->primary_map);
6164 }
6165
6166 /* pset is locked */
6167 static bool
6168 these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map)
6169 {
6170 uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask) & these_map;
6171
6172 for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
6173 processor_t processor = processor_array[cpuid];
6174
6175 if (processor->state == PROCESSOR_IDLE) {
6176 return false;
6177 }
6178
6179 if (processor->state == PROCESSOR_DISPATCHING) {
6180 return false;
6181 }
6182
6183 if (processor->state != PROCESSOR_RUNNING) {
6184 /*
6185 * All other processor states are considered unavailable to run
6186 * realtime threads. In particular, we prefer an available secondary
6187 * processor over the risk of leaving a realtime thread on the run queue
6188 * while waiting for a processor in PROCESSOR_START state,
6189 * which should anyway be a rare case.
6190 */
6191 continue;
6192 }
6193
6194 if (processor->current_pri < BASEPRI_RTQUEUES) {
6195 return false;
6196 }
6197 }
6198
6199 return true;
6200 }
6201
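/*
 * Decide whether this processor should pick up a realtime thread.  On x86,
 * cpu0 is avoided unless all other primaries are already running realtime
 * threads; with SMT, cpu1 is likewise avoided unless realtime-on-SMT is
 * allowed and all other processors are busy with realtime work; any other
 * secondary is used only when realtime-on-SMT is allowed and all available
 * primaries are running realtime threads.  Other platforms always allow it.
 */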
6202 static bool
6203 sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor)
6204 {
6205 bool ok_to_run_realtime_thread = true;
6206 #if defined(__x86_64__)
6207 if (sched_avoid_cpu0 && processor->cpu_id == 0) {
6208 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1);
6209 } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
6210 ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2);
6211 } else if (processor->processor_primary != processor) {
6212 ok_to_run_realtime_thread = sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset);
6213 }
6214 #else
6215 (void)pset;
6216 (void)processor;
6217 #endif
6218 return ok_to_run_realtime_thread;
6219 }
6220
6221 void
6222 sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
6223 {
6224 if (drop_lock) {
6225 pset_unlock(pset);
6226 }
6227 }
6228
6229 void
6230 thread_set_no_smt(bool set)
6231 {
6232 thread_t thread = current_thread();
6233
6234 spl_t s = splsched();
6235 thread_lock(thread);
6236 if (set) {
6237 thread->sched_flags |= TH_SFLAG_NO_SMT;
6238 } else {
6239 thread->sched_flags &= ~TH_SFLAG_NO_SMT;
6240 }
6241 thread_unlock(thread);
6242 splx(s);
6243 }
6244
6245 bool
6246 thread_get_no_smt(void)
6247 {
6248 return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
6249 }
6250
6251 #if DEBUG || DEVELOPMENT
6252 extern void sysctl_task_set_no_smt(char no_smt);
6253 void
6254 sysctl_task_set_no_smt(char no_smt)
6255 {
6256 thread_t thread = current_thread();
6257 task_t task = thread->task;
6258
6259 if (no_smt == '1') {
6260 task->t_flags |= TF_NO_SMT;
6261 } else {
6262 task->t_flags &= ~TF_NO_SMT;
6263 }
6264 }
6265
6266 extern char sysctl_task_get_no_smt(void);
6267 char
6268 sysctl_task_get_no_smt(void)
6269 {
6270 thread_t thread = current_thread();
6271 task_t task = thread->task;
6272
6273 if (task->t_flags & TF_NO_SMT) {
6274 return '1';
6275 }
6276 return '0';
6277 }
6278 #endif /* DEVELOPMENT || DEBUG */
6279
6280
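/*
 * Bind the calling thread to a cluster type on AMP systems: 'e'/'E'
 * restricts it to efficiency cores, 'p'/'P' to performance cores, and any
 * other value clears the restriction.  The thread then blocks, presumably so
 * that the new binding takes effect at its next dispatch.  On non-AMP builds
 * this is a no-op.
 */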
6281 __private_extern__ void
6282 thread_bind_cluster_type(char cluster_type)
6283 {
6284 #if __AMP__
6285 thread_t thread = current_thread();
6286
6287 spl_t s = splsched();
6288 thread_lock(thread);
6289 thread->sched_flags &= ~(TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY);
6290 switch (cluster_type) {
6291 case 'e':
6292 case 'E':
6293 thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
6294 break;
6295 case 'p':
6296 case 'P':
6297 thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
6298 break;
6299 default:
6300 break;
6301 }
6302 thread_unlock(thread);
6303 splx(s);
6304
6305 thread_block(THREAD_CONTINUE_NULL);
6306 #else /* __AMP__ */
6307 (void)cluster_type;
6308 #endif /* __AMP__ */
6309 }