[apple/xnu.git] / osfmk / kern / sched_prim.c (git blame, xnu-7195.101.1)
1c79356b 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
0a7de745 31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67#include <debug.h>
68
69#include <mach/mach_types.h>
1c79356b 70#include <mach/machine.h>
71#include <mach/policy.h>
72#include <mach/sync_policy.h>
6d2010ae 73#include <mach/thread_act.h>
91447636 74
75#include <machine/machine_routines.h>
76#include <machine/sched_param.h>
0c530ab8 77#include <machine/machine_cpu.h>
cb323159 78#include <machine/limits.h>
5ba3f43e 79#include <machine/atomic.h>
91447636 80
fe8ab488 81#include <machine/commpage.h>
fe8ab488 82
91447636 83#include <kern/kern_types.h>
39037602 84#include <kern/backtrace.h>
1c79356b 85#include <kern/clock.h>
86#include <kern/cpu_number.h>
87#include <kern/cpu_data.h>
3e170ce0 88#include <kern/smp.h>
91447636 89#include <kern/debug.h>
90#include <kern/macro_help.h>
91#include <kern/machine.h>
92#include <kern/misc_protos.h>
93#if MONOTONIC
94#include <kern/monotonic.h>
95#endif /* MONOTONIC */
96#include <kern/processor.h>
97#include <kern/queue.h>
98#include <kern/sched.h>
99#include <kern/sched_prim.h>
fe8ab488 100#include <kern/sfi.h>
101#include <kern/syscall_subr.h>
102#include <kern/task.h>
103#include <kern/thread.h>
316670eb 104#include <kern/ledger.h>
39236c6e 105#include <kern/timer_queue.h>
3e170ce0 106#include <kern/waitq.h>
39037602 107#include <kern/policy_internal.h>
d9a64523 108#include <kern/cpu_quiesce.h>
91447636 109
110#include <vm/pmap.h>
111#include <vm/vm_kern.h>
112#include <vm/vm_map.h>
5ba3f43e 113#include <vm/vm_pageout.h>
91447636 114
b0d623f7 115#include <mach/sdt.h>
116#include <mach/mach_host.h>
117#include <mach/host_info.h>
b0d623f7 118
1c79356b 119#include <sys/kdebug.h>
120#include <kperf/kperf.h>
121#include <kern/kpc.h>
5ba3f43e 122#include <san/kasan.h>
0c530ab8 123#include <kern/pms.h>
124#include <kern/host.h>
125#include <stdatomic.h>
3a60a9f5 126
127struct sched_statistics PERCPU_DATA(sched_stats);
128bool sched_stats_active;
129
130int
131rt_runq_count(processor_set_t pset)
5ba3f43e 132{
0a7de745 133 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
5ba3f43e 134}
2d21ac55 135
136void
137rt_runq_count_incr(processor_set_t pset)
5ba3f43e 138{
0a7de745 139 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
5ba3f43e 140}
3e170ce0 141
142void
143rt_runq_count_decr(processor_set_t pset)
5ba3f43e 144{
0a7de745 145 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
5ba3f43e 146}
6d2010ae 147
0a7de745 148#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
f427ee49 149TUNABLE(int, default_preemption_rate, "preempt", DEFAULT_PREEMPTION_RATE);
1c79356b 150
0a7de745 151#define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
f427ee49 152TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE);
316670eb 153
154#define MAX_UNSAFE_QUANTA 800
155TUNABLE(int, max_unsafe_quanta, "unsafe", MAX_UNSAFE_QUANTA);
0b4e3aa0 156
157#define MAX_POLL_QUANTA 2
158TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA);
0b4e3aa0 159
0a7de745 160#define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
f427ee49 161int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
0b4e3aa0 162
0a7de745 163uint64_t max_poll_computation;
55e303ae 164
165uint64_t max_unsafe_computation;
166uint64_t sched_safe_duration;
6d2010ae 167
fe8ab488 168#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 169
170uint32_t std_quantum;
171uint32_t min_std_quantum;
172uint32_t bg_quantum;
55e303ae 173
174uint32_t std_quantum_us;
175uint32_t bg_quantum_us;
91447636 176
fe8ab488 177#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 178
179uint32_t thread_depress_time;
180uint32_t default_timeshare_computation;
181uint32_t default_timeshare_constraint;
6d2010ae 182
183uint32_t max_rt_quantum;
184uint32_t min_rt_quantum;
55e303ae 185
186uint32_t rt_constraint_threshold;
187
fe8ab488 188#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 189
190unsigned sched_tick;
191uint32_t sched_tick_interval;
192
193/* Timeshare load calculation interval (15ms) */
194uint32_t sched_load_compute_interval_us = 15000;
195uint64_t sched_load_compute_interval_abs;
196static _Atomic uint64_t sched_load_compute_deadline;
1c79356b 197
198uint32_t sched_pri_shifts[TH_BUCKET_MAX];
199uint32_t sched_fixed_shift;
39236c6e 200
0a7de745 201uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
2d21ac55 202
203/* Allow foreground to decay past default to resolve inversions */
204#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
0a7de745 205int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
6d2010ae 206
207/* Defaults for timer deadline profiling */
208#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
0a7de745 209 * 2ms */
4b17d6b6 210#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
0a7de745 211 * <= 5ms */
39236c6e 212
213uint64_t timer_deadline_tracking_bin_1;
214uint64_t timer_deadline_tracking_bin_2;
215
216#endif /* CONFIG_SCHED_TIMESHARE_CORE */
217
218thread_t sched_maintenance_thread;
219
5ba3f43e 220/* interrupts disabled lock to guard recommended cores state */
221decl_simple_lock_data(static, sched_recommended_cores_lock);
222static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
223static void sched_update_recommended_cores(uint64_t recommended_cores);
5ba3f43e 224
225#if __arm__ || __arm64__
226static void sched_recommended_cores_maintenance(void);
227uint64_t perfcontrol_failsafe_starvation_threshold;
228extern char *proc_name_address(struct proc *p);
5ba3f43e 229#endif /* __arm__ || __arm64__ */
fe8ab488 230
0a7de745 231uint64_t sched_one_second_interval;
f427ee49 232boolean_t allow_direct_handoff = TRUE;
6d2010ae 233
1c79356b 234/* Forwards */
6d2010ae 235
fe8ab488 236#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 237
238static void load_shift_init(void);
239static void preempt_pri_init(void);
2d21ac55 240
fe8ab488 241#endif /* CONFIG_SCHED_TIMESHARE_CORE */
c910b4d9 242
243thread_t processor_idle(
244 thread_t thread,
245 processor_t processor);
91447636 246
247static ast_t
248csw_check_locked(
249 thread_t thread,
250 processor_t processor,
251 processor_set_t pset,
252 ast_t check_reason);
39236c6e 253
6d2010ae 254static void processor_setrun(
255 processor_t processor,
256 thread_t thread,
257 integer_t options);
6d2010ae 258
259static void
260sched_realtime_timebase_init(void);
261
262static void
263sched_timer_deadline_tracking_init(void);
264
0a7de745 265#if DEBUG
266extern int debug_task;
267#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
268#else
269#define TLOG(a, fmt, args...) do {} while (0)
270#endif
271
272static processor_t
273thread_bind_internal(
274 thread_t thread,
275 processor_t processor);
1c79356b 276
277static void
278sched_vm_group_maintenance(void);
1c79356b 279
fe8ab488 280#if defined(CONFIG_SCHED_TIMESHARE_CORE)
0a7de745 281int8_t sched_load_shifts[NRQS];
cb323159 282bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS_MAX)];
fe8ab488 283#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 284
285/*
286 * Statically allocate a buffer to hold the longest possible
287 * scheduler description string, as currently implemented.
288 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
289 * to export to userspace via sysctl(3). If either version
290 * changes, update the other.
291 *
292 * Note that in addition to being an upper bound on the strings
293 * in the kernel, it's also an exact parameter to PE_get_default(),
294 * which interrogates the device tree on some platforms. That
295 * API requires the caller know the exact size of the device tree
296 * property, so we need both a legacy size (32) and the current size
297 * (48) to deal with old and new device trees. The device tree property
298 * is similarly padded to a fixed size so that the same kernel image
299 * can run on multiple devices with different schedulers configured
300 * in the device tree.
301 */
6d2010ae 302char sched_string[SCHED_STRING_MAX_LENGTH];
3e170ce0 303
5ba3f43e 304uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
305
306/* Global flag which indicates whether Background Stepper Context is enabled */
307static int cpu_throttle_enabled = 1;
91447636 308
309void
310sched_init(void)
311{
f427ee49 312 boolean_t direct_handoff = FALSE;
5ba3f43e 313 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
314
315 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
316 /* No boot-args, check in device tree */
317 if (!PE_get_default("kern.sched_pri_decay_limit",
318 &sched_pri_decay_band_limit,
319 sizeof(sched_pri_decay_band_limit))) {
320 /* Allow decay all the way to normal limits */
321 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
322 }
323 }
324
325 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
0a7de745 326
327 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
328 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
6d2010ae 329 }
330 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
331
332 cpu_quiescent_counter_init();
333
6d2010ae 334 SCHED(init)();
5ba3f43e 335 SCHED(rt_init)(&pset0);
4b17d6b6 336 sched_timer_deadline_tracking_init();
39236c6e 337
338 SCHED(pset_init)(&pset0);
339 SCHED(processor_init)(master_processor);
340
341 if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) {
342 allow_direct_handoff = direct_handoff;
343 }
344}
345
346void
347sched_timebase_init(void)
348{
349 uint64_t abstime;
350
351 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
352 sched_one_second_interval = abstime;
0a7de745 353
354 SCHED(timebase_init)();
355 sched_realtime_timebase_init();
356}
357
fe8ab488 358#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 359
fe8ab488 360void
3e170ce0 361sched_timeshare_init(void)
362{
363 /*
364 * Calculate the timeslicing quantum
365 * in us.
1c79356b 366 */
0a7de745 367 if (default_preemption_rate < 1) {
1c79356b 368 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
0a7de745 369 }
0b4e3aa0 370 std_quantum_us = (1000 * 1000) / default_preemption_rate;
1c79356b 371
0b4e3aa0 372 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
1c79356b 373
0a7de745 374 if (default_bg_preemption_rate < 1) {
316670eb 375 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
0a7de745 376 }
377 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
378
379 printf("standard background quantum is %d us\n", bg_quantum_us);
380
91447636 381 load_shift_init();
4a3eedf9 382 preempt_pri_init();
1c79356b 383 sched_tick = 0;
384}
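/*
 * Worked example (editor's illustration, not from the original source):
 * with the default "preempt" rate of 100 Hz, std_quantum_us =
 * (1000 * 1000) / 100 = 10000 us, i.e. a 10 ms timeslice, and the default
 * "bg_preempt" rate of 400 Hz yields bg_quantum_us = 2500 us.  Both rates
 * can be overridden via the boot-args named in the TUNABLE() declarations
 * near the top of this file.
 */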
385
fe8ab488 386void
3e170ce0 387sched_timeshare_timebase_init(void)
55e303ae 388{
389 uint64_t abstime;
390 uint32_t shift;
55e303ae 391
91447636 392 /* standard timeslicing quantum */
55e303ae 393 clock_interval_to_absolutetime_interval(
0a7de745 394 std_quantum_us, NSEC_PER_USEC, &abstime);
55e303ae 395 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
b0d623f7 396 std_quantum = (uint32_t)abstime;
55e303ae 397
91447636 398 /* smallest remaining quantum (250 us) */
399 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
400 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
b0d623f7 401 min_std_quantum = (uint32_t)abstime;
55e303ae 402
403 /* quantum for background tasks */
404 clock_interval_to_absolutetime_interval(
0a7de745 405 bg_quantum_us, NSEC_PER_USEC, &abstime);
406 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
407 bg_quantum = (uint32_t)abstime;
408
409 /* scheduler tick interval */
410 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
0a7de745 411 NSEC_PER_USEC, &abstime);
cf7d32b8 412 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
b0d623f7 413 sched_tick_interval = (uint32_t)abstime;
55e303ae 414
415 /* timeshare load calculation interval & deadline initialization */
416 clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs);
f427ee49 417 os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs);
0a7de745 418
419 /*
420 * Compute conversion factor from usage to
421 * timesharing priorities with 5/8 ** n aging.
422 */
423 abstime = (abstime * 5) / 3;
0a7de745 424 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) {
91447636 425 abstime >>= 1;
0a7de745 426 }
2d21ac55 427 sched_fixed_shift = shift;
91447636 428
0a7de745 429 for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) {
39037602 430 sched_pri_shifts[i] = INT8_MAX;
0a7de745 431 }
39037602 432
433 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
434 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
39037602 435
fe8ab488 436 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
437 thread_depress_time = 1 * std_quantum;
438 default_timeshare_computation = std_quantum / 2;
439 default_timeshare_constraint = std_quantum;
440
5ba3f43e 441#if __arm__ || __arm64__
0a7de745 442 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
5ba3f43e 443#endif /* __arm__ || __arm64__ */
444}
445
446#endif /* CONFIG_SCHED_TIMESHARE_CORE */
447
448void
449pset_rt_init(processor_set_t pset)
450{
cb323159 451 os_atomic_init(&pset->rt_runq.count, 0);
452 queue_init(&pset->rt_runq.queue);
453 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
454}
455
456static void
457sched_realtime_timebase_init(void)
458{
459 uint64_t abstime;
460
461	/* smallest rt computation (50 us) */
462 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
463 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
464 min_rt_quantum = (uint32_t)abstime;
465
466 /* maximum rt computation (50 ms) */
467 clock_interval_to_absolutetime_interval(
0a7de745 468 50, 1000 * NSEC_PER_USEC, &abstime);
469 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
470 max_rt_quantum = (uint32_t)abstime;
471
472 /* constraint threshold for sending backup IPIs (4 ms) */
473 clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime);
474 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
475 rt_constraint_threshold = (uint32_t)abstime;
476}
477
478void
479sched_check_spill(processor_set_t pset, thread_t thread)
480{
481 (void)pset;
482 (void)thread;
483
484 return;
485}
486
487bool
488sched_thread_should_yield(processor_t processor, thread_t thread)
489{
490 (void)thread;
491
492 return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0;
493}
494
495/* Default implementations of .steal_thread_enabled */
496bool
497sched_steal_thread_DISABLED(processor_set_t pset)
498{
499 (void)pset;
500 return false;
501}
502
503bool
504sched_steal_thread_enabled(processor_set_t pset)
505{
f427ee49 506 return bit_count(pset->node->pset_map) > 1;
507}
508
fe8ab488 509#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 510
511/*
512 * Set up values for timeshare
513 * loading factors.
514 */
515static void
516load_shift_init(void)
517{
518 int8_t k, *p = sched_load_shifts;
519 uint32_t i, j;
91447636 520
0a7de745 521 uint32_t sched_decay_penalty = 1;
39236c6e 522
0a7de745 523 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) {
524 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
525 }
526
0a7de745 527 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) {
528 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
529 }
530
531 if (sched_decay_penalty == 0) {
532 /*
533 * There is no penalty for timeshare threads for using too much
534 * CPU, so set all load shifts to INT8_MIN. Even under high load,
535 * sched_pri_shift will be >INT8_MAX, and there will be no
536 * penalty applied to threads (nor will sched_usage be updated per
537 * thread).
538 */
539 for (i = 0; i < NRQS; i++) {
540 sched_load_shifts[i] = INT8_MIN;
541 }
542
543 return;
544 }
545
546 *p++ = INT8_MIN; *p++ = 0;
547
548 /*
549 * For a given system load "i", the per-thread priority
550 * penalty per quantum of CPU usage is ~2^k priority
551 * levels. "sched_decay_penalty" can cause more
552 * array entries to be filled with smaller "k" values
553 */
554 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
0a7de745 555 for (j <<= 1; (i < j) && (i < NRQS); ++i) {
91447636 556 *p++ = k;
0a7de745 557 }
558 }
559}
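/*
 * Editor's illustration (not in the original source): with the default
 * sched_decay_penalty of 1, the loop above fills the table as
 *   sched_load_shifts[0] = INT8_MIN, [1] = 0, [2..3] = 1, [4..7] = 2,
 *   [8..15] = 3, [16..31] = 4, ...
 * i.e. k is roughly log2(load) for the default tuning; per the comment in
 * the loop, the per-quantum priority penalty at load i is then ~2^k levels.
 */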
560
561static void
562preempt_pri_init(void)
563{
39037602 564 bitmap_t *p = sched_preempt_pri;
4a3eedf9 565
0a7de745 566 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) {
39037602 567 bitmap_set(p, i);
0a7de745 568 }
4a3eedf9 569
0a7de745 570 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) {
39037602 571 bitmap_set(p, i);
0a7de745 572 }
573}
574
fe8ab488 575#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 576
1c79356b 577/*
0b4e3aa0 578 * Thread wait timer expiration.
579 */
580void
581thread_timer_expire(
582 void *p0,
583 __unused void *p1)
1c79356b 584{
585 thread_t thread = p0;
586 spl_t s;
1c79356b 587
588 assert_thread_magic(thread);
589
1c79356b 590 s = splsched();
55e303ae 591 thread_lock(thread);
91447636 592 if (--thread->wait_timer_active == 0) {
593 if (thread->wait_timer_is_set) {
594 thread->wait_timer_is_set = FALSE;
55e303ae 595 clear_wait_internal(thread, THREAD_TIMED_OUT);
0b4e3aa0 596 }
1c79356b 597 }
55e303ae 598 thread_unlock(thread);
599 splx(s);
600}
601
1c79356b 602/*
603 * thread_unblock:
604 *
605 * Unblock thread on wake up.
606 *
3e170ce0 607 * Returns TRUE if the thread should now be placed on the runqueue.
608 *
609 * Thread must be locked.
610 *
611 * Called at splsched().
1c79356b 612 */
613boolean_t
614thread_unblock(
615 thread_t thread,
616 wait_result_t wresult)
1c79356b 617{
618 boolean_t ready_for_runq = FALSE;
619 thread_t cthread = current_thread();
620 uint32_t new_run_count;
621 int old_thread_state;
0b4e3aa0 622
91447636 623 /*
2d21ac55 624 * Set wait_result.
625 */
626 thread->wait_result = wresult;
1c79356b 627
91447636 628 /*
2d21ac55 629 * Cancel pending wait timer.
91447636 630 */
1c79356b 631 if (thread->wait_timer_is_set) {
0a7de745 632 if (timer_call_cancel(&thread->wait_timer)) {
1c79356b 633 thread->wait_timer_active--;
0a7de745 634 }
635 thread->wait_timer_is_set = FALSE;
636 }
637
638 boolean_t aticontext, pidle;
639 ml_get_power_state(&aticontext, &pidle);
640
91447636 641 /*
642 * Update scheduling state: not waiting,
643 * set running.
91447636 644 */
645 old_thread_state = thread->state;
646 thread->state = (old_thread_state | TH_RUN) &
0a7de745 647 ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT);
1c79356b 648
649 if ((old_thread_state & TH_RUN) == 0) {
650 uint64_t ctime = mach_approximate_time();
651 thread->last_made_runnable_time = thread->last_basepri_change_time = ctime;
652 timer_start(&thread->runnable_timer, ctime);
653
654 ready_for_runq = TRUE;
1c79356b 655
656 if (old_thread_state & TH_WAIT_REPORT) {
657 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
658 }
1c79356b 659
39037602 660 /* Update the runnable thread count */
cb323159 661 new_run_count = SCHED(run_count_incr)(thread);
662
663#if CONFIG_SCHED_AUTO_JOIN
664 if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) {
665 work_interval_auto_join_propagate(cthread, thread);
666 }
667#endif /*CONFIG_SCHED_AUTO_JOIN */
3e170ce0 668 } else {
2d21ac55 669 /*
670 * Either the thread is idling in place on another processor,
671 * or it hasn't finished context switching yet.
2d21ac55 672 */
6d2010ae 673 assert((thread->state & TH_IDLE) == 0);
674 /*
675 * The run count is only dropped after the context switch completes
676 * and the thread is still waiting, so we should not run_incr here
677 */
cb323159 678 new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
2d21ac55 679 }
1c79356b 680
681 /*
682 * Calculate deadline for real-time threads.
683 */
6d2010ae 684 if (thread->sched_mode == TH_MODE_REALTIME) {
3e170ce0 685 uint64_t ctime;
686
687 ctime = mach_absolute_time();
688 thread->realtime.deadline = thread->realtime.constraint + ctime;
689 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SET_RT_DEADLINE) | DBG_FUNC_NONE,
690 (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
691 }
692
693 /*
694 * Clear old quantum, fail-safe computation, etc.
695 */
fe8ab488 696 thread->quantum_remaining = 0;
697 thread->computation_metered = 0;
698 thread->reason = AST_NONE;
813fb2f6 699 thread->block_hint = kThreadWaitNone;
1c79356b 700
701 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
702 * We also account for "double hop" thread signaling via
703 * the thread callout infrastructure.
704 * DRK: consider removing the callout wakeup counters in the future;
705 * they're present for verification at the moment.
706 */
707
708 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
709 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
710
f427ee49 711 uint64_t ttd = current_processor()->timer_call_ttd;
39236c6e 712
4b17d6b6 713 if (ttd) {
0a7de745 714 if (ttd <= timer_deadline_tracking_bin_1) {
4b17d6b6 715 thread->thread_timer_wakeups_bin_1++;
716 } else if (ttd <= timer_deadline_tracking_bin_2) {
717 thread->thread_timer_wakeups_bin_2++;
718 }
4b17d6b6 719 }
39236c6e 720
5c9f4661 721 ledger_credit_thread(thread, thread->t_ledger,
0a7de745 722 task_ledgers.interrupt_wakeups, 1);
4b17d6b6 723 if (pidle) {
5c9f4661 724 ledger_credit_thread(thread, thread->t_ledger,
0a7de745 725 task_ledgers.platform_idle_wakeups, 1);
726 }
727 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
5c9f4661 728 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
4b17d6b6 729 if (cthread->callout_woken_from_icontext) {
5c9f4661 730 ledger_credit_thread(thread, thread->t_ledger,
0a7de745 731 task_ledgers.interrupt_wakeups, 1);
4b17d6b6 732 thread->thread_callout_interrupt_wakeups++;
5c9f4661 733
4b17d6b6 734 if (cthread->callout_woken_from_platform_idle) {
5c9f4661 735 ledger_credit_thread(thread, thread->t_ledger,
0a7de745 736 task_ledgers.platform_idle_wakeups, 1);
737 thread->thread_callout_platform_idle_wakeups++;
738 }
5c9f4661 739
39236c6e 740 cthread->callout_woke_thread = TRUE;
741 }
742 }
5c9f4661 743
4b17d6b6 744 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
745 thread->callout_woken_from_icontext = !!aticontext;
746 thread->callout_woken_from_platform_idle = !!pidle;
39236c6e 747 thread->callout_woke_thread = FALSE;
748 }
749
750#if KPERF
751 if (ready_for_runq) {
752 kperf_make_runnable(thread, aticontext);
753 }
754#endif /* KPERF */
755
fe8ab488 756 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
757 MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
758 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
759 sched_run_buckets[TH_BUCKET_RUN], 0);
760
761 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
91447636 762
0a7de745 763 return ready_for_runq;
764}
765
766/*
767 * Routine: thread_allowed_for_handoff
768 * Purpose:
769 * Check if the thread is allowed for handoff operation
770 * Conditions:
771 * thread lock held, IPC locks may be held.
772 * TODO: In future, do not allow handoff if threads have different cluster
773 * recommendations.
774 */
775boolean_t
776thread_allowed_for_handoff(
777 thread_t thread)
778{
779 thread_t self = current_thread();
780
781 if (allow_direct_handoff &&
782 thread->sched_mode == TH_MODE_REALTIME &&
783 self->sched_mode == TH_MODE_REALTIME) {
784 return TRUE;
785 }
786
787 return FALSE;
788}
789
1c79356b 790/*
91447636 791 * Routine: thread_go
1c79356b 792 * Purpose:
91447636 793 * Unblock and dispatch thread.
794 * Conditions:
795 * thread lock held, IPC locks may be held.
796 * thread must have been pulled from wait queue under same lock hold.
797 * thread must have been waiting
798 * Returns:
9bccf70c 799 * KERN_SUCCESS - Thread was set running
800 *
801 * TODO: This should return void
1c79356b 802 */
9bccf70c 803kern_return_t
91447636 804thread_go(
0a7de745 805 thread_t thread,
806 wait_result_t wresult,
807 waitq_options_t option)
1c79356b 808{
809 thread_t self = current_thread();
810
811 assert_thread_magic(thread);
812
1c79356b 813 assert(thread->at_safe_point == FALSE);
9bccf70c 814 assert(thread->wait_event == NO_EVENT64);
3e170ce0 815 assert(thread->waitq == NULL);
1c79356b 816
0a7de745 817 assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2)));
3e170ce0 818 assert(thread->state & TH_WAIT);
55e303ae 819
55e303ae 820
39037602 821 if (thread_unblock(thread, wresult)) {
0a7de745 822#if SCHED_TRACE_THREAD_WAKEUPS
39037602 823 backtrace(&thread->thread_wakeup_bt[0],
cb323159 824 (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL);
39037602 825#endif
826 if ((option & WQ_OPTION_HANDOFF) &&
827 thread_allowed_for_handoff(thread)) {
828 thread_reference(thread);
829 assert(self->handoff_thread == NULL);
830 self->handoff_thread = thread;
831 } else {
832 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
833 }
39037602 834 }
3e170ce0 835
0a7de745 836 return KERN_SUCCESS;
837}
838
839/*
840 * Routine: thread_mark_wait_locked
841 * Purpose:
842 * Mark a thread as waiting. If, given the circumstances,
843 * it doesn't want to wait (i.e. already aborted), then
844 * indicate that in the return value.
845 * Conditions:
846 * at splsched() and thread is locked.
847 */
848__private_extern__
849wait_result_t
1c79356b 850thread_mark_wait_locked(
851 thread_t thread,
852 wait_interrupt_t interruptible_orig)
1c79356b 853{
854 boolean_t at_safe_point;
855 wait_interrupt_t interruptible = interruptible_orig;
1c79356b 856
857 if (thread->state & TH_IDLE) {
858 panic("Invalid attempt to wait while running the idle thread");
859 }
860
0a7de745 861 assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT)));
b0d623f7 862
863 /*
864 * The thread may have certain types of interrupts/aborts masked
865 * off. Even if the wait location says these types of interrupts
866 * are OK, we have to honor mask settings (outer-scoped code may
867 * not be able to handle aborts at the moment).
868 */
d9a64523 869 interruptible &= TH_OPT_INTMASK;
0a7de745 870 if (interruptible > (thread->options & TH_OPT_INTMASK)) {
91447636 871 interruptible = thread->options & TH_OPT_INTMASK;
0a7de745 872 }
873
874 at_safe_point = (interruptible == THREAD_ABORTSAFE);
875
876 if (interruptible == THREAD_UNINT ||
877 !(thread->sched_flags & TH_SFLAG_ABORT) ||
878 (!at_safe_point &&
879 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
880 if (!(thread->state & TH_TERMINATE)) {
316670eb 881 DTRACE_SCHED(sleep);
0a7de745 882 }
b0d623f7 883
884 int state_bits = TH_WAIT;
885 if (!interruptible) {
886 state_bits |= TH_UNINT;
887 }
888 if (thread->sched_call) {
889 wait_interrupt_t mask = THREAD_WAIT_NOREPORT_USER;
890 if (is_kerneltask(thread->task)) {
891 mask = THREAD_WAIT_NOREPORT_KERNEL;
892 }
893 if ((interruptible_orig & mask) == 0) {
894 state_bits |= TH_WAIT_REPORT;
895 }
896 }
897 thread->state |= state_bits;
9bccf70c 898 thread->at_safe_point = at_safe_point;
899
900 /* TODO: pass this through assert_wait instead, have
901 * assert_wait just take a struct as an argument */
902 assert(!thread->block_hint);
903 thread->block_hint = thread->pending_block_hint;
904 thread->pending_block_hint = kThreadWaitNone;
905
0a7de745 906 return thread->wait_result = THREAD_WAITING;
d9a64523 907 } else {
0a7de745 908 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) {
d9a64523 909 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
0a7de745 910 }
9bccf70c 911 }
813fb2f6 912 thread->pending_block_hint = kThreadWaitNone;
55e303ae 913
0a7de745 914 return thread->wait_result = THREAD_INTERRUPTED;
915}
916
917/*
918 * Routine: thread_interrupt_level
919 * Purpose:
920 * Set the maximum interruptible state for the
921 * current thread. The effective value of any
922 * interruptible flag passed into assert_wait
923 * will never exceed this.
924 *
925 * Useful for code that must not be interrupted,
926 * but which calls code that doesn't know that.
927 * Returns:
928 * The old interrupt level for the thread.
929 */
0a7de745 930__private_extern__
931wait_interrupt_t
932thread_interrupt_level(
933 wait_interrupt_t new_level)
934{
935 thread_t thread = current_thread();
91447636 936 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1c79356b 937
91447636 938 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1c79356b 939
91447636 940 return result;
941}
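/*
 * Editor's sketch (illustrative, not part of the original file): the usual
 * save/restore pairing around a region that must not be aborted even if
 * code it calls waits interruptibly:
 *
 *	wait_interrupt_t saved = thread_interrupt_level(THREAD_UNINT);
 *	... code that may indirectly call assert_wait() ...
 *	thread_interrupt_level(saved);
 */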
942
943/*
944 * assert_wait:
945 *
946 * Assert that the current thread is about to go to
947 * sleep until the specified event occurs.
948 */
9bccf70c 949wait_result_t
1c79356b 950assert_wait(
951 event_t event,
952 wait_interrupt_t interruptible)
1c79356b 953{
0a7de745 954 if (__improbable(event == NO_EVENT)) {
3e170ce0 955 panic("%s() called with NO_EVENT", __func__);
0a7de745 956 }
1c79356b 957
316670eb 958 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
959 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
960 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
316670eb 961
962 struct waitq *waitq;
963 waitq = global_eventq(event);
964 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
965}
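/*
 * Editor's sketch (illustrative only, not part of the original file): the
 * canonical pairing of assert_wait() with thread_block() and
 * thread_wakeup().  "example_flag" is a hypothetical condition; real
 * callers normally protect it with a lock or use a lock-aware wait
 * variant so the flag cannot change unnoticed.
 */
#if 0 /* example */
static int example_flag;

static void
example_waiter(void)
{
	wait_result_t wres;

	/* Assert the wait first so a wakeup issued after the check below
	 * is not lost. */
	wres = assert_wait(&example_flag, THREAD_UNINT);

	if (example_flag) {
		/* Condition already satisfied: cancel the asserted wait. */
		clear_wait(current_thread(), THREAD_AWAKENED);
		return;
	}

	if (wres == THREAD_WAITING) {
		thread_block(THREAD_CONTINUE_NULL);
	}
}

static void
example_waker(void)
{
	example_flag = 1;
	thread_wakeup(&example_flag);	/* wake all threads waiting on the event */
}
#endif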
966
967/*
968 * assert_wait_queue:
969 *
970 * Return the global waitq for the specified event
971 */
972struct waitq *
973assert_wait_queue(
0a7de745 974 event_t event)
975{
976 return global_eventq(event);
977}
978
979wait_result_t
980assert_wait_timeout(
981 event_t event,
982 wait_interrupt_t interruptible,
983 uint32_t interval,
984 uint32_t scale_factor)
55e303ae 985{
986 thread_t thread = current_thread();
987 wait_result_t wresult;
988 uint64_t deadline;
989 spl_t s;
91447636 990
0a7de745 991 if (__improbable(event == NO_EVENT)) {
3e170ce0 992 panic("%s() called with NO_EVENT", __func__);
0a7de745 993 }
fe8ab488 994
995 struct waitq *waitq;
996 waitq = global_eventq(event);
997
998 s = splsched();
3e170ce0 999 waitq_lock(waitq);
1000
1001 clock_interval_to_deadline(interval, scale_factor, &deadline);
3e170ce0 1002
316670eb 1003 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1004 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1005 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1006
1007 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1008 interruptible,
1009 TIMEOUT_URGENCY_SYS_NORMAL,
1010 deadline, TIMEOUT_NO_LEEWAY,
1011 thread);
39236c6e 1012
3e170ce0 1013 waitq_unlock(waitq);
39236c6e 1014 splx(s);
3e170ce0 1015 return wresult;
1016}
1017
1018wait_result_t
1019assert_wait_timeout_with_leeway(
1020 event_t event,
1021 wait_interrupt_t interruptible,
1022 wait_timeout_urgency_t urgency,
1023 uint32_t interval,
1024 uint32_t leeway,
1025 uint32_t scale_factor)
1026{
1027 thread_t thread = current_thread();
1028 wait_result_t wresult;
1029 uint64_t deadline;
1030 uint64_t abstime;
1031 uint64_t slop;
1032 uint64_t now;
1033 spl_t s;
1034
1035 if (__improbable(event == NO_EVENT)) {
3e170ce0 1036 panic("%s() called with NO_EVENT", __func__);
0a7de745 1037 }
3e170ce0 1038
1039 now = mach_absolute_time();
1040 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1041 deadline = now + abstime;
1042
1043 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1044
1045 struct waitq *waitq;
1046 waitq = global_eventq(event);
1047
1048 s = splsched();
3e170ce0 1049 waitq_lock(waitq);
1050
1051 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1052 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1053 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1054
1055 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1056 interruptible,
1057 urgency, deadline, slop,
1058 thread);
91447636 1059
3e170ce0 1060 waitq_unlock(waitq);
91447636 1061 splx(s);
3e170ce0 1062 return wresult;
1063}
1064
1065wait_result_t
91447636 1066assert_wait_deadline(
1067 event_t event,
1068 wait_interrupt_t interruptible,
1069 uint64_t deadline)
55e303ae 1070{
1071 thread_t thread = current_thread();
1072 wait_result_t wresult;
1073 spl_t s;
55e303ae 1074
0a7de745 1075 if (__improbable(event == NO_EVENT)) {
3e170ce0 1076 panic("%s() called with NO_EVENT", __func__);
0a7de745 1077 }
1078
1079 struct waitq *waitq;
1080 waitq = global_eventq(event);
1081
1082 s = splsched();
3e170ce0 1083 waitq_lock(waitq);
55e303ae 1084
316670eb 1085 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1086 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1087 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
39236c6e 1088
3e170ce0 1089 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1090 interruptible,
1091 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1092 TIMEOUT_NO_LEEWAY, thread);
3e170ce0 1093 waitq_unlock(waitq);
39236c6e 1094 splx(s);
3e170ce0 1095 return wresult;
1096}
1097
1098wait_result_t
1099assert_wait_deadline_with_leeway(
1100 event_t event,
1101 wait_interrupt_t interruptible,
1102 wait_timeout_urgency_t urgency,
1103 uint64_t deadline,
1104 uint64_t leeway)
39236c6e 1105{
1106 thread_t thread = current_thread();
1107 wait_result_t wresult;
1108 spl_t s;
39236c6e 1109
0a7de745 1110 if (__improbable(event == NO_EVENT)) {
3e170ce0 1111 panic("%s() called with NO_EVENT", __func__);
0a7de745 1112 }
fe8ab488 1113
1114 struct waitq *waitq;
1115 waitq = global_eventq(event);
1116
1117 s = splsched();
3e170ce0 1118 waitq_lock(waitq);
1119
1120 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1121 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE,
1122 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
39236c6e 1123
3e170ce0 1124 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1125 interruptible,
1126 urgency, deadline, leeway,
1127 thread);
3e170ce0 1128 waitq_unlock(waitq);
55e303ae 1129 splx(s);
3e170ce0 1130 return wresult;
55e303ae 1131}
9bccf70c 1132
1133/*
1134 * thread_isoncpu:
1135 *
1136 * Return TRUE if a thread is running on a processor such that an AST
1137 * is needed to pull it out of userspace execution, or if executing in
1138 * the kernel, bring to a context switch boundary that would cause
1139 * thread state to be serialized in the thread PCB.
0a7de745 1140 *
39236c6e 1141 * Thread locked, returns the same way. While locked, fields
fe8ab488 1142 * like "state" cannot change. "runq" can change only from set to unset.
1143 */
1144static inline boolean_t
1145thread_isoncpu(thread_t thread)
1146{
1147 /* Not running or runnable */
1148 if (!(thread->state & TH_RUN)) {
1149 return FALSE;
1150 }
1151
1152 /* Waiting on a runqueue, not currently running */
fe8ab488 1153 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1154 if (thread->runq != PROCESSOR_NULL) {
1155 return FALSE;
1156 }
39236c6e 1157
1158 /*
1159 * Thread does not have a stack yet
1160 * It could be on the stack alloc queue or preparing to be invoked
1161 */
1162 if (!thread->kernel_stack) {
1163 return FALSE;
1164 }
3e170ce0 1165
1166 /*
1167 * Thread must be running on a processor, or
1168 * about to run, or just did run. In all these
1169 * cases, an AST to the processor is needed
1170 * to guarantee that the thread is kicked out
1171 * of userspace and the processor has
1172 * context switched (and saved register state).
1173 */
0a7de745 1174 return TRUE;
1175}
1176
1c79356b 1177/*
91447636 1178 * thread_stop:
1c79356b 1179 *
91447636 1180 * Force a preemption point for a thread and wait
1181 * for it to stop running on a CPU. If a stronger
1182 * guarantee is requested, wait until no longer
1183 * runnable. Arbitrates access among
91447636 1184 * multiple stop requests. (released by unstop)
1c79356b 1185 *
1186 * The thread must enter a wait state and stop via a
1187 * separate means.
1c79356b 1188 *
91447636 1189 * Returns FALSE if interrupted.
1190 */
1191boolean_t
1192thread_stop(
1193 thread_t thread,
1194 boolean_t until_not_runnable)
1c79356b 1195{
1196 wait_result_t wresult;
1197 spl_t s = splsched();
1198 boolean_t oncpu;
1c79356b 1199
1c79356b 1200 wake_lock(thread);
2d21ac55 1201 thread_lock(thread);
1202
1203 while (thread->state & TH_SUSP) {
1204 thread->wake_active = TRUE;
1205 thread_unlock(thread);
1206
91447636 1207 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1208 wake_unlock(thread);
1209 splx(s);
1210
0a7de745 1211 if (wresult == THREAD_WAITING) {
91447636 1212 wresult = thread_block(THREAD_CONTINUE_NULL);
0a7de745 1213 }
9bccf70c 1214
1215 if (wresult != THREAD_AWAKENED) {
1216 return FALSE;
1217 }
1218
1219 s = splsched();
1220 wake_lock(thread);
2d21ac55 1221 thread_lock(thread);
1c79356b 1222 }
9bccf70c 1223
1c79356b 1224 thread->state |= TH_SUSP;
1c79356b 1225
39236c6e 1226 while ((oncpu = thread_isoncpu(thread)) ||
1227 (until_not_runnable && (thread->state & TH_RUN))) {
1228 processor_t processor;
1229
1230 if (oncpu) {
1231 assert(thread->state & TH_RUN);
1232 processor = thread->chosen_processor;
9bccf70c 1233 cause_ast_check(processor);
39236c6e 1234 }
1235
1236 thread->wake_active = TRUE;
1237 thread_unlock(thread);
1238
91447636 1239 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1240 wake_unlock(thread);
1241 splx(s);
1242
0a7de745 1243 if (wresult == THREAD_WAITING) {
91447636 1244 wresult = thread_block(THREAD_CONTINUE_NULL);
0a7de745 1245 }
9bccf70c 1246
91447636 1247 if (wresult != THREAD_AWAKENED) {
9bccf70c 1248 thread_unstop(thread);
0a7de745 1249 return FALSE;
1250 }
1251
1252 s = splsched();
1253 wake_lock(thread);
1254 thread_lock(thread);
1255 }
1256
1257 thread_unlock(thread);
1258 wake_unlock(thread);
1259 splx(s);
0a7de745 1260
1261 /*
1262 * We return with the thread unlocked. To prevent it from
1263 * transitioning to a runnable state (or from TH_RUN to
1264 * being on the CPU), the caller must ensure the thread
1265 * is stopped via an external means (such as an AST)
1266 */
1c79356b 1267
0a7de745 1268 return TRUE;
1269}
1270
1271/*
1272 * thread_unstop:
1273 *
1274 * Release a previous stop request and set
1275 * the thread running if appropriate.
1276 *
1277 * Use only after a successful stop operation.
1278 */
1279void
1280thread_unstop(
0a7de745 1281 thread_t thread)
1c79356b 1282{
0a7de745 1283 spl_t s = splsched();
1c79356b 1284
1285 wake_lock(thread);
1286 thread_lock(thread);
1287
0a7de745 1288 assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP);
55e303ae 1289
1290 if (thread->state & TH_SUSP) {
1291 thread->state &= ~TH_SUSP;
1292
1293 if (thread->wake_active) {
1294 thread->wake_active = FALSE;
1295 thread_unlock(thread);
1296
1297 thread_wakeup(&thread->wake_active);
1298 wake_unlock(thread);
1299 splx(s);
1300
1301 return;
1302 }
1303 }
1304
1305 thread_unlock(thread);
1306 wake_unlock(thread);
1307 splx(s);
1308}
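/*
 * Editor's sketch (illustrative, not part of the original file): the
 * expected pairing of thread_stop() and thread_unstop().  "target" is a
 * hypothetical thread reference already held by the caller.
 */
#if 0 /* example */
static void
example_inspect_stopped(thread_t target)
{
	if (!thread_stop(target, FALSE)) {
		return;			/* interrupted; no stop to release */
	}

	/*
	 * target was not running on a CPU when thread_stop() returned; see
	 * the caveat in thread_stop() about keeping it off-CPU via an
	 * external means (such as an AST) while it is inspected.
	 */

	thread_unstop(target);		/* release the stop request */
}
#endif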
1309
1310/*
1311 * thread_wait:
1312 *
1313 * Wait for a thread to stop running. (non-interruptible)
1314 *
1c79356b 1315 */
91447636 1316void
1c79356b 1317thread_wait(
1318 thread_t thread,
1319 boolean_t until_not_runnable)
1c79356b 1320{
1321 wait_result_t wresult;
1322 boolean_t oncpu;
1323 processor_t processor;
1324 spl_t s = splsched();
1c79356b 1325
1c79356b 1326 wake_lock(thread);
9bccf70c 1327 thread_lock(thread);
1c79356b 1328
1329 /*
1330 * Wait until not running on a CPU. If stronger requirement
1331 * desired, wait until not runnable. Assumption: if thread is
1332 * on CPU, then TH_RUN is set, so we're not waiting in any case
0a7de745 1333 * where the original, pure "TH_RUN" check would have let us
1334 * finish.
1335 */
39236c6e 1336 while ((oncpu = thread_isoncpu(thread)) ||
0a7de745 1337 (until_not_runnable && (thread->state & TH_RUN))) {
1338 if (oncpu) {
1339 assert(thread->state & TH_RUN);
39236c6e 1340 processor = thread->chosen_processor;
9bccf70c 1341 cause_ast_check(processor);
316670eb 1342 }
1343
1344 thread->wake_active = TRUE;
1345 thread_unlock(thread);
1346
91447636 1347 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1348 wake_unlock(thread);
1349 splx(s);
1350
0a7de745 1351 if (wresult == THREAD_WAITING) {
91447636 1352 thread_block(THREAD_CONTINUE_NULL);
0a7de745 1353 }
1354
1355 s = splsched();
1356 wake_lock(thread);
9bccf70c 1357 thread_lock(thread);
1c79356b 1358 }
0b4e3aa0 1359
9bccf70c 1360 thread_unlock(thread);
1361 wake_unlock(thread);
1362 splx(s);
1363}
1364
1365/*
1366 * Routine: clear_wait_internal
1367 *
1368 * Clear the wait condition for the specified thread.
1369 * Start the thread executing if that is appropriate.
1370 * Arguments:
1371 * thread thread to awaken
1372 * result Wakeup result the thread should see
1373 * Conditions:
1374 * At splsched
1375 * the thread is locked.
1376 * Returns:
1377 * KERN_SUCCESS thread was rousted out a wait
1378 * KERN_FAILURE thread was waiting but could not be rousted
1379 * KERN_NOT_WAITING thread was not waiting
1c79356b 1380 */
9bccf70c 1381__private_extern__ kern_return_t
1c79356b 1382clear_wait_internal(
1383 thread_t thread,
1384 wait_result_t wresult)
1c79356b 1385{
0a7de745 1386 uint32_t i = LockTimeOutUsec;
3e170ce0 1387 struct waitq *waitq = thread->waitq;
0a7de745 1388
9bccf70c 1389 do {
1390 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) {
1391 return KERN_FAILURE;
1392 }
9bccf70c 1393
3e170ce0 1394 if (waitq != NULL) {
39037602 1395 if (!waitq_pull_thread_locked(waitq, thread)) {
1396 thread_unlock(thread);
1397 delay(1);
0a7de745 1398 if (i > 0 && !machine_timeout_suspended()) {
39037602 1399 i--;
0a7de745 1400 }
9bccf70c 1401 thread_lock(thread);
0a7de745 1402 if (waitq != thread->waitq) {
3e170ce0 1403 return KERN_NOT_WAITING;
0a7de745 1404 }
1405 continue;
1406 }
1c79356b 1407 }
55e303ae 1408
3e170ce0 1409 /* TODO: Can we instead assert TH_TERMINATE is not set? */
0a7de745 1410 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
f427ee49 1411 return thread_go(thread, wresult, WQ_OPTION_NONE);
1412 } else {
1413 return KERN_NOT_WAITING;
1414 }
39037602 1415 } while (i > 0);
55e303ae 1416
2d21ac55 1417 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
0a7de745 1418 thread, waitq, cpu_number());
55e303ae 1419
0a7de745 1420 return KERN_FAILURE;
1421}
1422
1423
1424/*
1425 * clear_wait:
1426 *
1427 * Clear the wait condition for the specified thread. Start the thread
1428 * executing if that is appropriate.
1429 *
1430 * parameters:
1431 * thread thread to awaken
1432 * result Wakeup result the thread should see
1433 */
9bccf70c 1434kern_return_t
1c79356b 1435clear_wait(
1436 thread_t thread,
1437 wait_result_t result)
1c79356b 1438{
9bccf70c 1439 kern_return_t ret;
0a7de745 1440 spl_t s;
1441
1442 s = splsched();
1443 thread_lock(thread);
9bccf70c 1444 ret = clear_wait_internal(thread, result);
1445 thread_unlock(thread);
1446 splx(s);
9bccf70c 1447 return ret;
1448}
1449
1450
1451/*
1452 * thread_wakeup_prim:
1453 *
1454 * Common routine for thread_wakeup, thread_wakeup_with_result,
1455 * and thread_wakeup_one.
1456 *
1457 */
9bccf70c 1458kern_return_t
1c79356b 1459thread_wakeup_prim(
1460 event_t event,
1461 boolean_t one_thread,
1462 wait_result_t result)
6d2010ae 1463{
0a7de745 1464 if (__improbable(event == NO_EVENT)) {
39037602 1465 panic("%s() called with NO_EVENT", __func__);
0a7de745 1466 }
1467
1468 struct waitq *wq = global_eventq(event);
1469
0a7de745 1470 if (one_thread) {
39037602 1471 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
0a7de745 1472 } else {
39037602 1473 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
0a7de745 1474 }
1475}
1476
1477/*
1478 * Wakeup a specified thread if and only if it's waiting for this event
1479 */
1480kern_return_t
1481thread_wakeup_thread(
1482 event_t event,
1483 thread_t thread)
39037602 1484{
0a7de745 1485 if (__improbable(event == NO_EVENT)) {
39037602 1486 panic("%s() called with NO_EVENT", __func__);
0a7de745 1487 }
39037602 1488
0a7de745 1489 if (__improbable(thread == THREAD_NULL)) {
5ba3f43e 1490 panic("%s() called with THREAD_NULL", __func__);
0a7de745 1491 }
5ba3f43e 1492
1493 struct waitq *wq = global_eventq(event);
1494
1495 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1496}
6d2010ae 1497
1498/*
1499 * Wakeup a thread waiting on an event and promote it to a priority.
1500 *
1501 * Requires woken thread to un-promote itself when done.
1502 */
6d2010ae 1503kern_return_t
39037602 1504thread_wakeup_one_with_pri(
1505 event_t event,
1506 int priority)
1c79356b 1507{
0a7de745 1508 if (__improbable(event == NO_EVENT)) {
3e170ce0 1509 panic("%s() called with NO_EVENT", __func__);
0a7de745 1510 }
3e170ce0 1511
39037602 1512 struct waitq *wq = global_eventq(event);
1c79356b 1513
1514 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1515}
fe8ab488 1516
1517/*
1518 * Wakeup a thread waiting on an event,
1519 * promote it to a priority,
1520 * and return a reference to the woken thread.
1521 *
1522 * Requires woken thread to un-promote itself when done.
1523 */
1524thread_t
1525thread_wakeup_identify(event_t event,
0a7de745 1526 int priority)
39037602 1527{
0a7de745 1528 if (__improbable(event == NO_EVENT)) {
39037602 1529 panic("%s() called with NO_EVENT", __func__);
0a7de745 1530 }
1531
1532 struct waitq *wq = global_eventq(event);
1533
1534 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1535}
1536
1537/*
1538 * thread_bind:
1539 *
2d21ac55 1540 * Force the current thread to execute on the specified processor.
fe8ab488 1541 * Takes effect after the next thread_block().
1c79356b 1542 *
1543 * Returns the previous binding. PROCESSOR_NULL means
1544 * not bound.
1545 *
1546 * XXX - DO NOT export this to users - XXX
1c79356b 1547 */
55e303ae 1548processor_t
1c79356b 1549thread_bind(
0a7de745 1550 processor_t processor)
1c79356b 1551{
1552 thread_t self = current_thread();
1553 processor_t prev;
1554 spl_t s;
1555
1556 s = splsched();
2d21ac55 1557 thread_lock(self);
55e303ae 1558
3e170ce0 1559 prev = thread_bind_internal(self, processor);
55e303ae 1560
2d21ac55 1561 thread_unlock(self);
1c79356b 1562 splx(s);
55e303ae 1563
0a7de745 1564 return prev;
1565}
1566
1567/*
1568 * thread_bind_internal:
1569 *
1570 * If the specified thread is not the current thread, and it is currently
1571 * running on another CPU, a remote AST must be sent to that CPU to cause
1572 * the thread to migrate to its bound processor. Otherwise, the migration
1573 * will occur at the next quantum expiration or blocking point.
1574 *
1575 * When the thread is the current thread, an explicit thread_block() should
1576 * be used to force the current processor to context switch away and
1577 * let the thread migrate to the bound processor.
1578 *
1579 * Thread must be locked, and at splsched.
1580 */
1581
1582static processor_t
1583thread_bind_internal(
1584 thread_t thread,
1585 processor_t processor)
3e170ce0 1586{
0a7de745 1587 processor_t prev;
1588
1589 /* <rdar://problem/15102234> */
1590 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1591 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1592 assert(thread->runq == PROCESSOR_NULL);
1593
1594 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1595
1596 prev = thread->bound_processor;
1597 thread->bound_processor = processor;
1598
0a7de745 1599 return prev;
1600}
1601
1602/*
1603 * thread_vm_bind_group_add:
1604 *
1605 * The "VM bind group" is a special mechanism to mark a collection
1606 * of threads from the VM subsystem that, in general, should be scheduled
1607 * with only one CPU of parallelism. To accomplish this, we initially
1608 * bind all the threads to the master processor, which has the effect
1609 * that only one of the threads in the group can execute at once, including
1610 * preempting threads in the group that are a lower priority. Future
1611 * mechanisms may use more dynamic mechanisms to prevent the collection
1612 * of VM threads from using more CPU time than desired.
1613 *
1614 * The current implementation can result in priority inversions where
1615 * compute-bound priority 95 or realtime threads that happen to have
1616 * landed on the master processor prevent the VM threads from running.
1617 * When this situation is detected, we unbind the threads for one
1618 * scheduler tick to allow the scheduler to run the threads an
1619 * additional CPUs, before restoring the binding (assuming high latency
1620 * is no longer a problem).
1621 */
1622
1623/*
1624 * The current max is provisioned for:
1625 * vm_compressor_swap_trigger_thread (92)
1626 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1627 * vm_pageout_continue (92)
1628 * memorystatus_thread (95)
1629 */
1630#define MAX_VM_BIND_GROUP_COUNT (5)
0a7de745 1631decl_simple_lock_data(static, sched_vm_group_list_lock);
1632static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1633static int sched_vm_group_thread_count;
1634static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1635
1636void
1637thread_vm_bind_group_add(void)
1638{
1639 thread_t self = current_thread();
1640
1641 thread_reference_internal(self);
1642 self->options |= TH_OPT_SCHED_VM_GROUP;
1643
0a7de745 1644 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1645 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1646 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1647 simple_unlock(&sched_vm_group_list_lock);
1648
1649 thread_bind(master_processor);
1650
1651 /* Switch to bound processor if not already there */
1652 thread_block(THREAD_CONTINUE_NULL);
1653}
1654
1655static void
1656sched_vm_group_maintenance(void)
1657{
1658 uint64_t ctime = mach_absolute_time();
1659 uint64_t longtime = ctime - sched_tick_interval;
1660 int i;
1661 spl_t s;
1662 boolean_t high_latency_observed = FALSE;
1663 boolean_t runnable_and_not_on_runq_observed = FALSE;
1664 boolean_t bind_target_changed = FALSE;
1665 processor_t bind_target = PROCESSOR_NULL;
1666
1667 /* Make sure nobody attempts to add new threads while we are enumerating them */
0a7de745 1668 simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL);
1669
1670 s = splsched();
1671
0a7de745 1672 for (i = 0; i < sched_vm_group_thread_count; i++) {
1673 thread_t thread = sched_vm_group_thread_list[i];
1674 assert(thread != THREAD_NULL);
1675 thread_lock(thread);
0a7de745 1676 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) {
3e170ce0
A
1677 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1678 high_latency_observed = TRUE;
1679 } else if (thread->runq == PROCESSOR_NULL) {
1680 /* There are some cases where a thread may be transitioning that also fall into this case */
1681 runnable_and_not_on_runq_observed = TRUE;
1682 }
1683 }
1684 thread_unlock(thread);
1685
1686 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1687 /* All the things we are looking for are true, stop looking */
1688 break;
1689 }
1690 }
1691
1692 splx(s);
1693
1694 if (sched_vm_group_temporarily_unbound) {
1695 /* If we turned off binding, make sure everything is OK before rebinding */
1696 if (!high_latency_observed) {
1697 /* rebind */
1698 bind_target_changed = TRUE;
1699 bind_target = master_processor;
1700 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1701 }
1702 } else {
1703 /*
1704 * Check if we're in a bad state, which is defined by high
1705 * latency with no core currently executing a thread. If a
1706 * single thread is making progress on a CPU, that means the
1707 * binding concept to reduce parallelism is working as
1708 * designed.
1709 */
1710 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1711 /* unbind */
1712 bind_target_changed = TRUE;
1713 bind_target = PROCESSOR_NULL;
1714 sched_vm_group_temporarily_unbound = TRUE;
1715 }
1716 }
1717
1718 if (bind_target_changed) {
1719 s = splsched();
0a7de745 1720 for (i = 0; i < sched_vm_group_thread_count; i++) {
3e170ce0
A
1721 thread_t thread = sched_vm_group_thread_list[i];
1722 boolean_t removed;
1723 assert(thread != THREAD_NULL);
1724
1725 thread_lock(thread);
1726 removed = thread_run_queue_remove(thread);
1727 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1728 thread_bind_internal(thread, bind_target);
1729 } else {
1730 /*
1731 * Thread was in the middle of being context-switched-to,
1732 * or was in the process of blocking. To avoid switching the bind
1733 * state out mid-flight, defer the change if possible.
1734 */
1735 if (bind_target == PROCESSOR_NULL) {
1736 thread_bind_internal(thread, bind_target);
1737 } else {
1738 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1739 }
1740 }
1741
1742 if (removed) {
1743 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1744 }
1745 thread_unlock(thread);
1746 }
1747 splx(s);
1748 }
1749
1750 simple_unlock(&sched_vm_group_list_lock);
1751}
1752
fe8ab488
A
1753/* Invoked prior to idle entry to determine if, on SMT-capable processors, an SMT
1754 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1755 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1756 * - IPI thrash if this core does not remain idle following the load balancing ASTs
1757 * - Idle "thrash", when IPI issue is followed by idle entry/core power down
1758 *   followed by a wakeup shortly thereafter.
1759 */
1760
fe8ab488
A
1761#if (DEVELOPMENT || DEBUG)
1762int sched_smt_balance = 1;
1763#endif
1764
3e170ce0 1765/* Invoked with pset locked, returns with pset unlocked */
5ba3f43e 1766void
0a7de745
A
1767sched_SMT_balance(processor_t cprocessor, processor_set_t cpset)
1768{
fe8ab488
A
1769 processor_t ast_processor = NULL;
1770
1771#if (DEVELOPMENT || DEBUG)
0a7de745 1772 if (__improbable(sched_smt_balance == 0)) {
fe8ab488 1773 goto smt_balance_exit;
0a7de745 1774 }
fe8ab488 1775#endif
0a7de745 1776
fe8ab488 1777 assert(cprocessor == current_processor());
0a7de745 1778 if (cprocessor->is_SMT == FALSE) {
fe8ab488 1779 goto smt_balance_exit;
0a7de745 1780 }
fe8ab488
A
1781
1782 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1783
1784 /* Determine if both this processor and its sibling are idle,
1785 * indicating an SMT rebalancing opportunity.
1786 */
0a7de745 1787 if (sib_processor->state != PROCESSOR_IDLE) {
fe8ab488 1788 goto smt_balance_exit;
0a7de745 1789 }
fe8ab488
A
1790
1791 processor_t sprocessor;
1792
5ba3f43e 1793 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
d9a64523 1794 uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] &
0a7de745 1795 ~cpset->primary_map);
d9a64523
A
1796 for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) {
1797 sprocessor = processor_array[cpuid];
1798 if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
5ba3f43e 1799 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
d9a64523
A
1800 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1801 if (ipi_type != SCHED_IPI_NONE) {
1802 assert(sprocessor != cprocessor);
1803 ast_processor = sprocessor;
1804 break;
1805 }
fe8ab488 1806 }
fe8ab488
A
1807 }
1808
1809smt_balance_exit:
1810 pset_unlock(cpset);
1811
1812 if (ast_processor) {
1813 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
5ba3f43e 1814 sched_ipi_perform(ast_processor, ipi_type);
fe8ab488
A
1815 }
1816}
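/*
 * Illustrative worked example (not from the original source): with a
 * 4-CPU pset where primary_map == 0b0101 (CPUs 0 and 2 are primaries)
 * and cpu_state_map[PROCESSOR_RUNNING] == 0b1110, the running-secondary
 * map is 0b1110 & ~0b0101 == 0b1010 (CPUs 1 and 3).  For each such
 * secondary that is running below BASEPRI_RTQUEUES and whose primary is
 * also PROCESSOR_RUNNING, sched_SMT_balance() asks sched_ipi_action()
 * for a rebalance IPI and sends it to the first candidate found.
 */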
f427ee49
A
1817
1818static cpumap_t
1819pset_available_cpumap(processor_set_t pset)
5ba3f43e 1820{
f427ee49
A
1821 return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING] | pset->cpu_state_map[PROCESSOR_RUNNING]) &
1822 pset->recommended_bitmask;
1823}
1824
1825static cpumap_t
1826pset_available_but_not_running_cpumap(processor_set_t pset)
1827{
1828 return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
1829 pset->recommended_bitmask;
1830}
1831
1832bool
1833pset_has_stealable_threads(processor_set_t pset)
1834{
1835 pset_assert_locked(pset);
1836
1837 cpumap_t avail_map = pset_available_but_not_running_cpumap(pset);
1838 /*
1839 * Secondary CPUs never steal, so allow stealing of threads if there are more threads than
1840 * available primary CPUs
1841 */
1842 avail_map &= pset->primary_map;
1843
1844 return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map));
5ba3f43e 1845}
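/*
 * Illustrative worked example (not from the original source): suppose
 * primary_map == 0b0011 and only CPU 1 is IDLE or DISPATCHING and
 * recommended, so avail_map == 0b0010 and bit_count(avail_map) == 1.
 * With pset_runq.count == 2 and rt_runq_count(pset) == 1, both
 * 2 > 0 and (2 + 1) > 1 hold, so pset_has_stealable_threads() returns
 * true: at least one runnable thread will not be picked up locally by
 * an available primary CPU.
 */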
fe8ab488 1846
0a7de745
A
1847/*
1848 * Called with pset locked, on a processor that is committing to run a new thread
1849 * Will transition an idle or dispatching processor to running as it picks up
1850 * the first new thread from the idle thread.
1851 */
1852static void
1853pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread)
1854{
f427ee49
A
1855 pset_assert_locked(pset);
1856
0a7de745
A
1857 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
1858 assert(current_thread() == processor->idle_thread);
1859
1860 /*
1861 * Dispatching processor is now committed to running new_thread,
1862 * so change its state to PROCESSOR_RUNNING.
1863 */
1864 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
1865 } else {
1866 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
1867 }
1868
1869 processor_state_update_from_thread(processor, new_thread);
f427ee49
A
1870
1871 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
1872 bit_set(pset->realtime_map, processor->cpu_id);
1873 } else {
1874 bit_clear(pset->realtime_map, processor->cpu_id);
1875 }
1876
1877 pset_node_t node = pset->node;
1878
1879 if (bit_count(node->pset_map) == 1) {
1880 /* Node has only a single pset, so skip node pset map updates */
1881 return;
1882 }
1883
1884 cpumap_t avail_map = pset_available_cpumap(pset);
1885
1886 if (new_thread->sched_pri >= BASEPRI_RTQUEUES) {
1887 if ((avail_map & pset->realtime_map) == avail_map) {
1888 /* No more non-RT CPUs in this pset */
1889 atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
1890 }
1891 avail_map &= pset->primary_map;
1892 if ((avail_map & pset->realtime_map) == avail_map) {
1893 /* No more non-RT primary CPUs in this pset */
1894 atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
1895 }
1896 } else {
1897 if ((avail_map & pset->realtime_map) != avail_map) {
1898 if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) {
1899 atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed);
1900 }
1901 }
1902 avail_map &= pset->primary_map;
1903 if ((avail_map & pset->realtime_map) != avail_map) {
1904 if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) {
1905 atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed);
1906 }
1907 }
1908 }
0a7de745
A
1909}
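/*
 * Illustrative worked example (not from the original source): consider a
 * node with more than one pset, and a pset with CPUs 0-3 all available
 * (avail_map == 0b1111), primary_map == 0b0101, and realtime_map ==
 * 0b0111 after CPU 1 commits to a realtime thread above.  Since
 * avail_map & realtime_map == 0b0111 != avail_map (CPU 3 is still
 * non-RT), the pset keeps its bit in pset_non_rt_map.  Restricted to
 * primaries, avail_map becomes 0b0101 and 0b0101 & 0b0111 == 0b0101,
 * so every available primary is running realtime and the pset's bit is
 * cleared from pset_non_rt_primary_map.
 */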
1910
f427ee49 1911static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries);
a39ff7e2 1912static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
f427ee49 1913#if defined(__x86_64__)
0a7de745 1914static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map);
f427ee49 1915#endif
0a7de745 1916static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor);
f427ee49 1917static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor);
a39ff7e2 1918int sched_allow_rt_smt = 1;
0a7de745 1919int sched_avoid_cpu0 = 1;
a39ff7e2 1920
1c79356b 1921/*
2d21ac55
A
1922 * thread_select:
1923 *
1924 * Select a new thread for the current processor to execute.
55e303ae
A
1925 *
1926 * May select the current thread, which must be locked.
1c79356b 1927 */
2d21ac55 1928static thread_t
5ba3f43e 1929thread_select(thread_t thread,
0a7de745
A
1930 processor_t processor,
1931 ast_t *reason)
1c79356b 1932{
0a7de745
A
1933 processor_set_t pset = processor->processor_set;
1934 thread_t new_thread = THREAD_NULL;
1c79356b 1935
6d2010ae 1936 assert(processor == current_processor());
0a7de745 1937 assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
6d2010ae 1938
2d21ac55
A
1939 do {
1940 /*
1941 * Update the priority.
1942 */
0a7de745 1943 if (SCHED(can_update_priority)(thread)) {
6d2010ae 1944 SCHED(update_priority)(thread);
0a7de745 1945 }
1c79356b 1946
2d21ac55
A
1947 pset_lock(pset);
1948
0a7de745
A
1949 processor_state_update_from_thread(processor, thread);
1950
1951restart:
1952 /* Acknowledge any pending IPIs here with pset lock held */
1953 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
1954 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
1955
1956#if defined(CONFIG_SCHED_DEFERRED_AST)
1957 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
1958#endif
1959
1960 bool secondary_can_only_run_realtime_thread = false;
1961
fe8ab488 1962 assert(processor->state != PROCESSOR_OFF_LINE);
6d2010ae 1963
3e170ce0
A
1964 if (!processor->is_recommended) {
1965 /*
1966 * The performance controller has provided a hint to not dispatch more threads,
1967 * unless they are bound to us (and thus we are the only option).
1968 */
1969 if (!SCHED(processor_bound_count)(processor)) {
1970 goto idle;
1971 }
1972 } else if (processor->processor_primary != processor) {
39236c6e
A
1973 /*
1974 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1975 * we should look for work only under the same conditions that choose_processor()
1976 * would have assigned work, which is when all primary processors have been assigned work.
1977 *
1978 * An exception is that bound threads are dispatched to a processor without going through
1979 * choose_processor(), so in those cases we should continue trying to dequeue work.
1980 */
a39ff7e2 1981 if (!SCHED(processor_bound_count)(processor)) {
d9a64523 1982 if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) {
a39ff7e2
A
1983 goto idle;
1984 }
0a7de745
A
1985
1986 /*
1987 * TODO: What if a secondary core beat an idle primary to waking up from an IPI?
1988 * Should it dequeue immediately, or spin waiting for the primary to wake up?
1989 */
1990
a39ff7e2
A
1991 /* There are no idle primaries */
1992
1993 if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) {
1994 bool secondary_can_run_realtime_thread = sched_allow_rt_smt && rt_runq_count(pset) && all_available_primaries_are_running_realtime_threads(pset);
1995 if (!secondary_can_run_realtime_thread) {
1996 goto idle;
1997 }
0a7de745 1998 secondary_can_only_run_realtime_thread = true;
a39ff7e2 1999 }
39236c6e
A
2000 }
2001 }
2002
2d21ac55
A
2003 /*
2004 * Test to see if the current thread should continue
3e170ce0 2005 * to run on this processor. Must not be attempting to wait, and not
2d21ac55 2006 * bound to a different processor, nor be in the wrong
3e170ce0
A
2007 * processor set, nor be forced to context switch by TH_SUSP.
2008 *
2009 * Note that there are never any RT threads in the regular runqueue.
2010 *
2011 * This code is extremely tricky.
2d21ac55 2012 */
3e170ce0 2013
5ba3f43e 2014 /* i.e. not waiting, not TH_SUSP'ed */
0a7de745 2015 bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN);
5ba3f43e
A
2016
2017 /*
2018 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
2019 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
0a7de745
A
2020 * <rdar://problem/47907700>
2021 *
2022 * A yielding thread shouldn't be forced to context switch.
5ba3f43e 2023 */
5ba3f43e 2024
0a7de745
A
2025 bool is_yielding = (*reason & AST_YIELD) == AST_YIELD;
2026
2027 bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor;
5ba3f43e 2028
0a7de745 2029 bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset;
5ba3f43e 2030
0a7de745
A
2031 bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor;
2032
2033 bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread);
5ba3f43e
A
2034
2035 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
3e170ce0 2036 /*
5ba3f43e
A
2037 * This thread is eligible to keep running on this processor.
2038 *
3e170ce0
A
2039 * RT threads with un-expired quantum stay on processor,
2040 * unless there's a valid RT thread with an earlier deadline.
2041 */
2042 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
5ba3f43e 2043 if (rt_runq_count(pset) > 0) {
f427ee49 2044 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
0a7de745 2045
f427ee49
A
2046 if (next_rt->realtime.deadline < processor->deadline &&
2047 (next_rt->bound_processor == PROCESSOR_NULL ||
2048 next_rt->bound_processor == processor)) {
2049 /* The next RT thread is better, so pick it off the runqueue. */
2050 goto pick_new_rt_thread;
55e303ae
A
2051 }
2052 }
2d21ac55 2053
3e170ce0 2054 /* This is still the best RT thread to run. */
2d21ac55
A
2055 processor->deadline = thread->realtime.deadline;
2056
f427ee49 2057 sched_update_pset_load_average(pset, 0);
a39ff7e2
A
2058
2059 processor_t next_rt_processor = PROCESSOR_NULL;
2060 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
2061
f427ee49
A
2062 if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) {
2063 next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true);
a39ff7e2 2064 if (next_rt_processor) {
f427ee49
A
2065 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
2066 (uintptr_t)0, (uintptr_t)-4, next_rt_processor->cpu_id, next_rt_processor->state, 0);
0a7de745
A
2067 if (next_rt_processor->state == PROCESSOR_IDLE) {
2068 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
2069 }
a39ff7e2
A
2070 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
2071 }
2072 }
2d21ac55
A
2073 pset_unlock(pset);
2074
a39ff7e2
A
2075 if (next_rt_processor) {
2076 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
2077 }
2078
0a7de745 2079 return thread;
55e303ae
A
2080 }
2081
5ba3f43e 2082 if ((rt_runq_count(pset) == 0) &&
3e170ce0 2083 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
fe8ab488 2084 /* This thread is still the highest priority runnable (non-idle) thread */
2d21ac55 2085 processor->deadline = UINT64_MAX;
55e303ae 2086
f427ee49 2087 sched_update_pset_load_average(pset, 0);
2d21ac55 2088 pset_unlock(pset);
55e303ae 2089
0a7de745 2090 return thread;
2d21ac55 2091 }
5ba3f43e
A
2092 } else {
2093 /*
2094 * This processor must context switch.
2095 * If it's due to a rebalance, we should aggressively find this thread a new home.
2096 */
0a7de745
A
2097 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) {
2098 *reason |= AST_REBALANCE;
2099 }
2d21ac55
A
2100 }
2101
c3c9b80d
A
2102 bool secondary_forced_idle = ((processor->processor_secondary != PROCESSOR_NULL) &&
2103 (thread_no_smt(thread) || (thread->sched_pri >= BASEPRI_RTQUEUES)) &&
2104 (processor->processor_secondary->state == PROCESSOR_IDLE));
2105
3e170ce0 2106 /* OK, so we're not going to run the current thread. Look at the RT queue. */
0a7de745
A
2107 bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor);
2108 if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) {
f427ee49 2109 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
a39ff7e2 2110
f427ee49
A
2111 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
2112 (next_rt->bound_processor == processor)))) {
3e170ce0 2113pick_new_rt_thread:
f427ee49 2114 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
6d2010ae 2115
f427ee49
A
2116 new_thread->runq = PROCESSOR_NULL;
2117 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
2118 rt_runq_count_decr(pset);
6d2010ae 2119
f427ee49 2120 processor->deadline = new_thread->realtime.deadline;
0a7de745 2121
f427ee49 2122 pset_commit_processor_to_new_thread(pset, processor, new_thread);
c910b4d9 2123
f427ee49 2124 sched_update_pset_load_average(pset, 0);
a39ff7e2 2125
f427ee49
A
2126 processor_t ast_processor = PROCESSOR_NULL;
2127 processor_t next_rt_processor = PROCESSOR_NULL;
2128 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2129 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
a39ff7e2 2130
f427ee49
A
2131 if (processor->processor_secondary != NULL) {
2132 processor_t sprocessor = processor->processor_secondary;
2133 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
2134 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2135 ast_processor = sprocessor;
a39ff7e2 2136 }
f427ee49
A
2137 }
2138 if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) {
2139 next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true);
2140 if (next_rt_processor) {
2141 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
2142 (uintptr_t)0, (uintptr_t)-5, next_rt_processor->cpu_id, next_rt_processor->state, 0);
2143 if (next_rt_processor->state == PROCESSOR_IDLE) {
2144 pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING);
a39ff7e2 2145 }
f427ee49 2146 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
a39ff7e2 2147 }
f427ee49
A
2148 }
2149 pset_unlock(pset);
c910b4d9 2150
f427ee49
A
2151 if (ast_processor) {
2152 sched_ipi_perform(ast_processor, ipi_type);
2153 }
a39ff7e2 2154
f427ee49
A
2155 if (next_rt_processor) {
2156 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
a39ff7e2 2157 }
5ba3f43e 2158
f427ee49
A
2159 return new_thread;
2160 }
c910b4d9 2161 }
0a7de745
A
2162 if (secondary_can_only_run_realtime_thread) {
2163 goto idle;
2164 }
2d21ac55 2165
3e170ce0 2166 processor->deadline = UINT64_MAX;
6d2010ae 2167
3e170ce0 2168 /* No RT threads, so let's look at the regular threads. */
5ba3f43e 2169 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
0a7de745 2170 pset_commit_processor_to_new_thread(pset, processor, new_thread);
f427ee49 2171 sched_update_pset_load_average(pset, 0);
0a7de745
A
2172
2173 processor_t ast_processor = PROCESSOR_NULL;
2174 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
2175
2176 processor_t sprocessor = processor->processor_secondary;
2177 if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) {
2178 if (thread_no_smt(new_thread)) {
2179 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
2180 ast_processor = sprocessor;
2181 }
c3c9b80d
A
2182 } else if (secondary_forced_idle && !thread_no_smt(new_thread) && pset_has_stealable_threads(pset)) {
2183 pset_update_processor_state(pset, sprocessor, PROCESSOR_DISPATCHING);
2184 ipi_type = sched_ipi_action(sprocessor, NULL, true, SCHED_IPI_EVENT_PREEMPT);
2185 ast_processor = sprocessor;
0a7de745 2186 }
6d2010ae 2187 pset_unlock(pset);
0a7de745
A
2188
2189 if (ast_processor) {
2190 sched_ipi_perform(ast_processor, ipi_type);
2191 }
2192 return new_thread;
2193 }
2194
2195 if (processor->must_idle) {
2196 processor->must_idle = false;
2197 goto idle;
6d2010ae 2198 }
c910b4d9 2199
f427ee49 2200 if (SCHED(steal_thread_enabled)(pset) && (processor->processor_primary == processor)) {
3e170ce0
A
2201 /*
2202 * No runnable threads, attempt to steal
2203 * from other processors. Returns with pset lock dropped.
2204 */
2d21ac55 2205
3e170ce0 2206 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
0a7de745
A
2207 /*
2208 * Avoid taking the pset_lock unless it is necessary to change state.
2209 * It's safe to read processor->state here, as only the current processor can change state
2210 * from this point (interrupts are disabled and this processor is committed to run new_thread).
2211 */
2212 if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) {
2213 pset_lock(pset);
2214 pset_commit_processor_to_new_thread(pset, processor, new_thread);
2215 pset_unlock(pset);
2216 } else {
2217 assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN));
2218 processor_state_update_from_thread(processor, new_thread);
2219 }
2220
2221 return new_thread;
3e170ce0 2222 }
cf7d32b8 2223
3e170ce0
A
2224 /*
2225 * If other threads have appeared, shortcut
2226 * around again.
2227 */
0a7de745 2228 if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) {
3e170ce0 2229 continue;
0a7de745 2230 }
3e170ce0
A
2231
2232 pset_lock(pset);
0a7de745
A
2233
2234 /* Someone selected this processor while we had dropped the lock */
2235 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
2236 goto restart;
2237 }
3e170ce0 2238 }
55e303ae 2239
0a7de745 2240idle:
1c79356b
A
2241 /*
2242 * Nothing is runnable, so set this processor idle if it
2d21ac55 2243 * was running.
1c79356b 2244 */
0a7de745 2245 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
d9a64523 2246 pset_update_processor_state(pset, processor, PROCESSOR_IDLE);
0a7de745 2247 processor_state_update_idle(processor);
1c79356b 2248 }
1c79356b 2249
fe8ab488 2250 /* Invoked with pset locked, returns with pset unlocked */
5ba3f43e 2251 SCHED(processor_balance)(processor, pset);
2d21ac55 2252
3e170ce0 2253 new_thread = processor->idle_thread;
2d21ac55
A
2254 } while (new_thread == THREAD_NULL);
2255
0a7de745 2256 return new_thread;
2d21ac55
A
2257}
2258
b0d623f7 2259/*
3e170ce0 2260 * thread_invoke
b0d623f7 2261 *
3e170ce0 2262 * Called at splsched with neither thread locked.
b0d623f7 2263 *
3e170ce0 2264 * Perform a context switch and start executing the new thread.
55e303ae 2265 *
3e170ce0
A
2266 * Returns FALSE when the context switch didn't happen.
2267 * The reference to the new thread is still consumed.
39236c6e
A
2268 *
2269 * "self" is what is currently running on the processor,
2270 * "thread" is the new thread to context switch to
2271 * (which may be the same thread in some cases)
2272 */
2d21ac55 2273static boolean_t
1c79356b 2274thread_invoke(
0a7de745
A
2275 thread_t self,
2276 thread_t thread,
2277 ast_t reason)
1c79356b 2278{
39236c6e 2279 if (__improbable(get_preemption_level() != 0)) {
b0d623f7
A
2280 int pl = get_preemption_level();
2281 panic("thread_invoke: preemption_level %d, possible cause: %s",
2282 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
0a7de745 2283 "blocking while holding a spinlock, or within interrupt context"));
b0d623f7 2284 }
0b4e3aa0 2285
3e170ce0
A
2286 thread_continue_t continuation = self->continuation;
2287 void *parameter = self->parameter;
2288 processor_t processor;
2289
2290 uint64_t ctime = mach_absolute_time();
2291
2292#ifdef CONFIG_MACH_APPROXIMATE_TIME
2293 commpage_update_mach_approximate_time(ctime);
2294#endif
2295
2296#if defined(CONFIG_SCHED_TIMESHARE_CORE)
f427ee49
A
2297 if (!((thread->state & TH_IDLE) != 0 ||
2298 ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) {
5ba3f43e 2299 sched_timeshare_consider_maintenance(ctime);
0a7de745 2300 }
3e170ce0
A
2301#endif
2302
5ba3f43e
A
2303#if MONOTONIC
2304 mt_sched_update(self);
2305#endif /* MONOTONIC */
2306
39037602 2307 assert_thread_magic(self);
2d21ac55 2308 assert(self == current_thread());
fe8ab488 2309 assert(self->runq == PROCESSOR_NULL);
0a7de745 2310 assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN);
91447636 2311
2d21ac55 2312 thread_lock(thread);
1c79356b 2313
39037602 2314 assert_thread_magic(thread);
0a7de745 2315 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
fe8ab488
A
2316 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2317 assert(thread->runq == PROCESSOR_NULL);
1c79356b 2318
316670eb
A
2319 /* Reload precise timing global policy to thread-local policy */
2320 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
3e170ce0 2321
fe8ab488
A
2322 /* Update SFI class based on other factors */
2323 thread->sfi_class = sfi_thread_classify(thread);
0a7de745 2324
5ba3f43e
A
2325 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2326 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
0a7de745
A
2327 /*
2328 * In case a base_pri update happened between the timestamp and
2329 * taking the thread lock
5ba3f43e 2330 */
0a7de745 2331 if (ctime <= thread->last_basepri_change_time) {
5ba3f43e 2332 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
0a7de745 2333 }
fe8ab488 2334
3e170ce0 2335 /* Allow realtime threads to hang onto a stack. */
0a7de745 2336 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) {
2d21ac55 2337 self->reserved_stack = self->kernel_stack;
0a7de745 2338 }
1c79356b 2339
0a7de745 2340 /* Prepare for spin debugging */
5ba3f43e 2341#if INTERRUPT_MASKED_DEBUG
d9a64523 2342 ml_spin_debug_clear(thread);
5ba3f43e
A
2343#endif
2344
91447636 2345 if (continuation != NULL) {
2d21ac55 2346 if (!thread->kernel_stack) {
9bccf70c 2347 /*
2d21ac55 2348 * If we are using a privileged stack,
9bccf70c 2349 * check to see whether we can exchange it with
2d21ac55 2350 * that of the other thread.
9bccf70c 2351 */
0a7de745 2352 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) {
9bccf70c 2353 goto need_stack;
0a7de745 2354 }
1c79356b 2355
91447636
A
2356 /*
2357 * Context switch by performing a stack handoff.
cb323159 2358 * Requires both threads to be parked in a continuation.
91447636 2359 */
2d21ac55
A
2360 continuation = thread->continuation;
2361 parameter = thread->parameter;
1c79356b 2362
9bccf70c 2363 processor = current_processor();
2d21ac55 2364 processor->active_thread = thread;
5ba3f43e
A
2365 processor_state_update_from_thread(processor, thread);
2366
2d21ac55 2367 if (thread->last_processor != processor && thread->last_processor != NULL) {
0a7de745 2368 if (thread->last_processor->processor_set != processor->processor_set) {
2d21ac55 2369 thread->ps_switch++;
0a7de745 2370 }
2d21ac55
A
2371 thread->p_switch++;
2372 }
2373 thread->last_processor = processor;
2374 thread->c_switch++;
2375 ast_context(thread);
3e170ce0 2376
2d21ac55 2377 thread_unlock(thread);
1c79356b 2378
2d21ac55 2379 self->reason = reason;
91447636 2380
39236c6e
A
2381 processor->last_dispatch = ctime;
2382 self->last_run_time = ctime;
d9a64523
A
2383 processor_timer_switch_thread(ctime, &thread->system_timer);
2384 timer_update(&thread->runnable_timer, ctime);
f427ee49 2385 processor->kernel_timer = &thread->system_timer;
316670eb
A
2386
2387 /*
2388 * Since non-precise user/kernel time doesn't update the state timer
2389 * during privilege transitions, synthesize an event now.
2390 */
2391 if (!thread->precise_user_kernel_time) {
f427ee49 2392 timer_update(processor->current_state, ctime);
316670eb 2393 }
d9a64523 2394
316670eb 2395 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2396 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
2397 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
1c79356b 2398
39236c6e 2399 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
0a7de745
A
2400 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2401 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
6d2010ae
A
2402 }
2403
b0d623f7
A
2404 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2405
6d2010ae
A
2406 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2407
d9a64523
A
2408#if KPERF
2409 kperf_off_cpu(self);
2410#endif /* KPERF */
2411
cb323159
A
2412 /*
2413 * This is where we actually switch thread identity,
2414 * and address space if required. However, register
2415 * state is not switched - this routine leaves the
2416 * stack and register state active on the current CPU.
2417 */
6d2010ae
A
2418 TLOG(1, "thread_invoke: calling stack_handoff\n");
2419 stack_handoff(self, thread);
9bccf70c 2420
3e170ce0 2421 /* 'self' is now off core */
d9a64523 2422 assert(thread == current_thread_volatile());
3e170ce0 2423
b0d623f7
A
2424 DTRACE_SCHED(on__cpu);
2425
39037602
A
2426#if KPERF
2427 kperf_on_cpu(thread, continuation, NULL);
2428#endif /* KPERF */
2429
d9a64523
A
2430 thread_dispatch(self, thread);
2431
5ba3f43e 2432#if KASAN
d9a64523
A
2433 /* Old thread's stack has been moved to the new thread, so explicitly
2434 * unpoison it. */
5ba3f43e
A
2435 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2436#endif
2437
2d21ac55 2438 thread->continuation = thread->parameter = NULL;
1c79356b 2439
cb323159
A
2440 boolean_t enable_interrupts = TRUE;
2441
2442 /* idle thread needs to stay interrupts-disabled */
2443 if ((thread->state & TH_IDLE)) {
2444 enable_interrupts = FALSE;
2445 }
2446
2d21ac55 2447 assert(continuation);
cb323159
A
2448 call_continuation(continuation, parameter,
2449 thread->wait_result, enable_interrupts);
9bccf70c 2450 /*NOTREACHED*/
0a7de745 2451 } else if (thread == self) {
9bccf70c 2452 /* same thread but with continuation */
2d21ac55 2453 ast_context(self);
3e170ce0 2454
2d21ac55 2455 thread_unlock(self);
9bccf70c 2456
39037602
A
2457#if KPERF
2458 kperf_on_cpu(thread, continuation, NULL);
2459#endif /* KPERF */
2460
316670eb 2461 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2462 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2463 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
6d2010ae 2464
5ba3f43e 2465#if KASAN
d9a64523
A
2466 /* stack handoff to self - no thread_dispatch(), so clear the stack
2467 * and free the fakestack directly */
2468 kasan_fakestack_drop(self);
2469 kasan_fakestack_gc(self);
5ba3f43e
A
2470 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2471#endif
2472
2d21ac55
A
2473 self->continuation = self->parameter = NULL;
2474
cb323159
A
2475 boolean_t enable_interrupts = TRUE;
2476
2477 /* idle thread needs to stay interrupts-disabled */
2478 if ((self->state & TH_IDLE)) {
2479 enable_interrupts = FALSE;
2480 }
2481
2482 call_continuation(continuation, parameter,
2483 self->wait_result, enable_interrupts);
9bccf70c
A
2484 /*NOTREACHED*/
2485 }
3e170ce0 2486 } else {
9bccf70c 2487 /*
2d21ac55 2488 * Check that the other thread has a stack
9bccf70c 2489 */
2d21ac55 2490 if (!thread->kernel_stack) {
9bccf70c 2491need_stack:
2d21ac55 2492 if (!stack_alloc_try(thread)) {
2d21ac55
A
2493 thread_unlock(thread);
2494 thread_stack_enqueue(thread);
0a7de745 2495 return FALSE;
9bccf70c 2496 }
3e170ce0 2497 } else if (thread == self) {
2d21ac55 2498 ast_context(self);
2d21ac55 2499 thread_unlock(self);
6d2010ae 2500
316670eb 2501 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2502 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2503 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
6d2010ae 2504
0a7de745 2505 return TRUE;
9bccf70c
A
2506 }
2507 }
1c79356b
A
2508
2509 /*
91447636 2510 * Context switch by full context save.
1c79356b 2511 */
9bccf70c 2512 processor = current_processor();
2d21ac55 2513 processor->active_thread = thread;
5ba3f43e 2514 processor_state_update_from_thread(processor, thread);
0a7de745 2515
2d21ac55 2516 if (thread->last_processor != processor && thread->last_processor != NULL) {
0a7de745 2517 if (thread->last_processor->processor_set != processor->processor_set) {
2d21ac55 2518 thread->ps_switch++;
0a7de745 2519 }
2d21ac55
A
2520 thread->p_switch++;
2521 }
2522 thread->last_processor = processor;
2523 thread->c_switch++;
2524 ast_context(thread);
3e170ce0 2525
2d21ac55 2526 thread_unlock(thread);
1c79356b 2527
2d21ac55 2528 self->reason = reason;
1c79356b 2529
39236c6e
A
2530 processor->last_dispatch = ctime;
2531 self->last_run_time = ctime;
d9a64523
A
2532 processor_timer_switch_thread(ctime, &thread->system_timer);
2533 timer_update(&thread->runnable_timer, ctime);
f427ee49 2534 processor->kernel_timer = &thread->system_timer;
91447636 2535
316670eb
A
2536 /*
2537 * Since non-precise user/kernel time doesn't update the state timer
2538 * during privilege transitions, synthesize an event now.
2539 */
2540 if (!thread->precise_user_kernel_time) {
f427ee49 2541 timer_update(processor->current_state, ctime);
316670eb 2542 }
3e170ce0 2543
316670eb 2544 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2545 MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
2546 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
b0d623f7 2547
6d2010ae 2548 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
0a7de745
A
2549 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE,
2550 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
6d2010ae
A
2551 }
2552
b0d623f7 2553 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
1c79356b 2554
6d2010ae
A
2555 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2556
d9a64523
A
2557#if KPERF
2558 kperf_off_cpu(self);
2559#endif /* KPERF */
2560
1c79356b 2561 /*
91447636 2562 * This is where we actually switch register context,
2d21ac55
A
2563 * and address space if required. We will next run
2564 * as a result of a subsequent context switch.
3e170ce0
A
2565 *
2566 * Once registers are switched and the processor is running "thread",
2567 * the stack variables and non-volatile registers will contain whatever
2568 * was there the last time that thread blocked. No local variables should
2569 * be used after this point, except for the special case of "thread", which
2570 * the platform layer returns as the previous thread running on the processor
2571 * via the function call ABI as a return register, and "self", which may have
2572 * been stored on the stack or a non-volatile register, but a stale idea of
2573 * what was on the CPU is newly-accurate because that thread is again
2574 * running on the CPU.
cb323159
A
2575 *
2576 * If one of the threads is using a continuation, thread_continue
2577 * is used to stitch up its context.
2578 *
2579 * If we are invoking a thread which is resuming from a continuation,
2580 * the CPU will invoke thread_continue next.
2581 *
2582 * If the current thread is parking in a continuation, then its state
2583 * won't be saved and the stack will be discarded. When the stack is
2584 * re-allocated, it will be configured to resume from thread_continue.
91447636 2585 */
316670eb 2586 assert(continuation == self->continuation);
2d21ac55 2587 thread = machine_switch_context(self, continuation, thread);
d9a64523 2588 assert(self == current_thread_volatile());
0a7de745 2589 TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
b0d623f7 2590
cb323159
A
2591 assert(continuation == NULL && self->continuation == NULL);
2592
b0d623f7 2593 DTRACE_SCHED(on__cpu);
1c79356b 2594
39037602
A
2595#if KPERF
2596 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2597#endif /* KPERF */
2598
cb323159 2599 /* We have been resumed and are set to run. */
2d21ac55 2600 thread_dispatch(thread, self);
9bccf70c 2601
0a7de745 2602 return TRUE;
1c79356b
A
2603}
2604
3e170ce0
A
2605#if defined(CONFIG_SCHED_DEFERRED_AST)
2606/*
2607 * pset_cancel_deferred_dispatch:
2608 *
2609 * Cancels all ASTs that we can cancel for the given processor set
2610 * if the current processor is running the last runnable thread in the
2611 * system.
2612 *
2613 * This function assumes the current thread is runnable. This must
2614 * be called with the pset unlocked.
2615 */
2616static void
2617pset_cancel_deferred_dispatch(
0a7de745
A
2618 processor_set_t pset,
2619 processor_t processor)
3e170ce0 2620{
0a7de745
A
2621 processor_t active_processor = NULL;
2622 uint32_t sampled_sched_run_count;
3e170ce0
A
2623
2624 pset_lock(pset);
cb323159 2625 sampled_sched_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed);
3e170ce0
A
2626
2627 /*
2628 * If we have emptied the run queue, and our current thread is runnable, we
2629 * should tell any processors that are still DISPATCHING that they will
2630 * probably not have any work to do. In the event that there are no
2631 * pending signals that we can cancel, this is also uninteresting.
2632 *
2633 * In the unlikely event that another thread becomes runnable while we are
2634 * doing this (sched_run_count is atomically updated, not guarded), the
2635 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2636 * in order to dispatch it to a processor in our pset. So, the other
2637 * codepath will wait while we squash all cancelable ASTs, get the pset
2638 * lock, and then dispatch the freshly runnable thread. So this should be
2639 * correct (we won't accidentally have a runnable thread that hasn't been
2640 * dispatched to an idle processor), if not ideal (we may be restarting the
2641 * dispatch process, which could have some overhead).
3e170ce0 2642 */
d9a64523
A
2643
2644 if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) {
2645 uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] &
0a7de745
A
2646 pset->pending_deferred_AST_cpu_mask &
2647 ~pset->pending_AST_URGENT_cpu_mask);
d9a64523
A
2648 for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) {
2649 active_processor = processor_array[cpuid];
3e170ce0
A
2650 /*
2651 * If a processor is DISPATCHING, it could be because of
2652 * a cancelable signal.
2653 *
2654 * IF the processor is not our
2655 * current processor (the current processor should not
2656 * be DISPATCHING, so this is a bit paranoid), AND there
2657 * is a cancelable signal pending on the processor, AND
2658 * there is no non-cancelable signal pending (as there is
2659 * no point trying to backtrack on bringing the processor
2660 * up if a signal we cannot cancel is outstanding), THEN
2661 * it should make sense to roll back the processor state
2662 * to the IDLE state.
2663 *
2664 * If the racy nature of this approach (as the signal
2665 * will be arbitrated by hardware, and can fire as we
2666 * roll back state) results in the core responding
2667 * despite being pushed back to the IDLE state, it
2668 * should be no different than if the core took some
2669 * interrupt while IDLE.
2670 */
d9a64523 2671 if (active_processor != processor) {
3e170ce0
A
2672 /*
2673 * Squash all of the processor state back to some
2674 * reasonable facsimile of PROCESSOR_IDLE.
3e170ce0 2675 */
3e170ce0 2676
5ba3f43e 2677 processor_state_update_idle(active_processor);
3e170ce0 2678 active_processor->deadline = UINT64_MAX;
d9a64523 2679 pset_update_processor_state(pset, active_processor, PROCESSOR_IDLE);
5ba3f43e 2680 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
3e170ce0
A
2681 machine_signal_idle_cancel(active_processor);
2682 }
3e170ce0
A
2683 }
2684 }
2685
2686 pset_unlock(pset);
2687}
2688#else
2689/* We don't support deferred ASTs; everything is candycanes and sunshine. */
2690#endif
2691
5ba3f43e
A
2692static void
2693thread_csw_callout(
0a7de745
A
2694 thread_t old,
2695 thread_t new,
2696 uint64_t timestamp)
5ba3f43e
A
2697{
2698 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2699 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
0a7de745
A
2700 machine_switch_perfcontrol_context(event, timestamp, 0,
2701 same_pri_latency, old, new);
5ba3f43e
A
2702}
2703
2704
1c79356b 2705/*
2d21ac55 2706 * thread_dispatch:
1c79356b 2707 *
2d21ac55
A
2708 * Handle threads at context switch. Re-dispatch other thread
2709 * if still running, otherwise update run state and perform
2710 * special actions. Update quantum for other thread and begin
2711 * the quantum for ourselves.
91447636 2712 *
3e170ce0
A
2713 * "thread" is the old thread that we have switched away from.
2714 * "self" is the new current thread that we have context switched to
39236c6e 2715 *
91447636 2716 * Called at splsched.
cb323159 2717 *
1c79356b
A
2718 */
2719void
2d21ac55 2720thread_dispatch(
0a7de745
A
2721 thread_t thread,
2722 thread_t self)
1c79356b 2723{
0a7de745 2724 processor_t processor = self->last_processor;
f427ee49 2725 bool was_idle = false;
2d21ac55 2726
3e170ce0 2727 assert(processor == current_processor());
d9a64523 2728 assert(self == current_thread_volatile());
3e170ce0
A
2729 assert(thread != self);
2730
2d21ac55 2731 if (thread != THREAD_NULL) {
0a7de745
A
2732 /*
2733 * Do the perfcontrol callout for context switch.
5ba3f43e 2734 * The reason we do this here is:
0a7de745 2735 * - thread_dispatch() is called from various places that are not
5ba3f43e
A
2736 * the direct context switch path, e.g. processor shutdown.
2737 * So adding the callout here covers all those cases.
0a7de745 2738 * - We want this callout as early as possible to be close
5ba3f43e 2739 * to the timestamp taken in thread_invoke()
0a7de745 2740 * - We want to avoid holding the thread lock while doing the
5ba3f43e
A
2741 * callout
2742 * - We do not want to callout if "thread" is NULL.
2743 */
0a7de745 2744 thread_csw_callout(thread, self, processor->last_dispatch);
d9a64523
A
2745
2746#if KASAN
2747 if (thread->continuation != NULL) {
2748 /*
2749 * Thread has a continuation and the normal stack is going away.
2750 * Unpoison the stack and mark all fakestack objects as unused.
2751 */
2752 kasan_fakestack_drop(thread);
2753 if (thread->kernel_stack) {
2754 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2755 }
2756 }
2757
2758 /*
2759 * Free all unused fakestack objects.
2760 */
2761 kasan_fakestack_gc(thread);
2762#endif
2763
91447636 2764 /*
2d21ac55
A
2765 * If blocked at a continuation, discard
2766 * the stack.
91447636 2767 */
0a7de745 2768 if (thread->continuation != NULL && thread->kernel_stack != 0) {
2d21ac55 2769 stack_free(thread);
0a7de745 2770 }
2d21ac55 2771
3e170ce0 2772 if (thread->state & TH_IDLE) {
f427ee49 2773 was_idle = true;
3e170ce0 2774 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2775 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2776 (uintptr_t)thread_tid(thread), 0, thread->state,
2777 sched_run_buckets[TH_BUCKET_RUN], 0);
3e170ce0 2778 } else {
316670eb
A
2779 int64_t consumed;
2780 int64_t remainder = 0;
2781
0a7de745 2782 if (processor->quantum_end > processor->last_dispatch) {
316670eb
A
2783 remainder = processor->quantum_end -
2784 processor->last_dispatch;
0a7de745 2785 }
316670eb 2786
fe8ab488 2787 consumed = thread->quantum_remaining - remainder;
316670eb 2788
39236c6e 2789 if ((thread->reason & AST_LEDGER) == 0) {
316670eb 2790 /*
39236c6e
A
2791 * Bill CPU time to both the task and
2792 * the individual thread.
316670eb 2793 */
5c9f4661 2794 ledger_credit_thread(thread, thread->t_ledger,
0a7de745 2795 task_ledgers.cpu_time, consumed);
5c9f4661 2796 ledger_credit_thread(thread, thread->t_threadledger,
0a7de745 2797 thread_ledgers.cpu_time, consumed);
fe8ab488 2798 if (thread->t_bankledger) {
5c9f4661 2799 ledger_credit_thread(thread, thread->t_bankledger,
0a7de745
A
2800 bank_ledgers.cpu_time,
2801 (consumed - thread->t_deduct_bank_ledger_time));
fe8ab488 2802 }
5c9f4661 2803 thread->t_deduct_bank_ledger_time = 0;
f427ee49
A
2804 if (consumed > 0) {
2805 /*
2806 * This should never be negative, but in traces we are seeing some instances
2807 * of consumed being negative.
2808 * <rdar://problem/57782596> thread_dispatch() thread CPU consumed calculation sometimes results in negative value
2809 */
2810 sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket);
2811 }
39236c6e 2812 }
316670eb 2813
2d21ac55
A
2814 wake_lock(thread);
2815 thread_lock(thread);
9bccf70c 2816
91447636 2817 /*
39037602
A
2818 * Apply a priority floor if the thread holds a kernel resource
2819 * Do this before checking starting_pri to avoid overpenalizing
2820 * repeated rwlock blockers.
2821 */
0a7de745 2822 if (__improbable(thread->rwlock_count != 0)) {
39037602 2823 lck_rw_set_promotion_locked(thread);
0a7de745 2824 }
39037602
A
2825
2826 boolean_t keep_quantum = processor->first_timeslice;
2827
2828 /*
2829 * Treat a thread which has dropped priority since it got on core
2830 * as having expired its quantum.
91447636 2831 */
0a7de745 2832 if (processor->starting_pri > thread->sched_pri) {
39037602 2833 keep_quantum = FALSE;
0a7de745 2834 }
39037602
A
2835
2836 /* Compute remainder of current quantum. */
2837 if (keep_quantum &&
0a7de745 2838 processor->quantum_end > processor->last_dispatch) {
fe8ab488 2839 thread->quantum_remaining = (uint32_t)remainder;
0a7de745 2840 } else {
fe8ab488 2841 thread->quantum_remaining = 0;
0a7de745 2842 }
2d21ac55 2843
6d2010ae 2844 if (thread->sched_mode == TH_MODE_REALTIME) {
2d21ac55
A
2845 /*
2846 * Cancel the deadline if the thread has
2847 * consumed the entire quantum.
2848 */
fe8ab488 2849 if (thread->quantum_remaining == 0) {
c3c9b80d
A
2850 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CANCEL_RT_DEADLINE) | DBG_FUNC_NONE,
2851 (uintptr_t)thread_tid(thread), thread->realtime.deadline, thread->realtime.computation, 0);
2d21ac55 2852 thread->realtime.deadline = UINT64_MAX;
2d21ac55 2853 }
b7266188 2854 } else {
3e170ce0 2855#if defined(CONFIG_SCHED_TIMESHARE_CORE)
2d21ac55
A
2856 /*
2857 * For non-realtime threads treat a tiny
2858 * remaining quantum as an expired quantum
2859 * but include what's left next time.
2860 */
fe8ab488 2861 if (thread->quantum_remaining < min_std_quantum) {
2d21ac55 2862 thread->reason |= AST_QUANTUM;
fe8ab488 2863 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2d21ac55 2864 }
3e170ce0 2865#endif /* CONFIG_SCHED_TIMESHARE_CORE */
2d21ac55
A
2866 }
2867
91447636 2868 /*
2d21ac55
A
2869 * If we are doing a direct handoff then
2870 * take the remainder of the quantum.
91447636 2871 */
0a7de745 2872 if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) {
fe8ab488 2873 self->quantum_remaining = thread->quantum_remaining;
2d21ac55 2874 thread->reason |= AST_QUANTUM;
fe8ab488
A
2875 thread->quantum_remaining = 0;
2876 } else {
2877#if defined(CONFIG_SCHED_MULTIQ)
3e170ce0
A
2878 if (SCHED(sched_groups_enabled) &&
2879 thread->sched_group == self->sched_group) {
fe8ab488 2880 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3e170ce0 2881 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
fe8ab488
A
2882 self->reason, (uintptr_t)thread_tid(thread),
2883 self->quantum_remaining, thread->quantum_remaining, 0);
2884
2885 self->quantum_remaining = thread->quantum_remaining;
2886 thread->quantum_remaining = 0;
3e170ce0 2887 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
fe8ab488
A
2888 }
2889#endif /* defined(CONFIG_SCHED_MULTIQ) */
91447636 2890 }
91447636 2891
b0d623f7 2892 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2d21ac55
A
2893
2894 if (!(thread->state & TH_WAIT)) {
2895 /*
3e170ce0 2896 * Still runnable.
2d21ac55 2897 */
5ba3f43e 2898 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
3e170ce0 2899
0a7de745 2900 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE);
3e170ce0 2901
5ba3f43e
A
2902 ast_t reason = thread->reason;
2903 sched_options_t options = SCHED_NONE;
2904
2905 if (reason & AST_REBALANCE) {
2906 options |= SCHED_REBALANCE;
2907 if (reason & AST_QUANTUM) {
d9a64523
A
2908 /*
2909 * Having gone to the trouble of forcing this thread off a less preferred core,
2910 * we should force the preferable core to reschedule immediately to give this
5ba3f43e
A
2911 * thread a chance to run instead of just sitting on the run queue where
2912 * it may just be stolen back by the idle core we just forced it off.
2913 * But only do this at the end of a quantum to prevent cascading effects.
2914 */
2915 options |= SCHED_PREEMPT;
2916 }
2917 }
2918
0a7de745 2919 if (reason & AST_QUANTUM) {
5ba3f43e 2920 options |= SCHED_TAILQ;
0a7de745 2921 } else if (reason & AST_PREEMPT) {
5ba3f43e 2922 options |= SCHED_HEADQ;
0a7de745 2923 } else {
5ba3f43e 2924 options |= (SCHED_PREEMPT | SCHED_TAILQ);
0a7de745 2925 }
5ba3f43e
A
2926
2927 thread_setrun(thread, options);
2d21ac55 2928
fe8ab488 2929 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2930 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2931 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2932 sched_run_buckets[TH_BUCKET_RUN], 0);
3e170ce0 2933
316670eb
A
2934 if (thread->wake_active) {
2935 thread->wake_active = FALSE;
2936 thread_unlock(thread);
2937
2938 thread_wakeup(&thread->wake_active);
3e170ce0 2939 } else {
316670eb 2940 thread_unlock(thread);
3e170ce0 2941 }
316670eb 2942
2d21ac55 2943 wake_unlock(thread);
3e170ce0 2944 } else {
2d21ac55
A
2945 /*
2946 * Waiting.
2947 */
b7266188 2948 boolean_t should_terminate = FALSE;
fe8ab488 2949 uint32_t new_run_count;
d9a64523 2950 int thread_state = thread->state;
b7266188
A
2951
2952 /* Only the first call to thread_dispatch
2953 * after explicit termination should add
2954 * the thread to the termination queue
2955 */
0a7de745 2956 if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) {
b7266188 2957 should_terminate = TRUE;
d9a64523 2958 thread_state |= TH_TERMINATE2;
b7266188
A
2959 }
2960
d9a64523
A
2961 timer_stop(&thread->runnable_timer, processor->last_dispatch);
2962
2963 thread_state &= ~TH_RUN;
2964 thread->state = thread_state;
2965
5ba3f43e 2966 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
39236c6e
A
2967 thread->chosen_processor = PROCESSOR_NULL;
2968
cb323159 2969 new_run_count = SCHED(run_count_decr)(thread);
2d21ac55 2970
f427ee49
A
2971#if CONFIG_SCHED_AUTO_JOIN
2972 if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) {
2973 work_interval_auto_join_unwind(thread);
2974 }
2975#endif /* CONFIG_SCHED_AUTO_JOIN */
2976
3e170ce0 2977#if CONFIG_SCHED_SFI
d9a64523
A
2978 if (thread->reason & AST_SFI) {
2979 thread->wait_sfi_begin_time = processor->last_dispatch;
39236c6e 2980 }
3e170ce0 2981#endif
0a7de745 2982 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE);
fe8ab488
A
2983
2984 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
2985 MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE,
2986 (uintptr_t)thread_tid(thread), thread->reason, thread_state,
2987 new_run_count, 0);
2d21ac55 2988
d9a64523
A
2989 if (thread_state & TH_WAIT_REPORT) {
2990 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2991 }
b7266188 2992
2d21ac55
A
2993 if (thread->wake_active) {
2994 thread->wake_active = FALSE;
2995 thread_unlock(thread);
2996
2997 thread_wakeup(&thread->wake_active);
3e170ce0 2998 } else {
2d21ac55 2999 thread_unlock(thread);
3e170ce0 3000 }
91447636 3001
2d21ac55 3002 wake_unlock(thread);
91447636 3003
0a7de745 3004 if (should_terminate) {
2d21ac55 3005 thread_terminate_enqueue(thread);
0a7de745 3006 }
2d21ac55
A
3007 }
3008 }
f427ee49
A
3009 /*
3010 * The thread could have been added to the termination queue, so it's
3011 * unsafe to use after this point.
3012 */
3013 thread = THREAD_NULL;
91447636 3014 }
91447636 3015
5ba3f43e
A
3016 int urgency = THREAD_URGENCY_NONE;
3017 uint64_t latency = 0;
3018
f427ee49 3019 /* Update (new) current thread and reprogram running timers */
3e170ce0 3020 thread_lock(self);
0a7de745 3021
2d21ac55 3022 if (!(self->state & TH_IDLE)) {
39236c6e 3023 uint64_t arg1, arg2;
3e170ce0
A
3024
3025#if CONFIG_SCHED_SFI
0a7de745 3026 ast_t new_ast;
fe8ab488 3027
fe8ab488 3028 new_ast = sfi_thread_needs_ast(self, NULL);
fe8ab488
A
3029
3030 if (new_ast != AST_NONE) {
3031 ast_on(new_ast);
3032 }
3e170ce0
A
3033#endif
3034
5ba3f43e 3035 assertf(processor->last_dispatch >= self->last_made_runnable_time,
0a7de745
A
3036 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
3037 processor->last_dispatch, self->last_made_runnable_time);
5ba3f43e
A
3038
3039 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
3040
3e170ce0 3041 latency = processor->last_dispatch - self->last_made_runnable_time;
5ba3f43e 3042 assert(latency >= self->same_pri_latency);
6d2010ae 3043
39236c6e
A
3044 urgency = thread_get_urgency(self, &arg1, &arg2);
3045
3e170ce0
A
3046 thread_tell_urgency(urgency, arg1, arg2, latency, self);
3047
91447636 3048 /*
2d21ac55 3049 * Get a new quantum if none remaining.
91447636 3050 */
fe8ab488 3051 if (self->quantum_remaining == 0) {
2d21ac55 3052 thread_quantum_init(self);
6d2010ae 3053 }
91447636
A
3054
3055 /*
2d21ac55 3056 * Set up quantum timer and timeslice.
91447636 3057 */
f427ee49
A
3058 processor->quantum_end = processor->last_dispatch +
3059 self->quantum_remaining;
91447636 3060
f427ee49
A
3061 running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self,
3062 processor->quantum_end, processor->last_dispatch);
3063 if (was_idle) {
3064 /*
3065 * kperf's running timer is active whenever the idle thread for a
3066 * CPU is not running.
3067 */
3068 kperf_running_setup(processor, processor->last_dispatch);
3069 }
3070 running_timers_activate(processor);
3e170ce0
A
3071 processor->first_timeslice = TRUE;
3072 } else {
f427ee49 3073 running_timers_deactivate(processor);
3e170ce0 3074 processor->first_timeslice = FALSE;
3e170ce0 3075 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
91447636 3076 }
6d2010ae 3077
813fb2f6 3078 assert(self->block_hint == kThreadWaitNone);
3e170ce0
A
3079 self->computation_epoch = processor->last_dispatch;
3080 self->reason = AST_NONE;
39037602 3081 processor->starting_pri = self->sched_pri;
3e170ce0
A
3082
3083 thread_unlock(self);
3084
5ba3f43e 3085 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
0a7de745 3086 processor->last_dispatch);
5ba3f43e 3087
3e170ce0
A
3088#if defined(CONFIG_SCHED_DEFERRED_AST)
3089 /*
3090 * TODO: Can we state that redispatching our old thread is also
3091 * uninteresting?
3092 */
cb323159 3093 if ((os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed) == 1) && !(self->state & TH_IDLE)) {
3e170ce0 3094 pset_cancel_deferred_dispatch(processor->processor_set, processor);
91447636 3095 }
3e170ce0 3096#endif
91447636
A
3097}
3098
3099/*
2d21ac55 3100 * thread_block_reason:
91447636 3101 *
2d21ac55
A
3102 * Forces a reschedule, blocking the caller if a wait
3103 * has been asserted.
91447636 3104 *
2d21ac55
A
3105 * If a continuation is specified, then thread_invoke will
3106 * attempt to discard the thread's kernel stack. When the
3107 * thread resumes, it will execute the continuation function
3108 * on a new kernel stack.
91447636 3109 */
2d21ac55
A
3110wait_result_t
3111thread_block_reason(
0a7de745
A
3112 thread_continue_t continuation,
3113 void *parameter,
3114 ast_t reason)
91447636 3115{
3e170ce0
A
3116 thread_t self = current_thread();
3117 processor_t processor;
3118 thread_t new_thread;
3119 spl_t s;
1c79356b 3120
1c79356b
A
3121 s = splsched();
3122
55e303ae 3123 processor = current_processor();
1c79356b 3124
9bccf70c 3125 /* If we're explicitly yielding, force a subsequent quantum */
0a7de745 3126 if (reason & AST_YIELD) {
3e170ce0 3127 processor->first_timeslice = FALSE;
0a7de745 3128 }
0b4e3aa0 3129
9bccf70c
A
3130 /* We're handling all scheduling AST's */
3131 ast_off(AST_SCHEDULING);
1c79356b 3132
490019cf
A
3133#if PROC_REF_DEBUG
3134 if ((continuation != NULL) && (self->task != kernel_task)) {
3135 if (uthread_get_proc_refcount(self->uthread) != 0) {
3136 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
3137 }
3138 }
3139#endif
3140
91447636
A
3141 self->continuation = continuation;
3142 self->parameter = parameter;
3143
fe8ab488 3144 if (self->state & ~(TH_RUN | TH_IDLE)) {
316670eb 3145 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
3146 MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK),
3147 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
b0d623f7
A
3148 }
3149
2d21ac55 3150 do {
91447636 3151 thread_lock(self);
5ba3f43e 3152 new_thread = thread_select(self, processor, &reason);
91447636 3153 thread_unlock(self);
2d21ac55 3154 } while (!thread_invoke(self, new_thread, reason));
1c79356b 3155
1c79356b
A
3156 splx(s);
3157
0a7de745 3158 return self->wait_result;
1c79356b
A
3159}
3160
3161/*
3162 * thread_block:
3163 *
9bccf70c 3164 * Block the current thread if a wait has been asserted.
1c79356b 3165 */
91447636 3166wait_result_t
1c79356b 3167thread_block(
0a7de745 3168 thread_continue_t continuation)
1c79356b 3169{
91447636
A
3170 return thread_block_reason(continuation, NULL, AST_NONE);
3171}
3172
3173wait_result_t
3174thread_block_parameter(
0a7de745
A
3175 thread_continue_t continuation,
3176 void *parameter)
91447636
A
3177{
3178 return thread_block_reason(continuation, parameter, AST_NONE);
1c79356b
A
3179}
3180
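/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * typical continuation-based wait pattern a caller of thread_block()
 * might use, under the contract described above. `example_event`,
 * example_wait() and example_continue() are hypothetical; assert_wait(),
 * thread_block_parameter() and thread_wakeup() are the real primitives,
 * used here with their usual signatures.
 */
static int example_event;

static void
example_continue(void *param, wait_result_t wresult)
{
	/*
	 * Runs on a fresh kernel stack; locals from before the block are
	 * gone, so all state must arrive through `param`.
	 */
	if (wresult == THREAD_AWAKENED) {
		/* ... handle the wakeup using `param` ... */
	}

	/* Wait again; with a continuation supplied, control never returns here. */
	assert_wait((event_t)&example_event, THREAD_UNINT);
	thread_block_parameter(example_continue, param);
	/*NOTREACHED*/
}

static void
example_wait(void *param)
{
	assert_wait((event_t)&example_event, THREAD_UNINT);
	/*
	 * Passing a continuation allows thread_invoke() to discard this
	 * kernel stack while the thread is blocked.
	 */
	thread_block_parameter(example_continue, param);
	/*NOTREACHED*/
}

/* A waker elsewhere would simply call: thread_wakeup(&example_event); */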
3181/*
3182 * thread_run:
3183 *
91447636 3184 * Switch directly from the current thread to the
55e303ae 3185 * new thread, handing off our quantum if appropriate.
9bccf70c
A
3186 *
3187 * New thread must be runnable, and not on a run queue.
1c79356b 3188 *
55e303ae 3189 * Called at splsched.
1c79356b
A
3190 */
3191int
3192thread_run(
0a7de745
A
3193 thread_t self,
3194 thread_continue_t continuation,
3195 void *parameter,
3196 thread_t new_thread)
1c79356b 3197{
0a7de745
A
3198 ast_t reason = AST_NONE;
3199
3200 if ((self->state & TH_IDLE) == 0) {
3201 reason = AST_HANDOFF;
3202 }
9bccf70c 3203
f427ee49
A
3204 /*
 3205	 * If this thread has not been through thread_setrun() yet, it
 3206	 * might not have a chosen processor, so give it one.
3207 */
3208 if (new_thread->chosen_processor == NULL) {
3209 new_thread->chosen_processor = current_processor();
3210 }
3211
91447636
A
3212 self->continuation = continuation;
3213 self->parameter = parameter;
9bccf70c 3214
5ba3f43e
A
3215 while (!thread_invoke(self, new_thread, reason)) {
3216 /* the handoff failed, so we have to fall back to the normal block path */
3217 processor_t processor = current_processor();
3218
3219 reason = AST_NONE;
9bccf70c 3220
91447636 3221 thread_lock(self);
5ba3f43e 3222 new_thread = thread_select(self, processor, &reason);
91447636 3223 thread_unlock(self);
9bccf70c
A
3224 }
3225
0a7de745 3226 return self->wait_result;
1c79356b
A
3227}
3228
3229/*
91447636 3230 * thread_continue:
55e303ae 3231 *
91447636
A
3232 * Called at splsched when a thread first receives
3233 * a new stack after a continuation.
cb323159
A
3234 *
3235 * Called with THREAD_NULL as the old thread when
3236 * invoked by machine_load_context.
1c79356b
A
3237 */
3238void
91447636 3239thread_continue(
0a7de745 3240 thread_t thread)
1c79356b 3241{
3e170ce0
A
3242 thread_t self = current_thread();
3243 thread_continue_t continuation;
3244 void *parameter;
b0d623f7
A
3245
3246 DTRACE_SCHED(on__cpu);
3247
91447636 3248 continuation = self->continuation;
91447636 3249 parameter = self->parameter;
9bccf70c 3250
cb323159
A
3251 assert(continuation != NULL);
3252
39037602
A
3253#if KPERF
3254 kperf_on_cpu(self, continuation, NULL);
3255#endif
3256
2d21ac55 3257 thread_dispatch(thread, self);
9bccf70c 3258
2d21ac55 3259 self->continuation = self->parameter = NULL;
1c79356b 3260
5ba3f43e 3261#if INTERRUPT_MASKED_DEBUG
0a7de745
A
3262 /* Reset interrupt-masked spin debugging timeout */
3263 ml_spin_debug_clear(self);
5ba3f43e
A
3264#endif
3265
d9a64523 3266 TLOG(1, "thread_continue: calling call_continuation\n");
0a7de745 3267
cb323159
A
3268 boolean_t enable_interrupts = TRUE;
3269
3270 /* bootstrap thread, idle thread need to stay interrupts-disabled */
3271 if (thread == THREAD_NULL || (self->state & TH_IDLE)) {
3272 enable_interrupts = FALSE;
3273 }
3274
d9a64523 3275 call_continuation(continuation, parameter, self->wait_result, enable_interrupts);
91447636 3276 /*NOTREACHED*/
1c79356b
A
3277}
3278
2d21ac55 3279void
6d2010ae 3280thread_quantum_init(thread_t thread)
2d21ac55 3281{
6d2010ae 3282 if (thread->sched_mode == TH_MODE_REALTIME) {
fe8ab488 3283 thread->quantum_remaining = thread->realtime.computation;
6d2010ae 3284 } else {
fe8ab488 3285 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
6d2010ae
A
3286 }
3287}
2d21ac55 3288
fe8ab488 3289uint32_t
3e170ce0 3290sched_timeshare_initial_quantum_size(thread_t thread)
6d2010ae 3291{
0a7de745 3292 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) {
316670eb 3293 return bg_quantum;
0a7de745 3294 } else {
39037602 3295 return std_quantum;
0a7de745 3296 }
6d2010ae
A
3297}
3298
6d2010ae
A
3299/*
3300 * run_queue_init:
3301 *
3302 * Initialize a run queue before first use.
3303 */
3304void
3305run_queue_init(
0a7de745 3306 run_queue_t rq)
6d2010ae 3307{
39037602 3308 rq->highq = NOPRI;
0a7de745 3309 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) {
2d21ac55 3310 rq->bitmap[i] = 0;
0a7de745 3311 }
2d21ac55 3312 rq->urgency = rq->count = 0;
0a7de745 3313 for (int i = 0; i < NRQS; i++) {
cb323159 3314 circle_queue_init(&rq->queues[i]);
0a7de745 3315 }
2d21ac55 3316}
1c79356b 3317
2d21ac55
A
3318/*
3319 * run_queue_dequeue:
3320 *
3321 * Perform a dequeue operation on a run queue,
3322 * and return the resulting thread.
3323 *
6d2010ae 3324 * The run queue must be locked (see thread_run_queue_remove()
2d21ac55
A
3325 * for more info), and not empty.
3326 */
6d2010ae 3327thread_t
2d21ac55 3328run_queue_dequeue(
cb323159
A
3329 run_queue_t rq,
3330 sched_options_t options)
2d21ac55 3331{
cb323159
A
3332 thread_t thread;
3333 circle_queue_t queue = &rq->queues[rq->highq];
0a7de745 3334
2d21ac55 3335 if (options & SCHED_HEADQ) {
cb323159 3336 thread = cqe_dequeue_head(queue, struct thread, runq_links);
39037602 3337 } else {
cb323159 3338 thread = cqe_dequeue_tail(queue, struct thread, runq_links);
9bccf70c 3339 }
1c79356b 3340
39037602
A
3341 assert(thread != THREAD_NULL);
3342 assert_thread_magic(thread);
3343
2d21ac55 3344 thread->runq = PROCESSOR_NULL;
6d2010ae 3345 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
2d21ac55 3346 rq->count--;
6d2010ae 3347 if (SCHED(priority_is_urgent)(rq->highq)) {
4a3eedf9
A
3348 rq->urgency--; assert(rq->urgency >= 0);
3349 }
cb323159 3350 if (circle_queue_empty(queue)) {
39037602
A
3351 bitmap_clear(rq->bitmap, rq->highq);
3352 rq->highq = bitmap_first(rq->bitmap, NRQS);
2d21ac55 3353 }
1c79356b 3354
39037602 3355 return thread;
1c79356b
A
3356}
3357
6d2010ae
A
3358/*
3359 * run_queue_enqueue:
3360 *
 3361 * Perform an enqueue operation on a run queue.
3362 *
3363 * The run queue must be locked (see thread_run_queue_remove()
3364 * for more info).
3365 */
3366boolean_t
3367run_queue_enqueue(
cb323159
A
3368 run_queue_t rq,
3369 thread_t thread,
3370 sched_options_t options)
6d2010ae 3371{
cb323159
A
3372 circle_queue_t queue = &rq->queues[thread->sched_pri];
3373 boolean_t result = FALSE;
39037602
A
3374
3375 assert_thread_magic(thread);
3376
cb323159
A
3377 if (circle_queue_empty(queue)) {
3378 circle_enqueue_tail(queue, &thread->runq_links);
39037602
A
3379
3380 rq_bitmap_set(rq->bitmap, thread->sched_pri);
6d2010ae
A
3381 if (thread->sched_pri > rq->highq) {
3382 rq->highq = thread->sched_pri;
3383 result = TRUE;
3384 }
fe8ab488 3385 } else {
0a7de745 3386 if (options & SCHED_TAILQ) {
cb323159 3387 circle_enqueue_tail(queue, &thread->runq_links);
0a7de745 3388 } else {
cb323159 3389 circle_enqueue_head(queue, &thread->runq_links);
0a7de745 3390 }
fe8ab488 3391 }
0a7de745 3392 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
6d2010ae 3393 rq->urgency++;
0a7de745 3394 }
6d2010ae
A
3395 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3396 rq->count++;
39037602 3397
0a7de745 3398 return result;
6d2010ae
A
3399}
3400
3401/*
3402 * run_queue_remove:
3403 *
3404 * Remove a specific thread from a runqueue.
3405 *
3406 * The run queue must be locked.
3407 */
3408void
3409run_queue_remove(
0a7de745
A
3410 run_queue_t rq,
3411 thread_t thread)
6d2010ae 3412{
cb323159
A
3413 circle_queue_t queue = &rq->queues[thread->sched_pri];
3414
39037602
A
3415 assert(thread->runq != PROCESSOR_NULL);
3416 assert_thread_magic(thread);
6d2010ae 3417
cb323159 3418 circle_dequeue(queue, &thread->runq_links);
6d2010ae
A
3419 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3420 rq->count--;
3421 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3422 rq->urgency--; assert(rq->urgency >= 0);
3423 }
39037602 3424
cb323159 3425 if (circle_queue_empty(queue)) {
6d2010ae 3426 /* update run queue status */
39037602
A
3427 bitmap_clear(rq->bitmap, thread->sched_pri);
3428 rq->highq = bitmap_first(rq->bitmap, NRQS);
6d2010ae 3429 }
39037602 3430
6d2010ae
A
3431 thread->runq = PROCESSOR_NULL;
3432}
3433
cb323159
A
3434/*
3435 * run_queue_peek
3436 *
3437 * Peek at the runq and return the highest
3438 * priority thread from the runq.
3439 *
3440 * The run queue must be locked.
3441 */
3442thread_t
3443run_queue_peek(
3444 run_queue_t rq)
3445{
3446 if (rq->count > 0) {
3447 circle_queue_t queue = &rq->queues[rq->highq];
3448 thread_t thread = cqe_queue_first(queue, struct thread, runq_links);
3449 assert_thread_magic(thread);
3450 return thread;
3451 } else {
3452 return THREAD_NULL;
3453 }
3454}
3455
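/*
 * Illustrative sketch (editor's addition, not xnu code): a minimal
 * userspace model of the run-queue bookkeeping above -- one list head per
 * priority plus a bitmap, so the highest occupied priority is found with a
 * single find-last-set scan. All toy_* names are hypothetical; the real
 * code uses circle queues, NRQS, and per-runq statistics.
 */
#include <stdint.h>
#include <stddef.h>

#define TOY_NRQS 128

struct toy_elem {
	struct toy_elem *next;
	int pri;
};

struct toy_runq {
	uint64_t bitmap[2];                 /* 128 priorities, 2 x 64 bits  */
	struct toy_elem *head[TOY_NRQS];    /* singly linked, head insert   */
	int highq;                          /* cached highest occupied pri  */
};

static void
toy_runq_init(struct toy_runq *rq)
{
	rq->bitmap[0] = rq->bitmap[1] = 0;
	for (int i = 0; i < TOY_NRQS; i++) {
		rq->head[i] = NULL;
	}
	rq->highq = -1;
}

static void
toy_enqueue(struct toy_runq *rq, struct toy_elem *e)
{
	e->next = rq->head[e->pri];
	rq->head[e->pri] = e;
	rq->bitmap[e->pri / 64] |= 1ULL << (e->pri % 64);
	if (e->pri > rq->highq) {
		rq->highq = e->pri;
	}
}

static struct toy_elem *
toy_dequeue_highest(struct toy_runq *rq)
{
	if (rq->highq < 0) {
		return NULL;
	}
	struct toy_elem *e = rq->head[rq->highq];
	rq->head[rq->highq] = e->next;
	if (rq->head[rq->highq] == NULL) {
		rq->bitmap[rq->highq / 64] &= ~(1ULL << (rq->highq % 64));
		/* recompute the highest occupied priority from the bitmap */
		rq->highq = -1;
		for (int w = 1; w >= 0; w--) {
			if (rq->bitmap[w] != 0) {
				rq->highq = w * 64 + 63 - __builtin_clzll(rq->bitmap[w]);
				break;
			}
		}
	}
	return e;
}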
f427ee49
A
3456rt_queue_t
3457sched_rtlocal_runq(processor_set_t pset)
3458{
3459 return &pset->rt_runq;
3460}
3461
3e170ce0 3462void
f427ee49 3463sched_rtlocal_init(processor_set_t pset)
6d2010ae 3464{
f427ee49
A
3465 pset_rt_init(pset);
3466}
3467
3468void
3469sched_rtlocal_queue_shutdown(processor_t processor)
3470{
3471 processor_set_t pset = processor->processor_set;
0a7de745 3472 thread_t thread;
f427ee49 3473 queue_head_t tqueue;
fe8ab488 3474
f427ee49 3475 pset_lock(pset);
5ba3f43e 3476
f427ee49
A
3477 /* We only need to migrate threads if this is the last active or last recommended processor in the pset */
3478 if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) {
3479 pset_unlock(pset);
3480 return;
3481 }
6d2010ae 3482
f427ee49
A
3483 queue_init(&tqueue);
3484
3485 while (rt_runq_count(pset) > 0) {
3486 thread = qe_dequeue_head(&pset->rt_runq.queue, struct thread, runq_links);
3487 thread->runq = PROCESSOR_NULL;
3488 SCHED_STATS_RUNQ_CHANGE(&pset->rt_runq.runq_stats, rt_runq_count(pset));
3489 rt_runq_count_decr(pset);
3490 enqueue_tail(&tqueue, &thread->runq_links);
3491 }
3492 sched_update_pset_load_average(pset, 0);
3493 pset_unlock(pset);
3494
3495 qe_foreach_element_safe(thread, &tqueue, runq_links) {
3496 remqueue(&thread->runq_links);
3497
3498 thread_lock(thread);
3499
3500 thread_setrun(thread, SCHED_TAILQ);
3501
3502 thread_unlock(thread);
3e170ce0 3503 }
f427ee49
A
3504}
3505
3506/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3507void
3508sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context)
3509{
3510 thread_t thread;
3511
3512 pset_node_t node = &pset_node0;
3513 processor_set_t pset = node->psets;
3514
3515 spl_t s = splsched();
3516 do {
3517 while (pset != NULL) {
3518 pset_lock(pset);
3519
3520 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3521 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3522 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3523 }
3524 }
6d2010ae 3525
f427ee49
A
3526 pset_unlock(pset);
3527
3528 pset = pset->pset_list;
3529 }
3530 } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
3e170ce0 3531 splx(s);
6d2010ae
A
3532}
3533
5ba3f43e 3534int64_t
f427ee49 3535sched_rtlocal_runq_count_sum(void)
5ba3f43e 3536{
f427ee49
A
3537 pset_node_t node = &pset_node0;
3538 processor_set_t pset = node->psets;
3539 int64_t count = 0;
3540
3541 do {
3542 while (pset != NULL) {
3543 count += pset->rt_runq.runq_stats.count_sum;
3544
3545 pset = pset->pset_list;
3546 }
3547 } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL));
3548
3549 return count;
5ba3f43e 3550}
3e170ce0 3551
1c79356b 3552/*
2d21ac55
A
3553 * realtime_queue_insert:
3554 *
3555 * Enqueue a thread for realtime execution.
1c79356b 3556 */
2d21ac55 3557static boolean_t
5ba3f43e 3558realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
1c79356b 3559{
5ba3f43e 3560 queue_t queue = &SCHED(rt_runq)(pset)->queue;
39037602
A
3561 uint64_t deadline = thread->realtime.deadline;
3562 boolean_t preempt = FALSE;
1c79356b 3563
f427ee49 3564 pset_assert_locked(pset);
1c79356b 3565
55e303ae 3566 if (queue_empty(queue)) {
39037602 3567 enqueue_tail(queue, &thread->runq_links);
2d21ac55 3568 preempt = TRUE;
39037602
A
3569 } else {
3570 /* Insert into rt_runq in thread deadline order */
3571 queue_entry_t iter;
3572 qe_foreach(iter, queue) {
3573 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3574 assert_thread_magic(iter_thread);
3575
3576 if (deadline < iter_thread->realtime.deadline) {
0a7de745 3577 if (iter == queue_first(queue)) {
39037602 3578 preempt = TRUE;
0a7de745 3579 }
39037602
A
3580 insque(&thread->runq_links, queue_prev(iter));
3581 break;
3582 } else if (iter == queue_last(queue)) {
3583 enqueue_tail(queue, &thread->runq_links);
55e303ae
A
3584 break;
3585 }
55e303ae 3586 }
55e303ae
A
3587 }
3588
5ba3f43e
A
3589 thread->runq = processor;
3590 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3591 rt_runq_count_incr(pset);
55e303ae 3592
0a7de745 3593 return preempt;
2d21ac55 3594}
55e303ae 3595
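/*
 * Illustrative sketch (editor's addition): the deadline-ordered insert
 * used for the realtime run queue above, modelled on a plain sorted
 * singly linked list. It returns true when the new entry becomes the
 * head -- the case where realtime_queue_insert() requests a preemption.
 * The toy_rt_* names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_rt_elem {
	struct toy_rt_elem *next;
	uint64_t deadline;
};

static bool
toy_rt_insert(struct toy_rt_elem **head, struct toy_rt_elem *e)
{
	struct toy_rt_elem **link = head;

	/* walk until the first entry with a strictly later deadline */
	while (*link != NULL && (*link)->deadline <= e->deadline) {
		link = &(*link)->next;
	}
	e->next = *link;
	*link = e;

	return link == head;   /* earliest deadline => preempt */
}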
f427ee49
A
3596#define MAX_BACKUP_PROCESSORS 7
3597#if defined(__x86_64__)
3598#define DEFAULT_BACKUP_PROCESSORS 1
3599#else
3600#define DEFAULT_BACKUP_PROCESSORS 0
3601#endif
3602
3603int sched_rt_n_backup_processors = DEFAULT_BACKUP_PROCESSORS;
3604
3605int
3606sched_get_rt_n_backup_processors(void)
3607{
3608 return sched_rt_n_backup_processors;
3609}
3610
3611void
3612sched_set_rt_n_backup_processors(int n)
3613{
3614 if (n < 0) {
3615 n = 0;
3616 } else if (n > MAX_BACKUP_PROCESSORS) {
3617 n = MAX_BACKUP_PROCESSORS;
3618 }
3619
3620 sched_rt_n_backup_processors = n;
3621}
3622
2d21ac55
A
3623/*
3624 * realtime_setrun:
3625 *
3626 * Dispatch a thread for realtime execution.
3627 *
3628 * Thread must be locked. Associated pset must
3629 * be locked, and is returned unlocked.
3630 */
3631static void
3632realtime_setrun(
f427ee49 3633 processor_t chosen_processor,
0a7de745 3634 thread_t thread)
2d21ac55 3635{
f427ee49 3636 processor_set_t pset = chosen_processor->processor_set;
d9a64523
A
3637 pset_assert_locked(pset);
3638 ast_t preempt;
55e303ae 3639
f427ee49 3640 int n_backup = 0;
fe8ab488 3641
f427ee49
A
3642 if (thread->realtime.constraint <= rt_constraint_threshold) {
3643 n_backup = sched_rt_n_backup_processors;
3644 }
3645 assert((n_backup >= 0) && (n_backup <= MAX_BACKUP_PROCESSORS));
3646
3647 sched_ipi_type_t ipi_type[MAX_BACKUP_PROCESSORS + 1] = {};
3648 processor_t ipi_processor[MAX_BACKUP_PROCESSORS + 1] = {};
3649
3650 thread->chosen_processor = chosen_processor;
6d2010ae 3651
fe8ab488
A
3652 /* <rdar://problem/15102234> */
3653 assert(thread->bound_processor == PROCESSOR_NULL);
3654
f427ee49 3655 realtime_queue_insert(chosen_processor, pset, thread);
39236c6e 3656
f427ee49
A
3657 processor_t processor = chosen_processor;
3658 bool chosen_process_is_secondary = chosen_processor->processor_primary != chosen_processor;
39236c6e 3659
f427ee49
A
3660 int count = 0;
3661 for (int i = 0; i <= n_backup; i++) {
3662 if (i > 0) {
3663 processor = choose_processor_for_realtime_thread(pset, chosen_processor, chosen_process_is_secondary);
3664 if ((processor == PROCESSOR_NULL) || (sched_avoid_cpu0 && (processor->cpu_id == 0))) {
3665 break;
fe8ab488 3666 }
f427ee49
A
3667 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
3668 (uintptr_t)thread_tid(thread), (uintptr_t)-3, processor->cpu_id, processor->state, 0);
3669 }
3670 ipi_type[i] = SCHED_IPI_NONE;
3671 ipi_processor[i] = processor;
3672 count++;
3673
3674 if (processor->current_pri < BASEPRI_RTQUEUES) {
3675 preempt = (AST_PREEMPT | AST_URGENT);
3676 } else if (thread->realtime.deadline < processor->deadline) {
3677 preempt = (AST_PREEMPT | AST_URGENT);
3678 } else {
3679 preempt = AST_NONE;
3680 }
3681
3682 if (preempt != AST_NONE) {
3683 if (processor->state == PROCESSOR_IDLE) {
5ba3f43e 3684 processor_state_update_from_thread(processor, thread);
39236c6e 3685 processor->deadline = thread->realtime.deadline;
f427ee49
A
3686 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
3687 if (processor == current_processor()) {
3688 ast_on(preempt);
0a7de745 3689
f427ee49
A
3690 if ((preempt & AST_URGENT) == AST_URGENT) {
3691 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3692 }
0a7de745 3693
f427ee49
A
3694 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3695 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3696 }
3697 } else {
3698 ipi_type[i] = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
3699 }
3700 } else if (processor->state == PROCESSOR_DISPATCHING) {
3701 if ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline)) {
3702 processor_state_update_from_thread(processor, thread);
3703 processor->deadline = thread->realtime.deadline;
0a7de745 3704 }
39236c6e 3705 } else {
f427ee49
A
3706 if (processor == current_processor()) {
3707 ast_on(preempt);
3708
3709 if ((preempt & AST_URGENT) == AST_URGENT) {
3710 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3711 }
3712
3713 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3714 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3715 }
3716 } else {
3717 ipi_type[i] = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
3718 }
39236c6e 3719 }
f427ee49
A
3720 } else {
3721 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
39236c6e 3722 }
2d21ac55
A
3723 }
3724
3725 pset_unlock(pset);
f427ee49
A
3726
3727 assert((count > 0) && (count <= (n_backup + 1)));
3728 for (int i = 0; i < count; i++) {
3729 assert(ipi_processor[i] != PROCESSOR_NULL);
3730 sched_ipi_perform(ipi_processor[i], ipi_type[i]);
3731 }
5ba3f43e 3732}
fe8ab488 3733
5ba3f43e 3734
0a7de745
A
3735sched_ipi_type_t
3736sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3737 __unused sched_ipi_event_t event)
5ba3f43e
A
3738{
3739#if defined(CONFIG_SCHED_DEFERRED_AST)
0a7de745
A
3740 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3741 return SCHED_IPI_DEFERRED;
3742 }
5ba3f43e 3743#else /* CONFIG_SCHED_DEFERRED_AST */
0a7de745 3744 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
5ba3f43e 3745#endif /* CONFIG_SCHED_DEFERRED_AST */
0a7de745 3746 return SCHED_IPI_NONE;
5ba3f43e
A
3747}
3748
0a7de745
A
3749sched_ipi_type_t
3750sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
5ba3f43e 3751{
0a7de745
A
3752 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3753 assert(dst != NULL);
5ba3f43e 3754
0a7de745
A
3755 processor_set_t pset = dst->processor_set;
3756 if (current_processor() == dst) {
3757 return SCHED_IPI_NONE;
3758 }
5ba3f43e 3759
0a7de745
A
3760 if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) {
3761 return SCHED_IPI_NONE;
3762 }
5ba3f43e 3763
0a7de745
A
3764 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3765 switch (ipi_type) {
5ba3f43e 3766 case SCHED_IPI_NONE:
0a7de745
A
3767 return SCHED_IPI_NONE;
3768#if defined(CONFIG_SCHED_DEFERRED_AST)
5ba3f43e 3769 case SCHED_IPI_DEFERRED:
0a7de745
A
3770 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3771 break;
5ba3f43e
A
3772#endif /* CONFIG_SCHED_DEFERRED_AST */
3773 default:
0a7de745
A
3774 bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id);
3775 bit_set(pset->pending_AST_PREEMPT_cpu_mask, dst->cpu_id);
3776 break;
3777 }
3778 return ipi_type;
5ba3f43e
A
3779}
3780
0a7de745
A
3781sched_ipi_type_t
3782sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
5ba3f43e 3783{
0a7de745
A
3784 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3785 boolean_t deferred_ipi_supported = false;
3786 processor_set_t pset = dst->processor_set;
5ba3f43e
A
3787
3788#if defined(CONFIG_SCHED_DEFERRED_AST)
0a7de745 3789 deferred_ipi_supported = true;
5ba3f43e
A
3790#endif /* CONFIG_SCHED_DEFERRED_AST */
3791
0a7de745 3792 switch (event) {
5ba3f43e
A
3793 case SCHED_IPI_EVENT_SPILL:
3794 case SCHED_IPI_EVENT_SMT_REBAL:
3795 case SCHED_IPI_EVENT_REBALANCE:
3796 case SCHED_IPI_EVENT_BOUND_THR:
0a7de745
A
3797 /*
 3798	 * The spill, SMT rebalance, rebalance, and bound-thread
 3799	 * scenarios always use immediate IPIs.
3800 */
3801 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3802 break;
5ba3f43e 3803 case SCHED_IPI_EVENT_PREEMPT:
0a7de745
A
3804 /* In the preemption case, use immediate IPIs for RT threads */
3805 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3806 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3807 break;
3808 }
3809
3810 /*
 3811	 * For non-RT thread preemption:
 3812	 * if the core is active, use an immediate IPI;
 3813	 * if the core is idle, use a deferred IPI if supported, otherwise an immediate IPI.
3814 */
3815 if (deferred_ipi_supported && dst_idle) {
3816 return sched_ipi_deferred_policy(pset, dst, event);
3817 }
3818 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3819 break;
5ba3f43e 3820 default:
0a7de745
A
3821 panic("Unrecognized scheduler IPI event type %d", event);
3822 }
3823 assert(ipi_type != SCHED_IPI_NONE);
3824 return ipi_type;
2d21ac55
A
3825}
3826
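/*
 * Illustrative sketch (editor's addition): the two-phase IPI pattern the
 * setrun paths above follow -- decide on the IPI while the pset lock is
 * held (which also records the pending-AST bits), then deliver it only
 * after pset_unlock(), so the target CPU never spins on a lock we still
 * own. toy_kick_processor() is hypothetical; sched_ipi_action() and
 * sched_ipi_perform() are the real helpers defined in this file.
 */
static void
toy_kick_processor(processor_t processor, thread_t thread, boolean_t dst_idle)
{
	processor_set_t pset = processor->processor_set;

	pset_lock(pset);
	/* decision phase: pick the IPI type under the pset lock */
	sched_ipi_type_t ipi_type =
	    sched_ipi_action(processor, thread, dst_idle, SCHED_IPI_EVENT_PREEMPT);
	pset_unlock(pset);

	/* delivery phase: signal the target with no locks held */
	sched_ipi_perform(processor, ipi_type);
}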
0a7de745
A
3827void
3828sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
5ba3f43e 3829{
0a7de745 3830 switch (ipi) {
5ba3f43e 3831 case SCHED_IPI_NONE:
0a7de745 3832 break;
5ba3f43e 3833 case SCHED_IPI_IDLE:
0a7de745
A
3834 machine_signal_idle(dst);
3835 break;
5ba3f43e 3836 case SCHED_IPI_IMMEDIATE:
0a7de745
A
3837 cause_ast_check(dst);
3838 break;
5ba3f43e 3839 case SCHED_IPI_DEFERRED:
0a7de745
A
3840 machine_signal_idle_deferred(dst);
3841 break;
5ba3f43e 3842 default:
0a7de745
A
3843 panic("Unrecognized scheduler IPI type: %d", ipi);
3844 }
5ba3f43e 3845}
6d2010ae 3846
fe8ab488
A
3847#if defined(CONFIG_SCHED_TIMESHARE_CORE)
3848
3849boolean_t
6d2010ae
A
3850priority_is_urgent(int priority)
3851{
39037602 3852 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
6d2010ae
A
3853}
3854
fe8ab488
A
3855#endif /* CONFIG_SCHED_TIMESHARE_CORE */
3856
55e303ae 3857/*
2d21ac55 3858 * processor_setrun:
55e303ae 3859 *
2d21ac55
A
3860 * Dispatch a thread for execution on a
3861 * processor.
55e303ae 3862 *
2d21ac55
A
3863 * Thread must be locked. Associated pset must
3864 * be locked, and is returned unlocked.
55e303ae 3865 */
2d21ac55
A
3866static void
3867processor_setrun(
0a7de745
A
3868 processor_t processor,
3869 thread_t thread,
3870 integer_t options)
55e303ae 3871{
d9a64523
A
3872 processor_set_t pset = processor->processor_set;
3873 pset_assert_locked(pset);
3874 ast_t preempt;
39236c6e 3875 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
55e303ae 3876
5ba3f43e 3877 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
fe8ab488 3878
6d2010ae
A
3879 thread->chosen_processor = processor;
3880
55e303ae 3881 /*
2d21ac55 3882 * Set preemption mode.
1c79356b 3883 */
3e170ce0
A
3884#if defined(CONFIG_SCHED_DEFERRED_AST)
3885 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3886#endif
0a7de745 3887 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
6d2010ae 3888 preempt = (AST_PREEMPT | AST_URGENT);
2a1bd2d3 3889 } else if (processor->current_is_eagerpreempt) {
55e303ae 3890 preempt = (AST_PREEMPT | AST_URGENT);
0a7de745
A
3891 } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3892 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
39236c6e
A
3893 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3894 } else {
3895 preempt = AST_NONE;
3896 }
0a7de745 3897 } else {
2d21ac55 3898 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
0a7de745 3899 }
9bccf70c 3900
0a7de745 3901 if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) {
d9a64523
A
3902 /*
3903 * Having gone to the trouble of forcing this thread off a less preferred core,
3904 * we should force the preferable core to reschedule immediately to give this
3905 * thread a chance to run instead of just sitting on the run queue where
3906 * it may just be stolen back by the idle core we just forced it off.
3907 */
3908 preempt |= AST_PREEMPT;
3909 }
3910
39236c6e 3911 SCHED(processor_enqueue)(processor, thread, options);
f427ee49 3912 sched_update_pset_load_average(pset, 0);
9bccf70c 3913
2d21ac55 3914 if (preempt != AST_NONE) {
39236c6e 3915 if (processor->state == PROCESSOR_IDLE) {
5ba3f43e 3916 processor_state_update_from_thread(processor, thread);
39236c6e 3917 processor->deadline = UINT64_MAX;
d9a64523 3918 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
39236c6e 3919 ipi_action = eExitIdle;
0a7de745 3920 } else if (processor->state == PROCESSOR_DISPATCHING) {
cb323159 3921 if (processor->current_pri < thread->sched_pri) {
5ba3f43e 3922 processor_state_update_from_thread(processor, thread);
39236c6e
A
3923 processor->deadline = UINT64_MAX;
3924 }
0a7de745
A
3925 } else if ((processor->state == PROCESSOR_RUNNING ||
3926 processor->state == PROCESSOR_SHUTDOWN) &&
3927 (thread->sched_pri >= processor->current_pri)) {
39236c6e 3928 ipi_action = eInterruptRunning;
2d21ac55 3929 }
39236c6e
A
3930 } else {
3931 /*
3932 * New thread is not important enough to preempt what is running, but
3933 * special processor states may need special handling
3934 */
0a7de745
A
3935 if (processor->state == PROCESSOR_SHUTDOWN &&
3936 thread->sched_pri >= processor->current_pri) {
39236c6e 3937 ipi_action = eInterruptRunning;
d190cdc3 3938 } else if (processor->state == PROCESSOR_IDLE) {
5ba3f43e 3939 processor_state_update_from_thread(processor, thread);
39236c6e 3940 processor->deadline = UINT64_MAX;
d9a64523 3941 pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
39236c6e
A
3942
3943 ipi_action = eExitIdle;
3944 }
2d21ac55 3945 }
39236c6e 3946
5ba3f43e 3947 if (ipi_action != eDoNothing) {
0a7de745
A
3948 if (processor == current_processor()) {
3949 if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) {
3950 ast_on(preempt);
3951 }
6d2010ae 3952
0a7de745
A
3953 if ((preempt & AST_URGENT) == AST_URGENT) {
3954 bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3955 } else {
3956 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
3957 }
3958
3959 if ((preempt & AST_PREEMPT) == AST_PREEMPT) {
3960 bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3961 } else {
3962 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
3963 }
3964 } else {
3965 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3966 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3967 }
3968 }
3969 pset_unlock(pset);
3970 sched_ipi_perform(processor, ipi_type);
3971}
3972
3973/*
2d21ac55
A
3974 * choose_next_pset:
3975 *
3976 * Return the next sibling pset containing
3977 * available processors.
3978 *
3979 * Returns the original pset if none other is
3980 * suitable.
3981 */
3982static processor_set_t
3983choose_next_pset(
0a7de745 3984 processor_set_t pset)
2d21ac55 3985{
0a7de745 3986 processor_set_t nset = pset;
2d21ac55
A
3987
3988 do {
3989 nset = next_pset(nset);
6d2010ae 3990 } while (nset->online_processor_count < 1 && nset != pset);
2d21ac55 3991
0a7de745 3992 return nset;
2d21ac55
A
3993}
3994
f427ee49
A
3995inline static processor_set_t
3996change_locked_pset(processor_set_t current_pset, processor_set_t new_pset)
3997{
3998 if (current_pset != new_pset) {
3999 pset_unlock(current_pset);
4000 pset_lock(new_pset);
4001 }
4002
4003 return new_pset;
4004}
4005
4006/*
4007 * choose_processor:
2d21ac55
A
4008 *
4009 * Choose a processor for the thread, beginning at
b7266188 4010 * the pset. Accepts an optional processor hint in
2d21ac55
A
4011 * the pset.
4012 *
4013 * Returns a processor, possibly from a different pset.
4014 *
4015 * The thread must be locked. The pset must be locked,
4016 * and the resulting pset is locked on return.
4017 */
6d2010ae 4018processor_t
2d21ac55 4019choose_processor(
d9a64523
A
4020 processor_set_t starting_pset,
4021 processor_t processor,
4022 thread_t thread)
2d21ac55 4023{
d9a64523
A
4024 processor_set_t pset = starting_pset;
4025 processor_set_t nset;
39037602
A
4026
4027 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
4028
cf7d32b8 4029 /*
fe8ab488 4030 * Prefer the hinted processor, when appropriate.
cf7d32b8 4031 */
b7266188 4032
fe8ab488 4033 /* Fold last processor hint from secondary processor to its primary */
0b4c1975 4034 if (processor != PROCESSOR_NULL) {
fe8ab488 4035 processor = processor->processor_primary;
0b4c1975 4036 }
b0d623f7 4037
fe8ab488
A
4038 /*
4039 * Only consult platform layer if pset is active, which
4040 * it may not be in some cases when a multi-set system
4041 * is going to sleep.
4042 */
4043 if (pset->online_processor_count) {
4044 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
4045 processor_t mc_processor = machine_choose_processor(pset, processor);
0a7de745 4046 if (mc_processor != PROCESSOR_NULL) {
fe8ab488 4047 processor = mc_processor->processor_primary;
0a7de745 4048 }
fe8ab488
A
4049 }
4050 }
b7266188 4051
fe8ab488
A
4052 /*
4053 * At this point, we may have a processor hint, and we may have
4054 * an initial starting pset. If the hint is not in the pset, or
4055 * if the hint is for a processor in an invalid state, discard
4056 * the hint.
4057 */
0b4c1975 4058 if (processor != PROCESSOR_NULL) {
fe8ab488 4059 if (processor->processor_set != pset) {
cf7d32b8 4060 processor = PROCESSOR_NULL;
3e170ce0
A
4061 } else if (!processor->is_recommended) {
4062 processor = PROCESSOR_NULL;
fe8ab488
A
4063 } else {
4064 switch (processor->state) {
0a7de745
A
4065 case PROCESSOR_START:
4066 case PROCESSOR_SHUTDOWN:
4067 case PROCESSOR_OFF_LINE:
4068 /*
4069 * Hint is for a processor that cannot support running new threads.
4070 */
4071 processor = PROCESSOR_NULL;
4072 break;
4073 case PROCESSOR_IDLE:
4074 /*
4075 * Hint is for an idle processor. Assume it is no worse than any other
4076 * idle processor. The platform layer had an opportunity to provide
4077 * the "least cost idle" processor above.
4078 */
f427ee49
A
4079 if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
4080 return processor;
4081 }
4082 processor = PROCESSOR_NULL;
4083 break;
0a7de745
A
4084 case PROCESSOR_RUNNING:
4085 case PROCESSOR_DISPATCHING:
4086 /*
4087 * Hint is for an active CPU. This fast-path allows
4088 * realtime threads to preempt non-realtime threads
4089 * to regain their previous executing processor.
4090 */
4091 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
f427ee49 4092 processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) {
0a7de745
A
4093 return processor;
4094 }
4095
4096 /* Otherwise, use hint as part of search below */
4097 break;
4098 default:
4099 processor = PROCESSOR_NULL;
4100 break;
fe8ab488
A
4101 }
4102 }
b7266188 4103 }
2d21ac55
A
4104
4105 /*
fe8ab488
A
4106 * Iterate through the processor sets to locate
4107 * an appropriate processor. Seed results with
4108 * a last-processor hint, if available, so that
4109 * a search must find something strictly better
4110 * to replace it.
4111 *
 4112	 * A primary/secondary pair of SMT processors is
4113 * "unpaired" if the primary is busy but its
4114 * corresponding secondary is idle (so the physical
4115 * core has full use of its resources).
2d21ac55 4116 */
fe8ab488
A
4117
4118 integer_t lowest_priority = MAXPRI + 1;
a39ff7e2 4119 integer_t lowest_secondary_priority = MAXPRI + 1;
fe8ab488 4120 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
0a7de745 4121 integer_t lowest_idle_secondary_priority = MAXPRI + 1;
fe8ab488
A
4122 integer_t lowest_count = INT_MAX;
4123 uint64_t furthest_deadline = 1;
4124 processor_t lp_processor = PROCESSOR_NULL;
4125 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
0a7de745 4126 processor_t lp_idle_secondary_processor = PROCESSOR_NULL;
a39ff7e2 4127 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
fe8ab488
A
4128 processor_t lc_processor = PROCESSOR_NULL;
4129 processor_t fd_processor = PROCESSOR_NULL;
4130
4131 if (processor != PROCESSOR_NULL) {
4132 /* All other states should be enumerated above. */
4133 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
4134
4135 lowest_priority = processor->current_pri;
4136 lp_processor = processor;
4137
4138 if (processor->current_pri >= BASEPRI_RTQUEUES) {
4139 furthest_deadline = processor->deadline;
4140 fd_processor = processor;
4141 }
4142
4143 lowest_count = SCHED(processor_runq_count)(processor);
4144 lc_processor = processor;
4145 }
4146
f427ee49
A
4147 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4148 pset_node_t node = pset->node;
4149 int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0);
4150 for (; consider_secondaries < 2; consider_secondaries++) {
4151 pset = change_locked_pset(pset, starting_pset);
4152 do {
4153 processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, consider_secondaries);
4154 if (processor) {
4155 return processor;
4156 }
d9a64523 4157
f427ee49 4158 /* NRG Collect processor stats for furthest deadline etc. here */
d9a64523 4159
f427ee49 4160 nset = next_pset(pset);
0a7de745 4161
f427ee49
A
4162 if (nset != starting_pset) {
4163 pset = change_locked_pset(pset, nset);
4164 }
4165 } while (nset != starting_pset);
4166 }
4167 /* Or we could just let it change to starting_pset in the loop above */
4168 pset = change_locked_pset(pset, starting_pset);
4169 }
0a7de745 4170
f427ee49
A
4171 do {
4172 /*
4173 * Choose an idle processor, in pset traversal order
4174 */
4175
4176 uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
4177 pset->primary_map &
4178 pset->recommended_bitmask);
4179
4180 /* there shouldn't be a pending AST if the processor is idle */
4181 assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4182
4183 int cpuid = lsb_first(idle_primary_map);
4184 if (cpuid >= 0) {
4185 processor = processor_array[cpuid];
4186 return processor;
3e170ce0 4187 }
1c79356b 4188
fe8ab488 4189 /*
a39ff7e2 4190 * Otherwise, enumerate active and idle processors to find primary candidates
fe8ab488
A
4191 * with lower priority/etc.
4192 */
0b4c1975 4193
d9a64523 4194 uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
0a7de745
A
4195 pset->recommended_bitmask &
4196 ~pset->pending_AST_URGENT_cpu_mask);
4197
4198 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) {
4199 active_map &= ~pset->pending_AST_PREEMPT_cpu_mask;
4200 }
4201
d9a64523
A
4202 active_map = bit_ror64(active_map, (pset->last_chosen + 1));
4203 for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) {
4204 cpuid = ((rotid + pset->last_chosen + 1) & 63);
4205 processor = processor_array[cpuid];
2d21ac55 4206
fe8ab488 4207 integer_t cpri = processor->current_pri;
0a7de745
A
4208 processor_t primary = processor->processor_primary;
4209 if (primary != processor) {
4210 /* If primary is running a NO_SMT thread, don't choose its secondary */
4211 if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) {
4212 if (cpri < lowest_secondary_priority) {
4213 lowest_secondary_priority = cpri;
4214 lp_paired_secondary_processor = processor;
4215 }
a39ff7e2
A
4216 }
4217 } else {
4218 if (cpri < lowest_priority) {
4219 lowest_priority = cpri;
4220 lp_processor = processor;
4221 }
fe8ab488 4222 }
b0d623f7 4223
fe8ab488
A
4224 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
4225 furthest_deadline = processor->deadline;
4226 fd_processor = processor;
4227 }
0b4c1975 4228
fe8ab488
A
4229 integer_t ccount = SCHED(processor_runq_count)(processor);
4230 if (ccount < lowest_count) {
4231 lowest_count = ccount;
4232 lc_processor = processor;
4233 }
fe8ab488
A
4234 }
4235
4236 /*
4237 * For SMT configs, these idle secondary processors must have active primary. Otherwise
4238 * the idle primary would have short-circuited the loop above
4239 */
d9a64523 4240 uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] &
0a7de745
A
4241 ~pset->primary_map &
4242 pset->recommended_bitmask);
4243
4244 /* there shouldn't be a pending AST if the processor is idle */
4245 assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0);
4246 assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0);
3e170ce0 4247
d9a64523
A
4248 for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) {
4249 processor = processor_array[cpuid];
3e170ce0 4250
fe8ab488
A
4251 processor_t cprimary = processor->processor_primary;
4252
0a7de745
A
4253 integer_t primary_pri = cprimary->current_pri;
4254
4255 /*
4256 * TODO: This should also make the same decisions
4257 * as secondary_can_run_realtime_thread
4258 *
4259 * TODO: Keep track of the pending preemption priority
4260 * of the primary to make this more accurate.
4261 */
4262
4263 /* If the primary is running a no-smt thread, then don't choose its secondary */
4264 if (cprimary->state == PROCESSOR_RUNNING &&
4265 processor_active_thread_no_smt(cprimary)) {
d9a64523
A
4266 continue;
4267 }
0a7de745
A
4268
4269 /*
4270 * Find the idle secondary processor with the lowest priority primary
4271 *
4272 * We will choose this processor as a fallback if we find no better
4273 * primary to preempt.
4274 */
4275 if (primary_pri < lowest_idle_secondary_priority) {
4276 lp_idle_secondary_processor = processor;
4277 lowest_idle_secondary_priority = primary_pri;
a39ff7e2
A
4278 }
4279
0a7de745
A
 4280		/* Find the lowest priority active primary with an idle secondary */
4281 if (primary_pri < lowest_unpaired_primary_priority) {
4282 /* If the primary processor is offline or starting up, it's not a candidate for this path */
4283 if (cprimary->state != PROCESSOR_RUNNING &&
4284 cprimary->state != PROCESSOR_DISPATCHING) {
4285 continue;
4286 }
fe8ab488 4287
0a7de745
A
4288 if (!cprimary->is_recommended) {
4289 continue;
0b4c1975 4290 }
0a7de745
A
4291
4292 /* if the primary is pending preemption, don't try to re-preempt it */
4293 if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) {
4294 continue;
4295 }
4296
4297 if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE &&
4298 bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) {
4299 continue;
4300 }
4301
4302 lowest_unpaired_primary_priority = primary_pri;
4303 lp_unpaired_primary_processor = cprimary;
2d21ac55 4304 }
fe8ab488
A
4305 }
4306
0a7de745
A
4307 /*
4308 * We prefer preempting a primary processor over waking up its secondary.
4309 * The secondary will then be woken up by the preempted thread.
4310 */
4311 if (thread->sched_pri > lowest_unpaired_primary_priority) {
4312 pset->last_chosen = lp_unpaired_primary_processor->cpu_id;
4313 return lp_unpaired_primary_processor;
4314 }
0b4c1975 4315
0a7de745
A
4316 /*
4317 * We prefer preempting a lower priority active processor over directly
4318 * waking up an idle secondary.
4319 * The preempted thread will then find the idle secondary.
4320 */
4321 if (thread->sched_pri > lowest_priority) {
4322 pset->last_chosen = lp_processor->cpu_id;
4323 return lp_processor;
4324 }
fe8ab488 4325
0a7de745 4326 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
fe8ab488
A
4327 /*
4328 * For realtime threads, the most important aspect is
0a7de745
A
4329 * scheduling latency, so we will pick an active
4330 * secondary processor in this pset, or preempt
4331 * another RT thread with a further deadline before
4332 * going to the next pset.
fe8ab488
A
4333 */
4334
a39ff7e2 4335 if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) {
d9a64523 4336 pset->last_chosen = lp_paired_secondary_processor->cpu_id;
a39ff7e2
A
4337 return lp_paired_secondary_processor;
4338 }
fe8ab488 4339
0a7de745
A
4340 if (thread->realtime.deadline < furthest_deadline) {
4341 return fd_processor;
cf7d32b8 4342 }
2d21ac55
A
4343 }
4344
4345 /*
cb323159
A
4346 * lc_processor is used to indicate the best processor set run queue
4347 * on which to enqueue a thread when all available CPUs are busy with
4348 * higher priority threads, so try to make sure it is initialized.
4349 */
4350 if (lc_processor == PROCESSOR_NULL) {
4351 cpumap_t available_map = ((pset->cpu_state_map[PROCESSOR_IDLE] |
4352 pset->cpu_state_map[PROCESSOR_RUNNING] |
4353 pset->cpu_state_map[PROCESSOR_DISPATCHING]) &
4354 pset->recommended_bitmask);
4355 cpuid = lsb_first(available_map);
4356 if (cpuid >= 0) {
4357 lc_processor = processor_array[cpuid];
4358 lowest_count = SCHED(processor_runq_count)(lc_processor);
4359 }
4360 }
4361
4362 /*
4363 * Move onto the next processor set.
4364 *
0a7de745
A
4365 * If all primary processors in this pset are running a higher
 4366	 * priority thread, move on to the next pset. Only when we have
4367 * exhausted the search for primary processors do we
4368 * fall back to secondaries.
2d21ac55 4369 */
d9a64523 4370 nset = next_pset(pset);
2d21ac55 4371
d9a64523 4372 if (nset != starting_pset) {
f427ee49 4373 pset = change_locked_pset(pset, nset);
2d21ac55 4374 }
d9a64523 4375 } while (nset != starting_pset);
2d21ac55
A
4376
4377 /*
fe8ab488
A
4378 * Make sure that we pick a running processor,
4379 * and that the correct processor set is locked.
0a7de745 4380 * Since we may have unlocked the candidate processor's
fe8ab488
A
4381 * pset, it may have changed state.
4382 *
4383 * All primary processors are running a higher priority
4384 * thread, so the only options left are enqueuing on
4385 * the secondary processor that would perturb the least priority
4386 * primary, or the least busy primary.
2d21ac55 4387 */
cb323159 4388 boolean_t fallback_processor = false;
cf7d32b8 4389 do {
fe8ab488 4390 /* lowest_priority is evaluated in the main loops above */
0a7de745
A
4391 if (lp_idle_secondary_processor != PROCESSOR_NULL) {
4392 processor = lp_idle_secondary_processor;
4393 lp_idle_secondary_processor = PROCESSOR_NULL;
a39ff7e2
A
4394 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
4395 processor = lp_paired_secondary_processor;
4396 lp_paired_secondary_processor = PROCESSOR_NULL;
fe8ab488
A
4397 } else if (lc_processor != PROCESSOR_NULL) {
4398 processor = lc_processor;
4399 lc_processor = PROCESSOR_NULL;
4400 } else {
cf7d32b8 4401 /*
cb323159
A
4402 * All processors are executing higher priority threads, and
4403 * the lowest_count candidate was not usable.
0a7de745 4404 *
cb323159
A
 4405			 * For AMP platforms running the clutch scheduler, always
4406 * return a processor from the requested pset to allow the
4407 * thread to be enqueued in the correct runq. For non-AMP
4408 * platforms, simply return the master_processor.
cf7d32b8 4409 */
cb323159 4410 fallback_processor = true;
f427ee49 4411#if CONFIG_SCHED_EDGE
c6bf4f31 4412 processor = processor_array[lsb_first(starting_pset->primary_map)];
f427ee49 4413#else /* CONFIG_SCHED_EDGE */
fe8ab488 4414 processor = master_processor;
f427ee49 4415#endif /* CONFIG_SCHED_EDGE */
cf7d32b8
A
4416 }
4417
4418 /*
fe8ab488
A
4419 * Check that the correct processor set is
4420 * returned locked.
cf7d32b8 4421 */
f427ee49 4422 pset = change_locked_pset(pset, processor->processor_set);
cf7d32b8
A
4423
4424 /*
fe8ab488 4425 * We must verify that the chosen processor is still available.
cb323159
A
4426 * The cases where we pick the master_processor or the fallback
 4427	 * processor are exceptions, since we may need to enqueue a thread
4428 * on its runqueue if this is the last remaining processor
4429 * during pset shutdown.
4430 *
4431 * <rdar://problem/47559304> would really help here since it
4432 * gets rid of the weird last processor SHUTDOWN case where
4433 * the pset is still schedulable.
cf7d32b8 4434 */
cb323159 4435 if (processor != master_processor && (fallback_processor == false) && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) {
cf7d32b8 4436 processor = PROCESSOR_NULL;
0a7de745 4437 }
cf7d32b8 4438 } while (processor == PROCESSOR_NULL);
2d21ac55 4439
d9a64523
A
4440 pset->last_chosen = processor->cpu_id;
4441 return processor;
2d21ac55
A
4442}
4443
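/*
 * Illustrative sketch (editor's addition): the candidate bookkeeping that
 * choose_processor() performs while scanning a pset -- remember the
 * lowest-priority running CPU (the cheapest preemption victim) and, for
 * realtime threads, the CPU whose current deadline is furthest away. All
 * toy_* names are hypothetical; the real code additionally handles SMT
 * pairing, pending ASTs, and recommendation masks.
 */
#include <stdint.h>
#include <stddef.h>

struct toy_cpu {
	int      current_pri;
	uint64_t deadline;       /* only meaningful when running realtime */
};

static int
toy_pick_victim(const struct toy_cpu *cpu, int ncpus,
    int incoming_pri, uint64_t incoming_deadline)
{
	int lowest_pri = INT32_MAX, lp_idx = -1;
	uint64_t furthest_deadline = 0;
	int fd_idx = -1;

	for (int i = 0; i < ncpus; i++) {
		if (cpu[i].current_pri < lowest_pri) {
			lowest_pri = cpu[i].current_pri;
			lp_idx = i;
		}
		if (cpu[i].deadline > furthest_deadline) {
			furthest_deadline = cpu[i].deadline;
			fd_idx = i;
		}
	}

	if (incoming_pri > lowest_pri) {
		return lp_idx;                  /* preempt the lowest priority */
	}
	if (incoming_deadline < furthest_deadline) {
		return fd_idx;                  /* preempt the furthest deadline */
	}
	return -1;                              /* no suitable victim here */
}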
f427ee49
A
4444/*
4445 * Default implementation of SCHED(choose_node)()
4446 * for single node systems
4447 */
4448pset_node_t
4449sched_choose_node(__unused thread_t thread)
4450{
4451 return &pset_node0;
4452}
4453
4454/*
4455 * choose_starting_pset:
4456 *
4457 * Choose a starting processor set for the thread.
4458 * May return a processor hint within the pset.
4459 *
4460 * Returns a starting processor set, to be used by
4461 * choose_processor.
4462 *
4463 * The thread must be locked. The resulting pset is unlocked on return,
4464 * and is chosen without taking any pset locks.
4465 */
4466processor_set_t
4467choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint)
4468{
4469 processor_set_t pset;
4470 processor_t processor = PROCESSOR_NULL;
4471
4472 if (thread->affinity_set != AFFINITY_SET_NULL) {
4473 /*
4474 * Use affinity set policy hint.
4475 */
4476 pset = thread->affinity_set->aset_pset;
4477 } else if (thread->last_processor != PROCESSOR_NULL) {
4478 /*
4479 * Simple (last processor) affinity case.
4480 */
4481 processor = thread->last_processor;
4482 pset = processor->processor_set;
4483 } else {
4484 /*
4485 * No Affinity case:
4486 *
 4487		 * Utilize a per-task hint to spread threads
4488 * among the available processor sets.
4489 * NRG this seems like the wrong thing to do.
4490 * See also task->pset_hint = pset in thread_setrun()
4491 */
4492 task_t task = thread->task;
4493
4494 pset = task->pset_hint;
4495 if (pset == PROCESSOR_SET_NULL) {
4496 pset = current_processor()->processor_set;
4497 }
4498
4499 pset = choose_next_pset(pset);
4500 }
4501
4502 if (!bit_test(node->pset_map, pset->pset_id)) {
4503 /* pset is not from this node so choose one that is */
4504 int id = lsb_first(node->pset_map);
4505 assert(id >= 0);
4506 pset = pset_array[id];
4507 }
4508
4509 if (bit_count(node->pset_map) == 1) {
4510 /* Only a single pset in this node */
4511 goto out;
4512 }
4513
4514 bool avoid_cpu0 = false;
4515
4516#if defined(__x86_64__)
4517 if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) {
4518 /* Avoid the pset containing cpu0 */
4519 avoid_cpu0 = true;
4520 /* Assert that cpu0 is in pset0. I expect this to be true on __x86_64__ */
4521 assert(bit_test(pset_array[0]->cpu_bitmask, 0));
4522 }
4523#endif
4524
4525 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4526 pset_map_t rt_target_map = atomic_load(&node->pset_non_rt_primary_map);
4527 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
4528 if (avoid_cpu0) {
4529 rt_target_map = bit_ror64(rt_target_map, 1);
4530 }
4531 int rotid = lsb_first(rt_target_map);
4532 if (rotid >= 0) {
4533 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
4534 pset = pset_array[id];
4535 goto out;
4536 }
4537 }
4538 if (!pset->is_SMT || !sched_allow_rt_smt) {
4539 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
4540 goto out;
4541 }
4542 rt_target_map = atomic_load(&node->pset_non_rt_map);
4543 if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) {
4544 if (avoid_cpu0) {
4545 rt_target_map = bit_ror64(rt_target_map, 1);
4546 }
4547 int rotid = lsb_first(rt_target_map);
4548 if (rotid >= 0) {
4549 int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
4550 pset = pset_array[id];
4551 goto out;
4552 }
4553 }
4554 /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */
4555 } else {
4556 pset_map_t idle_map = atomic_load(&node->pset_idle_map);
4557 if (!bit_test(idle_map, pset->pset_id)) {
4558 int next_idle_pset_id = lsb_first(idle_map);
4559 if (next_idle_pset_id >= 0) {
4560 pset = pset_array[next_idle_pset_id];
4561 }
4562 }
4563 }
4564
4565out:
4566 if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) {
4567 processor = PROCESSOR_NULL;
4568 }
4569 if (processor != PROCESSOR_NULL) {
4570 *processor_hint = processor;
4571 }
4572
4573 return pset;
4574}
4575
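/*
 * Illustrative sketch (editor's addition): the rotate-then-scan trick
 * choose_starting_pset() uses when sched_avoid_cpu0 is set -- rotate the
 * candidate bitmap right by one so bit 0 (the pset containing cpu0) is
 * considered last, find the first set bit, then map the index back.
 * Both toy_* helpers are hypothetical stand-ins for lsb_first()/bit_ror64().
 */
#include <stdint.h>

static int
toy_lsb_first(uint64_t map)
{
	return (map == 0) ? -1 : __builtin_ctzll(map);
}

static int
toy_first_candidate_avoiding_zero(uint64_t candidate_map)
{
	/* rotate right by one: bit 0 moves to bit 63 */
	uint64_t rotated = (candidate_map >> 1) | (candidate_map << 63);
	int rotid = toy_lsb_first(rotated);
	if (rotid < 0) {
		return -1;
	}
	return (rotid + 1) & 63;   /* undo the rotation on the index */
}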
2d21ac55
A
4576/*
4577 * thread_setrun:
4578 *
4579 * Dispatch thread for execution, onto an idle
4580 * processor or run queue, and signal a preemption
4581 * as appropriate.
4582 *
4583 * Thread must be locked.
4584 */
4585void
4586thread_setrun(
0a7de745 4587 thread_t thread,
cb323159 4588 sched_options_t options)
2d21ac55 4589{
0a7de745
A
4590 processor_t processor;
4591 processor_set_t pset;
2d21ac55 4592
0a7de745 4593 assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN);
3e170ce0
A
4594 assert(thread->runq == PROCESSOR_NULL);
4595
2d21ac55
A
4596 /*
4597 * Update priority if needed.
4598 */
0a7de745 4599 if (SCHED(can_update_priority)(thread)) {
6d2010ae 4600 SCHED(update_priority)(thread);
0a7de745 4601 }
2d21ac55 4602
fe8ab488
A
4603 thread->sfi_class = sfi_thread_classify(thread);
4604
2d21ac55
A
4605 assert(thread->runq == PROCESSOR_NULL);
4606
4607 if (thread->bound_processor == PROCESSOR_NULL) {
4608 /*
4609 * Unbound case.
4610 */
f427ee49
A
4611 processor_t processor_hint = PROCESSOR_NULL;
4612 pset_node_t node = SCHED(choose_node)(thread);
4613 processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint);
2d21ac55 4614
f427ee49 4615 pset_lock(starting_pset);
39236c6e 4616
f427ee49
A
4617 processor = SCHED(choose_processor)(starting_pset, processor_hint, thread);
4618 pset = processor->processor_set;
4619 task_t task = thread->task;
4620 task->pset_hint = pset; /* NRG this is done without holding the task lock */
39236c6e 4621
f427ee49
A
4622 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4623 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
3e170ce0 4624 } else {
2d21ac55
A
4625 /*
4626 * Bound case:
4627 *
4628 * Unconditionally dispatch on the processor.
4629 */
4630 processor = thread->bound_processor;
55e303ae 4631 pset = processor->processor_set;
2d21ac55 4632 pset_lock(pset);
39236c6e 4633
0a7de745
A
4634 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE,
4635 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
2d21ac55
A
4636 }
4637
4638 /*
3e170ce0 4639 * Dispatch the thread on the chosen processor.
fe8ab488 4640 * TODO: This should be based on sched_mode, not sched_pri
2d21ac55 4641 */
5ba3f43e 4642 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2d21ac55 4643 realtime_setrun(processor, thread);
5ba3f43e 4644 } else {
2d21ac55 4645 processor_setrun(processor, thread, options);
a39ff7e2
A
4646 }
4647 /* pset is now unlocked */
4648 if (thread->bound_processor == PROCESSOR_NULL) {
4649 SCHED(check_spill)(pset, thread);
5ba3f43e 4650 }
2d21ac55
A
4651}
4652
b0d623f7
A
4653processor_set_t
4654task_choose_pset(
0a7de745 4655 task_t task)
b0d623f7 4656{
0a7de745 4657 processor_set_t pset = task->pset_hint;
b0d623f7 4658
0a7de745 4659 if (pset != PROCESSOR_SET_NULL) {
b0d623f7 4660 pset = choose_next_pset(pset);
0a7de745 4661 }
b0d623f7 4662
0a7de745 4663 return pset;
b0d623f7
A
4664}
4665
9bccf70c 4666/*
c910b4d9
A
4667 * Check for a preemption point in
4668 * the current context.
55e303ae 4669 *
fe8ab488 4670 * Called at splsched with thread locked.
9bccf70c
A
4671 */
4672ast_t
4673csw_check(
0a7de745
A
4674 thread_t thread,
4675 processor_t processor,
4676 ast_t check_reason)
39236c6e 4677{
0a7de745
A
4678 processor_set_t pset = processor->processor_set;
4679
4680 assert(thread == processor->active_thread);
39236c6e
A
4681
4682 pset_lock(pset);
4683
0a7de745
A
4684 processor_state_update_from_thread(processor, thread);
4685
4686 ast_t preempt = csw_check_locked(thread, processor, pset, check_reason);
4687
4688 /* Acknowledge the IPI if we decided not to preempt */
4689
4690 if ((preempt & AST_URGENT) == 0) {
4691 bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
4692 }
39236c6e 4693
0a7de745
A
4694 if ((preempt & AST_PREEMPT) == 0) {
4695 bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id);
4696 }
39236c6e
A
4697
4698 pset_unlock(pset);
4699
0a7de745 4700 return preempt;
39236c6e
A
4701}
4702
4703/*
4704 * Check for preemption at splsched with
fe8ab488 4705 * pset and thread locked
39236c6e
A
4706 */
4707ast_t
4708csw_check_locked(
0a7de745
A
4709 thread_t thread,
4710 processor_t processor,
4711 processor_set_t pset,
4712 ast_t check_reason)
9bccf70c 4713{
0a7de745 4714 ast_t result;
9bccf70c 4715
3e170ce0 4716 if (processor->first_timeslice) {
5ba3f43e 4717 if (rt_runq_count(pset) > 0) {
0a7de745
A
4718 return check_reason | AST_PREEMPT | AST_URGENT;
4719 }
4720 } else {
4721 if (rt_runq_count(pset) > 0) {
4722 if (BASEPRI_RTQUEUES > processor->current_pri) {
4723 return check_reason | AST_PREEMPT | AST_URGENT;
4724 } else {
4725 return check_reason | AST_PREEMPT;
4726 }
39236c6e 4727 }
1c79356b 4728 }
9bccf70c 4729
3e170ce0 4730 /*
d9a64523
A
4731 * If the current thread is running on a processor that is no longer recommended,
4732 * urgently preempt it, at which point thread_select() should
3e170ce0
A
4733 * try to idle the processor and re-dispatch the thread to a recommended processor.
4734 */
5ba3f43e 4735 if (!processor->is_recommended) {
0a7de745 4736 return check_reason | AST_PREEMPT | AST_URGENT;
5ba3f43e
A
4737 }
4738
d9a64523 4739 result = SCHED(processor_csw_check)(processor);
0a7de745 4740 if (result != AST_NONE) {
2a1bd2d3 4741 return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
0a7de745 4742 }
d9a64523 4743
5ba3f43e
A
4744 /*
4745 * Same for avoid-processor
4746 *
4747 * TODO: Should these set AST_REBALANCE?
4748 */
4749 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
0a7de745 4750 return check_reason | AST_PREEMPT;
5ba3f43e 4751 }
3e170ce0
A
4752
4753 /*
4754 * Even though we could continue executing on this processor, a
4755 * secondary SMT core should try to shed load to another primary core.
4756 *
4757 * TODO: Should this do the same check that thread_select does? i.e.
4758 * if no bound threads target this processor, and idle primaries exist, preempt
4759 * The case of RT threads existing is already taken care of above
3e170ce0
A
4760 */
4761
4762 if (processor->current_pri < BASEPRI_RTQUEUES &&
0a7de745
A
4763 processor->processor_primary != processor) {
4764 return check_reason | AST_PREEMPT;
4765 }
3e170ce0 4766
0a7de745
A
4767 if (thread->state & TH_SUSP) {
4768 return check_reason | AST_PREEMPT;
4769 }
fe8ab488 4770
3e170ce0 4771#if CONFIG_SCHED_SFI
fe8ab488
A
4772 /*
4773 * Current thread may not need to be preempted, but maybe needs
4774 * an SFI wait?
4775 */
4776 result = sfi_thread_needs_ast(thread, NULL);
0a7de745
A
4777 if (result != AST_NONE) {
4778 return check_reason | result;
4779 }
3e170ce0 4780#endif
c910b4d9 4781
0a7de745 4782 return AST_NONE;
1c79356b
A
4783}
4784
0a7de745
A
4785/*
4786 * Handle preemption IPI or IPI in response to setting an AST flag
4787 * Triggered by cause_ast_check
4788 * Called at splsched
4789 */
4790void
4791ast_check(processor_t processor)
4792{
4793 if (processor->state != PROCESSOR_RUNNING &&
4794 processor->state != PROCESSOR_SHUTDOWN) {
4795 return;
4796 }
4797
4798 thread_t thread = processor->active_thread;
4799
4800 assert(thread == current_thread());
4801
4802 thread_lock(thread);
4803
4804 /*
4805 * Propagate thread ast to processor.
4806 * (handles IPI in response to setting AST flag)
4807 */
4808 ast_propagate(thread);
4809
4810 /*
4811 * Stash the old urgency and perfctl values to find out if
4812 * csw_check updates them.
4813 */
4814 thread_urgency_t old_urgency = processor->current_urgency;
4815 perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class;
4816
4817 ast_t preempt;
4818
4819 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4820 ast_on(preempt);
4821 }
4822
4823 if (old_urgency != processor->current_urgency) {
4824 /*
4825 * Urgency updates happen with the thread lock held (ugh).
4826 * TODO: This doesn't notice QoS changes...
4827 */
4828 uint64_t urgency_param1, urgency_param2;
4829
4830 thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4831 thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
4832 }
4833
4834 thread_unlock(thread);
4835
4836 if (old_perfctl_class != processor->current_perfctl_class) {
4837 /*
4838 * We updated the perfctl class of this thread from another core.
4839 * Let CLPC know that the currently running thread has a new
4840 * class.
4841 */
4842
4843 machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
4844 mach_approximate_time(), 0, thread);
4845 }
4846}
4847
4848
1c79356b 4849/*
9bccf70c 4850 * set_sched_pri:
1c79356b 4851 *
55e303ae
A
4852 * Set the scheduled priority of the specified thread.
4853 *
9bccf70c 4854 * This may cause the thread to change queues.
1c79356b 4855 *
55e303ae 4856 * Thread must be locked.
1c79356b
A
4857 */
4858void
9bccf70c 4859set_sched_pri(
0a7de745 4860 thread_t thread,
f427ee49 4861 int16_t new_priority,
0a7de745 4862 set_sched_pri_options_t options)
1c79356b 4863{
0a7de745
A
4864 bool is_current_thread = (thread == current_thread());
4865 bool removed_from_runq = false;
d9a64523
A
4866 bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY);
4867
f427ee49 4868 int16_t old_priority = thread->sched_pri;
5ba3f43e 4869
3e170ce0 4870 /* If we're already at this priority, no need to mess with the runqueue */
0a7de745 4871 if (new_priority == old_priority) {
cb323159
A
4872#if CONFIG_SCHED_CLUTCH
4873 /* For the first thread in the system, the priority is correct but
4874 * th_sched_bucket is still TH_BUCKET_RUN. Since the clutch
4875 * scheduler relies on the bucket being set for all threads, update
4876 * its bucket here.
4877 */
4878 if (thread->th_sched_bucket == TH_BUCKET_RUN) {
4879 assert(is_current_thread);
4880 SCHED(update_thread_bucket)(thread);
4881 }
4882#endif /* CONFIG_SCHED_CLUTCH */
4883
3e170ce0 4884 return;
0a7de745 4885 }
3e170ce0
A
4886
4887 if (is_current_thread) {
0a7de745 4888 assert(thread->state & TH_RUN);
3e170ce0 4889 assert(thread->runq == PROCESSOR_NULL);
3e170ce0
A
4890 } else {
4891 removed_from_runq = thread_run_queue_remove(thread);
fe8ab488 4892 }
3e170ce0 4893
5ba3f43e 4894 thread->sched_pri = new_priority;
490019cf 4895
cb323159
A
4896#if CONFIG_SCHED_CLUTCH
4897 /*
4898 * Since for the clutch scheduler, the thread's bucket determines its runq
4899 * in the hierarchy it is important to update the bucket when the thread
4900 * lock is held and the thread has been removed from the runq hierarchy.
4901 */
4902 SCHED(update_thread_bucket)(thread);
4903
4904#endif /* CONFIG_SCHED_CLUTCH */
4905
3e170ce0 4906 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
0a7de745
A
4907 (uintptr_t)thread_tid(thread),
4908 thread->base_pri,
4909 thread->sched_pri,
4910 thread->sched_usage,
4911 0);
4912
4913 if (removed_from_runq) {
4914 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4915 } else if (is_current_thread) {
4916 processor_t processor = thread->last_processor;
4917 assert(processor == current_processor());
4918
4919 thread_urgency_t old_urgency = processor->current_urgency;
4920
4921 /*
4922 * When dropping in priority, check if the thread no longer belongs on core.
4923 * If a thread raises its own priority, don't aggressively rebalance it.
4924 * <rdar://problem/31699165>
4925 *
4926 * csw_check does a processor_state_update_from_thread, but
4927 * we should do our own if we're being lazy.
4928 */
4929 if (!lazy_update && new_priority < old_priority) {
4930 ast_t preempt;
4931
4932 if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) {
4933 ast_on(preempt);
4934 }
4935 } else {
4936 processor_state_update_from_thread(processor, thread);
4937 }
3e170ce0 4938
3e170ce0
A
4939 /*
4940 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4941 * class alterations from user space to occur relatively infrequently, hence
4942 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4943 * inheritance is expected to involve priority changes.
4944 */
0a7de745
A
4945 if (processor->current_urgency != old_urgency) {
4946 uint64_t urgency_param1, urgency_param2;
fe8ab488 4947
0a7de745
A
4948 thread_urgency_t new_urgency = thread_get_urgency(thread,
4949 &urgency_param1, &urgency_param2);
9bccf70c 4950
0a7de745
A
4951 thread_tell_urgency(new_urgency, urgency_param1,
4952 urgency_param2, 0, thread);
4953 }
9bccf70c 4954
0a7de745
A
4955 /* TODO: only call this if current_perfctl_class changed */
4956 uint64_t ctime = mach_approximate_time();
4957 machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime);
4958 } else if (thread->state & TH_RUN) {
4959 processor_t processor = thread->last_processor;
5ba3f43e 4960
0a7de745
A
4961 if (!lazy_update &&
4962 processor != PROCESSOR_NULL &&
4963 processor != current_processor() &&
4964 processor->active_thread == thread) {
9bccf70c 4965 cause_ast_check(processor);
5ba3f43e 4966 }
1c79356b
A
4967 }
4968}
4969
3e170ce0
A
4970/*
4971 * thread_run_queue_remove_for_handoff
4972 *
4973 * Pull a thread or its (recursive) push target out of the runqueue
4974 * so that it is ready for thread_run()
4975 *
4976 * Called at splsched
4977 *
4978 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4979 * This may be different than the thread that was passed in.
4980 */
4981thread_t
0a7de745
A
4982thread_run_queue_remove_for_handoff(thread_t thread)
4983{
3e170ce0 4984 thread_t pulled_thread = THREAD_NULL;
91447636 4985
3e170ce0 4986 thread_lock(thread);
91447636 4987
3e170ce0 4988 /*
f427ee49
A
4989	 * Check that the thread is not bound to a different processor,
4990	 * that the NO_SMT flag is not set on the thread, that the processor's
4991	 * cluster type matches the thread's if the thread is pinned to a
4992	 * particular cluster, and that realtime is not involved.
3e170ce0 4993 *
f427ee49 4994 * Next, pull it off its run queue. If it doesn't come, it's not eligible.
3e170ce0 4995 */
3e170ce0 4996 processor_t processor = current_processor();
f427ee49
A
4997 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
4998 && (!thread_no_smt(thread))
4999 && (processor->current_pri < BASEPRI_RTQUEUES)
5000 && (thread->sched_pri < BASEPRI_RTQUEUES)
5001#if __AMP__
5002 && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) ||
5003 processor->processor_set->pset_cluster_type == PSET_AMP_P)
5004 && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) ||
5005 processor->processor_set->pset_cluster_type == PSET_AMP_E)
5006#endif /* __AMP__ */
5007 ) {
0a7de745
A
5008 if (thread_run_queue_remove(thread)) {
5009 pulled_thread = thread;
5010 }
91447636
A
5011 }
5012
3e170ce0 5013 thread_unlock(thread);
6d2010ae 5014
3e170ce0 5015 return pulled_thread;
6d2010ae
A
5016}
5017
f427ee49
A
5018/*
5019 * thread_prepare_for_handoff
5020 *
5021 * Make the thread ready for handoff.
5022 * If the thread was runnable, pull it off the runq; if the thread could
5023 * not be pulled, return NULL.
5024 *
5025 * If the thread was woken up from a wait for handoff, make sure it is not
5026 * bound to a different processor.
5027 *
5028 * Called at splsched
5029 *
5030 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
5031 * This may be different than the thread that was passed in.
5032 */
5033thread_t
5034thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option)
5035{
5036 thread_t pulled_thread = THREAD_NULL;
5037
5038 if (option & THREAD_HANDOFF_SETRUN_NEEDED) {
5039 processor_t processor = current_processor();
5040 thread_lock(thread);
5041
5042 /*
5043		 * Check that the thread is not bound to a different processor,
5044		 * that the NO_SMT flag is not set on the thread, and that the
5045		 * processor's cluster type matches the thread's if the thread is
5046		 * pinned to a particular cluster. Call setrun instead if the above
5047		 * conditions are not satisfied.
5048 */
5049 if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)
5050 && (!thread_no_smt(thread))
5051#if __AMP__
5052 && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) ||
5053 processor->processor_set->pset_cluster_type == PSET_AMP_P)
5054 && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) ||
5055 processor->processor_set->pset_cluster_type == PSET_AMP_E)
5056#endif /* __AMP__ */
5057 ) {
5058 pulled_thread = thread;
5059 } else {
5060 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
5061 }
5062 thread_unlock(thread);
5063 } else {
5064 pulled_thread = thread_run_queue_remove_for_handoff(thread);
5065 }
5066
5067 return pulled_thread;
5068}
5069
1c79356b 5070/*
6d2010ae 5071 * thread_run_queue_remove:
1c79356b 5072 *
fe8ab488 5073 * Remove a thread from its current run queue and
2d21ac55 5074 * return TRUE if successful.
55e303ae
A
5075 *
5076 * Thread must be locked.
fe8ab488
A
5077 *
5078 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
5079 * run queues because the caller locked the thread. Otherwise
5080 * the thread is on a run queue, but could be chosen for dispatch
5081 * and removed by another processor under a different lock, which
5082 * will set thread->runq to PROCESSOR_NULL.
5083 *
5084 * Hence the thread select path must not rely on anything that could
5085 * be changed under the thread lock after calling this function,
5086 * most importantly thread->sched_pri.
1c79356b 5087 */
2d21ac55 5088boolean_t
6d2010ae 5089thread_run_queue_remove(
0a7de745 5090 thread_t thread)
1c79356b 5091{
fe8ab488
A
5092 boolean_t removed = FALSE;
5093 processor_t processor = thread->runq;
1c79356b 5094
0a7de745 5095 if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) {
fe8ab488
A
5096 /* Thread isn't runnable */
5097 assert(thread->runq == PROCESSOR_NULL);
5098 return FALSE;
5099 }
55e303ae 5100
fe8ab488 5101 if (processor == PROCESSOR_NULL) {
55e303ae 5102 /*
fe8ab488
A
5103 * The thread is either not on the runq,
5104 * or is in the midst of being removed from the runq.
5105 *
5106 * runq is set to NULL under the pset lock, not the thread
5107 * lock, so the thread may still be in the process of being dequeued
5108 * from the runq. It will wait in invoke for the thread lock to be
5109 * dropped.
55e303ae 5110 */
55e303ae 5111
fe8ab488
A
5112 return FALSE;
5113 }
55e303ae 5114
fe8ab488
A
5115 if (thread->sched_pri < BASEPRI_RTQUEUES) {
5116 return SCHED(processor_queue_remove)(processor, thread);
5117 }
55e303ae 5118
5ba3f43e
A
5119 processor_set_t pset = processor->processor_set;
5120
f427ee49 5121 pset_lock(pset);
55e303ae 5122
fe8ab488
A
5123 if (thread->runq != PROCESSOR_NULL) {
5124 /*
3e170ce0 5125 * Thread is on the RT run queue and we have a lock on
fe8ab488
A
5126 * that run queue.
5127 */
5128
39037602 5129 remqueue(&thread->runq_links);
5ba3f43e
A
5130 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
5131 rt_runq_count_decr(pset);
fe8ab488
A
5132
5133 thread->runq = PROCESSOR_NULL;
5134
5135 removed = TRUE;
1c79356b
A
5136 }
5137
f427ee49 5138 pset_unlock(pset);
fe8ab488 5139
0a7de745 5140 return removed;
1c79356b
A
5141}
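/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the remove/modify/reinsert pattern that callers such as set_sched_pri()
 * follow.  All routines named here are defined in this file; the example
 * helper itself is hypothetical and compiled out.
 */
#if 0 /* example only */
static void
example_requeue_thread(thread_t thread)
{
	spl_t s = splsched();
	thread_lock(thread);

	/* Pull the thread off its run queue, if it was enqueued. */
	boolean_t removed = thread_run_queue_remove(thread);

	/* ... adjust scheduling state here (e.g. thread->sched_pri) ... */

	if (removed) {
		/* Put it back on the appropriate run queue. */
		thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
	}

	thread_unlock(thread);
	splx(s);
}
#endif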
5142
cf7d32b8 5143/*
3e170ce0 5144 * Put the thread back where it goes after a thread_run_queue_remove
cf7d32b8 5145 *
3e170ce0 5146 * Thread must have been removed under the same thread lock hold
cf7d32b8 5147 *
3e170ce0 5148 * thread locked, at splsched
cf7d32b8 5149 */
3e170ce0 5150void
cb323159 5151thread_run_queue_reinsert(thread_t thread, sched_options_t options)
cf7d32b8 5152{
3e170ce0 5153 assert(thread->runq == PROCESSOR_NULL);
5ba3f43e 5154 assert(thread->state & (TH_RUN));
cf7d32b8 5155
5ba3f43e 5156 thread_setrun(thread, options);
6d2010ae
A
5157}
5158
39236c6e 5159void
d9a64523 5160sys_override_cpu_throttle(boolean_t enable_override)
6d2010ae 5161{
0a7de745 5162 if (enable_override) {
39236c6e 5163 cpu_throttle_enabled = 0;
0a7de745 5164 } else {
d9a64523 5165 cpu_throttle_enabled = 1;
0a7de745 5166 }
39236c6e 5167}
6d2010ae 5168
0a7de745 5169thread_urgency_t
39236c6e
A
5170thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
5171{
0a7de745
A
5172 uint64_t urgency_param1 = 0, urgency_param2 = 0;
5173
5174 thread_urgency_t urgency;
5175
39236c6e 5176 if (thread == NULL || (thread->state & TH_IDLE)) {
0a7de745
A
5177 urgency_param1 = 0;
5178 urgency_param2 = 0;
39236c6e 5179
0a7de745 5180 urgency = THREAD_URGENCY_NONE;
39236c6e 5181 } else if (thread->sched_mode == TH_MODE_REALTIME) {
0a7de745
A
5182 urgency_param1 = thread->realtime.period;
5183 urgency_param2 = thread->realtime.deadline;
39236c6e 5184
0a7de745 5185 urgency = THREAD_URGENCY_REAL_TIME;
39236c6e 5186 } else if (cpu_throttle_enabled &&
0a7de745
A
5187 (thread->sched_pri <= MAXPRI_THROTTLE) &&
5188 (thread->base_pri <= MAXPRI_THROTTLE)) {
39236c6e 5189 /*
0a7de745
A
5190 * Threads that are running at low priority but are not
5191 * tagged with a specific QoS are separated out from
5192 * the "background" urgency. Performance management
5193 * subsystem can decide to either treat these threads
5194 * as normal threads or look at other signals like thermal
5195 * levels for optimal power/perf tradeoffs for a platform.
39236c6e 5196 */
0a7de745
A
5197 boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread);
5198 boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1);
6d2010ae 5199
0a7de745
A
5200 /*
5201 * Background urgency applied when thread priority is
5202 * MAXPRI_THROTTLE or lower and thread is not promoted
5203 * and thread has a QoS specified
fe8ab488 5204 */
0a7de745
A
5205 urgency_param1 = thread->sched_pri;
5206 urgency_param2 = thread->base_pri;
5207
5208 if (thread_lacks_qos && !task_is_suppressed) {
5209 urgency = THREAD_URGENCY_LOWPRI;
5210 } else {
5211 urgency = THREAD_URGENCY_BACKGROUND;
5212 }
5213 } else {
5214 /* For otherwise unclassified threads, report throughput QoS parameters */
5215 urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
5216 urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
5217 urgency = THREAD_URGENCY_NORMAL;
5218 }
39037602 5219
0a7de745
A
5220 if (arg1 != NULL) {
5221 *arg1 = urgency_param1;
5222 }
5223 if (arg2 != NULL) {
5224 *arg2 = urgency_param2;
6d2010ae 5225 }
0a7de745
A
5226
5227 return urgency;
6d2010ae
A
5228}
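/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how callers in this file, e.g. ast_check() and set_sched_pri(), consume
 * thread_get_urgency() and forward the result to the performance
 * controller.  The helper is hypothetical and compiled out.
 */
#if 0 /* example only */
static void
example_report_urgency(thread_t thread)
{
	uint64_t urgency_param1, urgency_param2;

	/* Classify the thread and fetch its class-specific parameters. */
	thread_urgency_t urgency = thread_get_urgency(thread,
	    &urgency_param1, &urgency_param2);

	/* Hand the urgency to the platform performance layer. */
	thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread);
}
#endif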
5229
5ba3f43e
A
5230perfcontrol_class_t
5231thread_get_perfcontrol_class(thread_t thread)
5232{
0a7de745
A
5233 /* Special case handling */
5234 if (thread->state & TH_IDLE) {
5235 return PERFCONTROL_CLASS_IDLE;
5236 }
5237 if (thread->task == kernel_task) {
5238 return PERFCONTROL_CLASS_KERNEL;
5239 }
5240 if (thread->sched_mode == TH_MODE_REALTIME) {
5241 return PERFCONTROL_CLASS_REALTIME;
5242 }
5243
5244 /* perfcontrol_class based on base_pri */
5245 if (thread->base_pri <= MAXPRI_THROTTLE) {
5246 return PERFCONTROL_CLASS_BACKGROUND;
5247 } else if (thread->base_pri <= BASEPRI_UTILITY) {
5248 return PERFCONTROL_CLASS_UTILITY;
5249 } else if (thread->base_pri <= BASEPRI_DEFAULT) {
5250 return PERFCONTROL_CLASS_NONUI;
5251 } else if (thread->base_pri <= BASEPRI_FOREGROUND) {
5252 return PERFCONTROL_CLASS_UI;
5253 } else {
5254 return PERFCONTROL_CLASS_ABOVEUI;
5255 }
5ba3f43e 5256}
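/*
 * Summary of the base_pri mapping above (editor's note, not part of the
 * original file):
 *
 *   base_pri <= MAXPRI_THROTTLE    -> PERFCONTROL_CLASS_BACKGROUND
 *   base_pri <= BASEPRI_UTILITY    -> PERFCONTROL_CLASS_UTILITY
 *   base_pri <= BASEPRI_DEFAULT    -> PERFCONTROL_CLASS_NONUI
 *   base_pri <= BASEPRI_FOREGROUND -> PERFCONTROL_CLASS_UI
 *   otherwise                      -> PERFCONTROL_CLASS_ABOVEUI
 *
 * Idle, kernel_task, and realtime threads are special-cased before this
 * mapping is consulted.
 */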
6d2010ae 5257
1c79356b 5258/*
2d21ac55
A
5259 * This is the processor idle loop, which just looks for other threads
5260 * to execute. Processor idle threads invoke this without supplying a
5261 * current thread, in order to idle without an asserted wait state.
5262 *
5263 * Returns the next thread to execute if dispatched directly.
1c79356b 5264 */
6d2010ae
A
5265
5266#if 0
5267#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
5268#else
5269#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
5270#endif
5271
5272thread_t
2d21ac55 5273processor_idle(
0a7de745
A
5274 thread_t thread,
5275 processor_t processor)
1c79356b 5276{
0a7de745
A
5277 processor_set_t pset = processor->processor_set;
5278
2d21ac55 5279 (void)splsched();
1c79356b 5280
316670eb 5281 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
5282 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START,
5283 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
3a60a9f5 5284
f427ee49
A
5285 SCHED_STATS_INC(idle_transitions);
5286 assert(processor->running_timers_active == false);
6d2010ae 5287
d9a64523
A
5288 uint64_t ctime = mach_absolute_time();
5289
f427ee49
A
5290 timer_switch(&processor->system_state, ctime, &processor->idle_state);
5291 processor->current_state = &processor->idle_state;
3a60a9f5 5292
d9a64523
A
5293 cpu_quiescent_counter_leave(ctime);
5294
39236c6e 5295 while (1) {
5ba3f43e
A
5296 /*
5297 * Ensure that updates to my processor and pset state,
5298 * made by the IPI source processor before sending the IPI,
5299 * are visible on this processor now (even though we don't
5300 * take the pset lock yet).
5301 */
5302 atomic_thread_fence(memory_order_acquire);
5303
0a7de745 5304 if (processor->state != PROCESSOR_IDLE) {
5ba3f43e 5305 break;
0a7de745
A
5306 }
5307 if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) {
39236c6e 5308 break;
0a7de745 5309 }
5ba3f43e 5310#if defined(CONFIG_SCHED_DEFERRED_AST)
0a7de745 5311 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) {
39236c6e 5312 break;
0a7de745 5313 }
5ba3f43e 5314#endif
a39ff7e2 5315 if (processor->is_recommended && (processor->processor_primary == processor)) {
0a7de745 5316 if (rt_runq_count(pset)) {
3e170ce0 5317 break;
0a7de745 5318 }
3e170ce0 5319 } else {
0a7de745 5320 if (SCHED(processor_bound_count)(processor)) {
3e170ce0 5321 break;
0a7de745 5322 }
3e170ce0
A
5323 }
5324
6d2010ae 5325 IDLE_KERNEL_DEBUG_CONSTANT(
0a7de745 5326 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
6d2010ae 5327
4b17d6b6
A
5328 machine_track_platform_idle(TRUE);
5329
2d21ac55 5330 machine_idle();
cb323159 5331 /* returns with interrupts enabled */
55e303ae 5332
4b17d6b6
A
5333 machine_track_platform_idle(FALSE);
5334
55e303ae 5335 (void)splsched();
c910b4d9 5336
0a7de745
A
5337 /*
5338 * Check if we should call sched_timeshare_consider_maintenance() here.
5339 * The CPU was woken out of idle due to an interrupt and we should do the
5340 * call only if the processor is still idle. If the processor is non-idle,
5341 * the threads running on the processor would do the call as part of
d9a64523
A
5342		 * context switching.
5343 */
5344 if (processor->state == PROCESSOR_IDLE) {
5345 sched_timeshare_consider_maintenance(mach_absolute_time());
5346 }
5347
6d2010ae 5348 IDLE_KERNEL_DEBUG_CONSTANT(
0a7de745 5349 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
6d2010ae 5350
fe8ab488
A
5351 if (!SCHED(processor_queue_empty)(processor)) {
5352 /* Secondary SMT processors respond to directed wakeups
5353 * exclusively. Some platforms induce 'spurious' SMT wakeups.
5354 */
0a7de745
A
5355 if (processor->processor_primary == processor) {
5356 break;
5357 }
fe8ab488 5358 }
55e303ae
A
5359 }
5360
d9a64523
A
5361 ctime = mach_absolute_time();
5362
f427ee49
A
5363 timer_switch(&processor->idle_state, ctime, &processor->system_state);
5364 processor->current_state = &processor->system_state;
1c79356b 5365
d9a64523
A
5366 cpu_quiescent_counter_join(ctime);
5367
0a7de745 5368 ast_t reason = AST_NONE;
55e303ae 5369
0a7de745
A
5370 /* We're handling all scheduling AST's */
5371 ast_off(AST_SCHEDULING);
55e303ae 5372
0a7de745
A
5373 /*
5374 * thread_select will move the processor from dispatching to running,
5375 * or put it in idle if there's nothing to do.
5376 */
5377 thread_t current_thread = current_thread();
55e303ae 5378
0a7de745
A
5379 thread_lock(current_thread);
5380 thread_t new_thread = thread_select(current_thread, processor, &reason);
5381 thread_unlock(current_thread);
2d21ac55 5382
f427ee49
A
5383 assert(processor->running_timers_active == false);
5384
316670eb 5385 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
5386 MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END,
5387 (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0);
5388
5389 return new_thread;
2d21ac55
A
5390}
5391
cf7d32b8
A
5392/*
5393 * Each processor has a dedicated thread which
5394 * executes the idle loop when there is no suitable
5395 * previous context.
cb323159
A
5396 *
5397 * This continuation is entered with interrupts disabled.
cf7d32b8 5398 */
2d21ac55 5399void
cb323159
A
5400idle_thread(__assert_only void* parameter,
5401 __unused wait_result_t result)
2d21ac55 5402{
cb323159
A
5403 assert(ml_get_interrupts_enabled() == FALSE);
5404 assert(parameter == NULL);
5405
5406 processor_t processor = current_processor();
5407
5408 /*
5409 * Ensure that anything running in idle context triggers
5410 * preemption-disabled checks.
5411 */
5412 disable_preemption();
5413
5414 /*
5415 * Enable interrupts temporarily to handle any pending interrupts
5416 * or IPIs before deciding to sleep
5417 */
5418 spllo();
5419
5420 thread_t new_thread = processor_idle(THREAD_NULL, processor);
5421 /* returns with interrupts disabled */
5422
5423 enable_preemption();
2d21ac55 5424
2d21ac55 5425 if (new_thread != THREAD_NULL) {
cb323159
A
5426 thread_run(processor->idle_thread,
5427 idle_thread, NULL, new_thread);
2d21ac55
A
5428 /*NOTREACHED*/
5429 }
55e303ae 5430
cb323159 5431 thread_block(idle_thread);
55e303ae 5432 /*NOTREACHED*/
1c79356b
A
5433}
5434
91447636
A
5435kern_return_t
5436idle_thread_create(
0a7de745 5437 processor_t processor)
1c79356b 5438{
0a7de745
A
5439 kern_return_t result;
5440 thread_t thread;
5441 spl_t s;
5442 char name[MAXTHREADNAMESIZE];
91447636 5443
cb323159 5444 result = kernel_thread_create(idle_thread, NULL, MAXPRI_KERNEL, &thread);
0a7de745
A
5445 if (result != KERN_SUCCESS) {
5446 return result;
5447 }
91447636 5448
5ba3f43e
A
5449 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
5450 thread_set_thread_name(thread, name);
5451
91447636
A
5452 s = splsched();
5453 thread_lock(thread);
5454 thread->bound_processor = processor;
5455 processor->idle_thread = thread;
3e170ce0 5456 thread->sched_pri = thread->base_pri = IDLEPRI;
91447636 5457 thread->state = (TH_RUN | TH_IDLE);
39236c6e 5458 thread->options |= TH_OPT_IDLE_THREAD;
91447636
A
5459 thread_unlock(thread);
5460 splx(s);
5461
5462 thread_deallocate(thread);
5463
0a7de745 5464 return KERN_SUCCESS;
1c79356b
A
5465}
5466
91447636
A
5467/*
5468 * sched_startup:
5469 *
5470 * Kicks off scheduler services.
5471 *
5472 * Called at splsched.
5473 */
0b4e3aa0 5474void
91447636 5475sched_startup(void)
0b4e3aa0 5476{
0a7de745
A
5477 kern_return_t result;
5478 thread_t thread;
91447636 5479
3e170ce0
A
5480 simple_lock_init(&sched_vm_group_list_lock, 0);
5481
5ba3f43e
A
5482#if __arm__ || __arm64__
5483 simple_lock_init(&sched_recommended_cores_lock, 0);
5484#endif /* __arm__ || __arm64__ */
490019cf 5485
6d2010ae 5486 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
f427ee49 5487 NULL, MAXPRI_KERNEL, &thread);
0a7de745 5488 if (result != KERN_SUCCESS) {
91447636 5489 panic("sched_startup");
0a7de745 5490 }
91447636
A
5491
5492 thread_deallocate(thread);
5493
39037602
A
5494 assert_thread_magic(thread);
5495
91447636 5496 /*
316670eb
A
5497 * Yield to the sched_init_thread once, to
5498 * initialize our own thread after being switched
5499 * back to.
91447636
A
5500 *
5501 * The current thread is the only other thread
5502 * active at this point.
5503 */
316670eb 5504 thread_block(THREAD_CONTINUE_NULL);
6d2010ae 5505}
91447636 5506
5ba3f43e
A
5507#if __arm64__
5508static _Atomic uint64_t sched_perfcontrol_callback_deadline;
5509#endif /* __arm64__ */
5510
5511
fe8ab488 5512#if defined(CONFIG_SCHED_TIMESHARE_CORE)
91447636 5513
0a7de745
A
5514static volatile uint64_t sched_maintenance_deadline;
5515static uint64_t sched_tick_last_abstime;
5516static uint64_t sched_tick_delta;
5517uint64_t sched_tick_max_delta;
5ba3f43e
A
5518
5519
1c79356b 5520/*
6d2010ae 5521 * sched_init_thread:
1c79356b 5522 *
55e303ae
A
5523 * Perform periodic bookkeeping functions about ten
5524 * times per second.
1c79356b 5525 */
fe8ab488 5526void
3e170ce0 5527sched_timeshare_maintenance_continue(void)
1c79356b 5528{
0a7de745 5529 uint64_t sched_tick_ctime, late_time;
fe8ab488 5530
3e170ce0
A
5531 struct sched_update_scan_context scan_context = {
5532 .earliest_bg_make_runnable_time = UINT64_MAX,
5533 .earliest_normal_make_runnable_time = UINT64_MAX,
5534 .earliest_rt_make_runnable_time = UINT64_MAX
5535 };
5536
0a7de745 5537 sched_tick_ctime = mach_absolute_time();
1c79356b 5538
39236c6e
A
5539 if (__improbable(sched_tick_last_abstime == 0)) {
5540 sched_tick_last_abstime = sched_tick_ctime;
fe8ab488 5541 late_time = 0;
39236c6e
A
5542 sched_tick_delta = 1;
5543 } else {
fe8ab488
A
5544 late_time = sched_tick_ctime - sched_tick_last_abstime;
5545 sched_tick_delta = late_time / sched_tick_interval;
39236c6e
A
5546		/* Ensure a delta of at least 1, since the interval could be slightly
5547 * smaller than the sched_tick_interval due to dispatch
5548 * latencies.
5549 */
5550 sched_tick_delta = MAX(sched_tick_delta, 1);
5551
5552		/* In the event that interrupt latencies or platform
5553 * idle events that advanced the timebase resulted
5554 * in periods where no threads were dispatched,
5555 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
5556 * iterations.
5557 */
5558 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
5559
5560 sched_tick_last_abstime = sched_tick_ctime;
5561 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
5562 }
5563
f427ee49 5564 scan_context.sched_tick_last_abstime = sched_tick_last_abstime;
0a7de745
A
5565 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START,
5566 sched_tick_delta, late_time, 0, 0, 0);
fe8ab488 5567
39236c6e
A
5568	/* Add a number of pseudo-ticks corresponding to the elapsed interval.
5569	 * This could be greater than 1 if substantial intervals where
5570	 * all processors are idle occur, which is rare in practice.
5571 */
39037602 5572
39236c6e 5573 sched_tick += sched_tick_delta;
1c79356b 5574
5ba3f43e
A
5575 update_vm_info();
5576
1c79356b 5577 /*
91447636 5578 * Compute various averages.
1c79356b 5579 */
39236c6e 5580 compute_averages(sched_tick_delta);
1c79356b
A
5581
5582 /*
91447636 5583 * Scan the run queues for threads which
39037602
A
5584 * may need to be updated, and find the earliest runnable thread on the runqueue
5585 * to report its latency.
1c79356b 5586 */
3e170ce0
A
5587 SCHED(thread_update_scan)(&scan_context);
5588
5ba3f43e 5589 SCHED(rt_runq_scan)(&scan_context);
3e170ce0
A
5590
5591 uint64_t ctime = mach_absolute_time();
5592
39037602 5593 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
0a7de745 5594 ctime - scan_context.earliest_bg_make_runnable_time : 0;
39037602
A
5595
5596 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
0a7de745 5597 ctime - scan_context.earliest_normal_make_runnable_time : 0;
39037602
A
5598
5599 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
0a7de745 5600 ctime - scan_context.earliest_rt_make_runnable_time : 0;
39037602
A
5601
5602 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
3e170ce0
A
5603
5604 /*
5605 * Check to see if the special sched VM group needs attention.
5606 */
5607 sched_vm_group_maintenance();
fe8ab488 5608
5ba3f43e
A
5609#if __arm__ || __arm64__
5610 /* Check to see if the recommended cores failsafe is active */
5611 sched_recommended_cores_maintenance();
5612#endif /* __arm__ || __arm64__ */
5613
0a7de745 5614
5ba3f43e
A
5615#if DEBUG || DEVELOPMENT
5616#if __x86_64__
5617#include <i386/misc_protos.h>
5618 /* Check for long-duration interrupts */
5619 mp_interrupt_watchdog();
5620#endif /* __x86_64__ */
5621#endif /* DEBUG || DEVELOPMENT */
490019cf 5622
39037602 5623 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
0a7de745
A
5624 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
5625 sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0);
1c79356b 5626
3e170ce0
A
5627 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
5628 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
1c79356b
A
5629 /*NOTREACHED*/
5630}
5631
39236c6e
A
5632static uint64_t sched_maintenance_wakeups;
5633
5634/*
5635 * Determine if the set of routines formerly driven by a maintenance timer
5636 * must be invoked, based on a deadline comparison. Signals the scheduler
5637 * maintenance thread on deadline expiration. Must be invoked at an interval
5638 * lower than the "sched_tick_interval", currently accomplished by
5639 * invocation via the quantum expiration timer and at context switch time.
5640 * Performance matters: this routine reuses a timestamp approximating the
5641 * current absolute time received from the caller, and should perform
5642 * no more than a comparison against the deadline in the common case.
5643 */
5644void
0a7de745
A
5645sched_timeshare_consider_maintenance(uint64_t ctime)
5646{
d9a64523
A
5647 cpu_quiescent_counter_checkin(ctime);
5648
5649 uint64_t deadline = sched_maintenance_deadline;
39236c6e
A
5650
5651 if (__improbable(ctime >= deadline)) {
0a7de745 5652 if (__improbable(current_thread() == sched_maintenance_thread)) {
39236c6e 5653 return;
0a7de745 5654 }
39236c6e
A
5655 OSMemoryBarrier();
5656
d9a64523 5657 uint64_t ndeadline = ctime + sched_tick_interval;
39236c6e 5658
cb323159 5659 if (__probable(os_atomic_cmpxchg(&sched_maintenance_deadline, deadline, ndeadline, seq_cst))) {
3e170ce0 5660 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
39236c6e
A
5661 sched_maintenance_wakeups++;
5662 }
5663 }
5ba3f43e 5664
cb323159
A
5665#if !CONFIG_SCHED_CLUTCH
5666 /*
5667 * Only non-clutch schedulers use the global load calculation EWMA algorithm. For clutch
5668 * scheduler, the load is maintained at the thread group and bucket level.
5669 */
5670 uint64_t load_compute_deadline = os_atomic_load_wide(&sched_load_compute_deadline, relaxed);
d9a64523
A
5671
5672 if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) {
5673 uint64_t new_deadline = 0;
cb323159 5674 if (os_atomic_cmpxchg(&sched_load_compute_deadline, load_compute_deadline, new_deadline, relaxed)) {
d9a64523
A
5675 compute_sched_load();
5676 new_deadline = ctime + sched_load_compute_interval_abs;
cb323159 5677 os_atomic_store_wide(&sched_load_compute_deadline, new_deadline, relaxed);
d9a64523
A
5678 }
5679 }
cb323159 5680#endif /* CONFIG_SCHED_CLUTCH */
d9a64523 5681
5ba3f43e 5682#if __arm64__
cb323159 5683 uint64_t perf_deadline = os_atomic_load(&sched_perfcontrol_callback_deadline, relaxed);
5ba3f43e
A
5684
5685 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
5686 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
cb323159 5687 if (os_atomic_cmpxchg(&sched_perfcontrol_callback_deadline, perf_deadline, 0, relaxed)) {
5ba3f43e
A
5688 machine_perfcontrol_deadline_passed(perf_deadline);
5689 }
5690 }
5691#endif /* __arm64__ */
39236c6e
A
5692}
5693
fe8ab488 5694#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 5695
1c79356b 5696void
f427ee49 5697sched_init_thread(void)
1c79356b 5698{
316670eb 5699 thread_block(THREAD_CONTINUE_NULL);
91447636 5700
490019cf
A
5701 thread_t thread = current_thread();
5702
39037602
A
5703 thread_set_thread_name(thread, "sched_maintenance_thread");
5704
490019cf
A
5705 sched_maintenance_thread = thread;
5706
f427ee49 5707 SCHED(maintenance_continuation)();
1c79356b 5708
1c79356b
A
5709 /*NOTREACHED*/
5710}
5711
fe8ab488 5712#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 5713
1c79356b 5714/*
91447636 5715 * thread_update_scan / runq_scan:
55e303ae 5716 *
0a7de745 5717 * Scan the run queues to account for timesharing threads
91447636 5718 * which need to be updated.
1c79356b
A
5719 *
5720 * Scanner runs in two passes. Pass one squirrels likely
91447636 5721 * threads away in an array, pass two does the update.
1c79356b 5722 *
91447636
A
5723 * This is necessary because the run queue is locked for
5724 * the candidate scan, but the thread is locked for the update.
1c79356b 5725 *
91447636
A
5726 * Array should be sized to make forward progress, without
5727 * disabling preemption for long periods.
1c79356b 5728 */
55e303ae 5729
0a7de745 5730#define THREAD_UPDATE_SIZE 128
55e303ae 5731
39037602
A
5732static thread_t thread_update_array[THREAD_UPDATE_SIZE];
5733static uint32_t thread_update_count = 0;
1c79356b 5734
fe8ab488
A
5735/* Returns TRUE if thread was added, FALSE if thread_update_array is full */
5736boolean_t
5737thread_update_add_thread(thread_t thread)
5738{
0a7de745
A
5739 if (thread_update_count == THREAD_UPDATE_SIZE) {
5740 return FALSE;
5741 }
fe8ab488
A
5742
5743 thread_update_array[thread_update_count++] = thread;
5744 thread_reference_internal(thread);
0a7de745 5745 return TRUE;
fe8ab488
A
5746}
5747
5748void
5749thread_update_process_threads(void)
5750{
39037602 5751 assert(thread_update_count <= THREAD_UPDATE_SIZE);
fe8ab488 5752
0a7de745 5753 for (uint32_t i = 0; i < thread_update_count; i++) {
39037602
A
5754 thread_t thread = thread_update_array[i];
5755 assert_thread_magic(thread);
5756 thread_update_array[i] = THREAD_NULL;
5757
5758 spl_t s = splsched();
fe8ab488 5759 thread_lock(thread);
39037602 5760 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
fe8ab488
A
5761 SCHED(update_priority)(thread);
5762 }
5763 thread_unlock(thread);
5764 splx(s);
5765
5766 thread_deallocate(thread);
5767 }
39037602
A
5768
5769 thread_update_count = 0;
fe8ab488
A
5770}
5771
f427ee49
A
5772static boolean_t
5773runq_scan_thread(
5774 thread_t thread,
5775 sched_update_scan_context_t scan_context)
5776{
5777 assert_thread_magic(thread);
5778
5779 if (thread->sched_stamp != sched_tick &&
5780 thread->sched_mode == TH_MODE_TIMESHARE) {
5781 if (thread_update_add_thread(thread) == FALSE) {
5782 return TRUE;
5783 }
5784 }
5785
5786 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5787 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5788 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5789 }
5790 } else {
5791 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5792 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5793 }
5794 }
5795
5796 return FALSE;
5797}
5798
1c79356b 5799/*
91447636
A
5800 * Scan a runq for candidate threads.
5801 *
5802 * Returns TRUE if retry is needed.
1c79356b 5803 */
fe8ab488 5804boolean_t
91447636 5805runq_scan(
0a7de745
A
5806 run_queue_t runq,
5807 sched_update_scan_context_t scan_context)
1c79356b 5808{
39037602
A
5809 int count = runq->count;
5810 int queue_index;
1c79356b 5811
39037602
A
5812 assert(count >= 0);
5813
0a7de745 5814 if (count == 0) {
39037602 5815 return FALSE;
0a7de745 5816 }
39037602
A
5817
5818 for (queue_index = bitmap_first(runq->bitmap, NRQS);
0a7de745
A
5819 queue_index >= 0;
5820 queue_index = bitmap_next(runq->bitmap, queue_index)) {
39037602 5821 thread_t thread;
cb323159 5822 circle_queue_t queue = &runq->queues[queue_index];
3e170ce0 5823
cb323159 5824 cqe_foreach_element(thread, queue, runq_links) {
39037602 5825 assert(count > 0);
f427ee49
A
5826 if (runq_scan_thread(thread, scan_context) == TRUE) {
5827 return TRUE;
39037602
A
5828 }
5829 count--;
1c79356b
A
5830 }
5831 }
1c79356b 5832
39037602 5833 return FALSE;
1c79356b
A
5834}
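/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two-pass shape of a per-scheduler thread_update_scan() driver
 * described in the thread_update_scan / runq_scan comment above.  Real
 * implementations typically hold the run-queue (pset) lock around
 * runq_scan() and drop it before pass two; that locking is omitted here,
 * so treat this as schematic.
 */
#if 0 /* example only */
static void
example_thread_update_scan(run_queue_t runq,
    sched_update_scan_context_t scan_context)
{
	boolean_t restart_needed;

	do {
		/* Pass one: collect candidate threads.  Returns TRUE if the
		 * thread_update_array filled up and a re-scan is needed. */
		restart_needed = runq_scan(runq, scan_context);

		/* Pass two: update the collected threads under the thread lock. */
		thread_update_process_threads();
	} while (restart_needed);
}
#endif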
5835
f427ee49
A
5836#if CONFIG_SCHED_CLUTCH
5837
5838boolean_t
5839sched_clutch_timeshare_scan(
5840 queue_t thread_queue,
5841 uint16_t thread_count,
5842 sched_update_scan_context_t scan_context)
5843{
5844 if (thread_count == 0) {
5845 return FALSE;
5846 }
5847
5848 thread_t thread;
5849 qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) {
5850 if (runq_scan_thread(thread, scan_context) == TRUE) {
5851 return TRUE;
5852 }
5853 thread_count--;
5854 }
5855
5856 assert(thread_count == 0);
5857 return FALSE;
5858}
5859
5860
5861#endif /* CONFIG_SCHED_CLUTCH */
5862
fe8ab488
A
5863#endif /* CONFIG_SCHED_TIMESHARE_CORE */
5864
2a1bd2d3
A
5865bool
5866thread_is_eager_preempt(thread_t thread)
6d2010ae 5867{
2a1bd2d3 5868 return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
6d2010ae
A
5869}
5870
5871void
0a7de745 5872thread_set_eager_preempt(thread_t thread)
6d2010ae 5873{
2a1bd2d3
A
5874 spl_t s = splsched();
5875 thread_lock(thread);
6d2010ae 5876
2a1bd2d3 5877 assert(!thread_is_eager_preempt(thread));
6d2010ae 5878
6d2010ae
A
5879 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5880
5881 if (thread == current_thread()) {
2a1bd2d3
A
5882 /* csw_check updates current_is_eagerpreempt on the processor */
5883 ast_t ast = csw_check(thread, current_processor(), AST_NONE);
5884
fe8ab488 5885 thread_unlock(thread);
2a1bd2d3 5886
6d2010ae 5887 if (ast != AST_NONE) {
2a1bd2d3 5888 thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
6d2010ae
A
5889 }
5890 } else {
2a1bd2d3 5891 processor_t last_processor = thread->last_processor;
6d2010ae 5892
2a1bd2d3
A
5893 if (last_processor != PROCESSOR_NULL &&
5894 last_processor->state == PROCESSOR_RUNNING &&
5895 last_processor->active_thread == thread) {
5896 cause_ast_check(last_processor);
6d2010ae 5897 }
0a7de745 5898
6d2010ae
A
5899 thread_unlock(thread);
5900 }
5901
2a1bd2d3 5902 splx(s);
6d2010ae
A
5903}
5904
5905void
0a7de745 5906thread_clear_eager_preempt(thread_t thread)
6d2010ae 5907{
2a1bd2d3 5908 spl_t s = splsched();
6d2010ae
A
5909 thread_lock(thread);
5910
2a1bd2d3
A
5911 assert(thread_is_eager_preempt(thread));
5912
6d2010ae 5913 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
0a7de745 5914
2a1bd2d3
A
5915 if (thread == current_thread()) {
5916 current_processor()->current_is_eagerpreempt = false;
5917 }
5918
6d2010ae 5919 thread_unlock(thread);
2a1bd2d3 5920 splx(s);
6d2010ae 5921}
3e170ce0 5922
6d2010ae
A
5923/*
5924 * Scheduling statistics
5925 */
5926void
5927sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5928{
f427ee49 5929 struct sched_statistics *stats;
6d2010ae 5930 boolean_t to_realtime = FALSE;
0a7de745 5931
f427ee49 5932 stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor);
6d2010ae
A
5933 stats->csw_count++;
5934
5935 if (otherpri >= BASEPRI_REALTIME) {
5936 stats->rt_sched_count++;
5937 to_realtime = TRUE;
5938 }
5939
5940 if ((reasons & AST_PREEMPT) != 0) {
5941 stats->preempt_count++;
5942
5943 if (selfpri >= BASEPRI_REALTIME) {
5944 stats->preempted_rt_count++;
0a7de745 5945 }
6d2010ae
A
5946
5947 if (to_realtime) {
5948 stats->preempted_by_rt_count++;
5949 }
6d2010ae
A
5950 }
5951}
5952
5953void
0a7de745 5954sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
6d2010ae
A
5955{
5956 uint64_t timestamp = mach_absolute_time();
5957
5958 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5959 stats->last_change_timestamp = timestamp;
5960}
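/*
 * Worked example (editor's note, not part of the original file): count_sum
 * integrates run-queue depth over time, so the mean depth over an interval
 * [t0, t1] is (count_sum(t1) - count_sum(t0)) / (t1 - t0).  A queue holding
 * 3 threads for 4 time units and then 1 thread for 2 time units accumulates
 * 3*4 + 1*2 = 14, for an average depth of about 2.33 over those 6 units.
 */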
5961
1c79356b 5962/*
6d2010ae 5963 * For calls from assembly code
1c79356b 5964 */
6d2010ae 5965#undef thread_wakeup
1c79356b
A
5966void
5967thread_wakeup(
0a7de745 5968 event_t x);
1c79356b
A
5969
5970void
5971thread_wakeup(
0a7de745 5972 event_t x)
1c79356b 5973{
0a7de745 5974 thread_wakeup_with_result(x, THREAD_AWAKENED);
1c79356b
A
5975}
5976
91447636
A
5977boolean_t
5978preemption_enabled(void)
5979{
0a7de745 5980 return get_preemption_level() == 0 && ml_get_interrupts_enabled();
91447636 5981}
9bccf70c 5982
4b17d6b6 5983static void
0a7de745
A
5984sched_timer_deadline_tracking_init(void)
5985{
4b17d6b6
A
5986 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5987 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5988}
3e170ce0 5989
5ba3f43e 5990#if __arm__ || __arm64__
3e170ce0 5991
5ba3f43e
A
5992uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5993uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
d9a64523
A
5994bool perfcontrol_failsafe_active = false;
5995bool perfcontrol_sleep_override = false;
5ba3f43e
A
5996
5997uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5998uint64_t perfcontrol_failsafe_activation_time;
5999uint64_t perfcontrol_failsafe_deactivation_time;
6000
6001/* data covering who likely caused it and how long they ran */
6002#define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
6003char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
6004int perfcontrol_failsafe_pid;
6005uint64_t perfcontrol_failsafe_tid;
6006uint64_t perfcontrol_failsafe_thread_timer_at_start;
6007uint64_t perfcontrol_failsafe_thread_timer_last_seen;
6008uint32_t perfcontrol_failsafe_recommended_at_trigger;
6009
6010/*
6011 * Perf controller calls here to update the recommended core bitmask.
6012 * If the failsafe is active, we don't immediately apply the new value.
6013 * Instead, we store the new request and use it after the failsafe deactivates.
6014 *
6015 * If the failsafe is not active, immediately apply the update.
6016 *
6017 * No scheduler locks are held, no other locks are held that scheduler might depend on,
6018 * interrupts are enabled
6019 *
6020 * currently prototype is in osfmk/arm/machine_routines.h
6021 */
6022void
6023sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
3e170ce0 6024{
5ba3f43e
A
6025 assert(preemption_enabled());
6026
6027 spl_t s = splsched();
0a7de745 6028 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
3e170ce0 6029
5ba3f43e
A
6030 perfcontrol_requested_recommended_cores = recommended_cores;
6031 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
6032
0a7de745
A
6033 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
6034 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
6035 } else {
5ba3f43e 6036 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
6037 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
6038 perfcontrol_requested_recommended_cores,
6039 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
6040 }
5ba3f43e
A
6041
6042 simple_unlock(&sched_recommended_cores_lock);
6043 splx(s);
6044}
6045
d9a64523
A
6046void
6047sched_override_recommended_cores_for_sleep(void)
6048{
6049 spl_t s = splsched();
0a7de745 6050 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
d9a64523
A
6051
6052 if (perfcontrol_sleep_override == false) {
6053 perfcontrol_sleep_override = true;
6054 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
6055 }
6056
6057 simple_unlock(&sched_recommended_cores_lock);
6058 splx(s);
6059}
6060
6061void
6062sched_restore_recommended_cores_after_sleep(void)
6063{
6064 spl_t s = splsched();
0a7de745 6065 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
d9a64523
A
6066
6067 if (perfcontrol_sleep_override == true) {
6068 perfcontrol_sleep_override = false;
0a7de745 6069 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
d9a64523
A
6070 }
6071
6072 simple_unlock(&sched_recommended_cores_lock);
6073 splx(s);
6074}
6075
5ba3f43e
A
6076/*
6077 * Consider whether we need to activate the recommended cores failsafe
6078 *
6079 * Called from quantum timer interrupt context of a realtime thread
6080 * No scheduler locks are held, interrupts are disabled
6081 */
6082void
6083sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
6084{
6085 /*
6086 * Check if a realtime thread is starving the system
6087 * and bringing up non-recommended cores would help
6088 *
6089 * TODO: Is this the correct check for recommended == possible cores?
6090 * TODO: Validate the checks without the relevant lock are OK.
6091 */
6092
6093 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
6094 /* keep track of how long the responsible thread runs */
6095
0a7de745 6096 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5ba3f43e
A
6097
6098 if (perfcontrol_failsafe_active == TRUE &&
6099 cur_thread->thread_id == perfcontrol_failsafe_tid) {
6100 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
0a7de745 6101 timer_grab(&cur_thread->system_timer);
5ba3f43e
A
6102 }
6103
6104 simple_unlock(&sched_recommended_cores_lock);
6105
6106 /* we're already trying to solve the problem, so bail */
6107 return;
3e170ce0
A
6108 }
6109
5ba3f43e 6110 /* The failsafe won't help if there are no more processors to enable */
0a7de745 6111 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) {
5ba3f43e 6112 return;
0a7de745 6113 }
3e170ce0 6114
5ba3f43e
A
6115 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
6116
6117 /* Use the maintenance thread as our canary in the coal mine */
6118 thread_t m_thread = sched_maintenance_thread;
6119
6120 /* If it doesn't look bad, nothing to see here */
0a7de745 6121 if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) {
5ba3f43e 6122 return;
0a7de745 6123 }
5ba3f43e
A
6124
6125 /* It looks bad, take the lock to be sure */
6126 thread_lock(m_thread);
6127
6128 if (m_thread->runq == PROCESSOR_NULL ||
0a7de745 6129 (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN ||
5ba3f43e
A
6130 m_thread->last_made_runnable_time >= too_long_ago) {
6131 /*
6132 * Maintenance thread is either on cpu or blocked, and
6133 * therefore wouldn't benefit from more cores
6134 */
6135 thread_unlock(m_thread);
6136 return;
3e170ce0 6137 }
3e170ce0 6138
5ba3f43e
A
6139 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
6140
6141 thread_unlock(m_thread);
6142
6143 /*
6144 * There are cores disabled at perfcontrol's recommendation, but the
6145 * system is so overloaded that the maintenance thread can't run.
6146 * That likely means that perfcontrol can't run either, so it can't fix
6147 * the recommendation. We have to kick in a failsafe to keep from starving.
6148 *
6149 * When the maintenance thread has been starved for too long,
6150 * ignore the recommendation from perfcontrol and light up all the cores.
6151 *
6152 * TODO: Consider weird states like boot, sleep, or debugger
6153 */
6154
0a7de745 6155 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5ba3f43e
A
6156
6157 if (perfcontrol_failsafe_active == TRUE) {
6158 simple_unlock(&sched_recommended_cores_lock);
6159 return;
6160 }
6161
6162 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
6163 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
6164 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
5ba3f43e
A
6165
6166 perfcontrol_failsafe_active = TRUE;
6167 perfcontrol_failsafe_activation_time = mach_absolute_time();
6168 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
6169 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
6170
6171 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
6172 task_t task = cur_thread->task;
6173 perfcontrol_failsafe_pid = task_pid(task);
6174 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
6175
6176 perfcontrol_failsafe_tid = cur_thread->thread_id;
6177
6178 /* Blame the thread for time it has run recently */
6179 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
6180
6181 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
6182
6183 /* Compute the start time of the bad behavior in terms of the thread's on core time */
6184 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
6185 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
6186
6187 /* Ignore the previously recommended core configuration */
6188 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
6189
6190 simple_unlock(&sched_recommended_cores_lock);
6191}
6192
6193/*
6194 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
6195 *
6196 * Runs in the context of the maintenance thread, no locks held
6197 */
6198static void
6199sched_recommended_cores_maintenance(void)
6200{
6201 /* Common case - no failsafe, nothing to be done here */
0a7de745 6202 if (__probable(perfcontrol_failsafe_active == FALSE)) {
5ba3f43e 6203 return;
0a7de745 6204 }
5ba3f43e
A
6205
6206 uint64_t ctime = mach_absolute_time();
6207
6208 boolean_t print_diagnostic = FALSE;
6209 char p_name[FAILSAFE_NAME_LEN] = "";
6210
6211 spl_t s = splsched();
0a7de745 6212 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
5ba3f43e
A
6213
6214 /* Check again, under the lock, to avoid races */
0a7de745 6215 if (perfcontrol_failsafe_active == FALSE) {
5ba3f43e 6216 goto out;
0a7de745 6217 }
5ba3f43e
A
6218
6219 /*
6220	 * Ensure that the other cores get another few ticks to run some threads.
6221	 * If we don't have this hysteresis, the maintenance thread is the first
6222	 * to run, and then it immediately kills the other cores.
6223 */
0a7de745 6224 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) {
5ba3f43e 6225 goto out;
0a7de745 6226 }
5ba3f43e
A
6227
6228 /* Capture some diagnostic state under the lock so we can print it out later */
6229
6230 int pid = perfcontrol_failsafe_pid;
6231 uint64_t tid = perfcontrol_failsafe_tid;
6232
6233 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
0a7de745 6234 perfcontrol_failsafe_thread_timer_at_start;
5ba3f43e
A
6235 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
6236 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
6237 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
6238 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
6239
6240 print_diagnostic = TRUE;
6241
6242 /* Deactivate the failsafe and reinstate the requested recommendation settings */
6243
6244 perfcontrol_failsafe_deactivation_time = ctime;
6245 perfcontrol_failsafe_active = FALSE;
6246
6247 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
0a7de745
A
6248 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
6249 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
5ba3f43e 6250
0a7de745 6251 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
5ba3f43e
A
6252
6253out:
6254 simple_unlock(&sched_recommended_cores_lock);
3e170ce0
A
6255 splx(s);
6256
5ba3f43e
A
6257 if (print_diagnostic) {
6258 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
6259
6260 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
6261 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
6262
6263 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
6264 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
6265
6266 printf("recommended core failsafe kicked in for %lld ms "
0a7de745
A
6267 "likely due to %s[%d] thread 0x%llx spending "
6268 "%lld ms on cpu at realtime priority - "
6269 "new recommendation: 0x%x -> 0x%x\n",
6270 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
6271 rec_cores_before, rec_cores_after);
6272 }
6273}
6274
6275#endif /* __arm__ || __arm64__ */
6276
6277kern_return_t
6278sched_processor_enable(processor_t processor, boolean_t enable)
6279{
6280 assert(preemption_enabled());
6281
6282 spl_t s = splsched();
6283 simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL);
6284
6285 if (enable) {
6286 bit_set(usercontrol_requested_recommended_cores, processor->cpu_id);
6287 } else {
6288 bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id);
5ba3f43e 6289 }
0a7de745
A
6290
6291#if __arm__ || __arm64__
6292 if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) {
6293 sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores);
6294 } else {
6295 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6296 MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
6297 perfcontrol_requested_recommended_cores,
6298 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
6299 }
6300#else /* __arm__ || __arm64__ */
6301 sched_update_recommended_cores(usercontrol_requested_recommended_cores);
6302#endif /* !__arm__ || __arm64__ */
6303
6304 simple_unlock(&sched_recommended_cores_lock);
6305 splx(s);
6306
6307 return KERN_SUCCESS;
5ba3f43e
A
6308}
6309
0a7de745 6310
5ba3f43e
A
6311/*
6312 * Apply a new recommended cores mask to the processors it affects
6313 * Runs after considering failsafes and such
6314 *
6315 * Iterate over processors and update their ->is_recommended field.
6316 * If a processor is running, we let it drain out at its next
6317 * quantum expiration or blocking point. If a processor is idle, there
6318 * may be more work for it to do, so IPI it.
6319 *
6320 * interrupts disabled, sched_recommended_cores_lock is held
6321 */
6322static void
0a7de745 6323sched_update_recommended_cores(uint64_t recommended_cores)
5ba3f43e
A
6324{
6325 processor_set_t pset, nset;
6326 processor_t processor;
6327 uint64_t needs_exit_idle_mask = 0x0;
0a7de745 6328 uint32_t avail_count;
5ba3f43e
A
6329
6330 processor = processor_list;
6331 pset = processor->processor_set;
6332
0a7de745
A
6333 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
6334 recommended_cores,
6335#if __arm__ || __arm64__
6336 perfcontrol_failsafe_active, 0, 0);
6337#else /* __arm__ || __arm64__ */
6338 0, 0, 0);
6339#endif /* ! __arm__ || __arm64__ */
5ba3f43e 6340
0a7de745 6341 if (__builtin_popcountll(recommended_cores) == 0) {
d9a64523 6342 bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */
5ba3f43e
A
6343 }
6344
6345 /* First set recommended cores */
6346 pset_lock(pset);
0a7de745 6347 avail_count = 0;
5ba3f43e 6348 do {
5ba3f43e
A
6349 nset = processor->processor_set;
6350 if (nset != pset) {
6351 pset_unlock(pset);
6352 pset = nset;
6353 pset_lock(pset);
6354 }
6355
d9a64523 6356 if (bit_test(recommended_cores, processor->cpu_id)) {
5ba3f43e 6357 processor->is_recommended = TRUE;
d9a64523 6358 bit_set(pset->recommended_bitmask, processor->cpu_id);
5ba3f43e
A
6359
6360 if (processor->state == PROCESSOR_IDLE) {
5ba3f43e 6361 if (processor != current_processor()) {
d9a64523 6362 bit_set(needs_exit_idle_mask, processor->cpu_id);
5ba3f43e
A
6363 }
6364 }
0a7de745
A
6365 if (processor->state != PROCESSOR_OFF_LINE) {
6366 avail_count++;
cb323159
A
6367 SCHED(pset_made_schedulable)(processor, pset, false);
6368 }
5ba3f43e
A
6369 }
6370 } while ((processor = processor->processor_list) != NULL);
6371 pset_unlock(pset);
6372
6373 /* Now shut down the non-recommended cores */
6374 processor = processor_list;
6375 pset = processor->processor_set;
6376
6377 pset_lock(pset);
6378 do {
5ba3f43e
A
6379 nset = processor->processor_set;
6380 if (nset != pset) {
6381 pset_unlock(pset);
6382 pset = nset;
6383 pset_lock(pset);
6384 }
6385
d9a64523
A
6386 if (!bit_test(recommended_cores, processor->cpu_id)) {
6387 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
6388
5ba3f43e 6389 processor->is_recommended = FALSE;
d9a64523
A
6390 bit_clear(pset->recommended_bitmask, processor->cpu_id);
6391
6392 if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) {
6393 ipi_type = SCHED_IPI_IMMEDIATE;
5ba3f43e
A
6394 }
6395 SCHED(processor_queue_shutdown)(processor);
6396 /* pset unlocked */
6397
6398 SCHED(rt_queue_shutdown)(processor);
6399
d9a64523
A
6400 if (ipi_type != SCHED_IPI_NONE) {
6401 if (processor == current_processor()) {
6402 ast_on(AST_PREEMPT);
6403 } else {
6404 sched_ipi_perform(processor, ipi_type);
6405 }
6406 }
6407
5ba3f43e
A
6408 pset_lock(pset);
6409 }
6410 } while ((processor = processor->processor_list) != NULL);
0a7de745
A
6411
6412 processor_avail_count_user = avail_count;
6413#if defined(__x86_64__)
6414 commpage_update_active_cpus();
6415#endif
6416
5ba3f43e
A
6417 pset_unlock(pset);
6418
6419 /* Issue all pending IPIs now that the pset lock has been dropped */
6420 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
6421 processor = processor_array[cpuid];
6422 machine_signal_idle(processor);
6423 }
6424
0a7de745
A
6425 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
6426 needs_exit_idle_mask, 0, 0, 0);
3e170ce0
A
6427}
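/*
 * Note the deferred-wakeup pattern above: CPUs that need to exit idle are
 * collected in needs_exit_idle_mask while pset locks are held, and
 * machine_signal_idle() is only issued after the last pset lock is dropped.
 * Below is a small portable sketch of that collect-then-signal idiom, using
 * __builtin_ctzll() in place of lsb_first()/lsb_next(); wake_cpu() is a
 * hypothetical callback, not a kernel interface.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>

static void
signal_deferred(uint64_t needs_wakeup_mask, void (*wake_cpu)(int cpuid))
{
	/* Called only after the lock protecting the mask has been dropped. */
	while (needs_wakeup_mask != 0) {
		int cpuid = __builtin_ctzll(needs_wakeup_mask); /* lowest set bit */
		needs_wakeup_mask &= needs_wakeup_mask - 1;     /* clear that bit */
		wake_cpu(cpuid);
	}
}
#endif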
6428
0a7de745
A
6429void
6430thread_set_options(uint32_t thopt)
6431{
6432 spl_t x;
6433 thread_t t = current_thread();
6434
6435 x = splsched();
6436 thread_lock(t);
6437
6438 t->options |= thopt;
6439
6440 thread_unlock(t);
6441 splx(x);
3e170ce0 6442}
813fb2f6 6443
0a7de745
A
6444void
6445thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint)
6446{
813fb2f6
A
6447 thread->pending_block_hint = block_hint;
6448}
5ba3f43e 6449
0a7de745
A
6450uint32_t
6451qos_max_parallelism(int qos, uint64_t options)
5ba3f43e 6452{
0a7de745 6453 return SCHED(qos_max_parallelism)(qos, options);
5ba3f43e
A
6454}
6455
0a7de745
A
6456uint32_t
6457sched_qos_max_parallelism(__unused int qos, uint64_t options)
5ba3f43e 6458{
0a7de745
A
6459 host_basic_info_data_t hinfo;
6460 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
6461 /* Query the machine layer for core information */
6462 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
6463 (host_info_t)&hinfo, &count);
6464 assert(kret == KERN_SUCCESS);
6465
0a7de745
A
6466 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
6467 return hinfo.logical_cpu;
6468 } else {
6469 return hinfo.physical_cpu;
6470 }
6471}
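/*
 * The HOST_BASIC_INFO flavor used above is also available to userspace via
 * the same host_info() call. A minimal userspace sketch (illustration only,
 * not part of the kernel build):
 */
#if 0 /* illustrative userspace sketch, not compiled */
#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
	    (host_info_t)&hinfo, &count);
	if (kr != KERN_SUCCESS) {
		return 1;
	}
	printf("physical CPUs: %d, logical CPUs: %d\n",
	    hinfo.physical_cpu, hinfo.logical_cpu);
	return 0;
}
#endif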
5ba3f43e 6472
0a7de745
A
6473int sched_allow_NO_SMT_threads = 1;
6474bool
6475thread_no_smt(thread_t thread)
6476{
0a7de745 6477 return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT));
0a7de745 6478}
5ba3f43e 6479
0a7de745
A
6480bool
6481processor_active_thread_no_smt(processor_t processor)
6482{
6483 return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT;
5ba3f43e
A
6484}
6485
6486#if __arm64__
6487
6488/*
6489 * Set up a new timer, or replace the old timer with a new one.
6490 *
6491 * Returns true if an old timer was cancelled, false if it was not.
6492 */
6493boolean_t
6494sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
6495{
6496 /*
6497 * Exchange the old deadline for the new deadline; if the old deadline was
6498 * nonzero, the pending callback was cancelled, otherwise it was not.
6499 */
6500
cb323159
A
6501 return os_atomic_xchg(&sched_perfcontrol_callback_deadline, new_deadline,
6502 relaxed) != 0;
5ba3f43e
A
6503}
6504
6505#endif /* __arm64__ */
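/*
 * The exchange above doubles as the cancellation check: a nonzero previous
 * deadline means a callback was still pending and has now been replaced.
 * A C11 sketch of the same exchange-to-detect idiom follows (illustration
 * only; this uses plain stdatomic, not the kernel's os_atomic implementation).
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t callback_deadline; /* 0 means "no callback armed" */

/* Returns true if an already-armed deadline was replaced (i.e. cancelled). */
static bool
update_deadline(uint64_t new_deadline)
{
	uint64_t old = atomic_exchange_explicit(&callback_deadline,
	    new_deadline, memory_order_relaxed);
	return old != 0;
}
#endif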
6506
f427ee49
A
6507#if CONFIG_SCHED_EDGE
6508
6509#define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u
6510
6511/*
6512 * sched_edge_pset_running_higher_bucket()
6513 *
6514 * Routine to calculate cumulative running counts for each scheduling
6515 * bucket. This effectively lets the load calculation determine whether a
6516 * cluster is running any threads at a QoS lower than the thread being
6517 * migrated.
6518 */
6519
6520static void
6521sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher)
6522{
6523 bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING];
6524
6525 /* Edge Scheduler Optimization */
6526 for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) {
6527 sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed);
6528 for (sched_bucket_t bucket = cpu_bucket; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
6529 running_higher[bucket]++;
6530 }
6531 }
6532}
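/*
 * The loop above produces a cumulative histogram: running_higher[b] ends up
 * counting how many CPUs are running at bucket b or at any higher-priority
 * (numerically lower) bucket. A standalone sketch of that accumulation, with
 * made-up bucket values purely for illustration:
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>
#include <stdint.h>

#define NBUCKETS 4 /* stand-in for TH_BUCKET_SCHED_MAX */

int
main(void)
{
	/* Bucket currently running on each of four CPUs (0 = highest priority). */
	int cpu_running_bucket[4] = { 0, 2, 2, 3 };
	uint32_t running_higher[NBUCKETS] = { 0 };

	for (int cpu = 0; cpu < 4; cpu++) {
		for (int b = cpu_running_bucket[cpu]; b < NBUCKETS; b++) {
			running_higher[b]++;
		}
	}

	/* Yields { 1, 1, 3, 4 }: e.g. three CPUs run at bucket 2 or higher. */
	for (int b = 0; b < NBUCKETS; b++) {
		printf("bucket %d: %u CPUs\n", b, running_higher[b]);
	}
	return 0;
}
#endif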
6533
6534/*
6535 * sched_update_pset_load_average()
6536 *
6537 * Updates the load average for each sched bucket for a cluster.
6538 * This routine must be called with the pset lock held.
6539 */
5ba3f43e 6540void
f427ee49 6541sched_update_pset_load_average(processor_set_t pset, uint64_t curtime)
5ba3f43e 6542{
f427ee49
A
6543 if (pset->online_processor_count == 0) {
6544 /* Looks like the pset is not runnable any more; nothing to do here */
6545 return;
6546 }
6547
6548 /*
6549 * Edge Scheduler Optimization
6550 *
6551 * See if more callers of this routine can pass in timestamps to avoid the
6552 * mach_absolute_time() call here.
6553 */
6554
6555 if (!curtime) {
6556 curtime = mach_absolute_time();
6557 }
6558 uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
6559 int64_t delta_ticks = curtime - last_update;
6560 if (delta_ticks < 0) {
6561 return;
6562 }
6563
6564 uint64_t delta_nsecs = 0;
6565 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
6566
6567 if (__improbable(delta_nsecs > UINT32_MAX)) {
6568 delta_nsecs = UINT32_MAX;
6569 }
6570
6571 uint32_t running_higher[TH_BUCKET_SCHED_MAX] = {0};
6572 sched_edge_pset_running_higher_bucket(pset, running_higher);
6573
6574 for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
6575 uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
6576 uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
6577 uint32_t current_runq_depth = (sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) + rt_runq_count(pset) + running_higher[sched_bucket]) / pset->online_processor_count;
6578
6579 /*
6580 * For the new load average, multiply current_runq_depth by delta_nsecs (which results in a 32.0 value).
6581 * Since we want to maintain the load average as a 24.8 fixed-point value for precision, the
6582 * new load average needs to be shifted before it can be added to the old load average.
6583 */
6584 uint64_t new_load_average_factor = (current_runq_depth * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
6585
6586 /*
6587 * For extremely parallel workloads, it is important that the load average on a cluster moves from zero to non-zero
6588 * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
6589 * when the system is already loaded; otherwise for an idle system use the latest load average immediately.
6590 */
6591 int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
6592 boolean_t load_uptick = (old_load_shifted == 0) && (current_runq_depth != 0);
6593 boolean_t load_downtick = (old_load_shifted != 0) && (current_runq_depth == 0);
6594 uint64_t load_average;
6595 if (load_uptick || load_downtick) {
6596 load_average = (current_runq_depth << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
6597 } else {
6598 /* Indicates a loaded system; use EWMA for load average calculation */
6599 load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
6600 }
6601 os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
6602 KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
6603 }
6604 os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
6605}
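/*
 * The update above is a fixed-point EWMA with a 10ms time constant:
 * new = (old * TC + depth * delta) / (delta + TC), where depth * delta is
 * shifted into the fractional format before mixing, and the EWMA is bypassed
 * on zero <-> non-zero transitions. The standalone sketch below mirrors that
 * arithmetic; it assumes 8 fraction bits (the 24.8 format the comments
 * describe), since SCHED_PSET_LOAD_EWMA_FRACTION_BITS is defined elsewhere.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>
#include <stdint.h>

#define EWMA_TC_NSECS      10000000ULL /* mirrors SCHED_PSET_LOAD_EWMA_TC_NSECS */
#define EWMA_FRACTION_BITS 8           /* assumed 24.8 fixed point */

/* old_avg is 24.8 fixed point; returns the new 24.8 average. */
static uint64_t
ewma_update(uint64_t old_avg, uint32_t runq_depth, uint64_t delta_nsecs)
{
	uint64_t old_factor = old_avg * EWMA_TC_NSECS;
	uint64_t new_factor = ((uint64_t)runq_depth * delta_nsecs) << EWMA_FRACTION_BITS;
	return (old_factor + new_factor) / (delta_nsecs + EWMA_TC_NSECS);
}

int
main(void)
{
	uint64_t avg = 2 << EWMA_FRACTION_BITS;  /* start at a load of 2.0 */
	avg = ewma_update(avg, 0, 5000000);      /* 5ms of an empty run queue */
	/* Prints roughly 1.33: 2.0 decayed by TC / (TC + delta) = 10/15. */
	printf("load average: %llu.%03llu\n",
	    (unsigned long long)(avg >> EWMA_FRACTION_BITS),
	    (unsigned long long)((avg & 0xff) * 1000 / 256));
	return 0;
}
#endif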
6606
6607void
6608sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
6609{
6610 pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
6611 uint64_t avg_thread_execution_time = 0;
6612
6613 os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
6614 old_execution_time_packed.pset_execution_time_packed,
6615 new_execution_time_packed.pset_execution_time_packed, relaxed, {
6616 uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
6617 int64_t delta_ticks = curtime - last_update;
6618 if (delta_ticks < 0) {
6619 /*
6620 * It's possible that another CPU came in and updated the pset_execution_time
6621 * before this CPU could do it. Since the average execution time is meant to
6622 * be an approximate measure per cluster, ignore the older update.
6623 */
6624 os_atomic_rmw_loop_give_up(return );
6625 }
6626 uint64_t delta_nsecs = 0;
6627 absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
6628
6629 uint64_t nanotime = 0;
6630 absolutetime_to_nanoseconds(execution_time, &nanotime);
6631 uint64_t execution_time_us = nanotime / NSEC_PER_USEC;
6632
6633 uint64_t old_execution_time = (old_execution_time_packed.pset_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS);
6634 uint64_t new_execution_time = (execution_time_us * delta_nsecs);
6635
6636 avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
6637 new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time;
6638 new_execution_time_packed.pset_execution_time_last_update = curtime;
6639 });
6640 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket);
6641}
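/*
 * The os_atomic_rmw_loop() above retries a read-modify-write of the packed
 * {average, last-update} word and gives up if another CPU has already
 * published a newer sample. The C11 sketch below shows the same
 * compare-and-swap retry pattern; the 32/32 packing used here is a
 * hypothetical simplification, not the kernel's pset_execution_time_t layout.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t packed_state; /* high 32 bits: average, low 32 bits: update stamp */

static bool
try_publish(uint32_t new_avg, uint32_t now)
{
	uint64_t old = atomic_load_explicit(&packed_state, memory_order_relaxed);
	uint64_t desired;
	do {
		uint32_t last_update = (uint32_t)(old & 0xffffffffU);
		if ((int32_t)(now - last_update) < 0) {
			/* Someone else already published a newer sample; give up. */
			return false;
		}
		desired = ((uint64_t)new_avg << 32) | now;
	} while (!atomic_compare_exchange_weak_explicit(&packed_state, &old,
	    desired, memory_order_relaxed, memory_order_relaxed));
	return true;
}
#endif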
6642
6643#else /* CONFIG_SCHED_EDGE */
cb323159 6644
f427ee49
A
6645void
6646sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime)
6647{
6648 int non_rt_load = pset->pset_runq.count;
cb323159 6649 int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
f427ee49 6650 int new_load_average = ((int)pset->load_average + load) >> 1;
5ba3f43e
A
6651
6652 pset->load_average = new_load_average;
5ba3f43e 6653#if (DEVELOPMENT || DEBUG)
c6bf4f31
A
6654#if __AMP__
6655 if (pset->pset_cluster_type == PSET_AMP_P) {
f427ee49 6656 KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset)));
c6bf4f31
A
6657 }
6658#endif
5ba3f43e
A
6659#endif
6660}
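/*
 * The fallback path above is an EWMA with a weight of 1/2: each update moves
 * the average halfway toward the instantaneous load, which is pre-shifted by
 * PSET_LOAD_NUMERATOR_SHIFT to retain fractional precision in an int. The
 * sketch below shows the convergence; the shift value of 16 is assumed for
 * the example only.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>

#define LOAD_SHIFT 16 /* assumed stand-in for PSET_LOAD_NUMERATOR_SHIFT */

int
main(void)
{
	int load_average = 0;
	int instantaneous_load = 3; /* e.g. 3 runnable/running threads */

	for (int i = 0; i < 5; i++) {
		int load = instantaneous_load << LOAD_SHIFT;
		load_average = (load_average + load) >> 1;
		/* Approaches 3.0: 1.50, 2.25, 2.62, 2.81, 2.90 ... */
		printf("update %d: %d.%02d\n", i,
		    load_average >> LOAD_SHIFT,
		    ((load_average & 0xffff) * 100) >> LOAD_SHIFT);
	}
	return 0;
}
#endif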
a39ff7e2 6661
f427ee49
A
6662void
6663sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket)
6664{
6665}
6666#endif /* CONFIG_SCHED_EDGE */
6667
6668/* pset is locked */
6669static bool
6670processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor)
6671{
6672 int cpuid = processor->cpu_id;
6673#if defined(__x86_64__)
6674 if (sched_avoid_cpu0 && (cpuid == 0)) {
6675 return false;
6676 }
6677#endif
6678
6679 cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
6680
6681 return bit_test(fasttrack_map, cpuid);
6682}
6683
a39ff7e2
A
6684/* pset is locked */
6685static processor_t
f427ee49 6686choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries)
a39ff7e2 6687{
0a7de745
A
6688#if defined(__x86_64__)
6689 bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0);
6690#else
6691 const bool avoid_cpu0 = false;
6692#endif
6693
f427ee49
A
6694 cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map;
6695 if (skip_processor) {
6696 bit_clear(cpu_map, skip_processor->cpu_id);
6697 }
6698
6699 cpumap_t primary_map = cpu_map & pset->primary_map;
0a7de745 6700 if (avoid_cpu0) {
f427ee49 6701 primary_map = bit_ror64(primary_map, 1);
0a7de745
A
6702 }
6703
f427ee49
A
6704 int rotid = lsb_first(primary_map);
6705 if (rotid >= 0) {
6706 int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid;
a39ff7e2 6707
a39ff7e2
A
6708 processor_t processor = processor_array[cpuid];
6709
a39ff7e2 6710 return processor;
a39ff7e2
A
6711 }
6712
f427ee49
A
6713 if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
6714 goto out;
a39ff7e2
A
6715 }
6716
6717 /* Consider secondary processors */
f427ee49 6718 cpumap_t secondary_map = cpu_map & ~pset->primary_map;
0a7de745
A
6719 if (avoid_cpu0) {
6720 /* Also avoid cpu1 */
f427ee49 6721 secondary_map = bit_ror64(secondary_map, 2);
0a7de745 6722 }
f427ee49
A
6723 rotid = lsb_first(secondary_map);
6724 if (rotid >= 0) {
0a7de745
A
6725 int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid;
6726
a39ff7e2
A
6727 processor_t processor = processor_array[cpuid];
6728
f427ee49
A
6729 return processor;
6730 }
a39ff7e2 6731
f427ee49
A
6732out:
6733 if (skip_processor) {
6734 return PROCESSOR_NULL;
6735 }
a39ff7e2 6736
f427ee49
A
6737 /*
6738 * If we didn't find an obvious processor to choose, but there are still more CPUs
6739 * not already running realtime threads than realtime threads in the realtime run queue,
6740 * this thread belongs in this pset, so choose some other processor in this pset
6741 * to ensure the thread is enqueued here.
6742 */
6743 cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
6744 if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
6745 cpu_map = non_realtime_map;
6746 assert(cpu_map != 0);
6747 int cpuid = bit_first(cpu_map);
6748 assert(cpuid >= 0);
6749 return processor_array[cpuid];
6750 }
a39ff7e2 6751
f427ee49
A
6752 if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) {
6753 goto skip_secondaries;
6754 }
a39ff7e2 6755
f427ee49
A
6756 non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map;
6757 if (bit_count(non_realtime_map) > rt_runq_count(pset)) {
6758 cpu_map = non_realtime_map;
6759 assert(cpu_map != 0);
6760 int cpuid = bit_first(cpu_map);
6761 assert(cpuid >= 0);
6762 return processor_array[cpuid];
a39ff7e2
A
6763 }
6764
f427ee49 6765skip_secondaries:
a39ff7e2
A
6766 return PROCESSOR_NULL;
6767}
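/*
 * When avoid_cpu0 is set, the chooser above rotates the candidate map right
 * by one (or two when considering secondaries) so that CPU 0 (and CPU 1)
 * sort last in the lsb_first() search, then rotates the winning index back.
 * A standalone sketch of that trick with a portable rotate; the example masks
 * are made up for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t
ror64(uint64_t x, unsigned n) /* stand-in for bit_ror64(), n in 1..63 */
{
	return (x >> n) | (x << (64 - n));
}

/* Prefer the lowest-numbered candidate, falling back to CPU 0 only when it is the sole option. */
static int
choose_cpu_avoiding_cpu0(uint64_t candidate_map)
{
	if (candidate_map == 0) {
		return -1;
	}
	uint64_t rotated = ror64(candidate_map, 1); /* CPU 0 becomes bit 63 */
	int rotid = __builtin_ctzll(rotated);       /* lsb_first() equivalent */
	return (rotid + 1) & 63;                    /* undo the rotation on the index */
}

int
main(void)
{
	printf("%d\n", choose_cpu_avoiding_cpu0(0x5ULL)); /* CPUs {0,2} -> picks 2 */
	printf("%d\n", choose_cpu_avoiding_cpu0(0x1ULL)); /* only CPU 0 -> picks 0 */
	return 0;
}
#endif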
6768
6769/* pset is locked */
6770static bool
6771all_available_primaries_are_running_realtime_threads(processor_set_t pset)
6772{
f427ee49
A
6773 cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map;
6774 return rt_runq_count(pset) > bit_count(cpu_map);
0a7de745
A
6775}
6776
f427ee49 6777#if defined(__x86_64__)
0a7de745
A
6778/* pset is locked */
6779static bool
6780these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map)
6781{
f427ee49
A
6782 cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map;
6783 return rt_runq_count(pset) > bit_count(cpu_map);
a39ff7e2 6784}
f427ee49 6785#endif
d9a64523 6786
0a7de745
A
6787static bool
6788sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor)
6789{
6790 bool ok_to_run_realtime_thread = true;
6791#if defined(__x86_64__)
6792 if (sched_avoid_cpu0 && processor->cpu_id == 0) {
6793 ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1);
6794 } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) {
6795 ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2);
6796 } else if (processor->processor_primary != processor) {
f427ee49 6797 ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset));
0a7de745
A
6798 }
6799#else
6800 (void)pset;
6801 (void)processor;
6802#endif
6803 return ok_to_run_realtime_thread;
6804}
6805
cb323159
A
6806void
6807sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset, boolean_t drop_lock)
6808{
6809 if (drop_lock) {
6810 pset_unlock(pset);
6811 }
6812}
6813
0a7de745
A
6814void
6815thread_set_no_smt(bool set)
6816{
f427ee49
A
6817 if (!system_is_SMT) {
6818 /* Not a machine that supports SMT */
6819 return;
6820 }
6821
0a7de745 6822 thread_t thread = current_thread();
d9a64523 6823
0a7de745
A
6824 spl_t s = splsched();
6825 thread_lock(thread);
6826 if (set) {
6827 thread->sched_flags |= TH_SFLAG_NO_SMT;
0a7de745
A
6828 }
6829 thread_unlock(thread);
6830 splx(s);
6831}
6832
6833bool
6834thread_get_no_smt(void)
6835{
6836 return current_thread()->sched_flags & TH_SFLAG_NO_SMT;
6837}
6838
f427ee49
A
6839extern void task_set_no_smt(task_t);
6840void
6841task_set_no_smt(task_t task)
6842{
6843 if (!system_is_SMT) {
6844 /* Not a machine that supports SMT */
6845 return;
6846 }
6847
6848 if (task == TASK_NULL) {
6849 task = current_task();
6850 }
6851
6852 task_lock(task);
6853 task->t_flags |= TF_NO_SMT;
6854 task_unlock(task);
6855}
6856
0a7de745
A
6857#if DEBUG || DEVELOPMENT
6858extern void sysctl_task_set_no_smt(char no_smt);
6859void
6860sysctl_task_set_no_smt(char no_smt)
6861{
f427ee49
A
6862 if (!system_is_SMT) {
6863 /* Not a machine that supports SMT */
6864 return;
6865 }
6866
6867 task_t task = current_task();
0a7de745 6868
f427ee49 6869 task_lock(task);
0a7de745
A
6870 if (no_smt == '1') {
6871 task->t_flags |= TF_NO_SMT;
0a7de745 6872 }
f427ee49 6873 task_unlock(task);
0a7de745
A
6874}
6875
6876extern char sysctl_task_get_no_smt(void);
6877char
6878sysctl_task_get_no_smt(void)
6879{
f427ee49 6880 task_t task = current_task();
0a7de745
A
6881
6882 if (task->t_flags & TF_NO_SMT) {
6883 return '1';
6884 }
6885 return '0';
6886}
cb323159 6887#endif /* DEVELOPMENT || DEBUG */
94ff46dc
A
6888
6889
6890__private_extern__ void
f427ee49 6891thread_bind_cluster_type(thread_t thread, char cluster_type, bool soft_bound)
94ff46dc 6892{
c6bf4f31 6893#if __AMP__
c6bf4f31
A
6894 spl_t s = splsched();
6895 thread_lock(thread);
f427ee49
A
6896 thread->sched_flags &= ~(TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY | TH_SFLAG_BOUND_SOFT);
6897 if (soft_bound) {
6898 thread->sched_flags |= TH_SFLAG_BOUND_SOFT;
6899 }
c6bf4f31
A
6900 switch (cluster_type) {
6901 case 'e':
6902 case 'E':
6903 thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
6904 break;
6905 case 'p':
6906 case 'P':
6907 thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
6908 break;
6909 default:
6910 break;
6911 }
6912 thread_unlock(thread);
6913 splx(s);
6914
f427ee49
A
6915 if (thread == current_thread()) {
6916 thread_block(THREAD_CONTINUE_NULL);
6917 }
c6bf4f31 6918#else /* __AMP__ */
f427ee49 6919 (void)thread;
94ff46dc 6920 (void)cluster_type;
f427ee49 6921 (void)soft_bound;
c6bf4f31 6922#endif /* __AMP__ */
94ff46dc 6923}
c3c9b80d
A
6924
6925#if DEVELOPMENT || DEBUG
6926extern int32_t sysctl_get_bound_cpuid(void);
6927int32_t
6928sysctl_get_bound_cpuid(void)
6929{
6930 int32_t cpuid = -1;
6931 thread_t self = current_thread();
6932
6933 processor_t processor = self->bound_processor;
6934 if (processor == NULL) {
6935 cpuid = -1;
6936 } else {
6937 cpuid = processor->cpu_id;
6938 }
6939
6940 return cpuid;
6941}
6942
6943extern kern_return_t sysctl_thread_bind_cpuid(int32_t cpuid);
6944kern_return_t
6945sysctl_thread_bind_cpuid(int32_t cpuid)
6946{
6947 processor_t processor = PROCESSOR_NULL;
6948
6949 if (cpuid == -1) {
6950 goto unbind;
6951 }
6952
6953 if (cpuid < 0 || cpuid >= MAX_SCHED_CPUS) {
6954 return KERN_INVALID_VALUE;
6955 }
6956
6957 processor = processor_array[cpuid];
6958 if (processor == PROCESSOR_NULL) {
6959 return KERN_INVALID_VALUE;
6960 }
6961
6962#if __AMP__
6963
6964 thread_t thread = current_thread();
6965
6966 if (thread->sched_flags & (TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY)) {
6967 if ((thread->sched_flags & TH_SFLAG_BOUND_SOFT) == 0) {
6968 /* Cannot hard-bind an already hard-cluster-bound thread */
6969 return KERN_NOT_SUPPORTED;
6970 }
6971 }
6972
6973#endif /* __AMP__ */
6974
6975unbind:
6976 thread_bind(processor);
6977
6978 thread_block(THREAD_CONTINUE_NULL);
6979 return KERN_SUCCESS;
6980}
6981#endif /* DEVELOPMENT || DEBUG */