]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/sched_prim.c
xnu-4570.51.1.tar.gz
[apple/xnu.git] / osfmk / kern / sched_prim.c
CommitLineData
1c79356b 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67#include <debug.h>
91447636
A
68
69#include <mach/mach_types.h>
1c79356b 70#include <mach/machine.h>
91447636
A
71#include <mach/policy.h>
72#include <mach/sync_policy.h>
6d2010ae 73#include <mach/thread_act.h>
91447636 74
1c79356b
A
75#include <machine/machine_routines.h>
76#include <machine/sched_param.h>
0c530ab8 77#include <machine/machine_cpu.h>
6d2010ae 78#include <machine/machlimits.h>
5ba3f43e 79#include <machine/atomic.h>
91447636 80
fe8ab488
A
81#ifdef CONFIG_MACH_APPROXIMATE_TIME
82#include <machine/commpage.h>
83#endif
84
91447636 85#include <kern/kern_types.h>
39037602 86#include <kern/backtrace.h>
1c79356b
A
87#include <kern/clock.h>
88#include <kern/counters.h>
89#include <kern/cpu_number.h>
90#include <kern/cpu_data.h>
3e170ce0 91#include <kern/smp.h>
91447636 92#include <kern/debug.h>
1c79356b
A
93#include <kern/macro_help.h>
94#include <kern/machine.h>
95#include <kern/misc_protos.h>
5ba3f43e
A
96#if MONOTONIC
97#include <kern/monotonic.h>
98#endif /* MONOTONIC */
1c79356b
A
99#include <kern/processor.h>
100#include <kern/queue.h>
101#include <kern/sched.h>
102#include <kern/sched_prim.h>
fe8ab488 103#include <kern/sfi.h>
1c79356b
A
104#include <kern/syscall_subr.h>
105#include <kern/task.h>
106#include <kern/thread.h>
316670eb 107#include <kern/ledger.h>
39236c6e 108#include <kern/timer_queue.h>
3e170ce0 109#include <kern/waitq.h>
39037602 110#include <kern/policy_internal.h>
91447636 111
1c79356b
A
112#include <vm/pmap.h>
113#include <vm/vm_kern.h>
114#include <vm/vm_map.h>
5ba3f43e 115#include <vm/vm_pageout.h>
91447636 116
b0d623f7 117#include <mach/sdt.h>
5ba3f43e
A
118#include <mach/mach_host.h>
119#include <mach/host_info.h>
b0d623f7 120
1c79356b 121#include <sys/kdebug.h>
39037602
A
122#include <kperf/kperf.h>
123#include <kern/kpc.h>
5ba3f43e 124#include <san/kasan.h>
0c530ab8 125#include <kern/pms.h>
5ba3f43e
A
126#include <kern/host.h>
127#include <stdatomic.h>
3a60a9f5 128
5ba3f43e
A
129int rt_runq_count(processor_set_t pset)
130{
131 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
132}
2d21ac55 133
5ba3f43e
A
134void rt_runq_count_incr(processor_set_t pset)
135{
136 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
137}
3e170ce0 138
5ba3f43e
A
139void rt_runq_count_decr(processor_set_t pset)
140{
141 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
142}
6d2010ae 143
0b4e3aa0 144#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
1c79356b
A
145int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
146
316670eb
A
147#define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
148int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
149
0b4e3aa0
A
150#define MAX_UNSAFE_QUANTA 800
151int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
152
153#define MAX_POLL_QUANTA 2
154int max_poll_quanta = MAX_POLL_QUANTA;
155
156#define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
157int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
158
55e303ae
A
159uint64_t max_poll_computation;
160
6d2010ae
A
161uint64_t max_unsafe_computation;
162uint64_t sched_safe_duration;
163
fe8ab488 164#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 165
55e303ae
A
166uint32_t std_quantum;
167uint32_t min_std_quantum;
316670eb 168uint32_t bg_quantum;
55e303ae 169
91447636 170uint32_t std_quantum_us;
316670eb 171uint32_t bg_quantum_us;
91447636 172
fe8ab488 173#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae
A
174
175uint32_t thread_depress_time;
176uint32_t default_timeshare_computation;
177uint32_t default_timeshare_constraint;
178
55e303ae
A
179uint32_t max_rt_quantum;
180uint32_t min_rt_quantum;
181
fe8ab488 182#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 183
1c79356b 184unsigned sched_tick;
91447636 185uint32_t sched_tick_interval;
1c79356b 186
39037602 187uint32_t sched_pri_shifts[TH_BUCKET_MAX];
2d21ac55 188uint32_t sched_fixed_shift;
39236c6e
A
189
190uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
2d21ac55 191
fe8ab488
A
192/* Allow foreground to decay past default to resolve inversions */
193#define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
194int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
6d2010ae 195
4b17d6b6
A
196/* Defaults for timer deadline profiling */
197#define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
39236c6e 198 * 2ms */
4b17d6b6 199#define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
39236c6e
A
200 <= 5ms */
201
4b17d6b6
A
202uint64_t timer_deadline_tracking_bin_1;
203uint64_t timer_deadline_tracking_bin_2;
204
490019cf
A
205#endif /* CONFIG_SCHED_TIMESHARE_CORE */
206
39236c6e
A
207thread_t sched_maintenance_thread;
208
5ba3f43e
A
209#if __arm__ || __arm64__
210/* interrupts disabled lock to guard recommended cores state */
211decl_simple_lock_data(static,sched_recommended_cores_lock);
212static void sched_recommended_cores_maintenance(void);
213static void sched_update_recommended_cores(uint32_t recommended_cores);
214
215uint64_t perfcontrol_failsafe_starvation_threshold;
216extern char *proc_name_address(struct proc *p);
217
218#endif /* __arm__ || __arm64__ */
fe8ab488 219
6d2010ae
A
220uint64_t sched_one_second_interval;
221
1c79356b 222/* Forwards */
6d2010ae 223
fe8ab488 224#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 225
39236c6e
A
226static void load_shift_init(void);
227static void preempt_pri_init(void);
2d21ac55 228
fe8ab488 229#endif /* CONFIG_SCHED_TIMESHARE_CORE */
c910b4d9 230
6d2010ae 231#if CONFIG_SCHED_IDLE_IN_PLACE
2d21ac55
A
232static thread_t thread_select_idle(
233 thread_t thread,
234 processor_t processor);
6d2010ae 235#endif
1c79356b 236
6d2010ae 237thread_t processor_idle(
2d21ac55
A
238 thread_t thread,
239 processor_t processor);
91447636 240
39236c6e
A
241ast_t
242csw_check_locked( processor_t processor,
fe8ab488
A
243 processor_set_t pset,
244 ast_t check_reason);
39236c6e 245
6d2010ae
A
246static void processor_setrun(
247 processor_t processor,
248 thread_t thread,
249 integer_t options);
250
6d2010ae
A
251static void
252sched_realtime_timebase_init(void);
253
4b17d6b6
A
254static void
255sched_timer_deadline_tracking_init(void);
256
2d21ac55
A
257#if DEBUG
258extern int debug_task;
259#define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
260#else
261#define TLOG(a, fmt, args...) do {} while (0)
262#endif
263
3e170ce0
A
264static processor_t
265thread_bind_internal(
266 thread_t thread,
267 processor_t processor);
1c79356b 268
3e170ce0
A
269static void
270sched_vm_group_maintenance(void);
1c79356b 271
fe8ab488 272#if defined(CONFIG_SCHED_TIMESHARE_CORE)
91447636 273int8_t sched_load_shifts[NRQS];
39037602 274bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS)];
fe8ab488 275#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 276
6d2010ae
A
277const struct sched_dispatch_table *sched_current_dispatch = NULL;
278
279/*
280 * Statically allocate a buffer to hold the longest possible
281 * scheduler description string, as currently implemented.
282 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
283 * to export to userspace via sysctl(3). If either version
284 * changes, update the other.
285 *
286 * Note that in addition to being an upper bound on the strings
287 * in the kernel, it's also an exact parameter to PE_get_default(),
288 * which interrogates the device tree on some platforms. That
289 * API requires the caller know the exact size of the device tree
290 * property, so we need both a legacy size (32) and the current size
291 * (48) to deal with old and new device trees. The device tree property
292 * is similarly padded to a fixed size so that the same kernel image
293 * can run on multiple devices with different schedulers configured
294 * in the device tree.
295 */
6d2010ae 296char sched_string[SCHED_STRING_MAX_LENGTH];
3e170ce0 297
5ba3f43e 298uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
39236c6e
A
299
300/* Global flag which indicates whether Background Stepper Context is enabled */
301static int cpu_throttle_enabled = 1;
91447636 302
5ba3f43e
A
303#if DEBUG
304
305/* Since using the indirect function dispatch table has a negative impact on
306 * context switch performance, only allow DEBUG kernels to use that mechanism.
307 */
308static void
309sched_init_override(void)
6d2010ae
A
310{
311 char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };
312
313 /* Check for runtime selection of the scheduler algorithm */
314 if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) {
5ba3f43e 315 sched_arg[0] = '\0';
6d2010ae 316 }
6d2010ae
A
317 if (strlen(sched_arg) > 0) {
318 if (0) {
319 /* Allow pattern below */
320#if defined(CONFIG_SCHED_TRADITIONAL)
3e170ce0 321 } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
6d2010ae 322 sched_current_dispatch = &sched_traditional_dispatch;
3e170ce0 323 } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
6d2010ae 324 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
6d2010ae 325#endif
fe8ab488 326#if defined(CONFIG_SCHED_MULTIQ)
3e170ce0 327 } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
fe8ab488 328 sched_current_dispatch = &sched_multiq_dispatch;
3e170ce0 329 } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
fe8ab488 330 sched_current_dispatch = &sched_dualq_dispatch;
6d2010ae
A
331#endif
332 } else {
fe8ab488
A
333#if defined(CONFIG_SCHED_TRADITIONAL)
334 printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
3e170ce0 335 printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
fe8ab488 336 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
fe8ab488 337#else
6d2010ae 338 panic("Unrecognized scheduler algorithm: %s", sched_arg);
fe8ab488 339#endif
6d2010ae 340 }
3e170ce0 341 kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
6d2010ae 342 } else {
fe8ab488
A
343#if defined(CONFIG_SCHED_MULTIQ)
344 sched_current_dispatch = &sched_multiq_dispatch;
fe8ab488 345#elif defined(CONFIG_SCHED_TRADITIONAL)
39236c6e 346 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
6d2010ae
A
347#else
348#error No default scheduler implementation
349#endif
3e170ce0
A
350 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
351 }
5ba3f43e 352}
3e170ce0 353
5ba3f43e 354#endif /* DEBUG */
3e170ce0 355
5ba3f43e
A
356void
357sched_init(void)
358{
359#if DEBUG
360 sched_init_override();
361#else /* DEBUG */
362 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
363#endif /* DEBUG */
364
365 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
366 /* No boot-args, check in device tree */
367 if (!PE_get_default("kern.sched_pri_decay_limit",
368 &sched_pri_decay_band_limit,
369 sizeof(sched_pri_decay_band_limit))) {
370 /* Allow decay all the way to normal limits */
371 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
372 }
373 }
374
375 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
376
3e170ce0
A
377 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
378 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
6d2010ae 379 }
5ba3f43e
A
380 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
381
6d2010ae 382 SCHED(init)();
5ba3f43e 383 SCHED(rt_init)(&pset0);
4b17d6b6 384 sched_timer_deadline_tracking_init();
39236c6e 385
6d2010ae
A
386 SCHED(pset_init)(&pset0);
387 SCHED(processor_init)(master_processor);
388}
389
390void
391sched_timebase_init(void)
392{
393 uint64_t abstime;
394
395 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
396 sched_one_second_interval = abstime;
397
398 SCHED(timebase_init)();
399 sched_realtime_timebase_init();
400}
401
fe8ab488 402#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 403
fe8ab488 404void
3e170ce0 405sched_timeshare_init(void)
1c79356b
A
406{
407 /*
0b4e3aa0
A
408 * Calculate the timeslicing quantum
409 * in us.
1c79356b
A
410 */
411 if (default_preemption_rate < 1)
412 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
0b4e3aa0 413 std_quantum_us = (1000 * 1000) / default_preemption_rate;
1c79356b 414
0b4e3aa0 415 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
1c79356b 416
316670eb
A
417 if (default_bg_preemption_rate < 1)
418 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
419 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
420
421 printf("standard background quantum is %d us\n", bg_quantum_us);
422
91447636 423 load_shift_init();
4a3eedf9 424 preempt_pri_init();
1c79356b 425 sched_tick = 0;
1c79356b
A
426}
427
fe8ab488 428void
3e170ce0 429sched_timeshare_timebase_init(void)
55e303ae 430{
91447636
A
431 uint64_t abstime;
432 uint32_t shift;
55e303ae 433
91447636 434 /* standard timeslicing quantum */
55e303ae
A
435 clock_interval_to_absolutetime_interval(
436 std_quantum_us, NSEC_PER_USEC, &abstime);
437 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
b0d623f7 438 std_quantum = (uint32_t)abstime;
55e303ae 439
91447636 440 /* smallest remaining quantum (250 us) */
55e303ae
A
441 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
442 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
b0d623f7 443 min_std_quantum = (uint32_t)abstime;
55e303ae 444
316670eb
A
445 /* quantum for background tasks */
446 clock_interval_to_absolutetime_interval(
447 bg_quantum_us, NSEC_PER_USEC, &abstime);
448 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
449 bg_quantum = (uint32_t)abstime;
450
91447636
A
451 /* scheduler tick interval */
452 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
453 NSEC_PER_USEC, &abstime);
cf7d32b8 454 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
b0d623f7 455 sched_tick_interval = (uint32_t)abstime;
55e303ae 456
91447636
A
457 /*
458 * Compute conversion factor from usage to
459 * timesharing priorities with 5/8 ** n aging.
460 */
461 abstime = (abstime * 5) / 3;
462 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
463 abstime >>= 1;
2d21ac55 464 sched_fixed_shift = shift;
91447636 465
39037602
A
466 for (uint32_t i = 0 ; i < TH_BUCKET_MAX ; i++)
467 sched_pri_shifts[i] = INT8_MAX;
468
fe8ab488
A
469 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
470 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
39037602 471
fe8ab488 472 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
6d2010ae
A
473 thread_depress_time = 1 * std_quantum;
474 default_timeshare_computation = std_quantum / 2;
475 default_timeshare_constraint = std_quantum;
476
5ba3f43e
A
477#if __arm__ || __arm64__
478 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
479#endif /* __arm__ || __arm64__ */
6d2010ae
A
480}
481
fe8ab488
A
482#endif /* CONFIG_SCHED_TIMESHARE_CORE */
483
5ba3f43e
A
484void
485pset_rt_init(processor_set_t pset)
486{
487 rt_lock_init(pset);
488
489 pset->rt_runq.count = 0;
490 queue_init(&pset->rt_runq.queue);
491 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
492}
493
494rt_queue_t
495sched_rtglobal_runq(processor_set_t pset)
6d2010ae 496{
5ba3f43e
A
497 (void)pset;
498
499 return &pset0.rt_runq;
500}
501
502void
503sched_rtglobal_init(processor_set_t pset)
504{
505 if (pset == &pset0) {
506 return pset_rt_init(pset);
507 }
508
509 /* Only pset0 rt_runq is used, so make it easy to detect
510 * buggy accesses to others.
511 */
512 memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
513}
6d2010ae 514
5ba3f43e
A
515void
516sched_rtglobal_queue_shutdown(processor_t processor)
517{
518 (void)processor;
55e303ae
A
519}
520
6d2010ae
A
521static void
522sched_realtime_timebase_init(void)
523{
524 uint64_t abstime;
525
526 /* smallest rt computaton (50 us) */
527 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
528 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
529 min_rt_quantum = (uint32_t)abstime;
530
531 /* maximum rt computation (50 ms) */
532 clock_interval_to_absolutetime_interval(
533 50, 1000*NSEC_PER_USEC, &abstime);
534 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
535 max_rt_quantum = (uint32_t)abstime;
536
537}
538
5ba3f43e
A
539void
540sched_check_spill(processor_set_t pset, thread_t thread)
541{
542 (void)pset;
543 (void)thread;
544
545 return;
546}
547
548bool
549sched_thread_should_yield(processor_t processor, thread_t thread)
550{
551 (void)thread;
552
553 return (!SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0);
554}
555
fe8ab488 556#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 557
91447636
A
558/*
559 * Set up values for timeshare
560 * loading factors.
561 */
562static void
563load_shift_init(void)
564{
565 int8_t k, *p = sched_load_shifts;
566 uint32_t i, j;
567
39236c6e
A
568 uint32_t sched_decay_penalty = 1;
569
570 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof (sched_decay_penalty))) {
571 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
572 }
573
574 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof (sched_decay_usage_age_factor))) {
575 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
576 }
577
39236c6e
A
578 if (sched_decay_penalty == 0) {
579 /*
580 * There is no penalty for timeshare threads for using too much
581 * CPU, so set all load shifts to INT8_MIN. Even under high load,
582 * sched_pri_shift will be >INT8_MAX, and there will be no
583 * penalty applied to threads (nor will sched_usage be updated per
584 * thread).
585 */
586 for (i = 0; i < NRQS; i++) {
587 sched_load_shifts[i] = INT8_MIN;
588 }
589
590 return;
591 }
592
91447636
A
593 *p++ = INT8_MIN; *p++ = 0;
594
39236c6e
A
595 /*
596 * For a given system load "i", the per-thread priority
597 * penalty per quantum of CPU usage is ~2^k priority
598 * levels. "sched_decay_penalty" can cause more
599 * array entries to be filled with smaller "k" values
600 */
601 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
602 for (j <<= 1; (i < j) && (i < NRQS); ++i)
91447636
A
603 *p++ = k;
604 }
605}
606
4a3eedf9
A
607static void
608preempt_pri_init(void)
609{
39037602 610 bitmap_t *p = sched_preempt_pri;
4a3eedf9 611
39037602
A
612 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i)
613 bitmap_set(p, i);
4a3eedf9 614
39037602
A
615 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
616 bitmap_set(p, i);
4a3eedf9
A
617}
618
fe8ab488 619#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 620
1c79356b 621/*
0b4e3aa0 622 * Thread wait timer expiration.
1c79356b
A
623 */
624void
625thread_timer_expire(
91447636
A
626 void *p0,
627 __unused void *p1)
1c79356b
A
628{
629 thread_t thread = p0;
630 spl_t s;
631
39037602
A
632 assert_thread_magic(thread);
633
1c79356b 634 s = splsched();
55e303ae 635 thread_lock(thread);
91447636 636 if (--thread->wait_timer_active == 0) {
0b4e3aa0
A
637 if (thread->wait_timer_is_set) {
638 thread->wait_timer_is_set = FALSE;
55e303ae 639 clear_wait_internal(thread, THREAD_TIMED_OUT);
0b4e3aa0 640 }
1c79356b 641 }
55e303ae 642 thread_unlock(thread);
1c79356b
A
643 splx(s);
644}
645
1c79356b 646/*
91447636
A
647 * thread_unblock:
648 *
649 * Unblock thread on wake up.
650 *
3e170ce0 651 * Returns TRUE if the thread should now be placed on the runqueue.
91447636
A
652 *
653 * Thread must be locked.
3e170ce0
A
654 *
655 * Called at splsched().
1c79356b 656 */
91447636
A
657boolean_t
658thread_unblock(
659 thread_t thread,
660 wait_result_t wresult)
1c79356b 661{
3e170ce0 662 boolean_t ready_for_runq = FALSE;
4b17d6b6 663 thread_t cthread = current_thread();
fe8ab488 664 uint32_t new_run_count;
0b4e3aa0 665
91447636 666 /*
2d21ac55 667 * Set wait_result.
91447636
A
668 */
669 thread->wait_result = wresult;
1c79356b 670
91447636 671 /*
2d21ac55 672 * Cancel pending wait timer.
91447636 673 */
1c79356b
A
674 if (thread->wait_timer_is_set) {
675 if (timer_call_cancel(&thread->wait_timer))
676 thread->wait_timer_active--;
677 thread->wait_timer_is_set = FALSE;
678 }
679
91447636 680 /*
2d21ac55
A
681 * Update scheduling state: not waiting,
682 * set running.
91447636
A
683 */
684 thread->state &= ~(TH_WAIT|TH_UNINT);
1c79356b 685
91447636
A
686 if (!(thread->state & TH_RUN)) {
687 thread->state |= TH_RUN;
5ba3f43e 688 thread->last_made_runnable_time = thread->last_basepri_change_time = mach_approximate_time();
3e170ce0
A
689
690 ready_for_runq = TRUE;
1c79356b 691
2d21ac55 692 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
1c79356b 693
39037602 694 /* Update the runnable thread count */
fe8ab488 695 new_run_count = sched_run_incr(thread);
3e170ce0 696 } else {
2d21ac55 697 /*
39037602
A
698 * Either the thread is idling in place on another processor,
699 * or it hasn't finished context switching yet.
2d21ac55 700 */
6d2010ae 701#if CONFIG_SCHED_IDLE_IN_PLACE
2d21ac55
A
702 if (thread->state & TH_IDLE) {
703 processor_t processor = thread->last_processor;
704
705 if (processor != current_processor())
706 machine_signal_idle(processor);
707 }
6d2010ae
A
708#else
709 assert((thread->state & TH_IDLE) == 0);
710#endif
39037602
A
711 /*
712 * The run count is only dropped after the context switch completes
713 * and the thread is still waiting, so we should not run_incr here
714 */
715 new_run_count = sched_run_buckets[TH_BUCKET_RUN];
2d21ac55 716 }
1c79356b 717
3e170ce0 718
91447636
A
719 /*
720 * Calculate deadline for real-time threads.
721 */
6d2010ae 722 if (thread->sched_mode == TH_MODE_REALTIME) {
3e170ce0 723 uint64_t ctime;
fe8ab488
A
724
725 ctime = mach_absolute_time();
726 thread->realtime.deadline = thread->realtime.constraint + ctime;
0b4e3aa0
A
727 }
728
91447636
A
729 /*
730 * Clear old quantum, fail-safe computation, etc.
731 */
fe8ab488 732 thread->quantum_remaining = 0;
91447636
A
733 thread->computation_metered = 0;
734 thread->reason = AST_NONE;
813fb2f6 735 thread->block_hint = kThreadWaitNone;
1c79356b 736
4b17d6b6
A
737 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
738 * We also account for "double hop" thread signaling via
739 * the thread callout infrastructure.
740 * DRK: consider removing the callout wakeup counters in the future
741 * they're present for verification at the moment.
742 */
743 boolean_t aticontext, pidle;
744 ml_get_power_state(&aticontext, &pidle);
39236c6e
A
745
746 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
39236c6e
A
747 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
748
4b17d6b6 749 uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);
39236c6e 750
4b17d6b6
A
751 if (ttd) {
752 if (ttd <= timer_deadline_tracking_bin_1)
753 thread->thread_timer_wakeups_bin_1++;
754 else
755 if (ttd <= timer_deadline_tracking_bin_2)
756 thread->thread_timer_wakeups_bin_2++;
757 }
39236c6e 758
5c9f4661
A
759 ledger_credit_thread(thread, thread->t_ledger,
760 task_ledgers.interrupt_wakeups, 1);
4b17d6b6 761 if (pidle) {
5c9f4661
A
762 ledger_credit_thread(thread, thread->t_ledger,
763 task_ledgers.platform_idle_wakeups, 1);
4b17d6b6 764 }
39236c6e 765
4b17d6b6 766 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
5c9f4661 767 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
4b17d6b6 768 if (cthread->callout_woken_from_icontext) {
5c9f4661
A
769 ledger_credit_thread(thread, thread->t_ledger,
770 task_ledgers.interrupt_wakeups, 1);
4b17d6b6 771 thread->thread_callout_interrupt_wakeups++;
5c9f4661 772
4b17d6b6 773 if (cthread->callout_woken_from_platform_idle) {
5c9f4661
A
774 ledger_credit_thread(thread, thread->t_ledger,
775 task_ledgers.platform_idle_wakeups, 1);
4b17d6b6
A
776 thread->thread_callout_platform_idle_wakeups++;
777 }
5c9f4661 778
39236c6e 779 cthread->callout_woke_thread = TRUE;
4b17d6b6
A
780 }
781 }
5c9f4661 782
4b17d6b6 783 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
39236c6e
A
784 thread->callout_woken_from_icontext = aticontext;
785 thread->callout_woken_from_platform_idle = pidle;
786 thread->callout_woke_thread = FALSE;
4b17d6b6
A
787 }
788
fe8ab488
A
789 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
790 MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
39037602
A
791 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
792 sched_run_buckets[TH_BUCKET_RUN], 0);
b0d623f7
A
793
794 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
91447636 795
3e170ce0 796 return (ready_for_runq);
1c79356b
A
797}
798
799/*
91447636 800 * Routine: thread_go
1c79356b 801 * Purpose:
91447636 802 * Unblock and dispatch thread.
1c79356b
A
803 * Conditions:
804 * thread lock held, IPC locks may be held.
805 * thread must have been pulled from wait queue under same lock hold.
3e170ce0
A
806 * thread must have been waiting
807 * Returns:
9bccf70c 808 * KERN_SUCCESS - Thread was set running
3e170ce0
A
809 *
810 * TODO: This should return void
1c79356b 811 */
9bccf70c 812kern_return_t
91447636 813thread_go(
3e170ce0
A
814 thread_t thread,
815 wait_result_t wresult)
1c79356b 816{
39037602
A
817 assert_thread_magic(thread);
818
1c79356b 819 assert(thread->at_safe_point == FALSE);
9bccf70c 820 assert(thread->wait_event == NO_EVENT64);
3e170ce0 821 assert(thread->waitq == NULL);
1c79356b 822
3e170ce0
A
823 assert(!(thread->state & (TH_TERMINATE|TH_TERMINATE2)));
824 assert(thread->state & TH_WAIT);
55e303ae 825
55e303ae 826
39037602
A
827 if (thread_unblock(thread, wresult)) {
828#if SCHED_TRACE_THREAD_WAKEUPS
829 backtrace(&thread->thread_wakeup_bt[0],
830 (sizeof(thread->thread_wakeup_bt)/sizeof(uintptr_t)));
831#endif
3e170ce0 832 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
39037602 833 }
3e170ce0
A
834
835 return (KERN_SUCCESS);
1c79356b
A
836}
837
9bccf70c
A
838/*
839 * Routine: thread_mark_wait_locked
840 * Purpose:
841 * Mark a thread as waiting. If, given the circumstances,
842 * it doesn't want to wait (i.e. already aborted), then
843 * indicate that in the return value.
844 * Conditions:
845 * at splsched() and thread is locked.
846 */
847__private_extern__
848wait_result_t
1c79356b 849thread_mark_wait_locked(
9bccf70c
A
850 thread_t thread,
851 wait_interrupt_t interruptible)
1c79356b 852{
55e303ae 853 boolean_t at_safe_point;
1c79356b 854
3e170ce0 855 assert(!(thread->state & (TH_WAIT|TH_IDLE|TH_UNINT|TH_TERMINATE2)));
b0d623f7 856
9bccf70c
A
857 /*
858 * The thread may have certain types of interrupts/aborts masked
859 * off. Even if the wait location says these types of interrupts
860 * are OK, we have to honor mask settings (outer-scoped code may
861 * not be able to handle aborts at the moment).
862 */
91447636
A
863 if (interruptible > (thread->options & TH_OPT_INTMASK))
864 interruptible = thread->options & TH_OPT_INTMASK;
9bccf70c
A
865
866 at_safe_point = (interruptible == THREAD_ABORTSAFE);
867
55e303ae 868 if ( interruptible == THREAD_UNINT ||
6d2010ae 869 !(thread->sched_flags & TH_SFLAG_ABORT) ||
55e303ae 870 (!at_safe_point &&
6d2010ae 871 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
b0d623f7 872
316670eb
A
873 if ( !(thread->state & TH_TERMINATE))
874 DTRACE_SCHED(sleep);
b0d623f7 875
9bccf70c
A
876 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
877 thread->at_safe_point = at_safe_point;
813fb2f6
A
878
879 /* TODO: pass this through assert_wait instead, have
880 * assert_wait just take a struct as an argument */
881 assert(!thread->block_hint);
882 thread->block_hint = thread->pending_block_hint;
883 thread->pending_block_hint = kThreadWaitNone;
884
9bccf70c 885 return (thread->wait_result = THREAD_WAITING);
9bccf70c 886 }
55e303ae 887 else
6d2010ae
A
888 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
889 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
813fb2f6 890 thread->pending_block_hint = kThreadWaitNone;
55e303ae 891
9bccf70c 892 return (thread->wait_result = THREAD_INTERRUPTED);
1c79356b
A
893}
894
9bccf70c
A
895/*
896 * Routine: thread_interrupt_level
897 * Purpose:
898 * Set the maximum interruptible state for the
899 * current thread. The effective value of any
900 * interruptible flag passed into assert_wait
901 * will never exceed this.
902 *
903 * Useful for code that must not be interrupted,
904 * but which calls code that doesn't know that.
905 * Returns:
906 * The old interrupt level for the thread.
907 */
908__private_extern__
909wait_interrupt_t
910thread_interrupt_level(
911 wait_interrupt_t new_level)
912{
913 thread_t thread = current_thread();
91447636 914 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1c79356b 915
91447636 916 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1c79356b 917
91447636 918 return result;
1c79356b
A
919}
920
1c79356b
A
921/*
922 * assert_wait:
923 *
924 * Assert that the current thread is about to go to
925 * sleep until the specified event occurs.
926 */
9bccf70c 927wait_result_t
1c79356b
A
928assert_wait(
929 event_t event,
9bccf70c 930 wait_interrupt_t interruptible)
1c79356b 931{
3e170ce0
A
932 if (__improbable(event == NO_EVENT))
933 panic("%s() called with NO_EVENT", __func__);
1c79356b 934
316670eb
A
935 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
936 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
4bd07ac2 937 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
316670eb 938
3e170ce0
A
939 struct waitq *waitq;
940 waitq = global_eventq(event);
941 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
9bccf70c
A
942}
943
39037602
A
944/*
945 * assert_wait_queue:
946 *
947 * Return the global waitq for the specified event
948 */
949struct waitq *
950assert_wait_queue(
951 event_t event)
952{
953 return global_eventq(event);
954}
955
91447636
A
956wait_result_t
957assert_wait_timeout(
958 event_t event,
959 wait_interrupt_t interruptible,
960 uint32_t interval,
961 uint32_t scale_factor)
55e303ae 962{
91447636
A
963 thread_t thread = current_thread();
964 wait_result_t wresult;
91447636
A
965 uint64_t deadline;
966 spl_t s;
967
3e170ce0
A
968 if (__improbable(event == NO_EVENT))
969 panic("%s() called with NO_EVENT", __func__);
fe8ab488 970
3e170ce0
A
971 struct waitq *waitq;
972 waitq = global_eventq(event);
91447636
A
973
974 s = splsched();
3e170ce0 975 waitq_lock(waitq);
91447636
A
976
977 clock_interval_to_deadline(interval, scale_factor, &deadline);
3e170ce0 978
316670eb 979 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3e170ce0 980 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
4bd07ac2 981 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
3e170ce0
A
982
983 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
984 interruptible,
985 TIMEOUT_URGENCY_SYS_NORMAL,
986 deadline, TIMEOUT_NO_LEEWAY,
987 thread);
39236c6e 988
3e170ce0 989 waitq_unlock(waitq);
39236c6e 990 splx(s);
3e170ce0 991 return wresult;
39236c6e
A
992}
993
994wait_result_t
995assert_wait_timeout_with_leeway(
996 event_t event,
997 wait_interrupt_t interruptible,
998 wait_timeout_urgency_t urgency,
999 uint32_t interval,
1000 uint32_t leeway,
1001 uint32_t scale_factor)
1002{
1003 thread_t thread = current_thread();
1004 wait_result_t wresult;
39236c6e
A
1005 uint64_t deadline;
1006 uint64_t abstime;
1007 uint64_t slop;
1008 uint64_t now;
1009 spl_t s;
1010
3e170ce0
A
1011 if (__improbable(event == NO_EVENT))
1012 panic("%s() called with NO_EVENT", __func__);
1013
39236c6e
A
1014 now = mach_absolute_time();
1015 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1016 deadline = now + abstime;
1017
1018 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1019
3e170ce0
A
1020 struct waitq *waitq;
1021 waitq = global_eventq(event);
39236c6e
A
1022
1023 s = splsched();
3e170ce0 1024 waitq_lock(waitq);
39236c6e
A
1025
1026 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3e170ce0 1027 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
4bd07ac2 1028 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
3e170ce0
A
1029
1030 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1031 interruptible,
1032 urgency, deadline, slop,
1033 thread);
91447636 1034
3e170ce0 1035 waitq_unlock(waitq);
91447636 1036 splx(s);
3e170ce0 1037 return wresult;
55e303ae
A
1038}
1039
1040wait_result_t
91447636 1041assert_wait_deadline(
55e303ae 1042 event_t event,
91447636
A
1043 wait_interrupt_t interruptible,
1044 uint64_t deadline)
55e303ae
A
1045{
1046 thread_t thread = current_thread();
91447636 1047 wait_result_t wresult;
55e303ae
A
1048 spl_t s;
1049
3e170ce0
A
1050 if (__improbable(event == NO_EVENT))
1051 panic("%s() called with NO_EVENT", __func__);
1052
1053 struct waitq *waitq;
1054 waitq = global_eventq(event);
55e303ae
A
1055
1056 s = splsched();
3e170ce0 1057 waitq_lock(waitq);
55e303ae 1058
316670eb 1059 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3e170ce0 1060 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
4bd07ac2 1061 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
39236c6e 1062
3e170ce0
A
1063 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1064 interruptible,
1065 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1066 TIMEOUT_NO_LEEWAY, thread);
3e170ce0 1067 waitq_unlock(waitq);
39236c6e 1068 splx(s);
3e170ce0 1069 return wresult;
39236c6e
A
1070}
1071
1072wait_result_t
1073assert_wait_deadline_with_leeway(
1074 event_t event,
1075 wait_interrupt_t interruptible,
1076 wait_timeout_urgency_t urgency,
1077 uint64_t deadline,
1078 uint64_t leeway)
1079{
1080 thread_t thread = current_thread();
1081 wait_result_t wresult;
39236c6e
A
1082 spl_t s;
1083
3e170ce0
A
1084 if (__improbable(event == NO_EVENT))
1085 panic("%s() called with NO_EVENT", __func__);
fe8ab488 1086
3e170ce0
A
1087 struct waitq *waitq;
1088 waitq = global_eventq(event);
39236c6e
A
1089
1090 s = splsched();
3e170ce0 1091 waitq_lock(waitq);
39236c6e
A
1092
1093 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3e170ce0 1094 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
4bd07ac2 1095 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
39236c6e 1096
3e170ce0
A
1097 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1098 interruptible,
1099 urgency, deadline, leeway,
1100 thread);
3e170ce0 1101 waitq_unlock(waitq);
55e303ae 1102 splx(s);
3e170ce0 1103 return wresult;
55e303ae 1104}
9bccf70c 1105
39236c6e
A
1106/*
1107 * thread_isoncpu:
1108 *
1109 * Return TRUE if a thread is running on a processor such that an AST
1110 * is needed to pull it out of userspace execution, or if executing in
1111 * the kernel, bring to a context switch boundary that would cause
1112 * thread state to be serialized in the thread PCB.
1113 *
1114 * Thread locked, returns the same way. While locked, fields
fe8ab488 1115 * like "state" cannot change. "runq" can change only from set to unset.
39236c6e
A
1116 */
1117static inline boolean_t
1118thread_isoncpu(thread_t thread)
1119{
1120 /* Not running or runnable */
1121 if (!(thread->state & TH_RUN))
1122 return (FALSE);
1123
1124 /* Waiting on a runqueue, not currently running */
fe8ab488 1125 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
39236c6e
A
1126 if (thread->runq != PROCESSOR_NULL)
1127 return (FALSE);
1128
3e170ce0
A
1129 /*
1130 * Thread does not have a stack yet
1131 * It could be on the stack alloc queue or preparing to be invoked
1132 */
1133 if (!thread->kernel_stack)
1134 return (FALSE);
1135
39236c6e
A
1136 /*
1137 * Thread must be running on a processor, or
1138 * about to run, or just did run. In all these
1139 * cases, an AST to the processor is needed
1140 * to guarantee that the thread is kicked out
1141 * of userspace and the processor has
1142 * context switched (and saved register state).
1143 */
1144 return (TRUE);
1145}
1146
1c79356b 1147/*
91447636 1148 * thread_stop:
1c79356b 1149 *
91447636 1150 * Force a preemption point for a thread and wait
39236c6e
A
1151 * for it to stop running on a CPU. If a stronger
1152 * guarantee is requested, wait until no longer
1153 * runnable. Arbitrates access among
91447636 1154 * multiple stop requests. (released by unstop)
1c79356b 1155 *
91447636
A
1156 * The thread must enter a wait state and stop via a
1157 * separate means.
1c79356b 1158 *
91447636 1159 * Returns FALSE if interrupted.
1c79356b
A
1160 */
1161boolean_t
1162thread_stop(
39236c6e
A
1163 thread_t thread,
1164 boolean_t until_not_runnable)
1c79356b 1165{
91447636 1166 wait_result_t wresult;
2d21ac55 1167 spl_t s = splsched();
39236c6e 1168 boolean_t oncpu;
1c79356b 1169
1c79356b 1170 wake_lock(thread);
2d21ac55 1171 thread_lock(thread);
1c79356b
A
1172
1173 while (thread->state & TH_SUSP) {
1174 thread->wake_active = TRUE;
2d21ac55
A
1175 thread_unlock(thread);
1176
91447636 1177 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1c79356b
A
1178 wake_unlock(thread);
1179 splx(s);
1180
91447636
A
1181 if (wresult == THREAD_WAITING)
1182 wresult = thread_block(THREAD_CONTINUE_NULL);
9bccf70c 1183
91447636 1184 if (wresult != THREAD_AWAKENED)
1c79356b
A
1185 return (FALSE);
1186
1187 s = splsched();
1188 wake_lock(thread);
2d21ac55 1189 thread_lock(thread);
1c79356b 1190 }
9bccf70c 1191
1c79356b 1192 thread->state |= TH_SUSP;
1c79356b 1193
39236c6e
A
1194 while ((oncpu = thread_isoncpu(thread)) ||
1195 (until_not_runnable && (thread->state & TH_RUN))) {
1196 processor_t processor;
1197
1198 if (oncpu) {
1199 assert(thread->state & TH_RUN);
1200 processor = thread->chosen_processor;
9bccf70c 1201 cause_ast_check(processor);
39236c6e 1202 }
9bccf70c
A
1203
1204 thread->wake_active = TRUE;
2d21ac55
A
1205 thread_unlock(thread);
1206
91447636 1207 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
9bccf70c
A
1208 wake_unlock(thread);
1209 splx(s);
1210
91447636
A
1211 if (wresult == THREAD_WAITING)
1212 wresult = thread_block(THREAD_CONTINUE_NULL);
9bccf70c 1213
91447636 1214 if (wresult != THREAD_AWAKENED) {
9bccf70c
A
1215 thread_unstop(thread);
1216 return (FALSE);
1217 }
1218
1219 s = splsched();
1220 wake_lock(thread);
1221 thread_lock(thread);
1222 }
1223
1224 thread_unlock(thread);
1c79356b
A
1225 wake_unlock(thread);
1226 splx(s);
39236c6e
A
1227
1228 /*
1229 * We return with the thread unlocked. To prevent it from
1230 * transitioning to a runnable state (or from TH_RUN to
1231 * being on the CPU), the caller must ensure the thread
1232 * is stopped via an external means (such as an AST)
1233 */
1c79356b
A
1234
1235 return (TRUE);
1236}
1237
1238/*
91447636
A
1239 * thread_unstop:
1240 *
1241 * Release a previous stop request and set
1242 * the thread running if appropriate.
1243 *
1244 * Use only after a successful stop operation.
1c79356b
A
1245 */
1246void
1247thread_unstop(
9bccf70c 1248 thread_t thread)
1c79356b 1249{
9bccf70c 1250 spl_t s = splsched();
1c79356b 1251
1c79356b
A
1252 wake_lock(thread);
1253 thread_lock(thread);
1254
3e170ce0 1255 assert((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) != TH_SUSP);
55e303ae 1256
1c79356b
A
1257 if (thread->state & TH_SUSP) {
1258 thread->state &= ~TH_SUSP;
1259
1260 if (thread->wake_active) {
1261 thread->wake_active = FALSE;
1262 thread_unlock(thread);
2d21ac55
A
1263
1264 thread_wakeup(&thread->wake_active);
1c79356b
A
1265 wake_unlock(thread);
1266 splx(s);
1c79356b
A
1267
1268 return;
1269 }
1270 }
1271
1272 thread_unlock(thread);
1273 wake_unlock(thread);
1274 splx(s);
1275}
1276
1277/*
91447636
A
1278 * thread_wait:
1279 *
1280 * Wait for a thread to stop running. (non-interruptible)
1281 *
1c79356b 1282 */
91447636 1283void
1c79356b 1284thread_wait(
316670eb
A
1285 thread_t thread,
1286 boolean_t until_not_runnable)
1c79356b 1287{
91447636 1288 wait_result_t wresult;
316670eb
A
1289 boolean_t oncpu;
1290 processor_t processor;
1291 spl_t s = splsched();
1c79356b 1292
1c79356b 1293 wake_lock(thread);
9bccf70c 1294 thread_lock(thread);
1c79356b 1295
316670eb
A
1296 /*
1297 * Wait until not running on a CPU. If stronger requirement
1298 * desired, wait until not runnable. Assumption: if thread is
1299 * on CPU, then TH_RUN is set, so we're not waiting in any case
1300 * where the original, pure "TH_RUN" check would have let us
1301 * finish.
1302 */
39236c6e 1303 while ((oncpu = thread_isoncpu(thread)) ||
316670eb 1304 (until_not_runnable && (thread->state & TH_RUN))) {
e7c99d92 1305
316670eb
A
1306 if (oncpu) {
1307 assert(thread->state & TH_RUN);
39236c6e 1308 processor = thread->chosen_processor;
9bccf70c 1309 cause_ast_check(processor);
316670eb 1310 }
1c79356b
A
1311
1312 thread->wake_active = TRUE;
2d21ac55
A
1313 thread_unlock(thread);
1314
91447636 1315 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1c79356b
A
1316 wake_unlock(thread);
1317 splx(s);
1318
91447636
A
1319 if (wresult == THREAD_WAITING)
1320 thread_block(THREAD_CONTINUE_NULL);
1c79356b
A
1321
1322 s = splsched();
1323 wake_lock(thread);
9bccf70c 1324 thread_lock(thread);
1c79356b 1325 }
0b4e3aa0 1326
9bccf70c 1327 thread_unlock(thread);
1c79356b
A
1328 wake_unlock(thread);
1329 splx(s);
1c79356b
A
1330}
1331
1c79356b
A
1332/*
1333 * Routine: clear_wait_internal
1334 *
1335 * Clear the wait condition for the specified thread.
1336 * Start the thread executing if that is appropriate.
1337 * Arguments:
1338 * thread thread to awaken
1339 * result Wakeup result the thread should see
1340 * Conditions:
1341 * At splsched
1342 * the thread is locked.
9bccf70c
A
1343 * Returns:
1344 * KERN_SUCCESS thread was rousted out a wait
1345 * KERN_FAILURE thread was waiting but could not be rousted
1346 * KERN_NOT_WAITING thread was not waiting
1c79356b 1347 */
9bccf70c 1348__private_extern__ kern_return_t
1c79356b 1349clear_wait_internal(
9bccf70c 1350 thread_t thread,
55e303ae 1351 wait_result_t wresult)
1c79356b 1352{
39037602 1353 uint32_t i = LockTimeOutUsec;
3e170ce0 1354 struct waitq *waitq = thread->waitq;
39037602 1355
9bccf70c 1356 do {
55e303ae
A
1357 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1358 return (KERN_FAILURE);
9bccf70c 1359
3e170ce0 1360 if (waitq != NULL) {
39037602 1361 if (!waitq_pull_thread_locked(waitq, thread)) {
9bccf70c
A
1362 thread_unlock(thread);
1363 delay(1);
39037602
A
1364 if (i > 0 && !machine_timeout_suspended())
1365 i--;
9bccf70c 1366 thread_lock(thread);
3e170ce0
A
1367 if (waitq != thread->waitq)
1368 return KERN_NOT_WAITING;
9bccf70c
A
1369 continue;
1370 }
1c79356b 1371 }
55e303ae 1372
3e170ce0
A
1373 /* TODO: Can we instead assert TH_TERMINATE is not set? */
1374 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT)
1375 return (thread_go(thread, wresult));
1376 else
1377 return (KERN_NOT_WAITING);
39037602 1378 } while (i > 0);
55e303ae 1379
2d21ac55 1380 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
3e170ce0 1381 thread, waitq, cpu_number());
55e303ae
A
1382
1383 return (KERN_FAILURE);
1c79356b
A
1384}
1385
1386
1387/*
1388 * clear_wait:
1389 *
1390 * Clear the wait condition for the specified thread. Start the thread
1391 * executing if that is appropriate.
1392 *
1393 * parameters:
1394 * thread thread to awaken
1395 * result Wakeup result the thread should see
1396 */
9bccf70c 1397kern_return_t
1c79356b 1398clear_wait(
9bccf70c
A
1399 thread_t thread,
1400 wait_result_t result)
1c79356b 1401{
9bccf70c 1402 kern_return_t ret;
1c79356b
A
1403 spl_t s;
1404
1405 s = splsched();
1406 thread_lock(thread);
9bccf70c 1407 ret = clear_wait_internal(thread, result);
1c79356b
A
1408 thread_unlock(thread);
1409 splx(s);
9bccf70c 1410 return ret;
1c79356b
A
1411}
1412
1413
1414/*
1415 * thread_wakeup_prim:
1416 *
1417 * Common routine for thread_wakeup, thread_wakeup_with_result,
1418 * and thread_wakeup_one.
1419 *
1420 */
9bccf70c 1421kern_return_t
1c79356b 1422thread_wakeup_prim(
39037602
A
1423 event_t event,
1424 boolean_t one_thread,
1425 wait_result_t result)
6d2010ae 1426{
39037602
A
1427 if (__improbable(event == NO_EVENT))
1428 panic("%s() called with NO_EVENT", __func__);
1429
1430 struct waitq *wq = global_eventq(event);
1431
1432 if (one_thread)
1433 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1434 else
1435 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
6d2010ae
A
1436}
1437
39037602
A
1438/*
1439 * Wakeup a specified thread if and only if it's waiting for this event
1440 */
1441kern_return_t
1442thread_wakeup_thread(
1443 event_t event,
1444 thread_t thread)
1445{
1446 if (__improbable(event == NO_EVENT))
1447 panic("%s() called with NO_EVENT", __func__);
1448
5ba3f43e
A
1449 if (__improbable(thread == THREAD_NULL))
1450 panic("%s() called with THREAD_NULL", __func__);
1451
39037602
A
1452 struct waitq *wq = global_eventq(event);
1453
1454 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1455}
6d2010ae 1456
39037602
A
1457/*
1458 * Wakeup a thread waiting on an event and promote it to a priority.
1459 *
1460 * Requires woken thread to un-promote itself when done.
1461 */
6d2010ae 1462kern_return_t
39037602
A
1463thread_wakeup_one_with_pri(
1464 event_t event,
1465 int priority)
1c79356b 1466{
3e170ce0
A
1467 if (__improbable(event == NO_EVENT))
1468 panic("%s() called with NO_EVENT", __func__);
1469
39037602 1470 struct waitq *wq = global_eventq(event);
1c79356b 1471
39037602
A
1472 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1473}
fe8ab488 1474
39037602
A
1475/*
1476 * Wakeup a thread waiting on an event,
1477 * promote it to a priority,
1478 * and return a reference to the woken thread.
1479 *
1480 * Requires woken thread to un-promote itself when done.
1481 */
1482thread_t
1483thread_wakeup_identify(event_t event,
1484 int priority)
1485{
1486 if (__improbable(event == NO_EVENT))
1487 panic("%s() called with NO_EVENT", __func__);
1488
1489 struct waitq *wq = global_eventq(event);
1490
1491 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1c79356b
A
1492}
1493
1494/*
1495 * thread_bind:
1496 *
2d21ac55 1497 * Force the current thread to execute on the specified processor.
fe8ab488 1498 * Takes effect after the next thread_block().
1c79356b 1499 *
55e303ae
A
1500 * Returns the previous binding. PROCESSOR_NULL means
1501 * not bound.
1502 *
1503 * XXX - DO NOT export this to users - XXX
1c79356b 1504 */
55e303ae 1505processor_t
1c79356b 1506thread_bind(
2d21ac55 1507 processor_t processor)
1c79356b 1508{
2d21ac55 1509 thread_t self = current_thread();
55e303ae 1510 processor_t prev;
55e303ae 1511 spl_t s;
1c79356b
A
1512
1513 s = splsched();
2d21ac55 1514 thread_lock(self);
55e303ae 1515
3e170ce0 1516 prev = thread_bind_internal(self, processor);
55e303ae 1517
2d21ac55 1518 thread_unlock(self);
1c79356b 1519 splx(s);
55e303ae
A
1520
1521 return (prev);
1c79356b
A
1522}
1523
3e170ce0
A
1524/*
1525 * thread_bind_internal:
1526 *
1527 * If the specified thread is not the current thread, and it is currently
1528 * running on another CPU, a remote AST must be sent to that CPU to cause
1529 * the thread to migrate to its bound processor. Otherwise, the migration
1530 * will occur at the next quantum expiration or blocking point.
1531 *
1532 * When the thread is the current thread, and explicit thread_block() should
1533 * be used to force the current processor to context switch away and
1534 * let the thread migrate to the bound processor.
1535 *
1536 * Thread must be locked, and at splsched.
1537 */
1538
1539static processor_t
1540thread_bind_internal(
1541 thread_t thread,
1542 processor_t processor)
1543{
1544 processor_t prev;
1545
1546 /* <rdar://problem/15102234> */
1547 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1548 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1549 assert(thread->runq == PROCESSOR_NULL);
1550
1551 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1552
1553 prev = thread->bound_processor;
1554 thread->bound_processor = processor;
1555
1556 return (prev);
1557}
1558
1559/*
1560 * thread_vm_bind_group_add:
1561 *
1562 * The "VM bind group" is a special mechanism to mark a collection
1563 * of threads from the VM subsystem that, in general, should be scheduled
1564 * with only one CPU of parallelism. To accomplish this, we initially
1565 * bind all the threads to the master processor, which has the effect
1566 * that only one of the threads in the group can execute at once, including
1567 * preempting threads in the group that are a lower priority. Future
1568 * mechanisms may use more dynamic mechanisms to prevent the collection
1569 * of VM threads from using more CPU time than desired.
1570 *
1571 * The current implementation can result in priority inversions where
1572 * compute-bound priority 95 or realtime threads that happen to have
1573 * landed on the master processor prevent the VM threads from running.
1574 * When this situation is detected, we unbind the threads for one
1575 * scheduler tick to allow the scheduler to run the threads an
1576 * additional CPUs, before restoring the binding (assuming high latency
1577 * is no longer a problem).
1578 */
1579
1580/*
1581 * The current max is provisioned for:
1582 * vm_compressor_swap_trigger_thread (92)
1583 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1584 * vm_pageout_continue (92)
1585 * memorystatus_thread (95)
1586 */
1587#define MAX_VM_BIND_GROUP_COUNT (5)
1588decl_simple_lock_data(static,sched_vm_group_list_lock);
1589static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1590static int sched_vm_group_thread_count;
1591static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1592
1593void
1594thread_vm_bind_group_add(void)
1595{
1596 thread_t self = current_thread();
1597
1598 thread_reference_internal(self);
1599 self->options |= TH_OPT_SCHED_VM_GROUP;
1600
1601 simple_lock(&sched_vm_group_list_lock);
1602 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1603 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1604 simple_unlock(&sched_vm_group_list_lock);
1605
1606 thread_bind(master_processor);
1607
1608 /* Switch to bound processor if not already there */
1609 thread_block(THREAD_CONTINUE_NULL);
1610}
1611
1612static void
1613sched_vm_group_maintenance(void)
1614{
1615 uint64_t ctime = mach_absolute_time();
1616 uint64_t longtime = ctime - sched_tick_interval;
1617 int i;
1618 spl_t s;
1619 boolean_t high_latency_observed = FALSE;
1620 boolean_t runnable_and_not_on_runq_observed = FALSE;
1621 boolean_t bind_target_changed = FALSE;
1622 processor_t bind_target = PROCESSOR_NULL;
1623
1624 /* Make sure nobody attempts to add new threads while we are enumerating them */
1625 simple_lock(&sched_vm_group_list_lock);
1626
1627 s = splsched();
1628
1629 for (i=0; i < sched_vm_group_thread_count; i++) {
1630 thread_t thread = sched_vm_group_thread_list[i];
1631 assert(thread != THREAD_NULL);
1632 thread_lock(thread);
1633 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
1634 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1635 high_latency_observed = TRUE;
1636 } else if (thread->runq == PROCESSOR_NULL) {
1637 /* There are some cases where a thread be transitiong that also fall into this case */
1638 runnable_and_not_on_runq_observed = TRUE;
1639 }
1640 }
1641 thread_unlock(thread);
1642
1643 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1644 /* All the things we are looking for are true, stop looking */
1645 break;
1646 }
1647 }
1648
1649 splx(s);
1650
1651 if (sched_vm_group_temporarily_unbound) {
1652 /* If we turned off binding, make sure everything is OK before rebinding */
1653 if (!high_latency_observed) {
1654 /* rebind */
1655 bind_target_changed = TRUE;
1656 bind_target = master_processor;
1657 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1658 }
1659 } else {
1660 /*
1661 * Check if we're in a bad state, which is defined by high
1662 * latency with no core currently executing a thread. If a
1663 * single thread is making progress on a CPU, that means the
1664 * binding concept to reduce parallelism is working as
1665 * designed.
1666 */
1667 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1668 /* unbind */
1669 bind_target_changed = TRUE;
1670 bind_target = PROCESSOR_NULL;
1671 sched_vm_group_temporarily_unbound = TRUE;
1672 }
1673 }
1674
1675 if (bind_target_changed) {
1676 s = splsched();
1677 for (i=0; i < sched_vm_group_thread_count; i++) {
1678 thread_t thread = sched_vm_group_thread_list[i];
1679 boolean_t removed;
1680 assert(thread != THREAD_NULL);
1681
1682 thread_lock(thread);
1683 removed = thread_run_queue_remove(thread);
1684 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1685 thread_bind_internal(thread, bind_target);
1686 } else {
1687 /*
1688 * Thread was in the middle of being context-switched-to,
1689 * or was in the process of blocking. To avoid switching the bind
1690 * state out mid-flight, defer the change if possible.
1691 */
1692 if (bind_target == PROCESSOR_NULL) {
1693 thread_bind_internal(thread, bind_target);
1694 } else {
1695 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1696 }
1697 }
1698
1699 if (removed) {
1700 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1701 }
1702 thread_unlock(thread);
1703 }
1704 splx(s);
1705 }
1706
1707 simple_unlock(&sched_vm_group_list_lock);
1708}
1709
fe8ab488
A
1710/* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
1711 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1712 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1713 * IPI thrash if this core does not remain idle following the load balancing ASTs
1714 * Idle "thrash", when IPI issue is followed by idle entry/core power down
1715 * followed by a wakeup shortly thereafter.
1716 */
1717
fe8ab488
A
1718#if (DEVELOPMENT || DEBUG)
1719int sched_smt_balance = 1;
1720#endif
1721
3e170ce0
A
1722#if __SMP__
1723/* Invoked with pset locked, returns with pset unlocked */
5ba3f43e 1724void
fe8ab488
A
1725sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) {
1726 processor_t ast_processor = NULL;
1727
1728#if (DEVELOPMENT || DEBUG)
1729 if (__improbable(sched_smt_balance == 0))
1730 goto smt_balance_exit;
1731#endif
1732
1733 assert(cprocessor == current_processor());
1734 if (cprocessor->is_SMT == FALSE)
1735 goto smt_balance_exit;
1736
1737 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1738
1739 /* Determine if both this processor and its sibling are idle,
1740 * indicating an SMT rebalancing opportunity.
1741 */
1742 if (sib_processor->state != PROCESSOR_IDLE)
1743 goto smt_balance_exit;
1744
1745 processor_t sprocessor;
1746
5ba3f43e 1747 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
39037602 1748 qe_foreach_element(sprocessor, &cpset->active_queue, processor_queue) {
fe8ab488
A
1749 if ((sprocessor->state == PROCESSOR_RUNNING) &&
1750 (sprocessor->processor_primary != sprocessor) &&
1751 (sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
5ba3f43e
A
1752 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
1753
1754 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1755 if (ipi_type != SCHED_IPI_NONE) {
1756 assert(sprocessor != cprocessor);
1757 ast_processor = sprocessor;
1758 break;
1759 }
fe8ab488 1760 }
fe8ab488
A
1761 }
1762
1763smt_balance_exit:
1764 pset_unlock(cpset);
1765
1766 if (ast_processor) {
1767 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
5ba3f43e 1768 sched_ipi_perform(ast_processor, ipi_type);
fe8ab488
A
1769 }
1770}
5ba3f43e
A
1771#else
1772/* Invoked with pset locked, returns with pset unlocked */
1773void
1774sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
1775{
1776 pset_unlock(cpset);
1777}
3e170ce0 1778#endif /* __SMP__ */
fe8ab488 1779
a39ff7e2
A
1780static processor_t choose_processor_for_realtime_thread(processor_set_t pset);
1781static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset);
1782int sched_allow_rt_smt = 1;
1783
1c79356b 1784/*
2d21ac55
A
1785 * thread_select:
1786 *
1787 * Select a new thread for the current processor to execute.
55e303ae
A
1788 *
1789 * May select the current thread, which must be locked.
1c79356b 1790 */
2d21ac55 1791static thread_t
5ba3f43e
A
1792thread_select(thread_t thread,
1793 processor_t processor,
1794 ast_t *reason)
1c79356b 1795{
2d21ac55 1796 processor_set_t pset = processor->processor_set;
cf7d32b8 1797 thread_t new_thread = THREAD_NULL;
1c79356b 1798
6d2010ae 1799 assert(processor == current_processor());
3e170ce0 1800 assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
6d2010ae 1801
2d21ac55
A
1802 do {
1803 /*
1804 * Update the priority.
1805 */
6d2010ae
A
1806 if (SCHED(can_update_priority)(thread))
1807 SCHED(update_priority)(thread);
1808
5ba3f43e 1809 processor_state_update_from_thread(processor, thread);
1c79356b 1810
2d21ac55
A
1811 pset_lock(pset);
1812
fe8ab488 1813 assert(processor->state != PROCESSOR_OFF_LINE);
6d2010ae 1814
3e170ce0
A
1815 if (!processor->is_recommended) {
1816 /*
1817 * The performance controller has provided a hint to not dispatch more threads,
1818 * unless they are bound to us (and thus we are the only option
1819 */
1820 if (!SCHED(processor_bound_count)(processor)) {
1821 goto idle;
1822 }
1823 } else if (processor->processor_primary != processor) {
39236c6e
A
1824 /*
1825 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1826 * we should look for work only under the same conditions that choose_processor()
1827 * would have assigned work, which is when all primary processors have been assigned work.
1828 *
1829 * An exception is that bound threads are dispatched to a processor without going through
1830 * choose_processor(), so in those cases we should continue trying to dequeue work.
1831 */
a39ff7e2
A
1832 if (!SCHED(processor_bound_count)(processor)) {
1833 if (!queue_empty(&pset->idle_queue)) {
1834 goto idle;
1835 }
1836
1837 /* There are no idle primaries */
1838
1839 if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) {
1840 bool secondary_can_run_realtime_thread = sched_allow_rt_smt && rt_runq_count(pset) && all_available_primaries_are_running_realtime_threads(pset);
1841 if (!secondary_can_run_realtime_thread) {
1842 goto idle;
1843 }
1844 }
39236c6e
A
1845 }
1846 }
1847
2d21ac55
A
1848 /*
1849 * Test to see if the current thread should continue
3e170ce0 1850 * to run on this processor. Must not be attempting to wait, and not
2d21ac55 1851 * bound to a different processor, nor be in the wrong
3e170ce0
A
1852 * processor set, nor be forced to context switch by TH_SUSP.
1853 *
1854 * Note that there are never any RT threads in the regular runqueue.
1855 *
1856 * This code is very insanely tricky.
2d21ac55 1857 */
3e170ce0 1858
5ba3f43e
A
1859 /* i.e. not waiting, not TH_SUSP'ed */
1860 boolean_t still_running = ((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN);
1861
1862 /*
1863 * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads.
1864 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
1865 */
1866 boolean_t needs_smt_rebalance = (thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor);
1867
1868 boolean_t affinity_mismatch = (thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset);
1869
1870 boolean_t bound_elsewhere = (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor);
1871
1872 boolean_t avoid_processor = (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread));
1873
1874 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
3e170ce0 1875 /*
5ba3f43e
A
1876 * This thread is eligible to keep running on this processor.
1877 *
3e170ce0
A
1878 * RT threads with un-expired quantum stay on processor,
1879 * unless there's a valid RT thread with an earlier deadline.
1880 */
1881 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
5ba3f43e 1882 if (rt_runq_count(pset) > 0) {
3e170ce0 1883
5ba3f43e
A
1884 rt_lock_lock(pset);
1885
1886 if (rt_runq_count(pset) > 0) {
1887
1888 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1889
1890 if (next_rt->realtime.deadline < processor->deadline &&
1891 (next_rt->bound_processor == PROCESSOR_NULL ||
1892 next_rt->bound_processor == processor)) {
1893 /* The next RT thread is better, so pick it off the runqueue. */
1894 goto pick_new_rt_thread;
1895 }
55e303ae 1896 }
5ba3f43e
A
1897
1898 rt_lock_unlock(pset);
55e303ae 1899 }
2d21ac55 1900
3e170ce0 1901 /* This is still the best RT thread to run. */
2d21ac55
A
1902 processor->deadline = thread->realtime.deadline;
1903
5ba3f43e 1904 sched_update_pset_load_average(pset);
a39ff7e2
A
1905
1906 processor_t next_rt_processor = PROCESSOR_NULL;
1907 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
1908
1909 if (rt_runq_count(pset) > 0) {
1910 next_rt_processor = choose_processor_for_realtime_thread(pset);
1911 if (next_rt_processor) {
1912 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
1913 }
1914 }
2d21ac55
A
1915 pset_unlock(pset);
1916
a39ff7e2
A
1917 if (next_rt_processor) {
1918 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
1919 }
1920
2d21ac55 1921 return (thread);
55e303ae
A
1922 }
1923
5ba3f43e 1924 if ((rt_runq_count(pset) == 0) &&
3e170ce0 1925 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
fe8ab488 1926 /* This thread is still the highest priority runnable (non-idle) thread */
2d21ac55 1927 processor->deadline = UINT64_MAX;
55e303ae 1928
5ba3f43e 1929 sched_update_pset_load_average(pset);
2d21ac55 1930 pset_unlock(pset);
55e303ae 1931
2d21ac55
A
1932 return (thread);
1933 }
5ba3f43e
A
1934 } else {
1935 /*
1936 * This processor must context switch.
1937 * If it's due to a rebalance, we should aggressively find this thread a new home.
1938 */
1939 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor)
1940 *reason |= AST_REBALANCE;
2d21ac55
A
1941 }
1942
3e170ce0 1943 /* OK, so we're not going to run the current thread. Look at the RT queue. */
5ba3f43e 1944 if (rt_runq_count(pset) > 0) {
c910b4d9 1945
5ba3f43e 1946 rt_lock_lock(pset);
a39ff7e2 1947
5ba3f43e 1948 if (rt_runq_count(pset) > 0) {
a39ff7e2 1949 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
fe8ab488 1950
a39ff7e2
A
1951 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
1952 (next_rt->bound_processor == processor)))) {
3e170ce0 1953pick_new_rt_thread:
a39ff7e2 1954 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
6d2010ae 1955
a39ff7e2
A
1956 new_thread->runq = PROCESSOR_NULL;
1957 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
1958 rt_runq_count_decr(pset);
6d2010ae 1959
a39ff7e2
A
1960 processor->deadline = new_thread->realtime.deadline;
1961 processor_state_update_from_thread(processor, new_thread);
c910b4d9 1962
a39ff7e2
A
1963 rt_lock_unlock(pset);
1964 sched_update_pset_load_average(pset);
1965
1966 processor_t ast_processor = PROCESSOR_NULL;
1967 processor_t next_rt_processor = PROCESSOR_NULL;
1968 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
1969 sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
1970
1971 if (processor->processor_secondary != NULL) {
1972 processor_t sprocessor = processor->processor_secondary;
1973 if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) {
1974 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1975 ast_processor = sprocessor;
1976 }
1977 }
1978 if (rt_runq_count(pset) > 0) {
1979 next_rt_processor = choose_processor_for_realtime_thread(pset);
1980 if (next_rt_processor) {
1981 next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT);
1982 }
1983 }
1984 pset_unlock(pset);
1985
1986 if (ast_processor) {
1987 sched_ipi_perform(ast_processor, ipi_type);
1988 }
c910b4d9 1989
a39ff7e2
A
1990 if (next_rt_processor) {
1991 sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
1992 }
1993
1994 return (new_thread);
1995 }
39236c6e 1996 }
5ba3f43e
A
1997
1998 rt_lock_unlock(pset);
c910b4d9 1999 }
2d21ac55 2000
3e170ce0 2001 processor->deadline = UINT64_MAX;
6d2010ae 2002
3e170ce0 2003 /* No RT threads, so let's look at the regular threads. */
5ba3f43e
A
2004 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
2005 sched_update_pset_load_average(pset);
a39ff7e2 2006 processor_state_update_from_thread(processor, new_thread);
6d2010ae 2007 pset_unlock(pset);
6d2010ae
A
2008 return (new_thread);
2009 }
c910b4d9 2010
3e170ce0
A
2011#if __SMP__
2012 if (SCHED(steal_thread_enabled)) {
2013 /*
2014 * No runnable threads, attempt to steal
2015 * from other processors. Returns with pset lock dropped.
2016 */
2d21ac55 2017
3e170ce0
A
2018 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
2019 return (new_thread);
2020 }
cf7d32b8 2021
3e170ce0
A
2022 /*
2023 * If other threads have appeared, shortcut
2024 * around again.
2025 */
5ba3f43e 2026 if (!SCHED(processor_queue_empty)(processor) || rt_runq_count(pset) > 0)
3e170ce0
A
2027 continue;
2028
2029 pset_lock(pset);
2030 }
2031#endif
55e303ae 2032
39236c6e 2033 idle:
1c79356b
A
2034 /*
2035 * Nothing is runnable, so set this processor idle if it
2d21ac55 2036 * was running.
1c79356b 2037 */
55e303ae 2038 if (processor->state == PROCESSOR_RUNNING) {
55e303ae 2039 processor->state = PROCESSOR_IDLE;
1c79356b 2040
5ba3f43e
A
2041 if (!processor->is_recommended) {
2042 re_queue_head(&pset->unused_queue, &processor->processor_queue);
2043 } else if (processor->processor_primary == processor) {
39037602
A
2044 re_queue_head(&pset->idle_queue, &processor->processor_queue);
2045 } else {
2046 re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
b0d623f7 2047 }
5ba3f43e
A
2048
2049 pset->active_processor_count--;
2050 sched_update_pset_load_average(pset);
1c79356b 2051 }
1c79356b 2052
3e170ce0 2053#if __SMP__
fe8ab488 2054 /* Invoked with pset locked, returns with pset unlocked */
5ba3f43e 2055 SCHED(processor_balance)(processor, pset);
3e170ce0
A
2056#else
2057 pset_unlock(pset);
2058#endif
2d21ac55 2059
6d2010ae 2060#if CONFIG_SCHED_IDLE_IN_PLACE
2d21ac55
A
2061 /*
2062 * Choose idle thread if fast idle is not possible.
2063 */
fe8ab488
A
2064 if (processor->processor_primary != processor)
2065 return (processor->idle_thread);
2066
6d2010ae 2067 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
2d21ac55
A
2068 return (processor->idle_thread);
2069
2070 /*
2071 * Perform idling activities directly without a
2072 * context switch. Return dispatched thread,
2073 * else check again for a runnable thread.
2074 */
2075 new_thread = thread_select_idle(thread, processor);
2076
6d2010ae
A
2077#else /* !CONFIG_SCHED_IDLE_IN_PLACE */
2078
2079 /*
2080 * Do a full context switch to idle so that the current
2081 * thread can start running on another processor without
2082 * waiting for the fast-idled processor to wake up.
2083 */
3e170ce0 2084 new_thread = processor->idle_thread;
6d2010ae
A
2085
2086#endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
2087
2d21ac55
A
2088 } while (new_thread == THREAD_NULL);
2089
2090 return (new_thread);
2091}
2092
6d2010ae 2093#if CONFIG_SCHED_IDLE_IN_PLACE
2d21ac55
A
2094/*
2095 * thread_select_idle:
2096 *
2097 * Idle the processor using the current thread context.
2098 *
2099 * Called with thread locked, then dropped and relocked.
2100 */
2101static thread_t
2102thread_select_idle(
2103 thread_t thread,
2104 processor_t processor)
2105{
2106 thread_t new_thread;
39236c6e
A
2107 uint64_t arg1, arg2;
2108 int urgency;
2109
fe8ab488 2110 sched_run_decr(thread);
2d21ac55
A
2111
2112 thread->state |= TH_IDLE;
5ba3f43e 2113 processor_state_update_idle(procssor);
2d21ac55 2114
316670eb
A
2115 /* Reload precise timing global policy to thread-local policy */
2116 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2117
2d21ac55
A
2118 thread_unlock(thread);
2119
2120 /*
2121 * Switch execution timing to processor idle thread.
2122 */
2123 processor->last_dispatch = mach_absolute_time();
fe8ab488
A
2124
2125#ifdef CONFIG_MACH_APPROXIMATE_TIME
2126 commpage_update_mach_approximate_time(processor->last_dispatch);
2127#endif
2128
6d2010ae 2129 thread->last_run_time = processor->last_dispatch;
2d21ac55
A
2130 thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
2131 PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
2132
5ba3f43e 2133
2d21ac55
A
2134 /*
2135 * Cancel the quantum timer while idling.
2136 */
5ba3f43e 2137 timer_call_quantum_timer_cancel(&processor->quantum_timer);
3e170ce0 2138 processor->first_timeslice = FALSE;
2d21ac55
A
2139
2140 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2141
3e170ce0 2142 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);
6d2010ae 2143
2d21ac55
A
2144 /*
2145 * Enable interrupts and perform idling activities. No
2146 * preemption due to TH_IDLE being set.
2147 */
2148 spllo(); new_thread = processor_idle(thread, processor);
2149
cf7d32b8
A
2150 /*
2151 * Return at splsched.
2152 */
2d21ac55
A
2153 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
2154
2155 thread_lock(thread);
2156
2157 /*
2158 * If awakened, switch to thread timer and start a new quantum.
2159 * Otherwise skip; we will context switch to another thread or return here.
2160 */
2161 if (!(thread->state & TH_WAIT)) {
2162 processor->last_dispatch = mach_absolute_time();
2163 thread_timer_event(processor->last_dispatch, &thread->system_timer);
2164 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2d21ac55 2165 thread_quantum_init(thread);
fe8ab488 2166 processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
5ba3f43e
A
2167 timer_call_quantum_timer_enter(&processor->quantum_timer,
2168 thread, processor->quantum_end, processor->last_dispatch);
3e170ce0 2169 processor->first_timeslice = TRUE;
2d21ac55
A
2170
2171 thread->computation_epoch = processor->last_dispatch;
1c79356b
A
2172 }
2173
2d21ac55 2174 thread->state &= ~TH_IDLE;
55e303ae 2175
39236c6e
A
2176 urgency = thread_get_urgency(thread, &arg1, &arg2);
2177
3e170ce0 2178 thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);
39236c6e 2179
fe8ab488 2180 sched_run_incr(thread);
39236c6e 2181
2d21ac55 2182 return (new_thread);
1c79356b 2183}
6d2010ae
A
2184#endif /* CONFIG_SCHED_IDLE_IN_PLACE */
2185
b0d623f7 2186/*
3e170ce0 2187 * thread_invoke
b0d623f7 2188 *
3e170ce0 2189 * Called at splsched with neither thread locked.
b0d623f7 2190 *
3e170ce0 2191 * Perform a context switch and start executing the new thread.
55e303ae 2192 *
3e170ce0
A
2193 * Returns FALSE when the context switch didn't happen.
2194 * The reference to the new thread is still consumed.
39236c6e
A
2195 *
2196 * "self" is what is currently running on the processor,
2197 * "thread" is the new thread to context switch to
2198 * (which may be the same thread in some cases)
2199 */
2d21ac55 2200static boolean_t
1c79356b 2201thread_invoke(
39236c6e
A
2202 thread_t self,
2203 thread_t thread,
91447636 2204 ast_t reason)
1c79356b 2205{
39236c6e 2206 if (__improbable(get_preemption_level() != 0)) {
b0d623f7
A
2207 int pl = get_preemption_level();
2208 panic("thread_invoke: preemption_level %d, possible cause: %s",
2209 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2210 "blocking while holding a spinlock, or within interrupt context"));
2211 }
0b4e3aa0 2212
3e170ce0
A
2213 thread_continue_t continuation = self->continuation;
2214 void *parameter = self->parameter;
2215 processor_t processor;
2216
2217 uint64_t ctime = mach_absolute_time();
2218
2219#ifdef CONFIG_MACH_APPROXIMATE_TIME
2220 commpage_update_mach_approximate_time(ctime);
2221#endif
2222
2223#if defined(CONFIG_SCHED_TIMESHARE_CORE)
5ba3f43e
A
2224 if ((thread->state & TH_IDLE) == 0)
2225 sched_timeshare_consider_maintenance(ctime);
3e170ce0
A
2226#endif
2227
5ba3f43e
A
2228#if MONOTONIC
2229 mt_sched_update(self);
2230#endif /* MONOTONIC */
2231
39037602 2232 assert_thread_magic(self);
2d21ac55 2233 assert(self == current_thread());
fe8ab488 2234 assert(self->runq == PROCESSOR_NULL);
3e170ce0 2235 assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
91447636 2236
2d21ac55 2237 thread_lock(thread);
1c79356b 2238
39037602 2239 assert_thread_magic(thread);
3e170ce0 2240 assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
fe8ab488
A
2241 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2242 assert(thread->runq == PROCESSOR_NULL);
1c79356b 2243
316670eb
A
2244 /* Reload precise timing global policy to thread-local policy */
2245 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
3e170ce0 2246
fe8ab488
A
2247 /* Update SFI class based on other factors */
2248 thread->sfi_class = sfi_thread_classify(thread);
5ba3f43e
A
2249
2250 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2251 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2252 /*
2253 * In case a base_pri update happened between the timestamp and
2254 * taking the thread lock
2255 */
2256 if (ctime <= thread->last_basepri_change_time)
2257 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
fe8ab488 2258
3e170ce0 2259 /* Allow realtime threads to hang onto a stack. */
6d2010ae 2260 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
2d21ac55 2261 self->reserved_stack = self->kernel_stack;
1c79356b 2262
5ba3f43e
A
2263 /* Prepare for spin debugging */
2264#if INTERRUPT_MASKED_DEBUG
2265 ml_spin_debug_clear(thread);
2266#endif
2267
91447636 2268 if (continuation != NULL) {
2d21ac55 2269 if (!thread->kernel_stack) {
9bccf70c 2270 /*
2d21ac55 2271 * If we are using a privileged stack,
9bccf70c 2272 * check to see whether we can exchange it with
2d21ac55 2273 * that of the other thread.
9bccf70c 2274 */
2d21ac55 2275 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
9bccf70c 2276 goto need_stack;
1c79356b 2277
91447636
A
2278 /*
2279 * Context switch by performing a stack handoff.
2280 */
2d21ac55
A
2281 continuation = thread->continuation;
2282 parameter = thread->parameter;
1c79356b 2283
9bccf70c 2284 processor = current_processor();
2d21ac55 2285 processor->active_thread = thread;
5ba3f43e
A
2286 processor_state_update_from_thread(processor, thread);
2287
2d21ac55
A
2288 if (thread->last_processor != processor && thread->last_processor != NULL) {
2289 if (thread->last_processor->processor_set != processor->processor_set)
2290 thread->ps_switch++;
2291 thread->p_switch++;
2292 }
2293 thread->last_processor = processor;
2294 thread->c_switch++;
2295 ast_context(thread);
3e170ce0 2296
2d21ac55 2297 thread_unlock(thread);
1c79356b 2298
2d21ac55 2299 self->reason = reason;
91447636 2300
39236c6e
A
2301 processor->last_dispatch = ctime;
2302 self->last_run_time = ctime;
2303 thread_timer_event(ctime, &thread->system_timer);
2d21ac55 2304 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
316670eb
A
2305
2306 /*
2307 * Since non-precise user/kernel time doesn't update the state timer
2308 * during privilege transitions, synthesize an event now.
2309 */
2310 if (!thread->precise_user_kernel_time) {
2311 timer_switch(PROCESSOR_DATA(processor, current_state),
39236c6e 2312 ctime,
316670eb
A
2313 PROCESSOR_DATA(processor, current_state));
2314 }
2d21ac55 2315
316670eb
A
2316 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2317 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
2318 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
1c79356b 2319
39236c6e 2320 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
3e170ce0 2321 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
6d2010ae
A
2322 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2323 }
2324
b0d623f7
A
2325 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2326
6d2010ae
A
2327 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2328
2329 TLOG(1, "thread_invoke: calling stack_handoff\n");
2330 stack_handoff(self, thread);
9bccf70c 2331
3e170ce0
A
2332 /* 'self' is now off core */
2333 assert(thread == current_thread());
2334
b0d623f7
A
2335 DTRACE_SCHED(on__cpu);
2336
39037602
A
2337#if KPERF
2338 kperf_on_cpu(thread, continuation, NULL);
2339#endif /* KPERF */
2340
5ba3f43e
A
2341#if KASAN
2342 kasan_unpoison_fakestack(self);
2343 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2344#endif
2345
2d21ac55 2346 thread_dispatch(self, thread);
1c79356b 2347
2d21ac55 2348 thread->continuation = thread->parameter = NULL;
1c79356b 2349
2d21ac55 2350 counter(c_thread_invoke_hits++);
1c79356b 2351
9bccf70c 2352 (void) spllo();
1c79356b 2353
2d21ac55
A
2354 assert(continuation);
2355 call_continuation(continuation, parameter, thread->wait_result);
9bccf70c 2356 /*NOTREACHED*/
9bccf70c 2357 }
2d21ac55 2358 else if (thread == self) {
9bccf70c 2359 /* same thread but with continuation */
2d21ac55 2360 ast_context(self);
9bccf70c 2361 counter(++c_thread_invoke_same);
3e170ce0 2362
2d21ac55 2363 thread_unlock(self);
9bccf70c 2364
39037602
A
2365#if KPERF
2366 kperf_on_cpu(thread, continuation, NULL);
2367#endif /* KPERF */
2368
316670eb
A
2369 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2370 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2371 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
6d2010ae 2372
5ba3f43e
A
2373#if KASAN
2374 kasan_unpoison_fakestack(self);
2375 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2376#endif
2377
2d21ac55
A
2378 self->continuation = self->parameter = NULL;
2379
9bccf70c 2380 (void) spllo();
55e303ae 2381
2d21ac55 2382 call_continuation(continuation, parameter, self->wait_result);
9bccf70c
A
2383 /*NOTREACHED*/
2384 }
3e170ce0 2385 } else {
9bccf70c 2386 /*
2d21ac55 2387 * Check that the other thread has a stack
9bccf70c 2388 */
2d21ac55 2389 if (!thread->kernel_stack) {
9bccf70c 2390need_stack:
2d21ac55
A
2391 if (!stack_alloc_try(thread)) {
2392 counter(c_thread_invoke_misses++);
2393 thread_unlock(thread);
2394 thread_stack_enqueue(thread);
9bccf70c
A
2395 return (FALSE);
2396 }
3e170ce0 2397 } else if (thread == self) {
2d21ac55 2398 ast_context(self);
9bccf70c 2399 counter(++c_thread_invoke_same);
2d21ac55 2400 thread_unlock(self);
6d2010ae 2401
316670eb
A
2402 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2403 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2404 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
6d2010ae 2405
9bccf70c
A
2406 return (TRUE);
2407 }
2408 }
1c79356b
A
2409
2410 /*
91447636 2411 * Context switch by full context save.
1c79356b 2412 */
9bccf70c 2413 processor = current_processor();
2d21ac55 2414 processor->active_thread = thread;
5ba3f43e
A
2415 processor_state_update_from_thread(processor, thread);
2416
2d21ac55
A
2417 if (thread->last_processor != processor && thread->last_processor != NULL) {
2418 if (thread->last_processor->processor_set != processor->processor_set)
2419 thread->ps_switch++;
2420 thread->p_switch++;
2421 }
2422 thread->last_processor = processor;
2423 thread->c_switch++;
2424 ast_context(thread);
3e170ce0 2425
2d21ac55 2426 thread_unlock(thread);
1c79356b 2427
2d21ac55 2428 counter(c_thread_invoke_csw++);
1c79356b 2429
2d21ac55 2430 self->reason = reason;
1c79356b 2431
39236c6e
A
2432 processor->last_dispatch = ctime;
2433 self->last_run_time = ctime;
2434 thread_timer_event(ctime, &thread->system_timer);
2d21ac55 2435 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
91447636 2436
316670eb
A
2437 /*
2438 * Since non-precise user/kernel time doesn't update the state timer
2439 * during privilege transitions, synthesize an event now.
2440 */
2441 if (!thread->precise_user_kernel_time) {
2442 timer_switch(PROCESSOR_DATA(processor, current_state),
39236c6e 2443 ctime,
316670eb
A
2444 PROCESSOR_DATA(processor, current_state));
2445 }
3e170ce0 2446
316670eb
A
2447 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2448 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2449 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
b0d623f7 2450
6d2010ae 2451 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
3e170ce0 2452 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
6d2010ae
A
2453 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2454 }
2455
b0d623f7 2456 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
1c79356b 2457
6d2010ae
A
2458 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2459
1c79356b 2460 /*
91447636 2461 * This is where we actually switch register context,
2d21ac55
A
2462 * and address space if required. We will next run
2463 * as a result of a subsequent context switch.
3e170ce0
A
2464 *
2465 * Once registers are switched and the processor is running "thread",
2466 * the stack variables and non-volatile registers will contain whatever
2467 * was there the last time that thread blocked. No local variables should
2468 * be used after this point, except for the special case of "thread", which
2469 * the platform layer returns as the previous thread running on the processor
2470 * via the function call ABI as a return register, and "self", which may have
2471 * been stored on the stack or a non-volatile register, but a stale idea of
2472 * what was on the CPU is newly-accurate because that thread is again
2473 * running on the CPU.
91447636 2474 */
316670eb 2475 assert(continuation == self->continuation);
2d21ac55 2476 thread = machine_switch_context(self, continuation, thread);
316670eb 2477 assert(self == current_thread());
b0d623f7
A
2478 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2479
2480 DTRACE_SCHED(on__cpu);
1c79356b 2481
39037602
A
2482#if KPERF
2483 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2484#endif /* KPERF */
2485
1c79356b 2486 /*
2d21ac55 2487 * We have been resumed and are set to run.
1c79356b 2488 */
2d21ac55 2489 thread_dispatch(thread, self);
9bccf70c 2490
91447636 2491 if (continuation) {
2d21ac55
A
2492 self->continuation = self->parameter = NULL;
2493
9bccf70c 2494 (void) spllo();
55e303ae 2495
2d21ac55 2496 call_continuation(continuation, parameter, self->wait_result);
9bccf70c 2497 /*NOTREACHED*/
1c79356b
A
2498 }
2499
9bccf70c 2500 return (TRUE);
1c79356b
A
2501}
2502
3e170ce0
A
2503#if defined(CONFIG_SCHED_DEFERRED_AST)
2504/*
2505 * pset_cancel_deferred_dispatch:
2506 *
2507 * Cancels all ASTs that we can cancel for the given processor set
2508 * if the current processor is running the last runnable thread in the
2509 * system.
2510 *
2511 * This function assumes the current thread is runnable. This must
2512 * be called with the pset unlocked.
2513 */
2514static void
2515pset_cancel_deferred_dispatch(
2516 processor_set_t pset,
2517 processor_t processor)
2518{
2519 processor_t active_processor = NULL;
2520 uint32_t sampled_sched_run_count;
2521
2522 pset_lock(pset);
39037602 2523 sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN];
3e170ce0
A
2524
2525 /*
2526 * If we have emptied the run queue, and our current thread is runnable, we
2527 * should tell any processors that are still DISPATCHING that they will
2528 * probably not have any work to do. In the event that there are no
2529 * pending signals that we can cancel, this is also uninteresting.
2530 *
2531 * In the unlikely event that another thread becomes runnable while we are
2532 * doing this (sched_run_count is atomically updated, not guarded), the
2533 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2534 * in order to dispatch it to a processor in our pset. So, the other
2535 * codepath will wait while we squash all cancelable ASTs, get the pset
2536 * lock, and then dispatch the freshly runnable thread. So this should be
2537 * correct (we won't accidentally have a runnable thread that hasn't been
2538 * dispatched to an idle processor), if not ideal (we may be restarting the
2539 * dispatch process, which could have some overhead).
2540 *
2541 */
2542 if ((sampled_sched_run_count == 1) &&
2543 (pset->pending_deferred_AST_cpu_mask)) {
2544 qe_foreach_element_safe(active_processor, &pset->active_queue, processor_queue) {
2545 /*
2546 * If a processor is DISPATCHING, it could be because of
2547 * a cancelable signal.
2548 *
2549 * IF the processor is not our
2550 * current processor (the current processor should not
2551 * be DISPATCHING, so this is a bit paranoid), AND there
2552 * is a cancelable signal pending on the processor, AND
2553 * there is no non-cancelable signal pending (as there is
2554 * no point trying to backtrack on bringing the processor
2555 * up if a signal we cannot cancel is outstanding), THEN
2556 * it should make sense to roll back the processor state
2557 * to the IDLE state.
2558 *
2559 * If the racey nature of this approach (as the signal
2560 * will be arbitrated by hardware, and can fire as we
2561 * roll back state) results in the core responding
2562 * despite being pushed back to the IDLE state, it
2563 * should be no different than if the core took some
2564 * interrupt while IDLE.
2565 */
2566 if ((active_processor->state == PROCESSOR_DISPATCHING) &&
5ba3f43e
A
2567 (bit_test(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id)) &&
2568 (!bit_test(pset->pending_AST_cpu_mask, active_processor->cpu_id)) &&
3e170ce0
A
2569 (active_processor != processor)) {
2570 /*
2571 * Squash all of the processor state back to some
2572 * reasonable facsimile of PROCESSOR_IDLE.
2573 *
2574 * TODO: What queue policy do we actually want here?
2575 * We want to promote selection of a good processor
2576 * to run on. Do we want to enqueue at the head?
2577 * The tail? At the (relative) old position in the
2578 * queue? Or something else entirely?
2579 */
5ba3f43e
A
2580 if (!active_processor->is_recommended) {
2581 re_queue_head(&pset->unused_queue, &active_processor->processor_queue);
2582 } else if (active_processor->processor_primary == active_processor) {
2583 re_queue_head(&pset->idle_queue, &active_processor->processor_queue);
2584 } else {
2585 re_queue_head(&pset->idle_secondary_queue, &active_processor->processor_queue);
2586 }
3e170ce0 2587
5ba3f43e
A
2588 pset->active_processor_count--;
2589 sched_update_pset_load_average(pset);
3e170ce0 2590
5ba3f43e
A
2591 assert(active_processor->next_thread == THREAD_NULL);
2592 processor_state_update_idle(active_processor);
3e170ce0
A
2593 active_processor->deadline = UINT64_MAX;
2594 active_processor->state = PROCESSOR_IDLE;
5ba3f43e 2595 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
3e170ce0
A
2596 machine_signal_idle_cancel(active_processor);
2597 }
2598
2599 }
2600 }
2601
2602 pset_unlock(pset);
2603}
2604#else
2605/* We don't support deferred ASTs; everything is candycanes and sunshine. */
2606#endif
2607
5ba3f43e
A
2608static void
2609thread_csw_callout(
2610 thread_t old,
2611 thread_t new,
2612 uint64_t timestamp)
2613{
2614 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2615 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
2616 machine_switch_perfcontrol_context(event, timestamp, 0,
2617 same_pri_latency, old, new);
2618}
2619
2620
1c79356b 2621/*
2d21ac55 2622 * thread_dispatch:
1c79356b 2623 *
2d21ac55
A
2624 * Handle threads at context switch. Re-dispatch other thread
2625 * if still running, otherwise update run state and perform
2626 * special actions. Update quantum for other thread and begin
2627 * the quantum for ourselves.
91447636 2628 *
3e170ce0
A
2629 * "thread" is the old thread that we have switched away from.
2630 * "self" is the new current thread that we have context switched to
39236c6e 2631 *
91447636 2632 * Called at splsched.
1c79356b
A
2633 */
2634void
2d21ac55
A
2635thread_dispatch(
2636 thread_t thread,
2637 thread_t self)
1c79356b 2638{
2d21ac55
A
2639 processor_t processor = self->last_processor;
2640
3e170ce0
A
2641 assert(processor == current_processor());
2642 assert(self == current_thread());
2643 assert(thread != self);
2644
2d21ac55 2645 if (thread != THREAD_NULL) {
5ba3f43e
A
2646 /*
2647 * Do the perfcontrol callout for context switch.
2648 * The reason we do this here is:
2649 * - thread_dispatch() is called from various places that are not
2650 * the direct context switch path for eg. processor shutdown etc.
2651 * So adding the callout here covers all those cases.
2652 * - We want this callout as early as possible to be close
2653 * to the timestamp taken in thread_invoke()
2654 * - We want to avoid holding the thread lock while doing the
2655 * callout
2656 * - We do not want to callout if "thread" is NULL.
2657 */
2658 thread_csw_callout(thread, self, processor->last_dispatch);
2659
91447636 2660 /*
2d21ac55
A
2661 * If blocked at a continuation, discard
2662 * the stack.
91447636 2663 */
2d21ac55
A
2664 if (thread->continuation != NULL && thread->kernel_stack != 0)
2665 stack_free(thread);
2666
3e170ce0
A
2667 if (thread->state & TH_IDLE) {
2668 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
39037602
A
2669 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2670 (uintptr_t)thread_tid(thread), 0, thread->state,
2671 sched_run_buckets[TH_BUCKET_RUN], 0);
3e170ce0 2672 } else {
316670eb
A
2673 int64_t consumed;
2674 int64_t remainder = 0;
2675
2676 if (processor->quantum_end > processor->last_dispatch)
2677 remainder = processor->quantum_end -
2678 processor->last_dispatch;
2679
fe8ab488 2680 consumed = thread->quantum_remaining - remainder;
316670eb 2681
39236c6e 2682 if ((thread->reason & AST_LEDGER) == 0) {
316670eb 2683 /*
39236c6e
A
2684 * Bill CPU time to both the task and
2685 * the individual thread.
316670eb 2686 */
5c9f4661
A
2687 ledger_credit_thread(thread, thread->t_ledger,
2688 task_ledgers.cpu_time, consumed);
2689 ledger_credit_thread(thread, thread->t_threadledger,
2690 thread_ledgers.cpu_time, consumed);
fe8ab488 2691 if (thread->t_bankledger) {
5c9f4661
A
2692 ledger_credit_thread(thread, thread->t_bankledger,
2693 bank_ledgers.cpu_time,
2694 (consumed - thread->t_deduct_bank_ledger_time));
fe8ab488 2695 }
5c9f4661 2696 thread->t_deduct_bank_ledger_time = 0;
39236c6e 2697 }
316670eb 2698
2d21ac55
A
2699 wake_lock(thread);
2700 thread_lock(thread);
9bccf70c 2701
91447636 2702 /*
39037602
A
2703 * Apply a priority floor if the thread holds a kernel resource
2704 * Do this before checking starting_pri to avoid overpenalizing
2705 * repeated rwlock blockers.
2706 */
2707 if (__improbable(thread->rwlock_count != 0))
2708 lck_rw_set_promotion_locked(thread);
2709
2710 boolean_t keep_quantum = processor->first_timeslice;
2711
2712 /*
2713 * Treat a thread which has dropped priority since it got on core
2714 * as having expired its quantum.
91447636 2715 */
39037602
A
2716 if (processor->starting_pri > thread->sched_pri)
2717 keep_quantum = FALSE;
2718
2719 /* Compute remainder of current quantum. */
2720 if (keep_quantum &&
316670eb 2721 processor->quantum_end > processor->last_dispatch)
fe8ab488 2722 thread->quantum_remaining = (uint32_t)remainder;
2d21ac55 2723 else
fe8ab488 2724 thread->quantum_remaining = 0;
2d21ac55 2725
6d2010ae 2726 if (thread->sched_mode == TH_MODE_REALTIME) {
2d21ac55
A
2727 /*
2728 * Cancel the deadline if the thread has
2729 * consumed the entire quantum.
2730 */
fe8ab488 2731 if (thread->quantum_remaining == 0) {
2d21ac55 2732 thread->realtime.deadline = UINT64_MAX;
2d21ac55 2733 }
b7266188 2734 } else {
3e170ce0 2735#if defined(CONFIG_SCHED_TIMESHARE_CORE)
2d21ac55
A
2736 /*
2737 * For non-realtime threads treat a tiny
2738 * remaining quantum as an expired quantum
2739 * but include what's left next time.
2740 */
fe8ab488 2741 if (thread->quantum_remaining < min_std_quantum) {
2d21ac55 2742 thread->reason |= AST_QUANTUM;
fe8ab488 2743 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2d21ac55 2744 }
3e170ce0 2745#endif /* CONFIG_SCHED_TIMESHARE_CORE */
2d21ac55
A
2746 }
2747
91447636 2748 /*
2d21ac55
A
2749 * If we are doing a direct handoff then
2750 * take the remainder of the quantum.
91447636 2751 */
2d21ac55 2752 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
fe8ab488 2753 self->quantum_remaining = thread->quantum_remaining;
2d21ac55 2754 thread->reason |= AST_QUANTUM;
fe8ab488
A
2755 thread->quantum_remaining = 0;
2756 } else {
2757#if defined(CONFIG_SCHED_MULTIQ)
3e170ce0
A
2758 if (SCHED(sched_groups_enabled) &&
2759 thread->sched_group == self->sched_group) {
fe8ab488 2760 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3e170ce0 2761 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
fe8ab488
A
2762 self->reason, (uintptr_t)thread_tid(thread),
2763 self->quantum_remaining, thread->quantum_remaining, 0);
2764
2765 self->quantum_remaining = thread->quantum_remaining;
2766 thread->quantum_remaining = 0;
3e170ce0 2767 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
fe8ab488
A
2768 }
2769#endif /* defined(CONFIG_SCHED_MULTIQ) */
91447636 2770 }
91447636 2771
b0d623f7 2772 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2d21ac55
A
2773
2774 if (!(thread->state & TH_WAIT)) {
2775 /*
3e170ce0 2776 * Still runnable.
2d21ac55 2777 */
5ba3f43e 2778 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
3e170ce0 2779
d190cdc3 2780 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch);
3e170ce0 2781
5ba3f43e
A
2782 ast_t reason = thread->reason;
2783 sched_options_t options = SCHED_NONE;
2784
2785 if (reason & AST_REBALANCE) {
2786 options |= SCHED_REBALANCE;
2787 if (reason & AST_QUANTUM) {
2788 /* Having gone to the trouble of forcing this thread off a less preferred core,
2789 * we should force the preferable core to reschedule immediatey to give this
2790 * thread a chance to run instead of just sitting on the run queue where
2791 * it may just be stolen back by the idle core we just forced it off.
2792 * But only do this at the end of a quantum to prevent cascading effects.
2793 */
2794 options |= SCHED_PREEMPT;
2795 }
2796 }
2797
2798 if (reason & AST_QUANTUM)
2799 options |= SCHED_TAILQ;
2800 else if (reason & AST_PREEMPT)
2801 options |= SCHED_HEADQ;
2d21ac55 2802 else
5ba3f43e
A
2803 options |= (SCHED_PREEMPT | SCHED_TAILQ);
2804
2805 thread_setrun(thread, options);
2d21ac55 2806
fe8ab488 2807 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
39037602
A
2808 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2809 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2810 sched_run_buckets[TH_BUCKET_RUN], 0);
3e170ce0 2811
316670eb
A
2812 if (thread->wake_active) {
2813 thread->wake_active = FALSE;
2814 thread_unlock(thread);
2815
2816 thread_wakeup(&thread->wake_active);
3e170ce0 2817 } else {
316670eb 2818 thread_unlock(thread);
3e170ce0 2819 }
316670eb 2820
2d21ac55 2821 wake_unlock(thread);
3e170ce0 2822 } else {
2d21ac55
A
2823 /*
2824 * Waiting.
2825 */
b7266188 2826 boolean_t should_terminate = FALSE;
fe8ab488 2827 uint32_t new_run_count;
b7266188
A
2828
2829 /* Only the first call to thread_dispatch
2830 * after explicit termination should add
2831 * the thread to the termination queue
2832 */
2833 if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
2834 should_terminate = TRUE;
2835 thread->state |= TH_TERMINATE2;
2836 }
2837
2d21ac55 2838 thread->state &= ~TH_RUN;
5ba3f43e 2839 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
39236c6e
A
2840 thread->chosen_processor = PROCESSOR_NULL;
2841
fe8ab488 2842 new_run_count = sched_run_decr(thread);
2d21ac55 2843
3e170ce0 2844#if CONFIG_SCHED_SFI
fe8ab488
A
2845 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
2846 if (thread->reason & AST_SFI) {
2847 thread->wait_sfi_begin_time = processor->last_dispatch;
2848 }
39236c6e 2849 }
3e170ce0
A
2850#endif
2851
d190cdc3 2852 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch);
fe8ab488
A
2853
2854 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
39037602
A
2855 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2856 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2857 new_run_count, 0);
2d21ac55 2858
b7266188
A
2859 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2860
2d21ac55
A
2861 if (thread->wake_active) {
2862 thread->wake_active = FALSE;
2863 thread_unlock(thread);
2864
2865 thread_wakeup(&thread->wake_active);
3e170ce0 2866 } else {
2d21ac55 2867 thread_unlock(thread);
3e170ce0 2868 }
91447636 2869
2d21ac55 2870 wake_unlock(thread);
91447636 2871
b7266188 2872 if (should_terminate)
2d21ac55
A
2873 thread_terminate_enqueue(thread);
2874 }
2875 }
91447636 2876 }
91447636 2877
5ba3f43e
A
2878 int urgency = THREAD_URGENCY_NONE;
2879 uint64_t latency = 0;
2880
3e170ce0
A
2881 /* Update (new) current thread and reprogram quantum timer */
2882 thread_lock(self);
5ba3f43e 2883
2d21ac55 2884 if (!(self->state & TH_IDLE)) {
39236c6e 2885 uint64_t arg1, arg2;
3e170ce0
A
2886
2887#if CONFIG_SCHED_SFI
fe8ab488
A
2888 ast_t new_ast;
2889
fe8ab488 2890 new_ast = sfi_thread_needs_ast(self, NULL);
fe8ab488
A
2891
2892 if (new_ast != AST_NONE) {
2893 ast_on(new_ast);
2894 }
3e170ce0
A
2895#endif
2896
5ba3f43e
A
2897 assertf(processor->last_dispatch >= self->last_made_runnable_time,
2898 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
2899 processor->last_dispatch, self->last_made_runnable_time);
2900
2901 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
2902
3e170ce0 2903 latency = processor->last_dispatch - self->last_made_runnable_time;
5ba3f43e 2904 assert(latency >= self->same_pri_latency);
6d2010ae 2905
39236c6e
A
2906 urgency = thread_get_urgency(self, &arg1, &arg2);
2907
3e170ce0
A
2908 thread_tell_urgency(urgency, arg1, arg2, latency, self);
2909
91447636 2910 /*
2d21ac55 2911 * Get a new quantum if none remaining.
91447636 2912 */
fe8ab488 2913 if (self->quantum_remaining == 0) {
2d21ac55 2914 thread_quantum_init(self);
6d2010ae 2915 }
91447636
A
2916
2917 /*
2d21ac55 2918 * Set up quantum timer and timeslice.
91447636 2919 */
fe8ab488 2920 processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
5ba3f43e
A
2921 timer_call_quantum_timer_enter(&processor->quantum_timer, self,
2922 processor->quantum_end, processor->last_dispatch);
91447636 2923
3e170ce0
A
2924 processor->first_timeslice = TRUE;
2925 } else {
5ba3f43e 2926 timer_call_quantum_timer_cancel(&processor->quantum_timer);
3e170ce0 2927 processor->first_timeslice = FALSE;
91447636 2928
3e170ce0 2929 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
91447636 2930 }
6d2010ae 2931
813fb2f6 2932 assert(self->block_hint == kThreadWaitNone);
3e170ce0
A
2933 self->computation_epoch = processor->last_dispatch;
2934 self->reason = AST_NONE;
39037602 2935 processor->starting_pri = self->sched_pri;
3e170ce0
A
2936
2937 thread_unlock(self);
2938
5ba3f43e
A
2939 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
2940 processor->last_dispatch);
2941
3e170ce0
A
2942#if defined(CONFIG_SCHED_DEFERRED_AST)
2943 /*
2944 * TODO: Can we state that redispatching our old thread is also
2945 * uninteresting?
2946 */
39037602 2947 if ((((volatile uint32_t)sched_run_buckets[TH_BUCKET_RUN]) == 1) &&
3e170ce0
A
2948 !(self->state & TH_IDLE)) {
2949 pset_cancel_deferred_dispatch(processor->processor_set, processor);
91447636 2950 }
3e170ce0
A
2951#endif
2952
91447636
A
2953}
2954
2955/*
2d21ac55 2956 * thread_block_reason:
91447636 2957 *
2d21ac55
A
2958 * Forces a reschedule, blocking the caller if a wait
2959 * has been asserted.
91447636 2960 *
2d21ac55
A
2961 * If a continuation is specified, then thread_invoke will
2962 * attempt to discard the thread's kernel stack. When the
2963 * thread resumes, it will execute the continuation function
2964 * on a new kernel stack.
91447636 2965 */
2d21ac55
A
2966counter(mach_counter_t c_thread_block_calls = 0;)
2967
2968wait_result_t
2969thread_block_reason(
2970 thread_continue_t continuation,
2971 void *parameter,
2972 ast_t reason)
91447636 2973{
3e170ce0
A
2974 thread_t self = current_thread();
2975 processor_t processor;
2976 thread_t new_thread;
2977 spl_t s;
1c79356b
A
2978
2979 counter(++c_thread_block_calls);
2980
1c79356b
A
2981 s = splsched();
2982
55e303ae 2983 processor = current_processor();
1c79356b 2984
9bccf70c
A
2985 /* If we're explicitly yielding, force a subsequent quantum */
2986 if (reason & AST_YIELD)
3e170ce0 2987 processor->first_timeslice = FALSE;
0b4e3aa0 2988
9bccf70c
A
2989 /* We're handling all scheduling AST's */
2990 ast_off(AST_SCHEDULING);
1c79356b 2991
490019cf
A
2992#if PROC_REF_DEBUG
2993 if ((continuation != NULL) && (self->task != kernel_task)) {
2994 if (uthread_get_proc_refcount(self->uthread) != 0) {
2995 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
2996 }
2997 }
2998#endif
2999
91447636
A
3000 self->continuation = continuation;
3001 self->parameter = parameter;
3002
fe8ab488 3003 if (self->state & ~(TH_RUN | TH_IDLE)) {
316670eb
A
3004 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
3005 MACHDBG_CODE(DBG_MACH_SCHED,MACH_BLOCK),
3006 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
b0d623f7
A
3007 }
3008
2d21ac55 3009 do {
91447636 3010 thread_lock(self);
5ba3f43e 3011 new_thread = thread_select(self, processor, &reason);
91447636 3012 thread_unlock(self);
2d21ac55 3013 } while (!thread_invoke(self, new_thread, reason));
1c79356b 3014
1c79356b
A
3015 splx(s);
3016
91447636 3017 return (self->wait_result);
1c79356b
A
3018}
3019
3020/*
3021 * thread_block:
3022 *
9bccf70c 3023 * Block the current thread if a wait has been asserted.
1c79356b 3024 */
91447636 3025wait_result_t
1c79356b 3026thread_block(
9bccf70c 3027 thread_continue_t continuation)
1c79356b 3028{
91447636
A
3029 return thread_block_reason(continuation, NULL, AST_NONE);
3030}
3031
3032wait_result_t
3033thread_block_parameter(
3034 thread_continue_t continuation,
3035 void *parameter)
3036{
3037 return thread_block_reason(continuation, parameter, AST_NONE);
1c79356b
A
3038}
3039
3040/*
3041 * thread_run:
3042 *
91447636 3043 * Switch directly from the current thread to the
55e303ae 3044 * new thread, handing off our quantum if appropriate.
9bccf70c
A
3045 *
3046 * New thread must be runnable, and not on a run queue.
1c79356b 3047 *
55e303ae 3048 * Called at splsched.
1c79356b
A
3049 */
3050int
3051thread_run(
91447636 3052 thread_t self,
9bccf70c 3053 thread_continue_t continuation,
91447636 3054 void *parameter,
9bccf70c 3055 thread_t new_thread)
1c79356b 3056{
5ba3f43e 3057 ast_t reason = AST_HANDOFF;
9bccf70c 3058
91447636
A
3059 self->continuation = continuation;
3060 self->parameter = parameter;
9bccf70c 3061
5ba3f43e
A
3062 while (!thread_invoke(self, new_thread, reason)) {
3063 /* the handoff failed, so we have to fall back to the normal block path */
3064 processor_t processor = current_processor();
3065
3066 reason = AST_NONE;
9bccf70c 3067
91447636 3068 thread_lock(self);
5ba3f43e 3069 new_thread = thread_select(self, processor, &reason);
91447636 3070 thread_unlock(self);
9bccf70c
A
3071 }
3072
91447636 3073 return (self->wait_result);
1c79356b
A
3074}
3075
3076/*
91447636 3077 * thread_continue:
55e303ae 3078 *
91447636
A
3079 * Called at splsched when a thread first receives
3080 * a new stack after a continuation.
1c79356b
A
3081 */
3082void
91447636 3083thread_continue(
3e170ce0 3084 thread_t thread)
1c79356b 3085{
3e170ce0
A
3086 thread_t self = current_thread();
3087 thread_continue_t continuation;
3088 void *parameter;
b0d623f7
A
3089
3090 DTRACE_SCHED(on__cpu);
3091
91447636 3092 continuation = self->continuation;
91447636 3093 parameter = self->parameter;
9bccf70c 3094
39037602
A
3095#if KPERF
3096 kperf_on_cpu(self, continuation, NULL);
3097#endif
3098
2d21ac55 3099 thread_dispatch(thread, self);
9bccf70c 3100
2d21ac55 3101 self->continuation = self->parameter = NULL;
1c79356b 3102
5ba3f43e
A
3103#if INTERRUPT_MASKED_DEBUG
3104 /* Reset interrupt-masked spin debugging timeout */
3105 ml_spin_debug_clear(self);
3106#endif
3107
2d21ac55 3108 if (thread != THREAD_NULL)
91447636 3109 (void)spllo();
9bccf70c 3110
2d21ac55 3111 TLOG(1, "thread_continue: calling call_continuation \n");
91447636
A
3112 call_continuation(continuation, parameter, self->wait_result);
3113 /*NOTREACHED*/
1c79356b
A
3114}
3115
2d21ac55 3116void
6d2010ae 3117thread_quantum_init(thread_t thread)
2d21ac55 3118{
6d2010ae 3119 if (thread->sched_mode == TH_MODE_REALTIME) {
fe8ab488 3120 thread->quantum_remaining = thread->realtime.computation;
6d2010ae 3121 } else {
fe8ab488 3122 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
6d2010ae
A
3123 }
3124}
2d21ac55 3125
fe8ab488 3126uint32_t
3e170ce0 3127sched_timeshare_initial_quantum_size(thread_t thread)
6d2010ae 3128{
39037602 3129 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG)
316670eb 3130 return bg_quantum;
39037602
A
3131 else
3132 return std_quantum;
6d2010ae
A
3133}
3134
6d2010ae
A
3135/*
3136 * run_queue_init:
3137 *
3138 * Initialize a run queue before first use.
3139 */
3140void
3141run_queue_init(
3142 run_queue_t rq)
3143{
39037602
A
3144 rq->highq = NOPRI;
3145 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++)
2d21ac55 3146 rq->bitmap[i] = 0;
2d21ac55 3147 rq->urgency = rq->count = 0;
39037602 3148 for (int i = 0; i < NRQS; i++)
2d21ac55
A
3149 queue_init(&rq->queues[i]);
3150}
1c79356b 3151
2d21ac55
A
3152/*
3153 * run_queue_dequeue:
3154 *
3155 * Perform a dequeue operation on a run queue,
3156 * and return the resulting thread.
3157 *
6d2010ae 3158 * The run queue must be locked (see thread_run_queue_remove()
2d21ac55
A
3159 * for more info), and not empty.
3160 */
6d2010ae 3161thread_t
2d21ac55 3162run_queue_dequeue(
39037602
A
3163 run_queue_t rq,
3164 integer_t options)
2d21ac55 3165{
39037602
A
3166 thread_t thread;
3167 queue_t queue = &rq->queues[rq->highq];
9bccf70c 3168
2d21ac55 3169 if (options & SCHED_HEADQ) {
39037602
A
3170 thread = qe_dequeue_head(queue, struct thread, runq_links);
3171 } else {
3172 thread = qe_dequeue_tail(queue, struct thread, runq_links);
9bccf70c 3173 }
1c79356b 3174
39037602
A
3175 assert(thread != THREAD_NULL);
3176 assert_thread_magic(thread);
3177
2d21ac55 3178 thread->runq = PROCESSOR_NULL;
6d2010ae 3179 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
2d21ac55 3180 rq->count--;
6d2010ae 3181 if (SCHED(priority_is_urgent)(rq->highq)) {
4a3eedf9
A
3182 rq->urgency--; assert(rq->urgency >= 0);
3183 }
2d21ac55 3184 if (queue_empty(queue)) {
39037602
A
3185 bitmap_clear(rq->bitmap, rq->highq);
3186 rq->highq = bitmap_first(rq->bitmap, NRQS);
2d21ac55 3187 }
1c79356b 3188
39037602 3189 return thread;
1c79356b
A
3190}
3191
6d2010ae
A
3192/*
3193 * run_queue_enqueue:
3194 *
3195 * Perform a enqueue operation on a run queue.
3196 *
3197 * The run queue must be locked (see thread_run_queue_remove()
3198 * for more info).
3199 */
3200boolean_t
3201run_queue_enqueue(
39037602
A
3202 run_queue_t rq,
3203 thread_t thread,
3204 integer_t options)
6d2010ae 3205{
39037602
A
3206 queue_t queue = &rq->queues[thread->sched_pri];
3207 boolean_t result = FALSE;
3208
3209 assert_thread_magic(thread);
3210
6d2010ae 3211 if (queue_empty(queue)) {
39037602
A
3212 enqueue_tail(queue, &thread->runq_links);
3213
3214 rq_bitmap_set(rq->bitmap, thread->sched_pri);
6d2010ae
A
3215 if (thread->sched_pri > rq->highq) {
3216 rq->highq = thread->sched_pri;
3217 result = TRUE;
3218 }
fe8ab488 3219 } else {
6d2010ae 3220 if (options & SCHED_TAILQ)
39037602 3221 enqueue_tail(queue, &thread->runq_links);
6d2010ae 3222 else
39037602 3223 enqueue_head(queue, &thread->runq_links);
fe8ab488 3224 }
6d2010ae
A
3225 if (SCHED(priority_is_urgent)(thread->sched_pri))
3226 rq->urgency++;
3227 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3228 rq->count++;
39037602 3229
6d2010ae 3230 return (result);
6d2010ae
A
3231}
3232
3233/*
3234 * run_queue_remove:
3235 *
3236 * Remove a specific thread from a runqueue.
3237 *
3238 * The run queue must be locked.
3239 */
3240void
3241run_queue_remove(
39037602
A
3242 run_queue_t rq,
3243 thread_t thread)
6d2010ae 3244{
39037602
A
3245 assert(thread->runq != PROCESSOR_NULL);
3246 assert_thread_magic(thread);
6d2010ae 3247
39037602 3248 remqueue(&thread->runq_links);
6d2010ae
A
3249 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3250 rq->count--;
3251 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3252 rq->urgency--; assert(rq->urgency >= 0);
3253 }
39037602
A
3254
3255 if (queue_empty(&rq->queues[thread->sched_pri])) {
6d2010ae 3256 /* update run queue status */
39037602
A
3257 bitmap_clear(rq->bitmap, thread->sched_pri);
3258 rq->highq = bitmap_first(rq->bitmap, NRQS);
6d2010ae 3259 }
39037602 3260
6d2010ae
A
3261 thread->runq = PROCESSOR_NULL;
3262}
3263
3e170ce0
A
3264/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3265void
5ba3f43e 3266sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
6d2010ae 3267{
3e170ce0
A
3268 spl_t s;
3269 thread_t thread;
fe8ab488 3270
5ba3f43e
A
3271 processor_set_t pset = &pset0;
3272
3e170ce0 3273 s = splsched();
5ba3f43e 3274 rt_lock_lock(pset);
6d2010ae 3275
5ba3f43e 3276 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3e170ce0
A
3277 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3278 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3279 }
3280 }
6d2010ae 3281
5ba3f43e 3282 rt_lock_unlock(pset);
3e170ce0 3283 splx(s);
6d2010ae
A
3284}
3285
5ba3f43e
A
3286int64_t
3287sched_rtglobal_runq_count_sum(void)
3288{
3289 return pset0.rt_runq.runq_stats.count_sum;
3290}
3e170ce0 3291
1c79356b 3292/*
2d21ac55
A
3293 * realtime_queue_insert:
3294 *
3295 * Enqueue a thread for realtime execution.
1c79356b 3296 */
2d21ac55 3297static boolean_t
5ba3f43e 3298realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
1c79356b 3299{
5ba3f43e 3300 queue_t queue = &SCHED(rt_runq)(pset)->queue;
39037602
A
3301 uint64_t deadline = thread->realtime.deadline;
3302 boolean_t preempt = FALSE;
1c79356b 3303
5ba3f43e 3304 rt_lock_lock(pset);
1c79356b 3305
55e303ae 3306 if (queue_empty(queue)) {
39037602 3307 enqueue_tail(queue, &thread->runq_links);
2d21ac55 3308 preempt = TRUE;
39037602
A
3309 } else {
3310 /* Insert into rt_runq in thread deadline order */
3311 queue_entry_t iter;
3312 qe_foreach(iter, queue) {
3313 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3314 assert_thread_magic(iter_thread);
3315
3316 if (deadline < iter_thread->realtime.deadline) {
3317 if (iter == queue_first(queue))
3318 preempt = TRUE;
3319 insque(&thread->runq_links, queue_prev(iter));
3320 break;
3321 } else if (iter == queue_last(queue)) {
3322 enqueue_tail(queue, &thread->runq_links);
55e303ae
A
3323 break;
3324 }
55e303ae 3325 }
55e303ae
A
3326 }
3327
5ba3f43e
A
3328 thread->runq = processor;
3329 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3330 rt_runq_count_incr(pset);
55e303ae 3331
5ba3f43e 3332 rt_lock_unlock(pset);
55e303ae 3333
2d21ac55
A
3334 return (preempt);
3335}
55e303ae 3336
2d21ac55
A
3337/*
3338 * realtime_setrun:
3339 *
3340 * Dispatch a thread for realtime execution.
3341 *
3342 * Thread must be locked. Associated pset must
3343 * be locked, and is returned unlocked.
3344 */
3345static void
3346realtime_setrun(
3347 processor_t processor,
3348 thread_t thread)
3349{
3350 processor_set_t pset = processor->processor_set;
39236c6e 3351 ast_t preempt;
55e303ae 3352
5ba3f43e 3353 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
fe8ab488 3354
6d2010ae
A
3355 thread->chosen_processor = processor;
3356
fe8ab488
A
3357 /* <rdar://problem/15102234> */
3358 assert(thread->bound_processor == PROCESSOR_NULL);
3359
2d21ac55
A
3360 /*
3361 * Dispatch directly onto idle processor.
3362 */
6d2010ae
A
3363 if ( (thread->bound_processor == processor)
3364 && processor->state == PROCESSOR_IDLE) {
39037602 3365 re_queue_tail(&pset->active_queue, &processor->processor_queue);
55e303ae 3366
5ba3f43e
A
3367 pset->active_processor_count++;
3368 sched_update_pset_load_average(pset);
3369
2d21ac55 3370 processor->next_thread = thread;
5ba3f43e 3371 processor_state_update_from_thread(processor, thread);
2d21ac55
A
3372 processor->deadline = thread->realtime.deadline;
3373 processor->state = PROCESSOR_DISPATCHING;
55e303ae 3374
5ba3f43e 3375 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
39236c6e 3376 pset_unlock(pset);
5ba3f43e 3377 sched_ipi_perform(processor, ipi_type);
2d21ac55
A
3378 return;
3379 }
55e303ae 3380
39236c6e
A
3381 if (processor->current_pri < BASEPRI_RTQUEUES)
3382 preempt = (AST_PREEMPT | AST_URGENT);
3383 else if (thread->realtime.deadline < processor->deadline)
3384 preempt = (AST_PREEMPT | AST_URGENT);
3385 else
3386 preempt = AST_NONE;
3387
5ba3f43e 3388 realtime_queue_insert(processor, pset, thread);
39236c6e 3389
5ba3f43e 3390 ipi_type = SCHED_IPI_NONE;
39236c6e
A
3391 if (preempt != AST_NONE) {
3392 if (processor->state == PROCESSOR_IDLE) {
39037602 3393 re_queue_tail(&pset->active_queue, &processor->processor_queue);
5ba3f43e
A
3394
3395 pset->active_processor_count++;
3396 sched_update_pset_load_average(pset);
39037602 3397
39236c6e 3398 processor->next_thread = THREAD_NULL;
5ba3f43e 3399 processor_state_update_from_thread(processor, thread);
39236c6e
A
3400 processor->deadline = thread->realtime.deadline;
3401 processor->state = PROCESSOR_DISPATCHING;
3402 if (processor == current_processor()) {
3403 ast_on(preempt);
3404 } else {
5ba3f43e 3405 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
fe8ab488 3406 }
39236c6e
A
3407 } else if (processor->state == PROCESSOR_DISPATCHING) {
3408 if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
5ba3f43e 3409 processor_state_update_from_thread(processor, thread);
39236c6e
A
3410 processor->deadline = thread->realtime.deadline;
3411 }
3412 } else {
3413 if (processor == current_processor()) {
3414 ast_on(preempt);
3415 } else {
5ba3f43e 3416 ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
39236c6e
A
3417 }
3418 }
3419 } else {
3420 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
2d21ac55
A
3421 }
3422
3423 pset_unlock(pset);
5ba3f43e
A
3424 sched_ipi_perform(processor, ipi_type);
3425}
fe8ab488 3426
5ba3f43e
A
3427
3428sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3429 __unused sched_ipi_event_t event)
3430{
3431#if defined(CONFIG_SCHED_DEFERRED_AST)
3432 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3433 return SCHED_IPI_DEFERRED;
3434 }
3435#else /* CONFIG_SCHED_DEFERRED_AST */
3436 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
3437#endif /* CONFIG_SCHED_DEFERRED_AST */
3438 return SCHED_IPI_NONE;
3439}
3440
3441sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3442{
3443 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3444 assert(dst != NULL);
3445
3446 processor_set_t pset = dst->processor_set;
3447 if (current_processor() == dst) {
3448 return SCHED_IPI_NONE;
3449 }
3450
3451 if (bit_test(pset->pending_AST_cpu_mask, dst->cpu_id)) {
3452 return SCHED_IPI_NONE;
3453 }
3454
3455 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3456 switch(ipi_type) {
3457 case SCHED_IPI_NONE:
3458 return SCHED_IPI_NONE;
3459#if defined(CONFIG_SCHED_DEFERRED_AST)
3460 case SCHED_IPI_DEFERRED:
3461 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3462 break;
3463#endif /* CONFIG_SCHED_DEFERRED_AST */
3464 default:
3465 bit_set(pset->pending_AST_cpu_mask, dst->cpu_id);
3466 break;
3467 }
3468 return ipi_type;
3469}
3470
3471sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3472{
3473 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3474 boolean_t deferred_ipi_supported = false;
3475 processor_set_t pset = dst->processor_set;
3476
3477#if defined(CONFIG_SCHED_DEFERRED_AST)
3478 deferred_ipi_supported = true;
3479#endif /* CONFIG_SCHED_DEFERRED_AST */
3480
3481 switch(event) {
3482 case SCHED_IPI_EVENT_SPILL:
3483 case SCHED_IPI_EVENT_SMT_REBAL:
3484 case SCHED_IPI_EVENT_REBALANCE:
3485 case SCHED_IPI_EVENT_BOUND_THR:
3486 /*
3487 * The spill, SMT rebalance, rebalance and the bound thread
3488 * scenarios use immediate IPIs always.
3489 */
3490 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3491 break;
3492 case SCHED_IPI_EVENT_PREEMPT:
3493 /* In the preemption case, use immediate IPIs for RT threads */
3494 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3495 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3496 break;
3497 }
3498
3499 /*
3500 * For Non-RT threads preemption,
3501 * If the core is active, use immediate IPIs.
3502 * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI.
3503 */
3504 if (deferred_ipi_supported && dst_idle) {
3505 return sched_ipi_deferred_policy(pset, dst, event);
3506 }
3507 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3508 break;
3509 default:
3510 panic("Unrecognized scheduler IPI event type %d", event);
3511 }
3512 assert(ipi_type != SCHED_IPI_NONE);
3513 return ipi_type;
2d21ac55
A
3514}
3515
5ba3f43e
A
3516void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
3517{
3518 switch (ipi) {
3519 case SCHED_IPI_NONE:
3520 break;
3521 case SCHED_IPI_IDLE:
3522 machine_signal_idle(dst);
3523 break;
3524 case SCHED_IPI_IMMEDIATE:
3525 cause_ast_check(dst);
3526 break;
3527 case SCHED_IPI_DEFERRED:
3528 machine_signal_idle_deferred(dst);
3529 break;
3530 default:
3531 panic("Unrecognized scheduler IPI type: %d", ipi);
3532 }
3533}
6d2010ae 3534
fe8ab488
A
3535#if defined(CONFIG_SCHED_TIMESHARE_CORE)
3536
3537boolean_t
6d2010ae
A
3538priority_is_urgent(int priority)
3539{
39037602 3540 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
6d2010ae
A
3541}
3542
fe8ab488
A
3543#endif /* CONFIG_SCHED_TIMESHARE_CORE */
3544
55e303ae 3545/*
2d21ac55 3546 * processor_setrun:
55e303ae 3547 *
2d21ac55
A
3548 * Dispatch a thread for execution on a
3549 * processor.
55e303ae 3550 *
2d21ac55
A
3551 * Thread must be locked. Associated pset must
3552 * be locked, and is returned unlocked.
55e303ae 3553 */
2d21ac55
A
3554static void
3555processor_setrun(
3556 processor_t processor,
3557 thread_t thread,
3558 integer_t options)
55e303ae 3559{
2d21ac55
A
3560 processor_set_t pset = processor->processor_set;
3561 ast_t preempt;
39236c6e 3562 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
55e303ae 3563
5ba3f43e 3564 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
fe8ab488 3565
6d2010ae
A
3566 thread->chosen_processor = processor;
3567
55e303ae 3568 /*
2d21ac55 3569 * Dispatch directly onto idle processor.
55e303ae 3570 */
6d2010ae
A
3571 if ( (SCHED(direct_dispatch_to_idle_processors) ||
3572 thread->bound_processor == processor)
3573 && processor->state == PROCESSOR_IDLE) {
39037602
A
3574
3575 re_queue_tail(&pset->active_queue, &processor->processor_queue);
2d21ac55 3576
5ba3f43e
A
3577 pset->active_processor_count++;
3578 sched_update_pset_load_average(pset);
3579
2d21ac55 3580 processor->next_thread = thread;
5ba3f43e 3581 processor_state_update_from_thread(processor, thread);
2d21ac55
A
3582 processor->deadline = UINT64_MAX;
3583 processor->state = PROCESSOR_DISPATCHING;
2d21ac55 3584
5ba3f43e 3585 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
39236c6e 3586 pset_unlock(pset);
5ba3f43e 3587 sched_ipi_perform(processor, ipi_type);
2d21ac55
A
3588 return;
3589 }
55e303ae
A
3590
3591 /*
2d21ac55 3592 * Set preemption mode.
1c79356b 3593 */
3e170ce0
A
3594#if defined(CONFIG_SCHED_DEFERRED_AST)
3595 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3596#endif
6d2010ae
A
3597 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
3598 preempt = (AST_PREEMPT | AST_URGENT);
3599 else if(processor->active_thread && thread_eager_preemption(processor->active_thread))
55e303ae 3600 preempt = (AST_PREEMPT | AST_URGENT);
3e170ce0
A
3601 else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3602 if(SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
39236c6e
A
3603 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3604 } else {
3605 preempt = AST_NONE;
3606 }
3607 } else
2d21ac55 3608 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
9bccf70c 3609
39236c6e 3610 SCHED(processor_enqueue)(processor, thread, options);
5ba3f43e 3611 sched_update_pset_load_average(pset);
9bccf70c 3612
2d21ac55 3613 if (preempt != AST_NONE) {
39236c6e 3614 if (processor->state == PROCESSOR_IDLE) {
39037602 3615 re_queue_tail(&pset->active_queue, &processor->processor_queue);
5ba3f43e 3616 pset->active_processor_count++;
39236c6e 3617 processor->next_thread = THREAD_NULL;
5ba3f43e 3618 processor_state_update_from_thread(processor, thread);
39236c6e
A
3619 processor->deadline = UINT64_MAX;
3620 processor->state = PROCESSOR_DISPATCHING;
39236c6e
A
3621 ipi_action = eExitIdle;
3622 } else if ( processor->state == PROCESSOR_DISPATCHING) {
3623 if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
5ba3f43e 3624 processor_state_update_from_thread(processor, thread);
39236c6e
A
3625 processor->deadline = UINT64_MAX;
3626 }
3627 } else if ( (processor->state == PROCESSOR_RUNNING ||
2d21ac55 3628 processor->state == PROCESSOR_SHUTDOWN) &&
3e170ce0 3629 (thread->sched_pri >= processor->current_pri)) {
39236c6e 3630 ipi_action = eInterruptRunning;
2d21ac55 3631 }
39236c6e
A
3632 } else {
3633 /*
3634 * New thread is not important enough to preempt what is running, but
3635 * special processor states may need special handling
3636 */
3637 if (processor->state == PROCESSOR_SHUTDOWN &&
2d21ac55 3638 thread->sched_pri >= processor->current_pri ) {
39236c6e 3639 ipi_action = eInterruptRunning;
d190cdc3 3640 } else if (processor->state == PROCESSOR_IDLE) {
39037602
A
3641 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3642
5ba3f43e
A
3643 pset->active_processor_count++;
3644 // sched_update_pset_load_average(pset);
3645
39236c6e 3646 processor->next_thread = THREAD_NULL;
5ba3f43e 3647 processor_state_update_from_thread(processor, thread);
39236c6e
A
3648 processor->deadline = UINT64_MAX;
3649 processor->state = PROCESSOR_DISPATCHING;
3650
3651 ipi_action = eExitIdle;
3652 }
2d21ac55 3653 }
39236c6e 3654
5ba3f43e
A
3655 if (ipi_action != eDoNothing) {
3656 if (processor == current_processor()) {
3657 if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE)
3658 ast_on(preempt);
3659 } else {
3660 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3661 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3662 }
6d2010ae 3663 }
2d21ac55 3664 pset_unlock(pset);
5ba3f43e 3665 sched_ipi_perform(processor, ipi_type);
6d2010ae
A
3666}
3667
2d21ac55
A
3668/*
3669 * choose_next_pset:
3670 *
3671 * Return the next sibling pset containing
3672 * available processors.
3673 *
3674 * Returns the original pset if none other is
3675 * suitable.
3676 */
3677static processor_set_t
3678choose_next_pset(
3679 processor_set_t pset)
3680{
3681 processor_set_t nset = pset;
3682
3683 do {
3684 nset = next_pset(nset);
6d2010ae 3685 } while (nset->online_processor_count < 1 && nset != pset);
2d21ac55 3686
cf7d32b8 3687 return (nset);
2d21ac55
A
3688}
3689
3690/*
3691 * choose_processor:
3692 *
3693 * Choose a processor for the thread, beginning at
b7266188 3694 * the pset. Accepts an optional processor hint in
2d21ac55
A
3695 * the pset.
3696 *
3697 * Returns a processor, possibly from a different pset.
3698 *
3699 * The thread must be locked. The pset must be locked,
3700 * and the resulting pset is locked on return.
3701 */
6d2010ae 3702processor_t
2d21ac55
A
3703choose_processor(
3704 processor_set_t pset,
b7266188 3705 processor_t processor,
2d21ac55
A
3706 thread_t thread)
3707{
3708 processor_set_t nset, cset = pset;
39037602
A
3709
3710 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
3711
cf7d32b8 3712 /*
fe8ab488 3713 * Prefer the hinted processor, when appropriate.
cf7d32b8 3714 */
b7266188 3715
fe8ab488 3716 /* Fold last processor hint from secondary processor to its primary */
0b4c1975 3717 if (processor != PROCESSOR_NULL) {
fe8ab488 3718 processor = processor->processor_primary;
0b4c1975 3719 }
b0d623f7 3720
fe8ab488
A
3721 /*
3722 * Only consult platform layer if pset is active, which
3723 * it may not be in some cases when a multi-set system
3724 * is going to sleep.
3725 */
3726 if (pset->online_processor_count) {
3727 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
3728 processor_t mc_processor = machine_choose_processor(pset, processor);
3729 if (mc_processor != PROCESSOR_NULL)
3730 processor = mc_processor->processor_primary;
3731 }
3732 }
b7266188 3733
fe8ab488
A
3734 /*
3735 * At this point, we may have a processor hint, and we may have
3736 * an initial starting pset. If the hint is not in the pset, or
3737 * if the hint is for a processor in an invalid state, discard
3738 * the hint.
3739 */
0b4c1975 3740 if (processor != PROCESSOR_NULL) {
fe8ab488 3741 if (processor->processor_set != pset) {
cf7d32b8 3742 processor = PROCESSOR_NULL;
3e170ce0
A
3743 } else if (!processor->is_recommended) {
3744 processor = PROCESSOR_NULL;
fe8ab488
A
3745 } else {
3746 switch (processor->state) {
3747 case PROCESSOR_START:
3748 case PROCESSOR_SHUTDOWN:
3749 case PROCESSOR_OFF_LINE:
3750 /*
3751 * Hint is for a processor that cannot support running new threads.
3752 */
3753 processor = PROCESSOR_NULL;
3754 break;
3755 case PROCESSOR_IDLE:
3756 /*
3757 * Hint is for an idle processor. Assume it is no worse than any other
3758 * idle processor. The platform layer had an opportunity to provide
3759 * the "least cost idle" processor above.
3760 */
3761 return (processor);
fe8ab488
A
3762 case PROCESSOR_RUNNING:
3763 case PROCESSOR_DISPATCHING:
3764 /*
3765 * Hint is for an active CPU. This fast-path allows
3766 * realtime threads to preempt non-realtime threads
3767 * to regain their previous executing processor.
3768 */
3769 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
3770 (processor->current_pri < BASEPRI_RTQUEUES))
3771 return (processor);
3772
3773 /* Otherwise, use hint as part of search below */
3774 break;
3775 default:
3776 processor = PROCESSOR_NULL;
3777 break;
3778 }
3779 }
b7266188 3780 }
2d21ac55
A
3781
3782 /*
fe8ab488
A
3783 * Iterate through the processor sets to locate
3784 * an appropriate processor. Seed results with
3785 * a last-processor hint, if available, so that
3786 * a search must find something strictly better
3787 * to replace it.
3788 *
3789 * A primary/secondary pair of SMT processors are
3790 * "unpaired" if the primary is busy but its
3791 * corresponding secondary is idle (so the physical
3792 * core has full use of its resources).
2d21ac55 3793 */
fe8ab488
A
3794
3795 integer_t lowest_priority = MAXPRI + 1;
a39ff7e2 3796 integer_t lowest_secondary_priority = MAXPRI + 1;
fe8ab488
A
3797 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
3798 integer_t lowest_count = INT_MAX;
3799 uint64_t furthest_deadline = 1;
3800 processor_t lp_processor = PROCESSOR_NULL;
3801 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
3802 processor_t lp_unpaired_secondary_processor = PROCESSOR_NULL;
a39ff7e2 3803 processor_t lp_paired_secondary_processor = PROCESSOR_NULL;
fe8ab488
A
3804 processor_t lc_processor = PROCESSOR_NULL;
3805 processor_t fd_processor = PROCESSOR_NULL;
3806
3807 if (processor != PROCESSOR_NULL) {
3808 /* All other states should be enumerated above. */
3809 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
3810
3811 lowest_priority = processor->current_pri;
3812 lp_processor = processor;
3813
3814 if (processor->current_pri >= BASEPRI_RTQUEUES) {
3815 furthest_deadline = processor->deadline;
3816 fd_processor = processor;
3817 }
3818
3819 lowest_count = SCHED(processor_runq_count)(processor);
3820 lc_processor = processor;
3821 }
3822
2d21ac55 3823 do {
fe8ab488 3824
9bccf70c 3825 /*
fe8ab488 3826 * Choose an idle processor, in pset traversal order
9bccf70c 3827 */
3e170ce0 3828 qe_foreach_element(processor, &cset->idle_queue, processor_queue) {
a39ff7e2
A
3829 if (bit_test(cset->pending_AST_cpu_mask, processor->cpu_id)) {
3830 continue;
3831 }
3e170ce0
A
3832 if (processor->is_recommended)
3833 return processor;
3834 }
1c79356b 3835
fe8ab488 3836 /*
a39ff7e2 3837 * Otherwise, enumerate active and idle processors to find primary candidates
fe8ab488
A
3838 * with lower priority/etc.
3839 */
0b4c1975 3840
3e170ce0
A
3841 qe_foreach_element(processor, &cset->active_queue, processor_queue) {
3842
3843 if (!processor->is_recommended) {
3844 continue;
3845 }
a39ff7e2
A
3846 if (bit_test(cset->pending_AST_cpu_mask, processor->cpu_id)) {
3847 continue;
3848 }
2d21ac55 3849
fe8ab488 3850 integer_t cpri = processor->current_pri;
a39ff7e2
A
3851 if (processor->processor_primary != processor) {
3852 if (cpri < lowest_secondary_priority) {
3853 lowest_secondary_priority = cpri;
3854 lp_paired_secondary_processor = processor;
3855 }
3856 } else {
3857 if (cpri < lowest_priority) {
3858 lowest_priority = cpri;
3859 lp_processor = processor;
3860 }
fe8ab488 3861 }
b0d623f7 3862
fe8ab488
A
3863 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
3864 furthest_deadline = processor->deadline;
3865 fd_processor = processor;
3866 }
0b4c1975 3867
fe8ab488
A
3868 integer_t ccount = SCHED(processor_runq_count)(processor);
3869 if (ccount < lowest_count) {
3870 lowest_count = ccount;
3871 lc_processor = processor;
3872 }
fe8ab488
A
3873 }
3874
3875 /*
3876 * For SMT configs, these idle secondary processors must have active primary. Otherwise
3877 * the idle primary would have short-circuited the loop above
3878 */
3e170ce0
A
3879 qe_foreach_element(processor, &cset->idle_secondary_queue, processor_queue) {
3880
3881 if (!processor->is_recommended) {
3882 continue;
3883 }
3884
fe8ab488
A
3885 processor_t cprimary = processor->processor_primary;
3886
a39ff7e2
A
3887 if (bit_test(cset->pending_AST_cpu_mask, cprimary->cpu_id)) {
3888 continue;
3889 }
3890
fe8ab488
A
3891 /* If the primary processor is offline or starting up, it's not a candidate for this path */
3892 if (cprimary->state == PROCESSOR_RUNNING || cprimary->state == PROCESSOR_DISPATCHING) {
3893 integer_t primary_pri = cprimary->current_pri;
3894
3895 if (primary_pri < lowest_unpaired_primary_priority) {
3896 lowest_unpaired_primary_priority = primary_pri;
3897 lp_unpaired_primary_processor = cprimary;
3898 lp_unpaired_secondary_processor = processor;
0b4c1975 3899 }
2d21ac55 3900 }
fe8ab488
A
3901 }
3902
0b4c1975 3903
fe8ab488
A
3904 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3905
3906 /*
3907 * For realtime threads, the most important aspect is
3908 * scheduling latency, so we attempt to assign threads
3909 * to good preemption candidates (assuming an idle primary
3910 * processor was not available above).
3911 */
3912
3913 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3914 /* Move to end of active queue so that the next thread doesn't also pick it */
39037602 3915 re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
fe8ab488
A
3916 return lp_unpaired_primary_processor;
3917 }
3918 if (thread->sched_pri > lowest_priority) {
3919 /* Move to end of active queue so that the next thread doesn't also pick it */
39037602 3920 re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
0b4c1975 3921 return lp_processor;
fe8ab488 3922 }
a39ff7e2
A
3923 if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) {
3924 return lp_paired_secondary_processor;
3925 }
0b4c1975
A
3926 if (thread->realtime.deadline < furthest_deadline)
3927 return fd_processor;
6d2010ae 3928
2d21ac55 3929 /*
fe8ab488
A
3930 * If all primary and secondary CPUs are busy with realtime
3931 * threads with deadlines earlier than us, move on to next
3932 * pset.
2d21ac55 3933 */
fe8ab488
A
3934 }
3935 else {
3936
3937 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3938 /* Move to end of active queue so that the next thread doesn't also pick it */
39037602 3939 re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
fe8ab488 3940 return lp_unpaired_primary_processor;
c910b4d9 3941 }
fe8ab488
A
3942 if (thread->sched_pri > lowest_priority) {
3943 /* Move to end of active queue so that the next thread doesn't also pick it */
39037602 3944 re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
fe8ab488 3945 return lp_processor;
cf7d32b8 3946 }
9bccf70c 3947
9bccf70c 3948 /*
fe8ab488
A
3949 * If all primary processor in this pset are running a higher
3950 * priority thread, move on to next pset. Only when we have
3951 * exhausted this search do we fall back to other heuristics.
1c79356b 3952 */
2d21ac55
A
3953 }
3954
3955 /*
fe8ab488 3956 * Move onto the next processor set.
2d21ac55
A
3957 */
3958 nset = next_pset(cset);
3959
3960 if (nset != pset) {
3961 pset_unlock(cset);
3962
3963 cset = nset;
3964 pset_lock(cset);
3965 }
3966 } while (nset != pset);
3967
3968 /*
fe8ab488
A
3969 * Make sure that we pick a running processor,
3970 * and that the correct processor set is locked.
3971 * Since we may have unlock the candidate processor's
3972 * pset, it may have changed state.
3973 *
3974 * All primary processors are running a higher priority
3975 * thread, so the only options left are enqueuing on
3976 * the secondary processor that would perturb the least priority
3977 * primary, or the least busy primary.
2d21ac55 3978 */
cf7d32b8 3979 do {
2d21ac55 3980
fe8ab488
A
3981 /* lowest_priority is evaluated in the main loops above */
3982 if (lp_unpaired_secondary_processor != PROCESSOR_NULL) {
3983 processor = lp_unpaired_secondary_processor;
3984 lp_unpaired_secondary_processor = PROCESSOR_NULL;
a39ff7e2
A
3985 } else if (lp_paired_secondary_processor != PROCESSOR_NULL) {
3986 processor = lp_paired_secondary_processor;
3987 lp_paired_secondary_processor = PROCESSOR_NULL;
fe8ab488
A
3988 } else if (lc_processor != PROCESSOR_NULL) {
3989 processor = lc_processor;
3990 lc_processor = PROCESSOR_NULL;
3991 } else {
cf7d32b8 3992 /*
fe8ab488
A
3993 * All processors are executing higher
3994 * priority threads, and the lowest_count
3995 * candidate was not usable
cf7d32b8 3996 */
fe8ab488 3997 processor = master_processor;
cf7d32b8
A
3998 }
3999
4000 /*
fe8ab488
A
4001 * Check that the correct processor set is
4002 * returned locked.
cf7d32b8
A
4003 */
4004 if (cset != processor->processor_set) {
4005 pset_unlock(cset);
cf7d32b8
A
4006 cset = processor->processor_set;
4007 pset_lock(cset);
4008 }
4009
4010 /*
fe8ab488
A
4011 * We must verify that the chosen processor is still available.
4012 * master_processor is an exception, since we may need to preempt
4013 * a running thread on it during processor shutdown (for sleep),
4014 * and that thread needs to be enqueued on its runqueue to run
4015 * when the processor is restarted.
cf7d32b8 4016 */
fe8ab488 4017 if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE))
cf7d32b8 4018 processor = PROCESSOR_NULL;
fe8ab488 4019
cf7d32b8 4020 } while (processor == PROCESSOR_NULL);
2d21ac55 4021
5ba3f43e
A
4022 if (processor->state == PROCESSOR_RUNNING) {
4023 re_queue_tail(&cset->active_queue, &processor->processor_queue);
4024 }
4025
2d21ac55
A
4026 return (processor);
4027}
4028
4029/*
4030 * thread_setrun:
4031 *
4032 * Dispatch thread for execution, onto an idle
4033 * processor or run queue, and signal a preemption
4034 * as appropriate.
4035 *
4036 * Thread must be locked.
4037 */
4038void
4039thread_setrun(
4040 thread_t thread,
4041 integer_t options)
4042{
4043 processor_t processor;
4044 processor_set_t pset;
4045
3e170ce0
A
4046 assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
4047 assert(thread->runq == PROCESSOR_NULL);
4048
2d21ac55
A
4049 /*
4050 * Update priority if needed.
4051 */
6d2010ae
A
4052 if (SCHED(can_update_priority)(thread))
4053 SCHED(update_priority)(thread);
2d21ac55 4054
fe8ab488
A
4055 thread->sfi_class = sfi_thread_classify(thread);
4056
2d21ac55
A
4057 assert(thread->runq == PROCESSOR_NULL);
4058
3e170ce0 4059#if __SMP__
2d21ac55
A
4060 if (thread->bound_processor == PROCESSOR_NULL) {
4061 /*
4062 * Unbound case.
4063 */
4064 if (thread->affinity_set != AFFINITY_SET_NULL) {
4065 /*
4066 * Use affinity set policy hint.
4067 */
4068 pset = thread->affinity_set->aset_pset;
4069 pset_lock(pset);
4070
6d2010ae 4071 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
5ba3f43e 4072 pset = processor->processor_set;
39236c6e 4073
3e170ce0 4074 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
39236c6e 4075 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
3e170ce0 4076 } else if (thread->last_processor != PROCESSOR_NULL) {
2d21ac55
A
4077 /*
4078 * Simple (last processor) affinity case.
4079 */
4080 processor = thread->last_processor;
4081 pset = processor->processor_set;
4082 pset_lock(pset);
6d2010ae 4083 processor = SCHED(choose_processor)(pset, processor, thread);
5ba3f43e 4084 pset = processor->processor_set;
6d2010ae 4085
3e170ce0 4086 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
39236c6e 4087 (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
3e170ce0 4088 } else {
2d21ac55
A
4089 /*
4090 * No Affinity case:
4091 *
cf7d32b8
A
4092 * Utilitize a per task hint to spread threads
4093 * among the available processor sets.
2d21ac55 4094 */
cf7d32b8
A
4095 task_t task = thread->task;
4096
4097 pset = task->pset_hint;
4098 if (pset == PROCESSOR_SET_NULL)
4099 pset = current_processor()->processor_set;
4100
4101 pset = choose_next_pset(pset);
2d21ac55 4102 pset_lock(pset);
9bccf70c 4103
6d2010ae 4104 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
5ba3f43e
A
4105 pset = processor->processor_set;
4106 task->pset_hint = pset;
39236c6e 4107
3e170ce0 4108 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
39236c6e 4109 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
55e303ae 4110 }
3e170ce0 4111 } else {
2d21ac55
A
4112 /*
4113 * Bound case:
4114 *
4115 * Unconditionally dispatch on the processor.
4116 */
4117 processor = thread->bound_processor;
55e303ae 4118 pset = processor->processor_set;
2d21ac55 4119 pset_lock(pset);
39236c6e 4120
3e170ce0 4121 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
39236c6e 4122 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
2d21ac55 4123 }
3e170ce0
A
4124#else /* !__SMP__ */
4125 /* Only one processor to choose */
4126 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
4127 processor = master_processor;
4128 pset = processor->processor_set;
4129 pset_lock(pset);
4130#endif /* !__SMP__ */
2d21ac55
A
4131
4132 /*
3e170ce0 4133 * Dispatch the thread on the chosen processor.
fe8ab488 4134 * TODO: This should be based on sched_mode, not sched_pri
2d21ac55 4135 */
5ba3f43e 4136 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2d21ac55 4137 realtime_setrun(processor, thread);
5ba3f43e 4138 } else {
2d21ac55 4139 processor_setrun(processor, thread, options);
a39ff7e2
A
4140 }
4141 /* pset is now unlocked */
4142 if (thread->bound_processor == PROCESSOR_NULL) {
4143 SCHED(check_spill)(pset, thread);
5ba3f43e 4144 }
2d21ac55
A
4145}
4146
b0d623f7
A
4147processor_set_t
4148task_choose_pset(
4149 task_t task)
4150{
4151 processor_set_t pset = task->pset_hint;
4152
4153 if (pset != PROCESSOR_SET_NULL)
4154 pset = choose_next_pset(pset);
4155
4156 return (pset);
4157}
4158
9bccf70c 4159/*
c910b4d9
A
4160 * Check for a preemption point in
4161 * the current context.
55e303ae 4162 *
fe8ab488 4163 * Called at splsched with thread locked.
9bccf70c
A
4164 */
4165ast_t
4166csw_check(
fe8ab488
A
4167 processor_t processor,
4168 ast_t check_reason)
39236c6e
A
4169{
4170 processor_set_t pset = processor->processor_set;
4171 ast_t result;
4172
4173 pset_lock(pset);
4174
4175 /* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */
5ba3f43e 4176 bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);
39236c6e 4177
fe8ab488 4178 result = csw_check_locked(processor, pset, check_reason);
39236c6e
A
4179
4180 pset_unlock(pset);
4181
4182 return result;
4183}
4184
4185/*
4186 * Check for preemption at splsched with
fe8ab488 4187 * pset and thread locked
39236c6e
A
4188 */
4189ast_t
4190csw_check_locked(
4191 processor_t processor,
5ba3f43e 4192 processor_set_t pset,
fe8ab488 4193 ast_t check_reason)
9bccf70c 4194{
fe8ab488 4195 ast_t result;
316670eb 4196 thread_t thread = processor->active_thread;
9bccf70c 4197
3e170ce0 4198 if (processor->first_timeslice) {
5ba3f43e 4199 if (rt_runq_count(pset) > 0)
fe8ab488 4200 return (check_reason | AST_PREEMPT | AST_URGENT);
9bccf70c
A
4201 }
4202 else {
5ba3f43e 4203 if (rt_runq_count(pset) > 0) {
39236c6e 4204 if (BASEPRI_RTQUEUES > processor->current_pri)
fe8ab488 4205 return (check_reason | AST_PREEMPT | AST_URGENT);
39236c6e 4206 else
fe8ab488 4207 return (check_reason | AST_PREEMPT);
39236c6e 4208 }
1c79356b 4209 }
9bccf70c 4210
316670eb 4211 result = SCHED(processor_csw_check)(processor);
9bccf70c 4212 if (result != AST_NONE)
3e170ce0
A
4213 return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE));
4214
4215#if __SMP__
9bccf70c 4216
3e170ce0
A
4217 /*
4218 * If the current thread is running on a processor that is no longer recommended, gently
4219 * (non-urgently) get to a point and then block, and which point thread_select() should
4220 * try to idle the processor and re-dispatch the thread to a recommended processor.
4221 */
5ba3f43e 4222 if (!processor->is_recommended) {
fe8ab488 4223 return (check_reason | AST_PREEMPT);
5ba3f43e
A
4224 }
4225
4226 /*
4227 * Same for avoid-processor
4228 *
4229 * TODO: Should these set AST_REBALANCE?
4230 */
4231 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
4232 return (check_reason | AST_PREEMPT);
4233 }
3e170ce0
A
4234
4235 /*
4236 * Even though we could continue executing on this processor, a
4237 * secondary SMT core should try to shed load to another primary core.
4238 *
4239 * TODO: Should this do the same check that thread_select does? i.e.
4240 * if no bound threads target this processor, and idle primaries exist, preempt
4241 * The case of RT threads existing is already taken care of above
4242 * Consider Capri in this scenario.
4243 *
4244 * if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue))
4245 *
4246 * TODO: Alternatively - check if only primary is idle, or check if primary's pri is lower than mine.
4247 */
4248
4249 if (processor->current_pri < BASEPRI_RTQUEUES &&
4250 processor->processor_primary != processor)
4251 return (check_reason | AST_PREEMPT);
4252#endif
4253
316670eb 4254 if (thread->state & TH_SUSP)
fe8ab488
A
4255 return (check_reason | AST_PREEMPT);
4256
3e170ce0 4257#if CONFIG_SCHED_SFI
fe8ab488
A
4258 /*
4259 * Current thread may not need to be preempted, but maybe needs
4260 * an SFI wait?
4261 */
4262 result = sfi_thread_needs_ast(thread, NULL);
4263 if (result != AST_NONE)
4264 return (check_reason | result);
3e170ce0 4265#endif
c910b4d9
A
4266
4267 return (AST_NONE);
1c79356b
A
4268}
4269
4270/*
9bccf70c 4271 * set_sched_pri:
1c79356b 4272 *
55e303ae
A
4273 * Set the scheduled priority of the specified thread.
4274 *
9bccf70c 4275 * This may cause the thread to change queues.
1c79356b 4276 *
55e303ae 4277 * Thread must be locked.
1c79356b
A
4278 */
4279void
9bccf70c 4280set_sched_pri(
3e170ce0 4281 thread_t thread,
5ba3f43e 4282 int new_priority)
1c79356b 4283{
3e170ce0
A
4284 thread_t cthread = current_thread();
4285 boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE;
fe8ab488
A
4286 int curgency, nurgency;
4287 uint64_t urgency_param1, urgency_param2;
3e170ce0 4288 boolean_t removed_from_runq = FALSE;
9bccf70c 4289
5ba3f43e
A
4290 int old_priority = thread->sched_pri;
4291
3e170ce0 4292 /* If we're already at this priority, no need to mess with the runqueue */
5ba3f43e 4293 if (new_priority == old_priority)
3e170ce0
A
4294 return;
4295
4296 if (is_current_thread) {
4297 assert(thread->runq == PROCESSOR_NULL);
fe8ab488 4298 curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
3e170ce0
A
4299 } else {
4300 removed_from_runq = thread_run_queue_remove(thread);
fe8ab488 4301 }
3e170ce0 4302
5ba3f43e 4303 thread->sched_pri = new_priority;
490019cf 4304
3e170ce0
A
4305 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
4306 (uintptr_t)thread_tid(thread),
4307 thread->base_pri,
4308 thread->sched_pri,
a39ff7e2 4309 thread->sched_usage,
3e170ce0
A
4310 0);
4311
3e170ce0 4312 if (is_current_thread) {
fe8ab488 4313 nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
3e170ce0
A
4314 /*
4315 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4316 * class alterations from user space to occur relatively infrequently, hence
4317 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4318 * inheritance is expected to involve priority changes.
4319 */
5ba3f43e 4320 uint64_t ctime = mach_approximate_time();
fe8ab488 4321 if (nurgency != curgency) {
3e170ce0 4322 thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
fe8ab488 4323 }
5ba3f43e 4324 machine_thread_going_on_core(thread, nurgency, 0, 0, ctime);
fe8ab488
A
4325 }
4326
3e170ce0
A
4327 if (removed_from_runq)
4328 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4329 else if (thread->state & TH_RUN) {
4330 processor_t processor = thread->last_processor;
9bccf70c 4331
3e170ce0 4332 if (is_current_thread) {
5ba3f43e 4333 processor_state_update_from_thread(processor, thread);
9bccf70c 4334
5ba3f43e
A
4335 /*
4336 * When dropping in priority, check if the thread no longer belongs on core.
4337 * If a thread raises its own priority, don't aggressively rebalance it.
4338 * <rdar://problem/31699165>
4339 */
4340 if (new_priority < old_priority) {
4341 ast_t preempt;
4342
4343 if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
4344 ast_on(preempt);
4345 }
4346 } else if (processor != PROCESSOR_NULL && processor->active_thread == thread) {
9bccf70c 4347 cause_ast_check(processor);
5ba3f43e 4348 }
1c79356b
A
4349 }
4350}
4351
3e170ce0
A
4352/*
4353 * thread_run_queue_remove_for_handoff
4354 *
4355 * Pull a thread or its (recursive) push target out of the runqueue
4356 * so that it is ready for thread_run()
4357 *
4358 * Called at splsched
4359 *
4360 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4361 * This may be different than the thread that was passed in.
4362 */
4363thread_t
4364thread_run_queue_remove_for_handoff(thread_t thread) {
91447636 4365
3e170ce0 4366 thread_t pulled_thread = THREAD_NULL;
91447636 4367
3e170ce0 4368 thread_lock(thread);
91447636 4369
3e170ce0
A
4370 /*
4371 * Check that the thread is not bound
4372 * to a different processor, and that realtime
4373 * is not involved.
4374 *
4375 * Next, pull it off its run queue. If it
4376 * doesn't come, it's not eligible.
4377 */
91447636 4378
3e170ce0
A
4379 processor_t processor = current_processor();
4380 if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
4381 (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
91447636 4382
3e170ce0
A
4383 if (thread_run_queue_remove(thread))
4384 pulled_thread = thread;
91447636
A
4385 }
4386
3e170ce0 4387 thread_unlock(thread);
6d2010ae 4388
3e170ce0 4389 return pulled_thread;
6d2010ae
A
4390}
4391
1c79356b 4392/*
6d2010ae 4393 * thread_run_queue_remove:
1c79356b 4394 *
fe8ab488 4395 * Remove a thread from its current run queue and
2d21ac55 4396 * return TRUE if successful.
55e303ae
A
4397 *
4398 * Thread must be locked.
fe8ab488
A
4399 *
4400 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4401 * run queues because the caller locked the thread. Otherwise
4402 * the thread is on a run queue, but could be chosen for dispatch
4403 * and removed by another processor under a different lock, which
4404 * will set thread->runq to PROCESSOR_NULL.
4405 *
4406 * Hence the thread select path must not rely on anything that could
4407 * be changed under the thread lock after calling this function,
4408 * most importantly thread->sched_pri.
1c79356b 4409 */
2d21ac55 4410boolean_t
6d2010ae 4411thread_run_queue_remove(
fe8ab488 4412 thread_t thread)
1c79356b 4413{
fe8ab488
A
4414 boolean_t removed = FALSE;
4415 processor_t processor = thread->runq;
1c79356b 4416
fe8ab488
A
4417 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) {
4418 /* Thread isn't runnable */
4419 assert(thread->runq == PROCESSOR_NULL);
4420 return FALSE;
4421 }
55e303ae 4422
fe8ab488 4423 if (processor == PROCESSOR_NULL) {
55e303ae 4424 /*
fe8ab488
A
4425 * The thread is either not on the runq,
4426 * or is in the midst of being removed from the runq.
4427 *
4428 * runq is set to NULL under the pset lock, not the thread
4429 * lock, so the thread may still be in the process of being dequeued
4430 * from the runq. It will wait in invoke for the thread lock to be
4431 * dropped.
55e303ae 4432 */
55e303ae 4433
fe8ab488
A
4434 return FALSE;
4435 }
55e303ae 4436
fe8ab488
A
4437 if (thread->sched_pri < BASEPRI_RTQUEUES) {
4438 return SCHED(processor_queue_remove)(processor, thread);
4439 }
55e303ae 4440
5ba3f43e
A
4441 processor_set_t pset = processor->processor_set;
4442
4443 rt_lock_lock(pset);
55e303ae 4444
fe8ab488
A
4445 if (thread->runq != PROCESSOR_NULL) {
4446 /*
3e170ce0 4447 * Thread is on the RT run queue and we have a lock on
fe8ab488
A
4448 * that run queue.
4449 */
4450
39037602 4451 remqueue(&thread->runq_links);
5ba3f43e
A
4452 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
4453 rt_runq_count_decr(pset);
fe8ab488
A
4454
4455 thread->runq = PROCESSOR_NULL;
4456
4457 removed = TRUE;
1c79356b
A
4458 }
4459
5ba3f43e 4460 rt_lock_unlock(pset);
fe8ab488
A
4461
4462 return (removed);
1c79356b
A
4463}
4464
cf7d32b8 4465/*
3e170ce0 4466 * Put the thread back where it goes after a thread_run_queue_remove
cf7d32b8 4467 *
3e170ce0 4468 * Thread must have been removed under the same thread lock hold
cf7d32b8 4469 *
3e170ce0 4470 * thread locked, at splsched
cf7d32b8 4471 */
3e170ce0
A
4472void
4473thread_run_queue_reinsert(thread_t thread, integer_t options)
cf7d32b8 4474{
3e170ce0 4475 assert(thread->runq == PROCESSOR_NULL);
5ba3f43e 4476 assert(thread->state & (TH_RUN));
cf7d32b8 4477
5ba3f43e 4478 thread_setrun(thread, options);
6d2010ae
A
4479}
4480
39236c6e
A
4481void
4482sys_override_cpu_throttle(int flag)
6d2010ae 4483{
39236c6e
A
4484 if (flag == CPU_THROTTLE_ENABLE)
4485 cpu_throttle_enabled = 1;
4486 if (flag == CPU_THROTTLE_DISABLE)
4487 cpu_throttle_enabled = 0;
4488}
6d2010ae 4489
39236c6e
A
4490int
4491thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
4492{
4493 if (thread == NULL || (thread->state & TH_IDLE)) {
4494 *arg1 = 0;
4495 *arg2 = 0;
4496
4497 return (THREAD_URGENCY_NONE);
4498 } else if (thread->sched_mode == TH_MODE_REALTIME) {
4499 *arg1 = thread->realtime.period;
4500 *arg2 = thread->realtime.deadline;
4501
4502 return (THREAD_URGENCY_REAL_TIME);
4503 } else if (cpu_throttle_enabled &&
3e170ce0 4504 ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
39236c6e
A
4505 /*
4506 * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted
4507 */
4508 *arg1 = thread->sched_pri;
3e170ce0 4509 *arg2 = thread->base_pri;
6d2010ae 4510
39236c6e
A
4511 return (THREAD_URGENCY_BACKGROUND);
4512 } else {
fe8ab488
A
4513 /* For otherwise unclassified threads, report throughput QoS
4514 * parameters
4515 */
39037602
A
4516 *arg1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
4517 *arg2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
4518
6d2010ae
A
4519 return (THREAD_URGENCY_NORMAL);
4520 }
6d2010ae
A
4521}
4522
5ba3f43e
A
4523perfcontrol_class_t
4524thread_get_perfcontrol_class(thread_t thread)
4525{
4526 /* Special case handling */
4527 if (thread->state & TH_IDLE)
4528 return PERFCONTROL_CLASS_IDLE;
4529 if (thread->task == kernel_task)
4530 return PERFCONTROL_CLASS_KERNEL;
4531 if (thread->sched_mode == TH_MODE_REALTIME)
4532 return PERFCONTROL_CLASS_REALTIME;
4533
4534 /* perfcontrol_class based on base_pri */
4535 if (thread->base_pri <= MAXPRI_THROTTLE)
4536 return PERFCONTROL_CLASS_BACKGROUND;
4537 else if (thread->base_pri <= BASEPRI_UTILITY)
4538 return PERFCONTROL_CLASS_UTILITY;
4539 else if (thread->base_pri <= BASEPRI_DEFAULT)
4540 return PERFCONTROL_CLASS_NONUI;
4541 else if (thread->base_pri <= BASEPRI_FOREGROUND)
4542 return PERFCONTROL_CLASS_UI;
4543 else
4544 return PERFCONTROL_CLASS_ABOVEUI;
4545}
6d2010ae 4546
1c79356b 4547/*
2d21ac55
A
4548 * This is the processor idle loop, which just looks for other threads
4549 * to execute. Processor idle threads invoke this without supplying a
4550 * current thread to idle without an asserted wait state.
4551 *
4552 * Returns a the next thread to execute if dispatched directly.
1c79356b 4553 */
6d2010ae
A
4554
4555#if 0
4556#define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
4557#else
4558#define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
4559#endif
4560
4561thread_t
2d21ac55
A
4562processor_idle(
4563 thread_t thread,
4564 processor_t processor)
1c79356b 4565{
2d21ac55
A
4566 processor_set_t pset = processor->processor_set;
4567 thread_t new_thread;
4568 int state;
2d21ac55 4569 (void)splsched();
1c79356b 4570
316670eb
A
4571 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4572 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START,
4573 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
3a60a9f5 4574
6d2010ae
A
4575 SCHED_STATS_CPU_IDLE_START(processor);
4576
2d21ac55
A
4577 timer_switch(&PROCESSOR_DATA(processor, system_state),
4578 mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
4579 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
3a60a9f5 4580
39236c6e 4581 while (1) {
5ba3f43e
A
4582 /*
4583 * Ensure that updates to my processor and pset state,
4584 * made by the IPI source processor before sending the IPI,
4585 * are visible on this processor now (even though we don't
4586 * take the pset lock yet).
4587 */
4588 atomic_thread_fence(memory_order_acquire);
4589
4590 if (processor->state != PROCESSOR_IDLE)
4591 break;
4592 if (bit_test(pset->pending_AST_cpu_mask, processor->cpu_id))
39236c6e 4593 break;
5ba3f43e
A
4594#if defined(CONFIG_SCHED_DEFERRED_AST)
4595 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id))
39236c6e 4596 break;
5ba3f43e 4597#endif
a39ff7e2 4598 if (processor->is_recommended && (processor->processor_primary == processor)) {
5ba3f43e 4599 if (rt_runq_count(pset))
3e170ce0
A
4600 break;
4601 } else {
4602 if (SCHED(processor_bound_count)(processor))
4603 break;
4604 }
4605
39236c6e
A
4606#if CONFIG_SCHED_IDLE_IN_PLACE
4607 if (thread != THREAD_NULL) {
4608 /* Did idle-in-place thread wake up */
4609 if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active)
4610 break;
4611 }
4612#endif
4613
6d2010ae 4614 IDLE_KERNEL_DEBUG_CONSTANT(
5ba3f43e 4615 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
6d2010ae 4616
4b17d6b6
A
4617 machine_track_platform_idle(TRUE);
4618
2d21ac55 4619 machine_idle();
55e303ae 4620
4b17d6b6
A
4621 machine_track_platform_idle(FALSE);
4622
55e303ae 4623 (void)splsched();
c910b4d9 4624
6d2010ae 4625 IDLE_KERNEL_DEBUG_CONSTANT(
5ba3f43e 4626 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
6d2010ae 4627
fe8ab488
A
4628 if (!SCHED(processor_queue_empty)(processor)) {
4629 /* Secondary SMT processors respond to directed wakeups
4630 * exclusively. Some platforms induce 'spurious' SMT wakeups.
4631 */
4632 if (processor->processor_primary == processor)
4633 break;
4634 }
55e303ae
A
4635 }
4636
2d21ac55
A
4637 timer_switch(&PROCESSOR_DATA(processor, idle_state),
4638 mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
4639 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
1c79356b 4640
2d21ac55
A
4641 pset_lock(pset);
4642
39236c6e 4643 /* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */
5ba3f43e 4644 bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);
3e170ce0 4645#if defined(CONFIG_SCHED_DEFERRED_AST)
5ba3f43e 4646 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
3e170ce0 4647#endif
39236c6e 4648
55e303ae
A
4649 state = processor->state;
4650 if (state == PROCESSOR_DISPATCHING) {
1c79356b 4651 /*
55e303ae 4652 * Commmon case -- cpu dispatched.
1c79356b 4653 */
2d21ac55
A
4654 new_thread = processor->next_thread;
4655 processor->next_thread = THREAD_NULL;
55e303ae 4656 processor->state = PROCESSOR_RUNNING;
1c79356b 4657
39236c6e 4658 if ((new_thread != THREAD_NULL) && (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
5ba3f43e 4659 (rt_runq_count(pset) > 0)) ) {
a39ff7e2 4660 /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
5ba3f43e 4661 processor_state_update_idle(processor);
2d21ac55 4662 processor->deadline = UINT64_MAX;
55e303ae 4663
2d21ac55 4664 pset_unlock(pset);
1c79356b 4665
2d21ac55 4666 thread_lock(new_thread);
5ba3f43e 4667 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq_count(pset), 0, 0);
2d21ac55
A
4668 thread_setrun(new_thread, SCHED_HEADQ);
4669 thread_unlock(new_thread);
55e303ae 4670
316670eb
A
4671 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4672 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4673 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
6d2010ae 4674
2d21ac55 4675 return (THREAD_NULL);
1c79356b 4676 }
1c79356b 4677
5ba3f43e
A
4678 sched_update_pset_load_average(pset);
4679
2d21ac55
A
4680 pset_unlock(pset);
4681
316670eb
A
4682 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4683 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4684 (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
39037602 4685
2d21ac55 4686 return (new_thread);
39037602
A
4687
4688 } else if (state == PROCESSOR_IDLE) {
4689 re_queue_tail(&pset->active_queue, &processor->processor_queue);
1c79356b 4690
5ba3f43e
A
4691 pset->active_processor_count++;
4692 sched_update_pset_load_average(pset);
4693
2d21ac55 4694 processor->state = PROCESSOR_RUNNING;
5ba3f43e 4695 processor_state_update_idle(processor);
39236c6e 4696 processor->deadline = UINT64_MAX;
39037602
A
4697
4698 } else if (state == PROCESSOR_SHUTDOWN) {
55e303ae
A
4699 /*
4700 * Going off-line. Force a
4701 * reschedule.
4702 */
2d21ac55
A
4703 if ((new_thread = processor->next_thread) != THREAD_NULL) {
4704 processor->next_thread = THREAD_NULL;
5ba3f43e 4705 processor_state_update_idle(processor);
55e303ae 4706 processor->deadline = UINT64_MAX;
2d21ac55
A
4707
4708 pset_unlock(pset);
55e303ae
A
4709
4710 thread_lock(new_thread);
4711 thread_setrun(new_thread, SCHED_HEADQ);
4712 thread_unlock(new_thread);
55e303ae 4713
316670eb
A
4714 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4715 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4716 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
6d2010ae 4717
2d21ac55
A
4718 return (THREAD_NULL);
4719 }
55e303ae
A
4720 }
4721
2d21ac55
A
4722 pset_unlock(pset);
4723
316670eb
A
4724 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4725 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4726 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
6d2010ae 4727
2d21ac55
A
4728 return (THREAD_NULL);
4729}
4730
cf7d32b8
A
4731/*
4732 * Each processor has a dedicated thread which
4733 * executes the idle loop when there is no suitable
4734 * previous context.
4735 */
2d21ac55
A
4736void
4737idle_thread(void)
4738{
4739 processor_t processor = current_processor();
4740 thread_t new_thread;
4741
4742 new_thread = processor_idle(THREAD_NULL, processor);
4743 if (new_thread != THREAD_NULL) {
4744 thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
4745 /*NOTREACHED*/
4746 }
55e303ae 4747
2d21ac55 4748 thread_block((thread_continue_t)idle_thread);
55e303ae 4749 /*NOTREACHED*/
1c79356b
A
4750}
4751
91447636
A
4752kern_return_t
4753idle_thread_create(
4754 processor_t processor)
1c79356b 4755{
91447636
A
4756 kern_return_t result;
4757 thread_t thread;
4758 spl_t s;
5ba3f43e 4759 char name[MAXTHREADNAMESIZE];
91447636
A
4760
4761 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
4762 if (result != KERN_SUCCESS)
4763 return (result);
4764
5ba3f43e
A
4765 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
4766 thread_set_thread_name(thread, name);
4767
91447636
A
4768 s = splsched();
4769 thread_lock(thread);
4770 thread->bound_processor = processor;
4771 processor->idle_thread = thread;
3e170ce0 4772 thread->sched_pri = thread->base_pri = IDLEPRI;
91447636 4773 thread->state = (TH_RUN | TH_IDLE);
39236c6e 4774 thread->options |= TH_OPT_IDLE_THREAD;
91447636
A
4775 thread_unlock(thread);
4776 splx(s);
4777
4778 thread_deallocate(thread);
4779
4780 return (KERN_SUCCESS);
1c79356b
A
4781}
4782
91447636
A
4783/*
4784 * sched_startup:
4785 *
4786 * Kicks off scheduler services.
4787 *
4788 * Called at splsched.
4789 */
0b4e3aa0 4790void
91447636 4791sched_startup(void)
0b4e3aa0 4792{
91447636
A
4793 kern_return_t result;
4794 thread_t thread;
4795
3e170ce0
A
4796 simple_lock_init(&sched_vm_group_list_lock, 0);
4797
5ba3f43e
A
4798#if __arm__ || __arm64__
4799 simple_lock_init(&sched_recommended_cores_lock, 0);
4800#endif /* __arm__ || __arm64__ */
490019cf 4801
6d2010ae 4802 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
39236c6e 4803 (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
91447636
A
4804 if (result != KERN_SUCCESS)
4805 panic("sched_startup");
4806
4807 thread_deallocate(thread);
4808
39037602
A
4809 assert_thread_magic(thread);
4810
91447636 4811 /*
316670eb
A
4812 * Yield to the sched_init_thread once, to
4813 * initialize our own thread after being switched
4814 * back to.
91447636
A
4815 *
4816 * The current thread is the only other thread
4817 * active at this point.
4818 */
316670eb 4819 thread_block(THREAD_CONTINUE_NULL);
6d2010ae 4820}
91447636 4821
5ba3f43e
A
4822#if __arm64__
4823static _Atomic uint64_t sched_perfcontrol_callback_deadline;
4824#endif /* __arm64__ */
4825
4826
fe8ab488 4827#if defined(CONFIG_SCHED_TIMESHARE_CORE)
91447636 4828
39236c6e
A
4829static volatile uint64_t sched_maintenance_deadline;
4830static uint64_t sched_tick_last_abstime;
4831static uint64_t sched_tick_delta;
4832uint64_t sched_tick_max_delta;
5ba3f43e
A
4833
4834
1c79356b 4835/*
6d2010ae 4836 * sched_init_thread:
1c79356b 4837 *
55e303ae
A
4838 * Perform periodic bookkeeping functions about ten
4839 * times per second.
1c79356b 4840 */
fe8ab488 4841void
3e170ce0 4842sched_timeshare_maintenance_continue(void)
1c79356b 4843{
fe8ab488
A
4844 uint64_t sched_tick_ctime, late_time;
4845
3e170ce0
A
4846 struct sched_update_scan_context scan_context = {
4847 .earliest_bg_make_runnable_time = UINT64_MAX,
4848 .earliest_normal_make_runnable_time = UINT64_MAX,
4849 .earliest_rt_make_runnable_time = UINT64_MAX
4850 };
4851
fe8ab488 4852 sched_tick_ctime = mach_absolute_time();
1c79356b 4853
39236c6e
A
4854 if (__improbable(sched_tick_last_abstime == 0)) {
4855 sched_tick_last_abstime = sched_tick_ctime;
fe8ab488 4856 late_time = 0;
39236c6e
A
4857 sched_tick_delta = 1;
4858 } else {
fe8ab488
A
4859 late_time = sched_tick_ctime - sched_tick_last_abstime;
4860 sched_tick_delta = late_time / sched_tick_interval;
39236c6e
A
4861 /* Ensure a delta of 1, since the interval could be slightly
4862 * smaller than the sched_tick_interval due to dispatch
4863 * latencies.
4864 */
4865 sched_tick_delta = MAX(sched_tick_delta, 1);
4866
4867 /* In the event interrupt latencies or platform
4868 * idle events that advanced the timebase resulted
4869 * in periods where no threads were dispatched,
4870 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
4871 * iterations.
4872 */
4873 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
4874
4875 sched_tick_last_abstime = sched_tick_ctime;
4876 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
4877 }
4878
fe8ab488 4879 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_START,
39037602 4880 sched_tick_delta, late_time, 0, 0, 0);
fe8ab488 4881
39236c6e
A
4882 /* Add a number of pseudo-ticks corresponding to the elapsed interval
4883 * This could be greater than 1 if substantial intervals where
4884 * all processors are idle occur, which rarely occurs in practice.
4885 */
39037602 4886
39236c6e 4887 sched_tick += sched_tick_delta;
1c79356b 4888
5ba3f43e
A
4889 update_vm_info();
4890
1c79356b 4891 /*
91447636 4892 * Compute various averages.
1c79356b 4893 */
39236c6e 4894 compute_averages(sched_tick_delta);
1c79356b
A
4895
4896 /*
91447636 4897 * Scan the run queues for threads which
39037602
A
4898 * may need to be updated, and find the earliest runnable thread on the runqueue
4899 * to report its latency.
1c79356b 4900 */
3e170ce0
A
4901 SCHED(thread_update_scan)(&scan_context);
4902
5ba3f43e 4903 SCHED(rt_runq_scan)(&scan_context);
3e170ce0
A
4904
4905 uint64_t ctime = mach_absolute_time();
4906
39037602
A
4907 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
4908 ctime - scan_context.earliest_bg_make_runnable_time : 0;
4909
4910 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
4911 ctime - scan_context.earliest_normal_make_runnable_time : 0;
4912
4913 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
4914 ctime - scan_context.earliest_rt_make_runnable_time : 0;
4915
4916 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
3e170ce0
A
4917
4918 /*
4919 * Check to see if the special sched VM group needs attention.
4920 */
4921 sched_vm_group_maintenance();
fe8ab488 4922
5ba3f43e
A
4923#if __arm__ || __arm64__
4924 /* Check to see if the recommended cores failsafe is active */
4925 sched_recommended_cores_maintenance();
4926#endif /* __arm__ || __arm64__ */
4927
4928
4929#if DEBUG || DEVELOPMENT
4930#if __x86_64__
4931#include <i386/misc_protos.h>
4932 /* Check for long-duration interrupts */
4933 mp_interrupt_watchdog();
4934#endif /* __x86_64__ */
4935#endif /* DEBUG || DEVELOPMENT */
490019cf 4936
39037602
A
4937 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
4938 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
4939 sched_pri_shifts[TH_BUCKET_SHARE_UT], 0, 0);
1c79356b 4940
3e170ce0
A
4941 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
4942 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
1c79356b
A
4943 /*NOTREACHED*/
4944}
4945
39236c6e
A
4946static uint64_t sched_maintenance_wakeups;
4947
4948/*
4949 * Determine if the set of routines formerly driven by a maintenance timer
4950 * must be invoked, based on a deadline comparison. Signals the scheduler
4951 * maintenance thread on deadline expiration. Must be invoked at an interval
4952 * lower than the "sched_tick_interval", currently accomplished by
4953 * invocation via the quantum expiration timer and at context switch time.
4954 * Performance matters: this routine reuses a timestamp approximating the
4955 * current absolute time received from the caller, and should perform
4956 * no more than a comparison against the deadline in the common case.
4957 */
4958void
3e170ce0 4959sched_timeshare_consider_maintenance(uint64_t ctime) {
39236c6e
A
4960 uint64_t ndeadline, deadline = sched_maintenance_deadline;
4961
4962 if (__improbable(ctime >= deadline)) {
4963 if (__improbable(current_thread() == sched_maintenance_thread))
4964 return;
4965 OSMemoryBarrier();
4966
4967 ndeadline = ctime + sched_tick_interval;
4968
4969 if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
3e170ce0 4970 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
39236c6e
A
4971 sched_maintenance_wakeups++;
4972 }
4973 }
5ba3f43e
A
4974
4975#if __arm64__
4976 uint64_t perf_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline, memory_order_relaxed);
4977
4978 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
4979 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
4980 if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline, &perf_deadline, 0,
4981 memory_order_relaxed, memory_order_relaxed)) {
4982 machine_perfcontrol_deadline_passed(perf_deadline);
4983 }
4984 }
4985#endif /* __arm64__ */
4986
39236c6e
A
4987}
4988
fe8ab488 4989#endif /* CONFIG_SCHED_TIMESHARE_CORE */
6d2010ae 4990
1c79356b 4991void
6d2010ae 4992sched_init_thread(void (*continuation)(void))
1c79356b 4993{
316670eb 4994 thread_block(THREAD_CONTINUE_NULL);
91447636 4995
490019cf
A
4996 thread_t thread = current_thread();
4997
39037602
A
4998 thread_set_thread_name(thread, "sched_maintenance_thread");
4999
490019cf
A
5000 sched_maintenance_thread = thread;
5001
6d2010ae 5002 continuation();
1c79356b 5003
1c79356b
A
5004 /*NOTREACHED*/
5005}
5006
fe8ab488 5007#if defined(CONFIG_SCHED_TIMESHARE_CORE)
6d2010ae 5008
1c79356b 5009/*
91447636 5010 * thread_update_scan / runq_scan:
55e303ae 5011 *
91447636
A
5012 * Scan the run queues to account for timesharing threads
5013 * which need to be updated.
1c79356b
A
5014 *
5015 * Scanner runs in two passes. Pass one squirrels likely
91447636 5016 * threads away in an array, pass two does the update.
1c79356b 5017 *
91447636
A
5018 * This is necessary because the run queue is locked for
5019 * the candidate scan, but the thread is locked for the update.
1c79356b 5020 *
91447636
A
5021 * Array should be sized to make forward progress, without
5022 * disabling preemption for long periods.
1c79356b 5023 */
55e303ae 5024
91447636 5025#define THREAD_UPDATE_SIZE 128
55e303ae 5026
39037602
A
5027static thread_t thread_update_array[THREAD_UPDATE_SIZE];
5028static uint32_t thread_update_count = 0;
1c79356b 5029
fe8ab488
A
5030/* Returns TRUE if thread was added, FALSE if thread_update_array is full */
5031boolean_t
5032thread_update_add_thread(thread_t thread)
5033{
5034 if (thread_update_count == THREAD_UPDATE_SIZE)
5035 return (FALSE);
5036
5037 thread_update_array[thread_update_count++] = thread;
5038 thread_reference_internal(thread);
5039 return (TRUE);
5040}
5041
5042void
5043thread_update_process_threads(void)
5044{
39037602 5045 assert(thread_update_count <= THREAD_UPDATE_SIZE);
fe8ab488 5046
39037602
A
5047 for (uint32_t i = 0 ; i < thread_update_count ; i++) {
5048 thread_t thread = thread_update_array[i];
5049 assert_thread_magic(thread);
5050 thread_update_array[i] = THREAD_NULL;
5051
5052 spl_t s = splsched();
fe8ab488 5053 thread_lock(thread);
39037602 5054 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
fe8ab488
A
5055 SCHED(update_priority)(thread);
5056 }
5057 thread_unlock(thread);
5058 splx(s);
5059
5060 thread_deallocate(thread);
5061 }
39037602
A
5062
5063 thread_update_count = 0;
fe8ab488
A
5064}
5065
1c79356b 5066/*
91447636
A
5067 * Scan a runq for candidate threads.
5068 *
5069 * Returns TRUE if retry is needed.
1c79356b 5070 */
fe8ab488 5071boolean_t
91447636 5072runq_scan(
39037602
A
5073 run_queue_t runq,
5074 sched_update_scan_context_t scan_context)
1c79356b 5075{
39037602
A
5076 int count = runq->count;
5077 int queue_index;
1c79356b 5078
39037602
A
5079 assert(count >= 0);
5080
5081 if (count == 0)
5082 return FALSE;
5083
5084 for (queue_index = bitmap_first(runq->bitmap, NRQS);
5085 queue_index >= 0;
5086 queue_index = bitmap_next(runq->bitmap, queue_index)) {
5087
5088 thread_t thread;
5089 queue_t queue = &runq->queues[queue_index];
3e170ce0 5090
39037602
A
5091 qe_foreach_element(thread, queue, runq_links) {
5092 assert(count > 0);
5093 assert_thread_magic(thread);
5094
5095 if (thread->sched_stamp != sched_tick &&
5096 thread->sched_mode == TH_MODE_TIMESHARE) {
5097 if (thread_update_add_thread(thread) == FALSE)
5098 return TRUE;
1c79356b
A
5099 }
5100
39037602
A
5101 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5102 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5103 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5104 }
5105 } else {
5106 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5107 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5108 }
5109 }
5110 count--;
1c79356b
A
5111 }
5112 }
1c79356b 5113
39037602 5114 return FALSE;
1c79356b
A
5115}
5116
fe8ab488
A
5117#endif /* CONFIG_SCHED_TIMESHARE_CORE */
5118
6d2010ae
A
5119boolean_t
5120thread_eager_preemption(thread_t thread)
5121{
5122 return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
5123}
5124
5125void
5126thread_set_eager_preempt(thread_t thread)
5127{
5128 spl_t x;
5129 processor_t p;
5130 ast_t ast = AST_NONE;
5131
5132 x = splsched();
5133 p = current_processor();
5134
5135 thread_lock(thread);
5136 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5137
5138 if (thread == current_thread()) {
6d2010ae 5139
fe8ab488
A
5140 ast = csw_check(p, AST_NONE);
5141 thread_unlock(thread);
6d2010ae
A
5142 if (ast != AST_NONE) {
5143 (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
5144 }
5145 } else {
5146 p = thread->last_processor;
5147
5148 if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
5149 p->active_thread == thread) {
5150 cause_ast_check(p);
5151 }
1c79356b 5152
6d2010ae
A
5153 thread_unlock(thread);
5154 }
5155
5156 splx(x);
5157}
5158
5159void
5160thread_clear_eager_preempt(thread_t thread)
5161{
5162 spl_t x;
5163
5164 x = splsched();
5165 thread_lock(thread);
5166
5167 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5168
5169 thread_unlock(thread);
5170 splx(x);
5171}
3e170ce0 5172
6d2010ae
A
5173/*
5174 * Scheduling statistics
5175 */
5176void
5177sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5178{
5179 struct processor_sched_statistics *stats;
5180 boolean_t to_realtime = FALSE;
5181
5182 stats = &processor->processor_data.sched_stats;
5183 stats->csw_count++;
5184
5185 if (otherpri >= BASEPRI_REALTIME) {
5186 stats->rt_sched_count++;
5187 to_realtime = TRUE;
5188 }
5189
5190 if ((reasons & AST_PREEMPT) != 0) {
5191 stats->preempt_count++;
5192
5193 if (selfpri >= BASEPRI_REALTIME) {
5194 stats->preempted_rt_count++;
5195 }
5196
5197 if (to_realtime) {
5198 stats->preempted_by_rt_count++;
5199 }
5200
5201 }
5202}
5203
5204void
5205sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
5206{
5207 uint64_t timestamp = mach_absolute_time();
5208
5209 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5210 stats->last_change_timestamp = timestamp;
5211}
5212
1c79356b 5213/*
6d2010ae 5214 * For calls from assembly code
1c79356b 5215 */
6d2010ae 5216#undef thread_wakeup
1c79356b
A
5217void
5218thread_wakeup(
6d2010ae 5219 event_t x);
1c79356b
A
5220
5221void
5222thread_wakeup(
6d2010ae 5223 event_t x)
1c79356b 5224{
6d2010ae 5225 thread_wakeup_with_result(x, THREAD_AWAKENED);
1c79356b
A
5226}
5227
91447636
A
5228boolean_t
5229preemption_enabled(void)
5230{
5231 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
5232}
9bccf70c 5233
4b17d6b6
A
5234static void
5235sched_timer_deadline_tracking_init(void) {
5236 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5237 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5238}
3e170ce0 5239
5ba3f43e 5240#if __arm__ || __arm64__
3e170ce0 5241
5ba3f43e
A
5242uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5243uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
5244boolean_t perfcontrol_failsafe_active = FALSE;
5245
5246uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5247uint64_t perfcontrol_failsafe_activation_time;
5248uint64_t perfcontrol_failsafe_deactivation_time;
5249
5250/* data covering who likely caused it and how long they ran */
5251#define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5252char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
5253int perfcontrol_failsafe_pid;
5254uint64_t perfcontrol_failsafe_tid;
5255uint64_t perfcontrol_failsafe_thread_timer_at_start;
5256uint64_t perfcontrol_failsafe_thread_timer_last_seen;
5257uint32_t perfcontrol_failsafe_recommended_at_trigger;
5258
5259/*
5260 * Perf controller calls here to update the recommended core bitmask.
5261 * If the failsafe is active, we don't immediately apply the new value.
5262 * Instead, we store the new request and use it after the failsafe deactivates.
5263 *
5264 * If the failsafe is not active, immediately apply the update.
5265 *
5266 * No scheduler locks are held, no other locks are held that scheduler might depend on,
5267 * interrupts are enabled
5268 *
5269 * currently prototype is in osfmk/arm/machine_routines.h
5270 */
5271void
5272sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
3e170ce0 5273{
5ba3f43e
A
5274 assert(preemption_enabled());
5275
5276 spl_t s = splsched();
5277 simple_lock(&sched_recommended_cores_lock);
3e170ce0 5278
5ba3f43e
A
5279 perfcontrol_requested_recommended_cores = recommended_cores;
5280 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
5281
5282 if (perfcontrol_failsafe_active == FALSE)
5283 sched_update_recommended_cores(perfcontrol_requested_recommended_cores);
5284 else
5285 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5286 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5287 perfcontrol_requested_recommended_cores,
5288 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5289
5290 simple_unlock(&sched_recommended_cores_lock);
5291 splx(s);
5292}
5293
5294/*
5295 * Consider whether we need to activate the recommended cores failsafe
5296 *
5297 * Called from quantum timer interrupt context of a realtime thread
5298 * No scheduler locks are held, interrupts are disabled
5299 */
5300void
5301sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
5302{
5303 /*
5304 * Check if a realtime thread is starving the system
5305 * and bringing up non-recommended cores would help
5306 *
5307 * TODO: Is this the correct check for recommended == possible cores?
5308 * TODO: Validate the checks without the relevant lock are OK.
5309 */
5310
5311 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
5312 /* keep track of how long the responsible thread runs */
5313
5314 simple_lock(&sched_recommended_cores_lock);
5315
5316 if (perfcontrol_failsafe_active == TRUE &&
5317 cur_thread->thread_id == perfcontrol_failsafe_tid) {
5318 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
5319 timer_grab(&cur_thread->system_timer);
5320 }
5321
5322 simple_unlock(&sched_recommended_cores_lock);
5323
5324 /* we're already trying to solve the problem, so bail */
5325 return;
3e170ce0
A
5326 }
5327
5ba3f43e
A
5328 /* The failsafe won't help if there are no more processors to enable */
5329 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count))
5330 return;
3e170ce0 5331
5ba3f43e
A
5332 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
5333
5334 /* Use the maintenance thread as our canary in the coal mine */
5335 thread_t m_thread = sched_maintenance_thread;
5336
5337 /* If it doesn't look bad, nothing to see here */
5338 if (__probable(m_thread->last_made_runnable_time >= too_long_ago))
5339 return;
5340
5341 /* It looks bad, take the lock to be sure */
5342 thread_lock(m_thread);
5343
5344 if (m_thread->runq == PROCESSOR_NULL ||
5345 (m_thread->state & (TH_RUN|TH_WAIT)) != TH_RUN ||
5346 m_thread->last_made_runnable_time >= too_long_ago) {
5347 /*
5348 * Maintenance thread is either on cpu or blocked, and
5349 * therefore wouldn't benefit from more cores
5350 */
5351 thread_unlock(m_thread);
5352 return;
3e170ce0 5353 }
3e170ce0 5354
5ba3f43e
A
5355 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
5356
5357 thread_unlock(m_thread);
5358
5359 /*
5360 * There are cores disabled at perfcontrol's recommendation, but the
5361 * system is so overloaded that the maintenance thread can't run.
5362 * That likely means that perfcontrol can't run either, so it can't fix
5363 * the recommendation. We have to kick in a failsafe to keep from starving.
5364 *
5365 * When the maintenance thread has been starved for too long,
5366 * ignore the recommendation from perfcontrol and light up all the cores.
5367 *
5368 * TODO: Consider weird states like boot, sleep, or debugger
5369 */
5370
5371 simple_lock(&sched_recommended_cores_lock);
5372
5373 if (perfcontrol_failsafe_active == TRUE) {
5374 simple_unlock(&sched_recommended_cores_lock);
5375 return;
5376 }
5377
5378 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5379 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
5380 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
5381
5382 perfcontrol_failsafe_active = TRUE;
5383 perfcontrol_failsafe_activation_time = mach_absolute_time();
5384 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
5385 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
5386
5387 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
5388 task_t task = cur_thread->task;
5389 perfcontrol_failsafe_pid = task_pid(task);
5390 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
5391
5392 perfcontrol_failsafe_tid = cur_thread->thread_id;
5393
5394 /* Blame the thread for time it has run recently */
5395 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
5396
5397 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
5398
5399 /* Compute the start time of the bad behavior in terms of the thread's on core time */
5400 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
5401 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
5402
5403 /* Ignore the previously recommended core configuration */
5404 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5405
5406 simple_unlock(&sched_recommended_cores_lock);
5407}
5408
5409/*
5410 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
5411 *
5412 * Runs in the context of the maintenance thread, no locks held
5413 */
5414static void
5415sched_recommended_cores_maintenance(void)
5416{
5417 /* Common case - no failsafe, nothing to be done here */
5418 if (__probable(perfcontrol_failsafe_active == FALSE))
5419 return;
5420
5421 uint64_t ctime = mach_absolute_time();
5422
5423 boolean_t print_diagnostic = FALSE;
5424 char p_name[FAILSAFE_NAME_LEN] = "";
5425
5426 spl_t s = splsched();
5427 simple_lock(&sched_recommended_cores_lock);
5428
5429 /* Check again, under the lock, to avoid races */
5430 if (perfcontrol_failsafe_active == FALSE)
5431 goto out;
5432
5433 /*
5434 * Ensure that the other cores get another few ticks to run some threads
5435 * If we don't have this hysteresis, the maintenance thread is the first
5436 * to run, and then it immediately kills the other cores
5437 */
5438 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold)
5439 goto out;
5440
5441 /* Capture some diagnostic state under the lock so we can print it out later */
5442
5443 int pid = perfcontrol_failsafe_pid;
5444 uint64_t tid = perfcontrol_failsafe_tid;
5445
5446 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
5447 perfcontrol_failsafe_thread_timer_at_start;
5448 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
5449 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
5450 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
5451 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
5452
5453 print_diagnostic = TRUE;
5454
5455 /* Deactivate the failsafe and reinstate the requested recommendation settings */
5456
5457 perfcontrol_failsafe_deactivation_time = ctime;
5458 perfcontrol_failsafe_active = FALSE;
5459
5460 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5461 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
5462 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
5463
5464 sched_update_recommended_cores(perfcontrol_requested_recommended_cores);
5465
5466out:
5467 simple_unlock(&sched_recommended_cores_lock);
3e170ce0
A
5468 splx(s);
5469
5ba3f43e
A
5470 if (print_diagnostic) {
5471 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
5472
5473 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
5474 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
5475
5476 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
5477 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
5478
5479 printf("recommended core failsafe kicked in for %lld ms "
5480 "likely due to %s[%d] thread 0x%llx spending "
5481 "%lld ms on cpu at realtime priority - "
5482 "new recommendation: 0x%x -> 0x%x\n",
5483 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
5484 rec_cores_before, rec_cores_after);
5485 }
5486}
5487
5488/*
5489 * Apply a new recommended cores mask to the processors it affects
5490 * Runs after considering failsafes and such
5491 *
5492 * Iterate over processors and update their ->is_recommended field.
5493 * If a processor is running, we let it drain out at its next
5494 * quantum expiration or blocking point. If a processor is idle, there
5495 * may be more work for it to do, so IPI it.
5496 *
5497 * interrupts disabled, sched_recommended_cores_lock is held
5498 */
5499static void
5500sched_update_recommended_cores(uint32_t recommended_cores)
5501{
5502 processor_set_t pset, nset;
5503 processor_t processor;
5504 uint64_t needs_exit_idle_mask = 0x0;
5505
5506 processor = processor_list;
5507 pset = processor->processor_set;
5508
5509 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5510 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
5511 recommended_cores, perfcontrol_failsafe_active, 0, 0, 0);
5512
5513 if (__builtin_popcount(recommended_cores) == 0) {
5514 recommended_cores |= 0x1U; /* add boot processor or we hang */
5515 }
5516
5517 /* First set recommended cores */
5518 pset_lock(pset);
5519 do {
5520
5521 nset = processor->processor_set;
5522 if (nset != pset) {
5523 pset_unlock(pset);
5524 pset = nset;
5525 pset_lock(pset);
5526 }
5527
5528 pset->recommended_bitmask = recommended_cores;
5529
5530 if (recommended_cores & (1ULL << processor->cpu_id)) {
5531 processor->is_recommended = TRUE;
5532
5533 if (processor->state == PROCESSOR_IDLE) {
5534 if (processor->processor_primary == processor) {
5535 re_queue_head(&pset->idle_queue, &processor->processor_queue);
5536 } else {
5537 re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
5538 }
5539 if (processor != current_processor()) {
5540 needs_exit_idle_mask |= (1ULL << processor->cpu_id);
5541 }
5542 }
5543 }
5544 } while ((processor = processor->processor_list) != NULL);
5545 pset_unlock(pset);
5546
5547 /* Now shutdown not recommended cores */
5548 processor = processor_list;
5549 pset = processor->processor_set;
5550
5551 pset_lock(pset);
5552 do {
5553
5554 nset = processor->processor_set;
5555 if (nset != pset) {
5556 pset_unlock(pset);
5557 pset = nset;
5558 pset_lock(pset);
5559 }
5560
5561 if (!(recommended_cores & (1ULL << processor->cpu_id))) {
5562 processor->is_recommended = FALSE;
5563 if (processor->state == PROCESSOR_IDLE) {
5564 re_queue_head(&pset->unused_queue, &processor->processor_queue);
5565 }
5566 SCHED(processor_queue_shutdown)(processor);
5567 /* pset unlocked */
5568
5569 SCHED(rt_queue_shutdown)(processor);
5570
5571 pset_lock(pset);
5572 }
5573 } while ((processor = processor->processor_list) != NULL);
5574 pset_unlock(pset);
5575
5576 /* Issue all pending IPIs now that the pset lock has been dropped */
5577 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
5578 processor = processor_array[cpuid];
5579 machine_signal_idle(processor);
5580 }
5581
5582 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5583 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
5584 needs_exit_idle_mask, 0, 0, 0, 0);
3e170ce0 5585}
5ba3f43e 5586#endif /* __arm__ || __arm64__ */
3e170ce0
A
5587
5588void thread_set_options(uint32_t thopt) {
5589 spl_t x;
5590 thread_t t = current_thread();
5591
5592 x = splsched();
5593 thread_lock(t);
5594
5595 t->options |= thopt;
5596
5597 thread_unlock(t);
5598 splx(x);
5599}
813fb2f6
A
5600
5601void thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint) {
5602 thread->pending_block_hint = block_hint;
5603}
5ba3f43e
A
5604
5605uint32_t qos_max_parallelism(int qos, uint64_t options)
5606{
5607 return SCHED(qos_max_parallelism)(qos, options);
5608}
5609
5610uint32_t sched_qos_max_parallelism(__unused int qos, uint64_t options)
5611{
5612 host_basic_info_data_t hinfo;
5613 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
5614 /* Query the machine layer for core information */
5615 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
5616 (host_info_t)&hinfo, &count);
5617 assert(kret == KERN_SUCCESS);
5618
5619 /* We would not want multiple realtime threads running on the
5620 * same physical core; even for SMT capable machines.
5621 */
5622 if (options & QOS_PARALLELISM_REALTIME) {
5623 return hinfo.physical_cpu;
5624 }
5625
5626 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
5627 return hinfo.logical_cpu;
5628 } else {
5629 return hinfo.physical_cpu;
5630 }
5631}
5632
5633#if __arm64__
5634
5635/*
5636 * Set up or replace old timer with new timer
5637 *
5638 * Returns true if canceled old timer, false if it did not
5639 */
5640boolean_t
5641sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
5642{
5643 /*
5644 * Exchange deadline for new deadline, if old deadline was nonzero,
5645 * then I cancelled the callback, otherwise I didn't
5646 */
5647
5648 uint64_t old_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline,
5649 memory_order_relaxed);
5650
5651
5652 while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline,
5653 &old_deadline, new_deadline,
5654 memory_order_relaxed, memory_order_relaxed));
5655
5656
5657 /* now old_deadline contains previous value, which might not be the same if it raced */
5658
5659 return (old_deadline != 0) ? TRUE : FALSE;
5660}
5661
5662#endif /* __arm64__ */
5663
5664int
5665sched_get_pset_load_average(processor_set_t pset)
5666{
5667 return pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
5668}
5669
5670void
5671sched_update_pset_load_average(processor_set_t pset)
5672{
5673#if DEBUG
5674 queue_entry_t iter;
5675 int count = 0;
5676 qe_foreach(iter, &pset->active_queue) {
5677 count++;
5678 }
5679 assertf(count == pset->active_processor_count, "count %d pset->active_processor_count %d\n", count, pset->active_processor_count);
5680#endif
5681
5682 int load = ((pset->active_processor_count + pset->pset_runq.count + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
5683 int new_load_average = (pset->load_average + load) >> 1;
5684
5685 pset->load_average = new_load_average;
5686
5687#if (DEVELOPMENT || DEBUG)
5688#endif
5689}
a39ff7e2
A
5690
5691/* pset is locked */
5692static processor_t
5693choose_processor_for_realtime_thread(processor_set_t pset)
5694{
5695 uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_cpu_mask);
5696
5697 for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
5698 processor_t processor = processor_array[cpuid];
5699
5700 if (processor->processor_primary != processor) {
5701 continue;
5702 }
5703
5704 if (processor->state == PROCESSOR_IDLE) {
5705 return processor;
5706 }
5707
5708 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
5709 continue;
5710 }
5711
5712 if (processor->current_pri >= BASEPRI_RTQUEUES) {
5713 continue;
5714 }
5715
5716 return processor;
5717
5718 }
5719
5720 if (!sched_allow_rt_smt) {
5721 return PROCESSOR_NULL;
5722 }
5723
5724 /* Consider secondary processors */
5725 for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
5726 processor_t processor = processor_array[cpuid];
5727
5728 if (processor->processor_primary == processor) {
5729 continue;
5730 }
5731
5732 if (processor->state == PROCESSOR_IDLE) {
5733 return processor;
5734 }
5735
5736 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
5737 continue;
5738 }
5739
5740 if (processor->current_pri >= BASEPRI_RTQUEUES) {
5741 continue;
5742 }
5743
5744 return processor;
5745
5746 }
5747
5748 return PROCESSOR_NULL;
5749}
5750
5751/* pset is locked */
5752static bool
5753all_available_primaries_are_running_realtime_threads(processor_set_t pset)
5754{
5755 uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask);
5756
5757 for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) {
5758 processor_t processor = processor_array[cpuid];
5759
5760 if (processor->processor_primary != processor) {
5761 continue;
5762 }
5763
5764 if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) {
5765 continue;
5766 }
5767
5768 if (processor->current_pri < BASEPRI_RTQUEUES) {
5769 return false;
5770 }
5771 }
5772
5773 return true;
5774}