1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/machlimits.h>
79 #include <machine/atomic.h>
80
81 #ifdef CONFIG_MACH_APPROXIMATE_TIME
82 #include <machine/commpage.h>
83 #endif
84
85 #include <kern/kern_types.h>
86 #include <kern/backtrace.h>
87 #include <kern/clock.h>
88 #include <kern/counters.h>
89 #include <kern/cpu_number.h>
90 #include <kern/cpu_data.h>
91 #include <kern/smp.h>
92 #include <kern/debug.h>
93 #include <kern/macro_help.h>
94 #include <kern/machine.h>
95 #include <kern/misc_protos.h>
96 #if MONOTONIC
97 #include <kern/monotonic.h>
98 #endif /* MONOTONIC */
99 #include <kern/processor.h>
100 #include <kern/queue.h>
101 #include <kern/sched.h>
102 #include <kern/sched_prim.h>
103 #include <kern/sfi.h>
104 #include <kern/syscall_subr.h>
105 #include <kern/task.h>
106 #include <kern/thread.h>
107 #include <kern/ledger.h>
108 #include <kern/timer_queue.h>
109 #include <kern/waitq.h>
110 #include <kern/policy_internal.h>
111
112 #include <vm/pmap.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_map.h>
115 #include <vm/vm_pageout.h>
116
117 #include <mach/sdt.h>
118 #include <mach/mach_host.h>
119 #include <mach/host_info.h>
120
121 #include <sys/kdebug.h>
122 #include <kperf/kperf.h>
123 #include <kern/kpc.h>
124 #include <san/kasan.h>
125 #include <kern/pms.h>
126 #include <kern/host.h>
127 #include <stdatomic.h>
128
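/*
 * Relaxed-atomic accessors for the per-pset real-time run queue count.
 * Unlocked readers may observe a slightly stale value; callers that need
 * a stable count re-check under the pset's rt lock (see thread_select()).
 */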
129 int rt_runq_count(processor_set_t pset)
130 {
131 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
132 }
133
134 void rt_runq_count_incr(processor_set_t pset)
135 {
136 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
137 }
138
139 void rt_runq_count_decr(processor_set_t pset)
140 {
141 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
142 }
143
144 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
145 int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
146
147 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
148 int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
149
150 #define MAX_UNSAFE_QUANTA 800
151 int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
152
153 #define MAX_POLL_QUANTA 2
154 int max_poll_quanta = MAX_POLL_QUANTA;
155
156 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
157 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
158
159 uint64_t max_poll_computation;
160
161 uint64_t max_unsafe_computation;
162 uint64_t sched_safe_duration;
163
164 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
165
166 uint32_t std_quantum;
167 uint32_t min_std_quantum;
168 uint32_t bg_quantum;
169
170 uint32_t std_quantum_us;
171 uint32_t bg_quantum_us;
172
173 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
174
175 uint32_t thread_depress_time;
176 uint32_t default_timeshare_computation;
177 uint32_t default_timeshare_constraint;
178
179 uint32_t max_rt_quantum;
180 uint32_t min_rt_quantum;
181
182 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
183
184 unsigned sched_tick;
185 uint32_t sched_tick_interval;
186
187 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
188 uint32_t sched_fixed_shift;
189
190 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
191
192 /* Allow foreground to decay past default to resolve inversions */
193 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
194 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
195
196 /* Defaults for timer deadline profiling */
197 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
198 * 2ms */
199 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
200 <= 5ms */
201
202 uint64_t timer_deadline_tracking_bin_1;
203 uint64_t timer_deadline_tracking_bin_2;
204
205 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
206
207 thread_t sched_maintenance_thread;
208
209 #if __arm__ || __arm64__
210 /* interrupts disabled lock to guard recommended cores state */
211 decl_simple_lock_data(static,sched_recommended_cores_lock);
212 static void sched_recommended_cores_maintenance(void);
213 static void sched_update_recommended_cores(uint32_t recommended_cores);
214
215 uint64_t perfcontrol_failsafe_starvation_threshold;
216 extern char *proc_name_address(struct proc *p);
217
218 #endif /* __arm__ || __arm64__ */
219
220 uint64_t sched_one_second_interval;
221
222 /* Forwards */
223
224 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
225
226 static void load_shift_init(void);
227 static void preempt_pri_init(void);
228
229 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
230
231 #if CONFIG_SCHED_IDLE_IN_PLACE
232 static thread_t thread_select_idle(
233 thread_t thread,
234 processor_t processor);
235 #endif
236
237 thread_t processor_idle(
238 thread_t thread,
239 processor_t processor);
240
241 ast_t
242 csw_check_locked( processor_t processor,
243 processor_set_t pset,
244 ast_t check_reason);
245
246 static void processor_setrun(
247 processor_t processor,
248 thread_t thread,
249 integer_t options);
250
251 static void
252 sched_realtime_timebase_init(void);
253
254 static void
255 sched_timer_deadline_tracking_init(void);
256
257 #if DEBUG
258 extern int debug_task;
259 #define TLOG(a, fmt, args...) do { if (debug_task & a) kprintf(fmt, ## args); } while (0)
260 #else
261 #define TLOG(a, fmt, args...) do {} while (0)
262 #endif
263
264 static processor_t
265 thread_bind_internal(
266 thread_t thread,
267 processor_t processor);
268
269 static void
270 sched_vm_group_maintenance(void);
271
272 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
273 int8_t sched_load_shifts[NRQS];
274 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS)];
275 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
276
277 const struct sched_dispatch_table *sched_current_dispatch = NULL;
278
279 /*
280 * Statically allocate a buffer to hold the longest possible
281 * scheduler description string, as currently implemented.
282 * bsd/kern/kern_sysctl.c has a corresponding definition in bsd/
283 * to export to userspace via sysctl(3). If either version
284 * changes, update the other.
285 *
286 * Note that in addition to being an upper bound on the strings
287 * in the kernel, it's also an exact parameter to PE_get_default(),
288 * which interrogates the device tree on some platforms. That
289 * API requires the caller know the exact size of the device tree
290 * property, so we need both a legacy size (32) and the current size
291 * (48) to deal with old and new device trees. The device tree property
292 * is similarly padded to a fixed size so that the same kernel image
293 * can run on multiple devices with different schedulers configured
294 * in the device tree.
295 */
296 char sched_string[SCHED_STRING_MAX_LENGTH];
297
298 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
299
300 /* Global flag which indicates whether Background Stepper Context is enabled */
301 static int cpu_throttle_enabled = 1;
302
303 #if DEBUG
304
305 /* Since using the indirect function dispatch table has a negative impact on
306 * context switch performance, only allow DEBUG kernels to use that mechanism.
307 */
308 static void
309 sched_init_override(void)
310 {
311 char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };
312
313 /* Check for runtime selection of the scheduler algorithm */
314 if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) {
315 sched_arg[0] = '\0';
316 }
317 if (strlen(sched_arg) > 0) {
318 if (0) {
319 /* Allow pattern below */
320 #if defined(CONFIG_SCHED_TRADITIONAL)
321 } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
322 sched_current_dispatch = &sched_traditional_dispatch;
323 } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
324 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
325 #endif
326 #if defined(CONFIG_SCHED_MULTIQ)
327 } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
328 sched_current_dispatch = &sched_multiq_dispatch;
329 } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
330 sched_current_dispatch = &sched_dualq_dispatch;
331 #endif
332 } else {
333 #if defined(CONFIG_SCHED_TRADITIONAL)
334 printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
335 printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
336 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
337 #else
338 panic("Unrecognized scheduler algorithm: %s", sched_arg);
339 #endif
340 }
341 kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
342 } else {
343 #if defined(CONFIG_SCHED_MULTIQ)
344 sched_current_dispatch = &sched_multiq_dispatch;
345 #elif defined(CONFIG_SCHED_TRADITIONAL)
346 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
347 #else
348 #error No default scheduler implementation
349 #endif
350 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
351 }
352 }
353
354 #endif /* DEBUG */
355
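/*
 *	sched_init:
 *
 *	One-time scheduler bootstrap: select the dispatch table, read
 *	tunables from boot-args and the device tree, and initialize the
 *	boot pset and the master processor.
 */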
356 void
357 sched_init(void)
358 {
359 #if DEBUG
360 sched_init_override();
361 #else /* DEBUG */
362 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
363 #endif /* DEBUG */
364
365 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
366 /* No boot-args, check in device tree */
367 if (!PE_get_default("kern.sched_pri_decay_limit",
368 &sched_pri_decay_band_limit,
369 sizeof(sched_pri_decay_band_limit))) {
370 /* Allow decay all the way to normal limits */
371 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
372 }
373 }
374
375 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
376
377 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
378 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
379 }
380 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
381
382 SCHED(init)();
383 SCHED(rt_init)(&pset0);
384 sched_timer_deadline_tracking_init();
385
386 SCHED(pset_init)(&pset0);
387 SCHED(processor_init)(master_processor);
388 }
389
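/*
 *	sched_timebase_init:
 *
 *	Convert scheduler intervals into absolute-time units once the
 *	timebase is available, for both the active dispatch table and
 *	the real-time policy.
 */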
390 void
391 sched_timebase_init(void)
392 {
393 uint64_t abstime;
394
395 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
396 sched_one_second_interval = abstime;
397
398 SCHED(timebase_init)();
399 sched_realtime_timebase_init();
400 }
401
402 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
403
404 void
405 sched_timeshare_init(void)
406 {
407 /*
408 * Calculate the timeslicing quantum
409 * in us.
410 */
411 if (default_preemption_rate < 1)
412 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
413 std_quantum_us = (1000 * 1000) / default_preemption_rate;
414
415 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
416
417 if (default_bg_preemption_rate < 1)
418 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
419 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
420
421 printf("standard background quantum is %d us\n", bg_quantum_us);
422
423 load_shift_init();
424 preempt_pri_init();
425 sched_tick = 0;
426 }
427
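/*
 *	sched_timeshare_timebase_init:
 *
 *	Convert the timeshare quanta, the scheduler tick interval, and the
 *	usage-decay conversion factor from microseconds into absolute time.
 */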
428 void
429 sched_timeshare_timebase_init(void)
430 {
431 uint64_t abstime;
432 uint32_t shift;
433
434 /* standard timeslicing quantum */
435 clock_interval_to_absolutetime_interval(
436 std_quantum_us, NSEC_PER_USEC, &abstime);
437 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
438 std_quantum = (uint32_t)abstime;
439
440 /* smallest remaining quantum (250 us) */
441 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
442 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
443 min_std_quantum = (uint32_t)abstime;
444
445 /* quantum for background tasks */
446 clock_interval_to_absolutetime_interval(
447 bg_quantum_us, NSEC_PER_USEC, &abstime);
448 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
449 bg_quantum = (uint32_t)abstime;
450
451 /* scheduler tick interval */
452 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
453 NSEC_PER_USEC, &abstime);
454 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
455 sched_tick_interval = (uint32_t)abstime;
456
457 /*
458 * Compute conversion factor from usage to
459 * timesharing priorities with 5/8 ** n aging.
460 */
461 abstime = (abstime * 5) / 3;
462 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
463 abstime >>= 1;
464 sched_fixed_shift = shift;
465
466 for (uint32_t i = 0 ; i < TH_BUCKET_MAX ; i++)
467 sched_pri_shifts[i] = INT8_MAX;
468
469 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
470 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
471
472 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
473 thread_depress_time = 1 * std_quantum;
474 default_timeshare_computation = std_quantum / 2;
475 default_timeshare_constraint = std_quantum;
476
477 #if __arm__ || __arm64__
478 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
479 #endif /* __arm__ || __arm64__ */
480 }
481
482 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
483
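/*
 *	pset_rt_init:
 *
 *	Initialize a processor set's real-time run queue and its statistics.
 */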
484 void
485 pset_rt_init(processor_set_t pset)
486 {
487 rt_lock_init(pset);
488
489 pset->rt_runq.count = 0;
490 queue_init(&pset->rt_runq.queue);
491 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
492 }
493
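/*
 * In the global real-time run queue configuration, all processor sets
 * share pset0's rt_runq; the pset argument is ignored.
 */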
494 rt_queue_t
495 sched_rtglobal_runq(processor_set_t pset)
496 {
497 (void)pset;
498
499 return &pset0.rt_runq;
500 }
501
502 void
503 sched_rtglobal_init(processor_set_t pset)
504 {
505 if (pset == &pset0) {
506 return pset_rt_init(pset);
507 }
508
509 /* Only pset0 rt_runq is used, so make it easy to detect
510 * buggy accesses to others.
511 */
512 memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
513 }
514
515 void
516 sched_rtglobal_queue_shutdown(processor_t processor)
517 {
518 (void)processor;
519 }
520
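/*
 *	sched_realtime_timebase_init:
 *
 *	Convert the real-time computation bounds (50 us minimum, 50 ms
 *	maximum) into absolute-time units.
 */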
521 static void
522 sched_realtime_timebase_init(void)
523 {
524 uint64_t abstime;
525
526 /* smallest rt computation (50 us) */
527 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
528 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
529 min_rt_quantum = (uint32_t)abstime;
530
531 /* maximum rt computation (50 ms) */
532 clock_interval_to_absolutetime_interval(
533 50, 1000*NSEC_PER_USEC, &abstime);
534 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
535 max_rt_quantum = (uint32_t)abstime;
536
537 }
538
539 void
540 sched_check_spill(processor_set_t pset, thread_t thread)
541 {
542 (void)pset;
543 (void)thread;
544
545 return;
546 }
547
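/*
 * The current thread should yield if any other thread is runnable on
 * this processor's run queue or on the pset's real-time run queue.
 */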
548 bool
549 sched_thread_should_yield(processor_t processor, thread_t thread)
550 {
551 (void)thread;
552
553 return (!SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0);
554 }
555
556 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
557
558 /*
559 * Set up values for timeshare
560 * loading factors.
561 */
562 static void
563 load_shift_init(void)
564 {
565 int8_t k, *p = sched_load_shifts;
566 uint32_t i, j;
567
568 uint32_t sched_decay_penalty = 1;
569
570 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof (sched_decay_penalty))) {
571 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
572 }
573
574 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof (sched_decay_usage_age_factor))) {
575 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
576 }
577
578 if (sched_decay_penalty == 0) {
579 /*
580 * There is no penalty for timeshare threads for using too much
581 * CPU, so set all load shifts to INT8_MIN. Even under high load,
582 * sched_pri_shift will be >INT8_MAX, and there will be no
583 * penalty applied to threads (nor will sched_usage be updated per
584 * thread).
585 */
586 for (i = 0; i < NRQS; i++) {
587 sched_load_shifts[i] = INT8_MIN;
588 }
589
590 return;
591 }
592
593 *p++ = INT8_MIN; *p++ = 0;
594
595 /*
596 * For a given system load "i", the per-thread priority
597 * penalty per quantum of CPU usage is ~2^k priority
598 * levels. "sched_decay_penalty" can cause more
599 * array entries to be filled with smaller "k" values
600 */
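/*
 * For example, with the default sched_decay_penalty of 1 this fills in
 * k = 1 for loads 2-3, k = 2 for loads 4-7, k = 3 for loads 8-15, and
 * so on, roughly doubling the penalty each time the load doubles.
 */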
601 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
602 for (j <<= 1; (i < j) && (i < NRQS); ++i)
603 *p++ = k;
604 }
605 }
606
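/*
 * Build the bitmap of priorities treated as preemption-urgent:
 * foreground timeshare priorities up to (but not including)
 * MINPRI_KERNEL, plus BASEPRI_PREEMPT through MAXPRI.
 */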
607 static void
608 preempt_pri_init(void)
609 {
610 bitmap_t *p = sched_preempt_pri;
611
612 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i)
613 bitmap_set(p, i);
614
615 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
616 bitmap_set(p, i);
617 }
618
619 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
620
621 /*
622 * Thread wait timer expiration.
623 */
624 void
625 thread_timer_expire(
626 void *p0,
627 __unused void *p1)
628 {
629 thread_t thread = p0;
630 spl_t s;
631
632 assert_thread_magic(thread);
633
634 s = splsched();
635 thread_lock(thread);
636 if (--thread->wait_timer_active == 0) {
637 if (thread->wait_timer_is_set) {
638 thread->wait_timer_is_set = FALSE;
639 clear_wait_internal(thread, THREAD_TIMED_OUT);
640 }
641 }
642 thread_unlock(thread);
643 splx(s);
644 }
645
646 /*
647 * thread_unblock:
648 *
649 * Unblock thread on wake up.
650 *
651 * Returns TRUE if the thread should now be placed on the runqueue.
652 *
653 * Thread must be locked.
654 *
655 * Called at splsched().
656 */
657 boolean_t
658 thread_unblock(
659 thread_t thread,
660 wait_result_t wresult)
661 {
662 boolean_t ready_for_runq = FALSE;
663 thread_t cthread = current_thread();
664 uint32_t new_run_count;
665
666 /*
667 * Set wait_result.
668 */
669 thread->wait_result = wresult;
670
671 /*
672 * Cancel pending wait timer.
673 */
674 if (thread->wait_timer_is_set) {
675 if (timer_call_cancel(&thread->wait_timer))
676 thread->wait_timer_active--;
677 thread->wait_timer_is_set = FALSE;
678 }
679
680 /*
681 * Update scheduling state: not waiting,
682 * set running.
683 */
684 thread->state &= ~(TH_WAIT|TH_UNINT);
685
686 if (!(thread->state & TH_RUN)) {
687 thread->state |= TH_RUN;
688 thread->last_made_runnable_time = thread->last_basepri_change_time = mach_approximate_time();
689
690 ready_for_runq = TRUE;
691
692 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
693
694 /* Update the runnable thread count */
695 new_run_count = sched_run_incr(thread);
696 } else {
697 /*
698 * Either the thread is idling in place on another processor,
699 * or it hasn't finished context switching yet.
700 */
701 #if CONFIG_SCHED_IDLE_IN_PLACE
702 if (thread->state & TH_IDLE) {
703 processor_t processor = thread->last_processor;
704
705 if (processor != current_processor())
706 machine_signal_idle(processor);
707 }
708 #else
709 assert((thread->state & TH_IDLE) == 0);
710 #endif
711 /*
712 * The run count is only dropped after the context switch completes
713 * and the thread is still waiting, so we should not run_incr here
714 */
715 new_run_count = sched_run_buckets[TH_BUCKET_RUN];
716 }
717
718
719 /*
720 * Calculate deadline for real-time threads.
721 */
722 if (thread->sched_mode == TH_MODE_REALTIME) {
723 uint64_t ctime;
724
725 ctime = mach_absolute_time();
726 thread->realtime.deadline = thread->realtime.constraint + ctime;
727 }
728
729 /*
730 * Clear old quantum, fail-safe computation, etc.
731 */
732 thread->quantum_remaining = 0;
733 thread->computation_metered = 0;
734 thread->reason = AST_NONE;
735 thread->block_hint = kThreadWaitNone;
736
737 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
738 * We also account for "double hop" thread signaling via
739 * the thread callout infrastructure.
740 * DRK: consider removing the callout wakeup counters in the future;
741 * they're present for verification at the moment.
742 */
743 boolean_t aticontext, pidle;
744 ml_get_power_state(&aticontext, &pidle);
745
746 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
747 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
748
749 uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);
750
751 if (ttd) {
752 if (ttd <= timer_deadline_tracking_bin_1)
753 thread->thread_timer_wakeups_bin_1++;
754 else
755 if (ttd <= timer_deadline_tracking_bin_2)
756 thread->thread_timer_wakeups_bin_2++;
757 }
758
759 ledger_credit_thread(thread, thread->t_ledger,
760 task_ledgers.interrupt_wakeups, 1);
761 if (pidle) {
762 ledger_credit_thread(thread, thread->t_ledger,
763 task_ledgers.platform_idle_wakeups, 1);
764 }
765
766 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
767 /* TODO: what about an interrupt that does a wake taken on a callout thread? */
768 if (cthread->callout_woken_from_icontext) {
769 ledger_credit_thread(thread, thread->t_ledger,
770 task_ledgers.interrupt_wakeups, 1);
771 thread->thread_callout_interrupt_wakeups++;
772
773 if (cthread->callout_woken_from_platform_idle) {
774 ledger_credit_thread(thread, thread->t_ledger,
775 task_ledgers.platform_idle_wakeups, 1);
776 thread->thread_callout_platform_idle_wakeups++;
777 }
778
779 cthread->callout_woke_thread = TRUE;
780 }
781 }
782
783 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
784 thread->callout_woken_from_icontext = aticontext;
785 thread->callout_woken_from_platform_idle = pidle;
786 thread->callout_woke_thread = FALSE;
787 }
788
789 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
790 MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
791 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
792 sched_run_buckets[TH_BUCKET_RUN], 0);
793
794 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
795
796 return (ready_for_runq);
797 }
798
799 /*
800 * Routine: thread_go
801 * Purpose:
802 * Unblock and dispatch thread.
803 * Conditions:
804 * thread lock held, IPC locks may be held.
805 * thread must have been pulled from wait queue under same lock hold.
806 * thread must have been waiting
807 * Returns:
808 * KERN_SUCCESS - Thread was set running
809 *
810 * TODO: This should return void
811 */
812 kern_return_t
813 thread_go(
814 thread_t thread,
815 wait_result_t wresult)
816 {
817 assert_thread_magic(thread);
818
819 assert(thread->at_safe_point == FALSE);
820 assert(thread->wait_event == NO_EVENT64);
821 assert(thread->waitq == NULL);
822
823 assert(!(thread->state & (TH_TERMINATE|TH_TERMINATE2)));
824 assert(thread->state & TH_WAIT);
825
826
827 if (thread_unblock(thread, wresult)) {
828 #if SCHED_TRACE_THREAD_WAKEUPS
829 backtrace(&thread->thread_wakeup_bt[0],
830 (sizeof(thread->thread_wakeup_bt)/sizeof(uintptr_t)));
831 #endif
832 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
833 }
834
835 return (KERN_SUCCESS);
836 }
837
838 /*
839 * Routine: thread_mark_wait_locked
840 * Purpose:
841 * Mark a thread as waiting. If, given the circumstances,
842 * it doesn't want to wait (i.e. already aborted), then
843 * indicate that in the return value.
844 * Conditions:
845 * at splsched() and thread is locked.
846 */
847 __private_extern__
848 wait_result_t
849 thread_mark_wait_locked(
850 thread_t thread,
851 wait_interrupt_t interruptible)
852 {
853 boolean_t at_safe_point;
854
855 assert(!(thread->state & (TH_WAIT|TH_IDLE|TH_UNINT|TH_TERMINATE2)));
856
857 /*
858 * The thread may have certain types of interrupts/aborts masked
859 * off. Even if the wait location says these types of interrupts
860 * are OK, we have to honor mask settings (outer-scoped code may
861 * not be able to handle aborts at the moment).
862 */
863 if (interruptible > (thread->options & TH_OPT_INTMASK))
864 interruptible = thread->options & TH_OPT_INTMASK;
865
866 at_safe_point = (interruptible == THREAD_ABORTSAFE);
867
868 if ( interruptible == THREAD_UNINT ||
869 !(thread->sched_flags & TH_SFLAG_ABORT) ||
870 (!at_safe_point &&
871 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
872
873 if ( !(thread->state & TH_TERMINATE))
874 DTRACE_SCHED(sleep);
875
876 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
877 thread->at_safe_point = at_safe_point;
878
879 /* TODO: pass this through assert_wait instead, have
880 * assert_wait just take a struct as an argument */
881 assert(!thread->block_hint);
882 thread->block_hint = thread->pending_block_hint;
883 thread->pending_block_hint = kThreadWaitNone;
884
885 return (thread->wait_result = THREAD_WAITING);
886 }
887 else
888 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
889 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
890 thread->pending_block_hint = kThreadWaitNone;
891
892 return (thread->wait_result = THREAD_INTERRUPTED);
893 }
894
895 /*
896 * Routine: thread_interrupt_level
897 * Purpose:
898 * Set the maximum interruptible state for the
899 * current thread. The effective value of any
900 * interruptible flag passed into assert_wait
901 * will never exceed this.
902 *
903 * Useful for code that must not be interrupted,
904 * but which calls code that doesn't know that.
905 * Returns:
906 * The old interrupt level for the thread.
907 */
908 __private_extern__
909 wait_interrupt_t
910 thread_interrupt_level(
911 wait_interrupt_t new_level)
912 {
913 thread_t thread = current_thread();
914 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
915
916 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
917
918 return result;
919 }
920
921 /*
922 * assert_wait:
923 *
924 * Assert that the current thread is about to go to
925 * sleep until the specified event occurs.
926 */
927 wait_result_t
928 assert_wait(
929 event_t event,
930 wait_interrupt_t interruptible)
931 {
932 if (__improbable(event == NO_EVENT))
933 panic("%s() called with NO_EVENT", __func__);
934
935 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
936 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
937 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
938
939 struct waitq *waitq;
940 waitq = global_eventq(event);
941 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
942 }
943
944 /*
945 * assert_wait_queue:
946 *
947 * Return the global waitq for the specified event
948 */
949 struct waitq *
950 assert_wait_queue(
951 event_t event)
952 {
953 return global_eventq(event);
954 }
955
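/*
 *	assert_wait_timeout:
 *
 *	Assert that the current thread is about to go to sleep until the
 *	specified event occurs or the timeout (interval scaled by
 *	scale_factor) expires.
 */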
956 wait_result_t
957 assert_wait_timeout(
958 event_t event,
959 wait_interrupt_t interruptible,
960 uint32_t interval,
961 uint32_t scale_factor)
962 {
963 thread_t thread = current_thread();
964 wait_result_t wresult;
965 uint64_t deadline;
966 spl_t s;
967
968 if (__improbable(event == NO_EVENT))
969 panic("%s() called with NO_EVENT", __func__);
970
971 struct waitq *waitq;
972 waitq = global_eventq(event);
973
974 s = splsched();
975 waitq_lock(waitq);
976
977 clock_interval_to_deadline(interval, scale_factor, &deadline);
978
979 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
980 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
981 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
982
983 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
984 interruptible,
985 TIMEOUT_URGENCY_SYS_NORMAL,
986 deadline, TIMEOUT_NO_LEEWAY,
987 thread);
988
989 waitq_unlock(waitq);
990 splx(s);
991 return wresult;
992 }
993
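/*
 *	assert_wait_timeout_with_leeway:
 *
 *	As assert_wait_timeout, but the caller also supplies a timeout
 *	urgency and a leeway interval that may be used to coalesce the
 *	wakeup with other timers.
 */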
994 wait_result_t
995 assert_wait_timeout_with_leeway(
996 event_t event,
997 wait_interrupt_t interruptible,
998 wait_timeout_urgency_t urgency,
999 uint32_t interval,
1000 uint32_t leeway,
1001 uint32_t scale_factor)
1002 {
1003 thread_t thread = current_thread();
1004 wait_result_t wresult;
1005 uint64_t deadline;
1006 uint64_t abstime;
1007 uint64_t slop;
1008 uint64_t now;
1009 spl_t s;
1010
1011 if (__improbable(event == NO_EVENT))
1012 panic("%s() called with NO_EVENT", __func__);
1013
1014 now = mach_absolute_time();
1015 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1016 deadline = now + abstime;
1017
1018 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1019
1020 struct waitq *waitq;
1021 waitq = global_eventq(event);
1022
1023 s = splsched();
1024 waitq_lock(waitq);
1025
1026 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1027 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
1028 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1029
1030 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1031 interruptible,
1032 urgency, deadline, slop,
1033 thread);
1034
1035 waitq_unlock(waitq);
1036 splx(s);
1037 return wresult;
1038 }
1039
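/*
 *	assert_wait_deadline:
 *
 *	As assert_wait, but with an absolute-time deadline supplied
 *	directly by the caller.
 */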
1040 wait_result_t
1041 assert_wait_deadline(
1042 event_t event,
1043 wait_interrupt_t interruptible,
1044 uint64_t deadline)
1045 {
1046 thread_t thread = current_thread();
1047 wait_result_t wresult;
1048 spl_t s;
1049
1050 if (__improbable(event == NO_EVENT))
1051 panic("%s() called with NO_EVENT", __func__);
1052
1053 struct waitq *waitq;
1054 waitq = global_eventq(event);
1055
1056 s = splsched();
1057 waitq_lock(waitq);
1058
1059 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1060 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
1061 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1062
1063 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1064 interruptible,
1065 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1066 TIMEOUT_NO_LEEWAY, thread);
1067 waitq_unlock(waitq);
1068 splx(s);
1069 return wresult;
1070 }
1071
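/*
 *	assert_wait_deadline_with_leeway:
 *
 *	As assert_wait_deadline, but the caller also supplies a timeout
 *	urgency and a leeway value.
 */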
1072 wait_result_t
1073 assert_wait_deadline_with_leeway(
1074 event_t event,
1075 wait_interrupt_t interruptible,
1076 wait_timeout_urgency_t urgency,
1077 uint64_t deadline,
1078 uint64_t leeway)
1079 {
1080 thread_t thread = current_thread();
1081 wait_result_t wresult;
1082 spl_t s;
1083
1084 if (__improbable(event == NO_EVENT))
1085 panic("%s() called with NO_EVENT", __func__);
1086
1087 struct waitq *waitq;
1088 waitq = global_eventq(event);
1089
1090 s = splsched();
1091 waitq_lock(waitq);
1092
1093 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1094 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
1095 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1096
1097 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1098 interruptible,
1099 urgency, deadline, leeway,
1100 thread);
1101 waitq_unlock(waitq);
1102 splx(s);
1103 return wresult;
1104 }
1105
1106 /*
1107 * thread_isoncpu:
1108 *
1109 * Return TRUE if a thread is running on a processor such that an AST
1110 * is needed to pull it out of userspace execution, or if executing in
1111 * the kernel, bring to a context switch boundary that would cause
1112 * thread state to be serialized in the thread PCB.
1113 *
1114 * Thread locked, returns the same way. While locked, fields
1115 * like "state" cannot change. "runq" can change only from set to unset.
1116 */
1117 static inline boolean_t
1118 thread_isoncpu(thread_t thread)
1119 {
1120 /* Not running or runnable */
1121 if (!(thread->state & TH_RUN))
1122 return (FALSE);
1123
1124 /* Waiting on a runqueue, not currently running */
1125 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1126 if (thread->runq != PROCESSOR_NULL)
1127 return (FALSE);
1128
1129 /*
1130 * Thread does not have a stack yet
1131 * It could be on the stack alloc queue or preparing to be invoked
1132 */
1133 if (!thread->kernel_stack)
1134 return (FALSE);
1135
1136 /*
1137 * Thread must be running on a processor, or
1138 * about to run, or just did run. In all these
1139 * cases, an AST to the processor is needed
1140 * to guarantee that the thread is kicked out
1141 * of userspace and the processor has
1142 * context switched (and saved register state).
1143 */
1144 return (TRUE);
1145 }
1146
1147 /*
1148 * thread_stop:
1149 *
1150 * Force a preemption point for a thread and wait
1151 * for it to stop running on a CPU. If a stronger
1152 * guarantee is requested, wait until no longer
1153 * runnable. Arbitrates access among
1154 * multiple stop requests. (released by unstop)
1155 *
1156 * The thread must enter a wait state and stop via a
1157 * separate means.
1158 *
1159 * Returns FALSE if interrupted.
1160 */
1161 boolean_t
1162 thread_stop(
1163 thread_t thread,
1164 boolean_t until_not_runnable)
1165 {
1166 wait_result_t wresult;
1167 spl_t s = splsched();
1168 boolean_t oncpu;
1169
1170 wake_lock(thread);
1171 thread_lock(thread);
1172
1173 while (thread->state & TH_SUSP) {
1174 thread->wake_active = TRUE;
1175 thread_unlock(thread);
1176
1177 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1178 wake_unlock(thread);
1179 splx(s);
1180
1181 if (wresult == THREAD_WAITING)
1182 wresult = thread_block(THREAD_CONTINUE_NULL);
1183
1184 if (wresult != THREAD_AWAKENED)
1185 return (FALSE);
1186
1187 s = splsched();
1188 wake_lock(thread);
1189 thread_lock(thread);
1190 }
1191
1192 thread->state |= TH_SUSP;
1193
1194 while ((oncpu = thread_isoncpu(thread)) ||
1195 (until_not_runnable && (thread->state & TH_RUN))) {
1196 processor_t processor;
1197
1198 if (oncpu) {
1199 assert(thread->state & TH_RUN);
1200 processor = thread->chosen_processor;
1201 cause_ast_check(processor);
1202 }
1203
1204 thread->wake_active = TRUE;
1205 thread_unlock(thread);
1206
1207 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1208 wake_unlock(thread);
1209 splx(s);
1210
1211 if (wresult == THREAD_WAITING)
1212 wresult = thread_block(THREAD_CONTINUE_NULL);
1213
1214 if (wresult != THREAD_AWAKENED) {
1215 thread_unstop(thread);
1216 return (FALSE);
1217 }
1218
1219 s = splsched();
1220 wake_lock(thread);
1221 thread_lock(thread);
1222 }
1223
1224 thread_unlock(thread);
1225 wake_unlock(thread);
1226 splx(s);
1227
1228 /*
1229 * We return with the thread unlocked. To prevent it from
1230 * transitioning to a runnable state (or from TH_RUN to
1231 * being on the CPU), the caller must ensure the thread
1232 * is stopped via an external means (such as an AST)
1233 */
1234
1235 return (TRUE);
1236 }
1237
1238 /*
1239 * thread_unstop:
1240 *
1241 * Release a previous stop request and set
1242 * the thread running if appropriate.
1243 *
1244 * Use only after a successful stop operation.
1245 */
1246 void
1247 thread_unstop(
1248 thread_t thread)
1249 {
1250 spl_t s = splsched();
1251
1252 wake_lock(thread);
1253 thread_lock(thread);
1254
1255 assert((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) != TH_SUSP);
1256
1257 if (thread->state & TH_SUSP) {
1258 thread->state &= ~TH_SUSP;
1259
1260 if (thread->wake_active) {
1261 thread->wake_active = FALSE;
1262 thread_unlock(thread);
1263
1264 thread_wakeup(&thread->wake_active);
1265 wake_unlock(thread);
1266 splx(s);
1267
1268 return;
1269 }
1270 }
1271
1272 thread_unlock(thread);
1273 wake_unlock(thread);
1274 splx(s);
1275 }
1276
1277 /*
1278 * thread_wait:
1279 *
1280 * Wait for a thread to stop running. (non-interruptible)
1281 *
1282 */
1283 void
1284 thread_wait(
1285 thread_t thread,
1286 boolean_t until_not_runnable)
1287 {
1288 wait_result_t wresult;
1289 boolean_t oncpu;
1290 processor_t processor;
1291 spl_t s = splsched();
1292
1293 wake_lock(thread);
1294 thread_lock(thread);
1295
1296 /*
1297 * Wait until not running on a CPU. If stronger requirement
1298 * desired, wait until not runnable. Assumption: if thread is
1299 * on CPU, then TH_RUN is set, so we're not waiting in any case
1300 * where the original, pure "TH_RUN" check would have let us
1301 * finish.
1302 */
1303 while ((oncpu = thread_isoncpu(thread)) ||
1304 (until_not_runnable && (thread->state & TH_RUN))) {
1305
1306 if (oncpu) {
1307 assert(thread->state & TH_RUN);
1308 processor = thread->chosen_processor;
1309 cause_ast_check(processor);
1310 }
1311
1312 thread->wake_active = TRUE;
1313 thread_unlock(thread);
1314
1315 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1316 wake_unlock(thread);
1317 splx(s);
1318
1319 if (wresult == THREAD_WAITING)
1320 thread_block(THREAD_CONTINUE_NULL);
1321
1322 s = splsched();
1323 wake_lock(thread);
1324 thread_lock(thread);
1325 }
1326
1327 thread_unlock(thread);
1328 wake_unlock(thread);
1329 splx(s);
1330 }
1331
1332 /*
1333 * Routine: clear_wait_internal
1334 *
1335 * Clear the wait condition for the specified thread.
1336 * Start the thread executing if that is appropriate.
1337 * Arguments:
1338 * thread thread to awaken
1339 * result Wakeup result the thread should see
1340 * Conditions:
1341 * At splsched
1342 * the thread is locked.
1343 * Returns:
1344 * KERN_SUCCESS thread was rousted out a wait
1345 * KERN_FAILURE thread was waiting but could not be rousted
1346 * KERN_NOT_WAITING thread was not waiting
1347 */
1348 __private_extern__ kern_return_t
1349 clear_wait_internal(
1350 thread_t thread,
1351 wait_result_t wresult)
1352 {
1353 uint32_t i = LockTimeOutUsec;
1354 struct waitq *waitq = thread->waitq;
1355
1356 do {
1357 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1358 return (KERN_FAILURE);
1359
1360 if (waitq != NULL) {
1361 if (!waitq_pull_thread_locked(waitq, thread)) {
1362 thread_unlock(thread);
1363 delay(1);
1364 if (i > 0 && !machine_timeout_suspended())
1365 i--;
1366 thread_lock(thread);
1367 if (waitq != thread->waitq)
1368 return KERN_NOT_WAITING;
1369 continue;
1370 }
1371 }
1372
1373 /* TODO: Can we instead assert TH_TERMINATE is not set? */
1374 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT)
1375 return (thread_go(thread, wresult));
1376 else
1377 return (KERN_NOT_WAITING);
1378 } while (i > 0);
1379
1380 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1381 thread, waitq, cpu_number());
1382
1383 return (KERN_FAILURE);
1384 }
1385
1386
1387 /*
1388 * clear_wait:
1389 *
1390 * Clear the wait condition for the specified thread. Start the thread
1391 * executing if that is appropriate.
1392 *
1393 * parameters:
1394 * thread thread to awaken
1395 * result Wakeup result the thread should see
1396 */
1397 kern_return_t
1398 clear_wait(
1399 thread_t thread,
1400 wait_result_t result)
1401 {
1402 kern_return_t ret;
1403 spl_t s;
1404
1405 s = splsched();
1406 thread_lock(thread);
1407 ret = clear_wait_internal(thread, result);
1408 thread_unlock(thread);
1409 splx(s);
1410 return ret;
1411 }
1412
1413
1414 /*
1415 * thread_wakeup_prim:
1416 *
1417 * Common routine for thread_wakeup, thread_wakeup_with_result,
1418 * and thread_wakeup_one.
1419 *
1420 */
1421 kern_return_t
1422 thread_wakeup_prim(
1423 event_t event,
1424 boolean_t one_thread,
1425 wait_result_t result)
1426 {
1427 if (__improbable(event == NO_EVENT))
1428 panic("%s() called with NO_EVENT", __func__);
1429
1430 struct waitq *wq = global_eventq(event);
1431
1432 if (one_thread)
1433 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1434 else
1435 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1436 }
1437
1438 /*
1439 * Wakeup a specified thread if and only if it's waiting for this event
1440 */
1441 kern_return_t
1442 thread_wakeup_thread(
1443 event_t event,
1444 thread_t thread)
1445 {
1446 if (__improbable(event == NO_EVENT))
1447 panic("%s() called with NO_EVENT", __func__);
1448
1449 if (__improbable(thread == THREAD_NULL))
1450 panic("%s() called with THREAD_NULL", __func__);
1451
1452 struct waitq *wq = global_eventq(event);
1453
1454 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1455 }
1456
1457 /*
1458 * Wakeup a thread waiting on an event and promote it to a priority.
1459 *
1460 * Requires woken thread to un-promote itself when done.
1461 */
1462 kern_return_t
1463 thread_wakeup_one_with_pri(
1464 event_t event,
1465 int priority)
1466 {
1467 if (__improbable(event == NO_EVENT))
1468 panic("%s() called with NO_EVENT", __func__);
1469
1470 struct waitq *wq = global_eventq(event);
1471
1472 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1473 }
1474
1475 /*
1476 * Wakeup a thread waiting on an event,
1477 * promote it to a priority,
1478 * and return a reference to the woken thread.
1479 *
1480 * Requires woken thread to un-promote itself when done.
1481 */
1482 thread_t
1483 thread_wakeup_identify(event_t event,
1484 int priority)
1485 {
1486 if (__improbable(event == NO_EVENT))
1487 panic("%s() called with NO_EVENT", __func__);
1488
1489 struct waitq *wq = global_eventq(event);
1490
1491 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1492 }
1493
1494 /*
1495 * thread_bind:
1496 *
1497 * Force the current thread to execute on the specified processor.
1498 * Takes effect after the next thread_block().
1499 *
1500 * Returns the previous binding. PROCESSOR_NULL means
1501 * not bound.
1502 *
1503 * XXX - DO NOT export this to users - XXX
1504 */
1505 processor_t
1506 thread_bind(
1507 processor_t processor)
1508 {
1509 thread_t self = current_thread();
1510 processor_t prev;
1511 spl_t s;
1512
1513 s = splsched();
1514 thread_lock(self);
1515
1516 prev = thread_bind_internal(self, processor);
1517
1518 thread_unlock(self);
1519 splx(s);
1520
1521 return (prev);
1522 }
1523
1524 /*
1525 * thread_bind_internal:
1526 *
1527 * If the specified thread is not the current thread, and it is currently
1528 * running on another CPU, a remote AST must be sent to that CPU to cause
1529 * the thread to migrate to its bound processor. Otherwise, the migration
1530 * will occur at the next quantum expiration or blocking point.
1531 *
1532 * When the thread is the current thread, an explicit thread_block() should
1533 * be used to force the current processor to context switch away and
1534 * let the thread migrate to the bound processor.
1535 *
1536 * Thread must be locked, and at splsched.
1537 */
1538
1539 static processor_t
1540 thread_bind_internal(
1541 thread_t thread,
1542 processor_t processor)
1543 {
1544 processor_t prev;
1545
1546 /* <rdar://problem/15102234> */
1547 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1548 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1549 assert(thread->runq == PROCESSOR_NULL);
1550
1551 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1552
1553 prev = thread->bound_processor;
1554 thread->bound_processor = processor;
1555
1556 return (prev);
1557 }
1558
1559 /*
1560 * thread_vm_bind_group_add:
1561 *
1562 * The "VM bind group" is a special mechanism to mark a collection
1563 * of threads from the VM subsystem that, in general, should be scheduled
1564 * with only one CPU of parallelism. To accomplish this, we initially
1565 * bind all the threads to the master processor, which has the effect
1566 * that only one of the threads in the group can execute at once, including
1567 * preempting threads in the group that are of lower priority. Future
1568 * mechanisms may use more dynamic mechanisms to prevent the collection
1569 * of VM threads from using more CPU time than desired.
1570 *
1571 * The current implementation can result in priority inversions where
1572 * compute-bound priority 95 or realtime threads that happen to have
1573 * landed on the master processor prevent the VM threads from running.
1574 * When this situation is detected, we unbind the threads for one
1575 * scheduler tick to allow the scheduler to run the threads on
1576 * additional CPUs, before restoring the binding (assuming high latency
1577 * is no longer a problem).
1578 */
1579
1580 /*
1581 * The current max is provisioned for:
1582 * vm_compressor_swap_trigger_thread (92)
1583 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1584 * vm_pageout_continue (92)
1585 * memorystatus_thread (95)
1586 */
1587 #define MAX_VM_BIND_GROUP_COUNT (5)
1588 decl_simple_lock_data(static,sched_vm_group_list_lock);
1589 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1590 static int sched_vm_group_thread_count;
1591 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1592
1593 void
1594 thread_vm_bind_group_add(void)
1595 {
1596 thread_t self = current_thread();
1597
1598 thread_reference_internal(self);
1599 self->options |= TH_OPT_SCHED_VM_GROUP;
1600
1601 simple_lock(&sched_vm_group_list_lock);
1602 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1603 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1604 simple_unlock(&sched_vm_group_list_lock);
1605
1606 thread_bind(master_processor);
1607
1608 /* Switch to bound processor if not already there */
1609 thread_block(THREAD_CONTINUE_NULL);
1610 }
1611
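/*
 * Check whether the VM bind group is experiencing high dispatch latency
 * while bound to the master processor, and temporarily unbind the group
 * (or restore the binding once latency subsides) in response.
 */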
1612 static void
1613 sched_vm_group_maintenance(void)
1614 {
1615 uint64_t ctime = mach_absolute_time();
1616 uint64_t longtime = ctime - sched_tick_interval;
1617 int i;
1618 spl_t s;
1619 boolean_t high_latency_observed = FALSE;
1620 boolean_t runnable_and_not_on_runq_observed = FALSE;
1621 boolean_t bind_target_changed = FALSE;
1622 processor_t bind_target = PROCESSOR_NULL;
1623
1624 /* Make sure nobody attempts to add new threads while we are enumerating them */
1625 simple_lock(&sched_vm_group_list_lock);
1626
1627 s = splsched();
1628
1629 for (i=0; i < sched_vm_group_thread_count; i++) {
1630 thread_t thread = sched_vm_group_thread_list[i];
1631 assert(thread != THREAD_NULL);
1632 thread_lock(thread);
1633 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
1634 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1635 high_latency_observed = TRUE;
1636 } else if (thread->runq == PROCESSOR_NULL) {
1637 /* There are some cases where a thread in transition also falls into this case */
1638 runnable_and_not_on_runq_observed = TRUE;
1639 }
1640 }
1641 thread_unlock(thread);
1642
1643 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1644 /* All the things we are looking for are true, stop looking */
1645 break;
1646 }
1647 }
1648
1649 splx(s);
1650
1651 if (sched_vm_group_temporarily_unbound) {
1652 /* If we turned off binding, make sure everything is OK before rebinding */
1653 if (!high_latency_observed) {
1654 /* rebind */
1655 bind_target_changed = TRUE;
1656 bind_target = master_processor;
1657 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1658 }
1659 } else {
1660 /*
1661 * Check if we're in a bad state, which is defined by high
1662 * latency with no core currently executing a thread. If a
1663 * single thread is making progress on a CPU, that means the
1664 * binding concept to reduce parallelism is working as
1665 * designed.
1666 */
1667 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1668 /* unbind */
1669 bind_target_changed = TRUE;
1670 bind_target = PROCESSOR_NULL;
1671 sched_vm_group_temporarily_unbound = TRUE;
1672 }
1673 }
1674
1675 if (bind_target_changed) {
1676 s = splsched();
1677 for (i=0; i < sched_vm_group_thread_count; i++) {
1678 thread_t thread = sched_vm_group_thread_list[i];
1679 boolean_t removed;
1680 assert(thread != THREAD_NULL);
1681
1682 thread_lock(thread);
1683 removed = thread_run_queue_remove(thread);
1684 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1685 thread_bind_internal(thread, bind_target);
1686 } else {
1687 /*
1688 * Thread was in the middle of being context-switched-to,
1689 * or was in the process of blocking. To avoid switching the bind
1690 * state out mid-flight, defer the change if possible.
1691 */
1692 if (bind_target == PROCESSOR_NULL) {
1693 thread_bind_internal(thread, bind_target);
1694 } else {
1695 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1696 }
1697 }
1698
1699 if (removed) {
1700 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1701 }
1702 thread_unlock(thread);
1703 }
1704 splx(s);
1705 }
1706
1707 simple_unlock(&sched_vm_group_list_lock);
1708 }
1709
1710 /* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
1711 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1712 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1713 * IPI thrash if this core does not remain idle following the load balancing ASTs;
1714 * idle "thrash", when IPI issue is followed by idle entry/core power down
1715 * followed by a wakeup shortly thereafter.
1716 */
1717
1718 #if (DEVELOPMENT || DEBUG)
1719 int sched_smt_balance = 1;
1720 #endif
1721
1722 #if __SMP__
1723 /* Invoked with pset locked, returns with pset unlocked */
1724 void
1725 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) {
1726 processor_t ast_processor = NULL;
1727
1728 #if (DEVELOPMENT || DEBUG)
1729 if (__improbable(sched_smt_balance == 0))
1730 goto smt_balance_exit;
1731 #endif
1732
1733 assert(cprocessor == current_processor());
1734 if (cprocessor->is_SMT == FALSE)
1735 goto smt_balance_exit;
1736
1737 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1738
1739 /* Determine if both this processor and its sibling are idle,
1740 * indicating an SMT rebalancing opportunity.
1741 */
1742 if (sib_processor->state != PROCESSOR_IDLE)
1743 goto smt_balance_exit;
1744
1745 processor_t sprocessor;
1746
1747 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
1748 qe_foreach_element(sprocessor, &cpset->active_queue, processor_queue) {
1749 if ((sprocessor->state == PROCESSOR_RUNNING) &&
1750 (sprocessor->processor_primary != sprocessor) &&
1751 (sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
1752 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
1753
1754 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1755 if (ipi_type != SCHED_IPI_NONE) {
1756 assert(sprocessor != cprocessor);
1757 ast_processor = sprocessor;
1758 break;
1759 }
1760 }
1761 }
1762
1763 smt_balance_exit:
1764 pset_unlock(cpset);
1765
1766 if (ast_processor) {
1767 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
1768 sched_ipi_perform(ast_processor, ipi_type);
1769 }
1770 }
1771 #else
1772 /* Invoked with pset locked, returns with pset unlocked */
1773 void
1774 sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
1775 {
1776 pset_unlock(cpset);
1777 }
1778 #endif /* __SMP__ */
1779
1780 /*
1781 * thread_select:
1782 *
1783 * Select a new thread for the current processor to execute.
1784 *
1785 * May select the current thread, which must be locked.
1786 */
1787 static thread_t
1788 thread_select(thread_t thread,
1789 processor_t processor,
1790 ast_t *reason)
1791 {
1792 processor_set_t pset = processor->processor_set;
1793 thread_t new_thread = THREAD_NULL;
1794
1795 assert(processor == current_processor());
1796 assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
1797
1798 do {
1799 /*
1800 * Update the priority.
1801 */
1802 if (SCHED(can_update_priority)(thread))
1803 SCHED(update_priority)(thread);
1804
1805 processor_state_update_from_thread(processor, thread);
1806
1807 pset_lock(pset);
1808
1809 assert(processor->state != PROCESSOR_OFF_LINE);
1810
1811 if (!processor->is_recommended) {
1812 /*
1813 * The performance controller has provided a hint to not dispatch more threads,
1814 * unless they are bound to us (and thus we are the only option).
1815 */
1816 if (!SCHED(processor_bound_count)(processor)) {
1817 goto idle;
1818 }
1819 } else if (processor->processor_primary != processor) {
1820 /*
1821 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1822 * we should look for work only under the same conditions that choose_processor()
1823 * would have assigned work, which is when all primary processors have been assigned work.
1824 *
1825 * An exception is that bound threads are dispatched to a processor without going through
1826 * choose_processor(), so in those cases we should continue trying to dequeue work.
1827 */
1828 if (!SCHED(processor_bound_count)(processor) &&
1829 !queue_empty(&pset->idle_queue) && !rt_runq_count(pset)) {
1830 goto idle;
1831 }
1832 }
1833
1834 /*
1835 * Test to see if the current thread should continue
1836 * to run on this processor. Must not be attempting to wait, and not
1837 * bound to a different processor, nor be in the wrong
1838 * processor set, nor be forced to context switch by TH_SUSP.
1839 *
1840 * Note that there are never any RT threads in the regular runqueue.
1841 *
1842 * This code is insanely tricky.
1843 */
1844
1845 /* i.e. not waiting, not TH_SUSP'ed */
1846 boolean_t still_running = ((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN);
1847
1848 /*
1849 * Threads running on secondary SMT processors are forced to context switch. Don't rebalance realtime threads.
1850 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
1851 */
1852 boolean_t needs_smt_rebalance = (thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor);
1853
1854 boolean_t affinity_mismatch = (thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset);
1855
1856 boolean_t bound_elsewhere = (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor);
1857
1858 boolean_t avoid_processor = (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread));
1859
1860 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
1861 /*
1862 * This thread is eligible to keep running on this processor.
1863 *
1864 * RT threads with un-expired quantum stay on processor,
1865 * unless there's a valid RT thread with an earlier deadline.
1866 */
1867 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
1868 if (rt_runq_count(pset) > 0) {
1869
1870 rt_lock_lock(pset);
1871
1872 if (rt_runq_count(pset) > 0) {
1873
1874 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1875
1876 if (next_rt->realtime.deadline < processor->deadline &&
1877 (next_rt->bound_processor == PROCESSOR_NULL ||
1878 next_rt->bound_processor == processor)) {
1879 /* The next RT thread is better, so pick it off the runqueue. */
1880 goto pick_new_rt_thread;
1881 }
1882 }
1883
1884 rt_lock_unlock(pset);
1885 }
1886
1887 /* This is still the best RT thread to run. */
1888 processor->deadline = thread->realtime.deadline;
1889
1890 sched_update_pset_load_average(pset);
1891 pset_unlock(pset);
1892
1893 return (thread);
1894 }
1895
1896 if ((rt_runq_count(pset) == 0) &&
1897 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
1898 /* This thread is still the highest priority runnable (non-idle) thread */
1899 processor->deadline = UINT64_MAX;
1900
1901 sched_update_pset_load_average(pset);
1902 pset_unlock(pset);
1903
1904 return (thread);
1905 }
1906 } else {
1907 /*
1908 * This processor must context switch.
1909 * If it's due to a rebalance, we should aggressively find this thread a new home.
1910 */
1911 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor)
1912 *reason |= AST_REBALANCE;
1913 }
1914
1915 /* OK, so we're not going to run the current thread. Look at the RT queue. */
1916 if (rt_runq_count(pset) > 0) {
1917
1918 rt_lock_lock(pset);
1919
1920 if (rt_runq_count(pset) > 0) {
1921 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1922
1923 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
1924 (next_rt->bound_processor == processor)))) {
1925 pick_new_rt_thread:
1926 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1927
1928 new_thread->runq = PROCESSOR_NULL;
1929 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
1930 rt_runq_count_decr(pset);
1931
1932 processor->deadline = new_thread->realtime.deadline;
1933
1934 rt_lock_unlock(pset);
1935 sched_update_pset_load_average(pset);
1936 pset_unlock(pset);
1937
1938 return (new_thread);
1939 }
1940 }
1941
1942 rt_lock_unlock(pset);
1943 }
1944
1945 processor->deadline = UINT64_MAX;
1946
1947 /* No RT threads, so let's look at the regular threads. */
1948 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
1949 sched_update_pset_load_average(pset);
1950 pset_unlock(pset);
1951 return (new_thread);
1952 }
1953
1954 #if __SMP__
1955 if (SCHED(steal_thread_enabled)) {
1956 /*
1957 * No runnable threads, attempt to steal
1958 * from other processors. Returns with pset lock dropped.
1959 */
1960
1961 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
1962 return (new_thread);
1963 }
1964
1965 /*
1966 * If other threads have appeared, shortcut
1967 * around again.
1968 */
1969 if (!SCHED(processor_queue_empty)(processor) || rt_runq_count(pset) > 0)
1970 continue;
1971
1972 pset_lock(pset);
1973 }
1974 #endif
1975
1976 idle:
1977 /*
1978 * Nothing is runnable, so set this processor idle if it
1979 * was running.
1980 */
1981 if (processor->state == PROCESSOR_RUNNING) {
1982 processor->state = PROCESSOR_IDLE;
1983
1984 if (!processor->is_recommended) {
1985 re_queue_head(&pset->unused_queue, &processor->processor_queue);
1986 } else if (processor->processor_primary == processor) {
1987 re_queue_head(&pset->idle_queue, &processor->processor_queue);
1988 } else {
1989 re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
1990 }
1991
1992 pset->active_processor_count--;
1993 sched_update_pset_load_average(pset);
1994 }
1995
1996 #if __SMP__
1997 /* Invoked with pset locked, returns with pset unlocked */
1998 SCHED(processor_balance)(processor, pset);
1999 #else
2000 pset_unlock(pset);
2001 #endif
2002
2003 #if CONFIG_SCHED_IDLE_IN_PLACE
2004 /*
2005 * Choose idle thread if fast idle is not possible.
2006 */
2007 if (processor->processor_primary != processor)
2008 return (processor->idle_thread);
2009
2010 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
2011 return (processor->idle_thread);
2012
2013 /*
2014 * Perform idling activities directly without a
2015 * context switch. Return dispatched thread,
2016 * else check again for a runnable thread.
2017 */
2018 new_thread = thread_select_idle(thread, processor);
2019
2020 #else /* !CONFIG_SCHED_IDLE_IN_PLACE */
2021
2022 /*
2023 * Do a full context switch to idle so that the current
2024 * thread can start running on another processor without
2025 * waiting for the fast-idled processor to wake up.
2026 */
2027 new_thread = processor->idle_thread;
2028
2029 #endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
2030
2031 } while (new_thread == THREAD_NULL);
2032
2033 return (new_thread);
2034 }
2035
2036 #if CONFIG_SCHED_IDLE_IN_PLACE
2037 /*
2038 * thread_select_idle:
2039 *
2040 * Idle the processor using the current thread context.
2041 *
2042 * Called with thread locked, then dropped and relocked.
2043 */
2044 static thread_t
2045 thread_select_idle(
2046 thread_t thread,
2047 processor_t processor)
2048 {
2049 thread_t new_thread;
2050 uint64_t arg1, arg2;
2051 int urgency;
2052
2053 sched_run_decr(thread);
2054
2055 thread->state |= TH_IDLE;
2056 processor_state_update_idle(processor);
2057
2058 /* Reload precise timing global policy to thread-local policy */
2059 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2060
2061 thread_unlock(thread);
2062
2063 /*
2064 * Switch execution timing to processor idle thread.
2065 */
2066 processor->last_dispatch = mach_absolute_time();
2067
2068 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2069 commpage_update_mach_approximate_time(processor->last_dispatch);
2070 #endif
2071
2072 thread->last_run_time = processor->last_dispatch;
2073 thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
2074 PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
2075
2076
2077 /*
2078 * Cancel the quantum timer while idling.
2079 */
2080 timer_call_quantum_timer_cancel(&processor->quantum_timer);
2081 processor->first_timeslice = FALSE;
2082
2083 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2084
2085 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);
2086
2087 /*
2088 * Enable interrupts and perform idling activities. No
2089 * preemption due to TH_IDLE being set.
2090 */
2091 spllo(); new_thread = processor_idle(thread, processor);
2092
2093 /*
2094 * Return at splsched.
2095 */
2096 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
2097
2098 thread_lock(thread);
2099
2100 /*
2101 * If awakened, switch to thread timer and start a new quantum.
2102 * Otherwise skip; we will context switch to another thread or return here.
2103 */
2104 if (!(thread->state & TH_WAIT)) {
2105 processor->last_dispatch = mach_absolute_time();
2106 thread_timer_event(processor->last_dispatch, &thread->system_timer);
2107 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2108 thread_quantum_init(thread);
2109 processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
2110 timer_call_quantum_timer_enter(&processor->quantum_timer,
2111 thread, processor->quantum_end, processor->last_dispatch);
2112 processor->first_timeslice = TRUE;
2113
2114 thread->computation_epoch = processor->last_dispatch;
2115 }
2116
2117 thread->state &= ~TH_IDLE;
2118
2119 urgency = thread_get_urgency(thread, &arg1, &arg2);
2120
2121 thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);
2122
2123 sched_run_incr(thread);
2124
2125 return (new_thread);
2126 }
2127 #endif /* CONFIG_SCHED_IDLE_IN_PLACE */
2128
2129 /*
2130 * thread_invoke
2131 *
2132 * Called at splsched with neither thread locked.
2133 *
2134 * Perform a context switch and start executing the new thread.
2135 *
2136 * Returns FALSE when the context switch didn't happen.
2137 * The reference to the new thread is still consumed.
2138 *
2139 * "self" is what is currently running on the processor,
2140 * "thread" is the new thread to context switch to
2141 * (which may be the same thread in some cases)
2142 */
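/*
 * Illustrative caller pattern (a non-normative sketch): because a FALSE
 * return means the switch did not happen even though the new-thread
 * reference was consumed, callers re-select and retry, roughly:
 *
 *	do {
 *		thread_lock(self);
 *		new_thread = thread_select(self, processor, &reason);
 *		thread_unlock(self);
 *	} while (!thread_invoke(self, new_thread, reason));
 *
 * (compare thread_block_reason() below).
 */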
2143 static boolean_t
2144 thread_invoke(
2145 thread_t self,
2146 thread_t thread,
2147 ast_t reason)
2148 {
2149 if (__improbable(get_preemption_level() != 0)) {
2150 int pl = get_preemption_level();
2151 panic("thread_invoke: preemption_level %d, possible cause: %s",
2152 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2153 "blocking while holding a spinlock, or within interrupt context"));
2154 }
2155
2156 thread_continue_t continuation = self->continuation;
2157 void *parameter = self->parameter;
2158 processor_t processor;
2159
2160 uint64_t ctime = mach_absolute_time();
2161
2162 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2163 commpage_update_mach_approximate_time(ctime);
2164 #endif
2165
2166 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2167 if ((thread->state & TH_IDLE) == 0)
2168 sched_timeshare_consider_maintenance(ctime);
2169 #endif
2170
2171 #if MONOTONIC
2172 mt_sched_update(self);
2173 #endif /* MONOTONIC */
2174
2175 assert_thread_magic(self);
2176 assert(self == current_thread());
2177 assert(self->runq == PROCESSOR_NULL);
2178 assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
2179
2180 thread_lock(thread);
2181
2182 assert_thread_magic(thread);
2183 assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
2184 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2185 assert(thread->runq == PROCESSOR_NULL);
2186
2187 /* Reload precise timing global policy to thread-local policy */
2188 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2189
2190 /* Update SFI class based on other factors */
2191 thread->sfi_class = sfi_thread_classify(thread);
2192
2193 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2194 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2195 /*
2196 * In case a base_pri update happened between the timestamp and
2197 * taking the thread lock
2198 */
2199 if (ctime <= thread->last_basepri_change_time)
2200 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2201
2202 /* Allow realtime threads to hang onto a stack. */
2203 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
2204 self->reserved_stack = self->kernel_stack;
2205
2206 /* Prepare for spin debugging */
2207 #if INTERRUPT_MASKED_DEBUG
2208 ml_spin_debug_clear(thread);
2209 #endif
2210
2211 if (continuation != NULL) {
2212 if (!thread->kernel_stack) {
2213 /*
2214 * If we are using a privileged stack,
2215 * check to see whether we can exchange it with
2216 * that of the other thread.
2217 */
2218 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
2219 goto need_stack;
2220
2221 /*
2222 * Context switch by performing a stack handoff.
2223 */
2224 continuation = thread->continuation;
2225 parameter = thread->parameter;
2226
2227 processor = current_processor();
2228 processor->active_thread = thread;
2229 processor_state_update_from_thread(processor, thread);
2230
2231 if (thread->last_processor != processor && thread->last_processor != NULL) {
2232 if (thread->last_processor->processor_set != processor->processor_set)
2233 thread->ps_switch++;
2234 thread->p_switch++;
2235 }
2236 thread->last_processor = processor;
2237 thread->c_switch++;
2238 ast_context(thread);
2239
2240 thread_unlock(thread);
2241
2242 self->reason = reason;
2243
2244 processor->last_dispatch = ctime;
2245 self->last_run_time = ctime;
2246 thread_timer_event(ctime, &thread->system_timer);
2247 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2248
2249 /*
2250 * Since non-precise user/kernel time doesn't update the state timer
2251 * during privilege transitions, synthesize an event now.
2252 */
2253 if (!thread->precise_user_kernel_time) {
2254 timer_switch(PROCESSOR_DATA(processor, current_state),
2255 ctime,
2256 PROCESSOR_DATA(processor, current_state));
2257 }
2258
2259 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2260 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
2261 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2262
2263 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2264 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
2265 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2266 }
2267
2268 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2269
2270 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2271
2272 TLOG(1, "thread_invoke: calling stack_handoff\n");
2273 stack_handoff(self, thread);
2274
2275 /* 'self' is now off core */
2276 assert(thread == current_thread());
2277
2278 DTRACE_SCHED(on__cpu);
2279
2280 #if KPERF
2281 kperf_on_cpu(thread, continuation, NULL);
2282 #endif /* KPERF */
2283
2284 #if KASAN
2285 kasan_unpoison_fakestack(self);
2286 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2287 #endif
2288
2289 thread_dispatch(self, thread);
2290
2291 thread->continuation = thread->parameter = NULL;
2292
2293 counter(c_thread_invoke_hits++);
2294
2295 (void) spllo();
2296
2297 assert(continuation);
2298 call_continuation(continuation, parameter, thread->wait_result);
2299 /*NOTREACHED*/
2300 }
2301 else if (thread == self) {
2302 /* same thread but with continuation */
2303 ast_context(self);
2304 counter(++c_thread_invoke_same);
2305
2306 thread_unlock(self);
2307
2308 #if KPERF
2309 kperf_on_cpu(thread, continuation, NULL);
2310 #endif /* KPERF */
2311
2312 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2313 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2314 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2315
2316 #if KASAN
2317 kasan_unpoison_fakestack(self);
2318 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2319 #endif
2320
2321 self->continuation = self->parameter = NULL;
2322
2323 (void) spllo();
2324
2325 call_continuation(continuation, parameter, self->wait_result);
2326 /*NOTREACHED*/
2327 }
2328 } else {
2329 /*
2330 * Check that the other thread has a stack
2331 */
2332 if (!thread->kernel_stack) {
2333 need_stack:
2334 if (!stack_alloc_try(thread)) {
2335 counter(c_thread_invoke_misses++);
2336 thread_unlock(thread);
2337 thread_stack_enqueue(thread);
2338 return (FALSE);
2339 }
2340 } else if (thread == self) {
2341 ast_context(self);
2342 counter(++c_thread_invoke_same);
2343 thread_unlock(self);
2344
2345 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2346 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2347 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2348
2349 return (TRUE);
2350 }
2351 }
2352
2353 /*
2354 * Context switch by full context save.
2355 */
2356 processor = current_processor();
2357 processor->active_thread = thread;
2358 processor_state_update_from_thread(processor, thread);
2359
2360 if (thread->last_processor != processor && thread->last_processor != NULL) {
2361 if (thread->last_processor->processor_set != processor->processor_set)
2362 thread->ps_switch++;
2363 thread->p_switch++;
2364 }
2365 thread->last_processor = processor;
2366 thread->c_switch++;
2367 ast_context(thread);
2368
2369 thread_unlock(thread);
2370
2371 counter(c_thread_invoke_csw++);
2372
2373 self->reason = reason;
2374
2375 processor->last_dispatch = ctime;
2376 self->last_run_time = ctime;
2377 thread_timer_event(ctime, &thread->system_timer);
2378 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2379
2380 /*
2381 * Since non-precise user/kernel time doesn't update the state timer
2382 * during privilege transitions, synthesize an event now.
2383 */
2384 if (!thread->precise_user_kernel_time) {
2385 timer_switch(PROCESSOR_DATA(processor, current_state),
2386 ctime,
2387 PROCESSOR_DATA(processor, current_state));
2388 }
2389
2390 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2391 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2392 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2393
2394 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
2395 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
2396 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2397 }
2398
2399 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2400
2401 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2402
2403 /*
2404 * This is where we actually switch register context,
2405 * and address space if required. We will next run
2406 * as a result of a subsequent context switch.
2407 *
2408 * Once registers are switched and the processor is running "thread",
2409 * the stack variables and non-volatile registers will contain whatever
2410 * was there the last time that thread blocked. No local variables should
2411 * be used after this point, except for the special case of "thread", which
2412 * the platform layer returns as the previous thread running on the processor
2413 * via the function call ABI as a return register, and "self", which may have
2414 * been stored on the stack or a non-volatile register, but a stale idea of
2415 * what was on the CPU is newly-accurate because that thread is again
2416 * running on the CPU.
2417 */
2418 assert(continuation == self->continuation);
2419 thread = machine_switch_context(self, continuation, thread);
2420 assert(self == current_thread());
2421 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2422
2423 DTRACE_SCHED(on__cpu);
2424
2425 #if KPERF
2426 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2427 #endif /* KPERF */
2428
2429 /*
2430 * We have been resumed and are set to run.
2431 */
2432 thread_dispatch(thread, self);
2433
2434 if (continuation) {
2435 self->continuation = self->parameter = NULL;
2436
2437 (void) spllo();
2438
2439 call_continuation(continuation, parameter, self->wait_result);
2440 /*NOTREACHED*/
2441 }
2442
2443 return (TRUE);
2444 }
2445
2446 #if defined(CONFIG_SCHED_DEFERRED_AST)
2447 /*
2448 * pset_cancel_deferred_dispatch:
2449 *
2450 * Cancels all ASTs that we can cancel for the given processor set
2451 * if the current processor is running the last runnable thread in the
2452 * system.
2453 *
2454 * This function assumes the current thread is runnable. This must
2455 * be called with the pset unlocked.
2456 */
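/*
 * Invocation sketch (mirroring the call site in thread_dispatch()): this is
 * only attempted when the current thread is the last runnable thread, e.g.
 *
 *	if ((sched_run_buckets[TH_BUCKET_RUN] == 1) && !(self->state & TH_IDLE))
 *		pset_cancel_deferred_dispatch(processor->processor_set, processor);
 */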
2457 static void
2458 pset_cancel_deferred_dispatch(
2459 processor_set_t pset,
2460 processor_t processor)
2461 {
2462 processor_t active_processor = NULL;
2463 uint32_t sampled_sched_run_count;
2464
2465 pset_lock(pset);
2466 sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN];
2467
2468 /*
2469 * If we have emptied the run queue, and our current thread is runnable, we
2470 * should tell any processors that are still DISPATCHING that they will
2471 * probably not have any work to do. In the event that there are no
2472 * pending signals that we can cancel, this is also uninteresting.
2473 *
2474 * In the unlikely event that another thread becomes runnable while we are
2475 * doing this (sched_run_count is atomically updated, not guarded), the
2476 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2477 * in order to dispatch it to a processor in our pset. So, the other
2478 * codepath will wait while we squash all cancelable ASTs, get the pset
2479 * lock, and then dispatch the freshly runnable thread. So this should be
2480 * correct (we won't accidentally have a runnable thread that hasn't been
2481 * dispatched to an idle processor), if not ideal (we may be restarting the
2482 * dispatch process, which could have some overhead).
2483 *
2484 */
2485 if ((sampled_sched_run_count == 1) &&
2486 (pset->pending_deferred_AST_cpu_mask)) {
2487 qe_foreach_element_safe(active_processor, &pset->active_queue, processor_queue) {
2488 /*
2489 * If a processor is DISPATCHING, it could be because of
2490 * a cancelable signal.
2491 *
2492 * IF the processor is not our
2493 * current processor (the current processor should not
2494 * be DISPATCHING, so this is a bit paranoid), AND there
2495 * is a cancelable signal pending on the processor, AND
2496 * there is no non-cancelable signal pending (as there is
2497 * no point trying to backtrack on bringing the processor
2498 * up if a signal we cannot cancel is outstanding), THEN
2499 * it should make sense to roll back the processor state
2500 * to the IDLE state.
2501 *
2502 * If the racy nature of this approach (as the signal
2503 * will be arbitrated by hardware, and can fire as we
2504 * roll back state) results in the core responding
2505 * despite being pushed back to the IDLE state, it
2506 * should be no different than if the core took some
2507 * interrupt while IDLE.
2508 */
2509 if ((active_processor->state == PROCESSOR_DISPATCHING) &&
2510 (bit_test(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id)) &&
2511 (!bit_test(pset->pending_AST_cpu_mask, active_processor->cpu_id)) &&
2512 (active_processor != processor)) {
2513 /*
2514 * Squash all of the processor state back to some
2515 * reasonable facsimile of PROCESSOR_IDLE.
2516 *
2517 * TODO: What queue policy do we actually want here?
2518 * We want to promote selection of a good processor
2519 * to run on. Do we want to enqueue at the head?
2520 * The tail? At the (relative) old position in the
2521 * queue? Or something else entirely?
2522 */
2523 if (!active_processor->is_recommended) {
2524 re_queue_head(&pset->unused_queue, &active_processor->processor_queue);
2525 } else if (active_processor->processor_primary == active_processor) {
2526 re_queue_head(&pset->idle_queue, &active_processor->processor_queue);
2527 } else {
2528 re_queue_head(&pset->idle_secondary_queue, &active_processor->processor_queue);
2529 }
2530
2531 pset->active_processor_count--;
2532 sched_update_pset_load_average(pset);
2533
2534 assert(active_processor->next_thread == THREAD_NULL);
2535 processor_state_update_idle(active_processor);
2536 active_processor->deadline = UINT64_MAX;
2537 active_processor->state = PROCESSOR_IDLE;
2538 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
2539 machine_signal_idle_cancel(active_processor);
2540 }
2541
2542 }
2543 }
2544
2545 pset_unlock(pset);
2546 }
2547 #else
2548 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
2549 #endif
2550
2551 static void
2552 thread_csw_callout(
2553 thread_t old,
2554 thread_t new,
2555 uint64_t timestamp)
2556 {
2557 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2558 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
2559 machine_switch_perfcontrol_context(event, timestamp, 0,
2560 same_pri_latency, old, new);
2561 }
2562
2563
2564 /*
2565 * thread_dispatch:
2566 *
2567 * Handle threads at context switch. Re-dispatch other thread
2568 * if still running, otherwise update run state and perform
2569 * special actions. Update quantum for other thread and begin
2570 * the quantum for ourselves.
2571 *
2572 * "thread" is the old thread that we have switched away from.
2573 * "self" is the new current thread that we have context switched to
2574 *
2575 * Called at splsched.
2576 */
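/*
 * Relationship to the switch path (a sketch, not additional semantics): the
 * platform layer returns the thread that just went off core, and the newly
 * running thread dispatches it, as in thread_invoke()/thread_continue():
 *
 *	thread = machine_switch_context(self, continuation, thread);
 *	...
 *	thread_dispatch(thread, self);	// "thread" went off core, "self" is now running
 */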
2577 void
2578 thread_dispatch(
2579 thread_t thread,
2580 thread_t self)
2581 {
2582 processor_t processor = self->last_processor;
2583
2584 assert(processor == current_processor());
2585 assert(self == current_thread());
2586 assert(thread != self);
2587
2588 if (thread != THREAD_NULL) {
2589 /*
2590 * Do the perfcontrol callout for context switch.
2591 * The reason we do this here is:
2592 * - thread_dispatch() is called from various places that are not
2593 * on the direct context switch path (e.g. processor shutdown),
2594 * so adding the callout here covers all those cases.
2595 * - We want this callout as early as possible to be close
2596 * to the timestamp taken in thread_invoke()
2597 * - We want to avoid holding the thread lock while doing the
2598 * callout
2599 * - We do not want to make the callout if "thread" is NULL.
2600 */
2601 thread_csw_callout(thread, self, processor->last_dispatch);
2602
2603 /*
2604 * If blocked at a continuation, discard
2605 * the stack.
2606 */
2607 if (thread->continuation != NULL && thread->kernel_stack != 0)
2608 stack_free(thread);
2609
2610 if (thread->state & TH_IDLE) {
2611 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2612 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2613 (uintptr_t)thread_tid(thread), 0, thread->state,
2614 sched_run_buckets[TH_BUCKET_RUN], 0);
2615 } else {
2616 int64_t consumed;
2617 int64_t remainder = 0;
2618
2619 if (processor->quantum_end > processor->last_dispatch)
2620 remainder = processor->quantum_end -
2621 processor->last_dispatch;
2622
2623 consumed = thread->quantum_remaining - remainder;
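/*
 * Worked example with illustrative numbers: if the thread started with a
 * 10 ms quantum and blocked 4 ms before quantum_end, then remainder is
 * 4 ms and consumed is 6 ms, which is what gets billed to the ledgers
 * below (all values are really in absolute-time units, not milliseconds).
 */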
2624
2625 if ((thread->reason & AST_LEDGER) == 0) {
2626 /*
2627 * Bill CPU time to both the task and
2628 * the individual thread.
2629 */
2630 ledger_credit_thread(thread, thread->t_ledger,
2631 task_ledgers.cpu_time, consumed);
2632 ledger_credit_thread(thread, thread->t_threadledger,
2633 thread_ledgers.cpu_time, consumed);
2634 if (thread->t_bankledger) {
2635 ledger_credit_thread(thread, thread->t_bankledger,
2636 bank_ledgers.cpu_time,
2637 (consumed - thread->t_deduct_bank_ledger_time));
2638 }
2639 thread->t_deduct_bank_ledger_time = 0;
2640 }
2641
2642 wake_lock(thread);
2643 thread_lock(thread);
2644
2645 /*
2646 * Apply a priority floor if the thread holds a kernel resource.
2647 * Do this before checking starting_pri to avoid overpenalizing
2648 * repeated rwlock blockers.
2649 */
2650 if (__improbable(thread->rwlock_count != 0))
2651 lck_rw_set_promotion_locked(thread);
2652
2653 boolean_t keep_quantum = processor->first_timeslice;
2654
2655 /*
2656 * Treat a thread which has dropped priority since it got on core
2657 * as having expired its quantum.
2658 */
2659 if (processor->starting_pri > thread->sched_pri)
2660 keep_quantum = FALSE;
2661
2662 /* Compute remainder of current quantum. */
2663 if (keep_quantum &&
2664 processor->quantum_end > processor->last_dispatch)
2665 thread->quantum_remaining = (uint32_t)remainder;
2666 else
2667 thread->quantum_remaining = 0;
2668
2669 if (thread->sched_mode == TH_MODE_REALTIME) {
2670 /*
2671 * Cancel the deadline if the thread has
2672 * consumed the entire quantum.
2673 */
2674 if (thread->quantum_remaining == 0) {
2675 thread->realtime.deadline = UINT64_MAX;
2676 }
2677 } else {
2678 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2679 /*
2680 * For non-realtime threads treat a tiny
2681 * remaining quantum as an expired quantum
2682 * but include what's left next time.
2683 */
2684 if (thread->quantum_remaining < min_std_quantum) {
2685 thread->reason |= AST_QUANTUM;
2686 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2687 }
2688 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2689 }
2690
2691 /*
2692 * If we are doing a direct handoff then
2693 * take the remainder of the quantum.
2694 */
2695 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
2696 self->quantum_remaining = thread->quantum_remaining;
2697 thread->reason |= AST_QUANTUM;
2698 thread->quantum_remaining = 0;
2699 } else {
2700 #if defined(CONFIG_SCHED_MULTIQ)
2701 if (SCHED(sched_groups_enabled) &&
2702 thread->sched_group == self->sched_group) {
2703 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2704 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
2705 self->reason, (uintptr_t)thread_tid(thread),
2706 self->quantum_remaining, thread->quantum_remaining, 0);
2707
2708 self->quantum_remaining = thread->quantum_remaining;
2709 thread->quantum_remaining = 0;
2710 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2711 }
2712 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2713 }
2714
2715 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2716
2717 if (!(thread->state & TH_WAIT)) {
2718 /*
2719 * Still runnable.
2720 */
2721 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
2722
2723 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch);
2724
2725 ast_t reason = thread->reason;
2726 sched_options_t options = SCHED_NONE;
2727
2728 if (reason & AST_REBALANCE) {
2729 options |= SCHED_REBALANCE;
2730 if (reason & AST_QUANTUM) {
2731 /* Having gone to the trouble of forcing this thread off a less preferred core,
2732 * we should force the preferable core to reschedule immediately to give this
2733 * thread a chance to run instead of just sitting on the run queue, where
2734 * it may just be stolen back by the idle core we just forced it off of.
2735 * But only do this at the end of a quantum to prevent cascading effects.
2736 */
2737 options |= SCHED_PREEMPT;
2738 }
2739 }
2740
2741 if (reason & AST_QUANTUM)
2742 options |= SCHED_TAILQ;
2743 else if (reason & AST_PREEMPT)
2744 options |= SCHED_HEADQ;
2745 else
2746 options |= (SCHED_PREEMPT | SCHED_TAILQ);
2747
2748 thread_setrun(thread, options);
2749
2750 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2751 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2752 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2753 sched_run_buckets[TH_BUCKET_RUN], 0);
2754
2755 if (thread->wake_active) {
2756 thread->wake_active = FALSE;
2757 thread_unlock(thread);
2758
2759 thread_wakeup(&thread->wake_active);
2760 } else {
2761 thread_unlock(thread);
2762 }
2763
2764 wake_unlock(thread);
2765 } else {
2766 /*
2767 * Waiting.
2768 */
2769 boolean_t should_terminate = FALSE;
2770 uint32_t new_run_count;
2771
2772 /* Only the first call to thread_dispatch
2773 * after explicit termination should add
2774 * the thread to the termination queue
2775 */
2776 if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
2777 should_terminate = TRUE;
2778 thread->state |= TH_TERMINATE2;
2779 }
2780
2781 thread->state &= ~TH_RUN;
2782 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
2783 thread->chosen_processor = PROCESSOR_NULL;
2784
2785 new_run_count = sched_run_decr(thread);
2786
2787 #if CONFIG_SCHED_SFI
2788 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
2789 if (thread->reason & AST_SFI) {
2790 thread->wait_sfi_begin_time = processor->last_dispatch;
2791 }
2792 }
2793 #endif
2794
2795 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch);
2796
2797 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2798 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2799 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2800 new_run_count, 0);
2801
2802 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2803
2804 if (thread->wake_active) {
2805 thread->wake_active = FALSE;
2806 thread_unlock(thread);
2807
2808 thread_wakeup(&thread->wake_active);
2809 } else {
2810 thread_unlock(thread);
2811 }
2812
2813 wake_unlock(thread);
2814
2815 if (should_terminate)
2816 thread_terminate_enqueue(thread);
2817 }
2818 }
2819 }
2820
2821 int urgency = THREAD_URGENCY_NONE;
2822 uint64_t latency = 0;
2823
2824 /* Update (new) current thread and reprogram quantum timer */
2825 thread_lock(self);
2826
2827 if (!(self->state & TH_IDLE)) {
2828 uint64_t arg1, arg2;
2829
2830 #if CONFIG_SCHED_SFI
2831 ast_t new_ast;
2832
2833 new_ast = sfi_thread_needs_ast(self, NULL);
2834
2835 if (new_ast != AST_NONE) {
2836 ast_on(new_ast);
2837 }
2838 #endif
2839
2840 assertf(processor->last_dispatch >= self->last_made_runnable_time,
2841 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
2842 processor->last_dispatch, self->last_made_runnable_time);
2843
2844 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
2845
2846 latency = processor->last_dispatch - self->last_made_runnable_time;
2847 assert(latency >= self->same_pri_latency);
2848
2849 urgency = thread_get_urgency(self, &arg1, &arg2);
2850
2851 thread_tell_urgency(urgency, arg1, arg2, latency, self);
2852
2853 /*
2854 * Get a new quantum if none remaining.
2855 */
2856 if (self->quantum_remaining == 0) {
2857 thread_quantum_init(self);
2858 }
2859
2860 /*
2861 * Set up quantum timer and timeslice.
2862 */
2863 processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
2864 timer_call_quantum_timer_enter(&processor->quantum_timer, self,
2865 processor->quantum_end, processor->last_dispatch);
2866
2867 processor->first_timeslice = TRUE;
2868 } else {
2869 timer_call_quantum_timer_cancel(&processor->quantum_timer);
2870 processor->first_timeslice = FALSE;
2871
2872 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
2873 }
2874
2875 assert(self->block_hint == kThreadWaitNone);
2876 self->computation_epoch = processor->last_dispatch;
2877 self->reason = AST_NONE;
2878 processor->starting_pri = self->sched_pri;
2879
2880 thread_unlock(self);
2881
2882 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
2883 processor->last_dispatch);
2884
2885 #if defined(CONFIG_SCHED_DEFERRED_AST)
2886 /*
2887 * TODO: Can we state that redispatching our old thread is also
2888 * uninteresting?
2889 */
2890 if ((((volatile uint32_t)sched_run_buckets[TH_BUCKET_RUN]) == 1) &&
2891 !(self->state & TH_IDLE)) {
2892 pset_cancel_deferred_dispatch(processor->processor_set, processor);
2893 }
2894 #endif
2895
2896 }
2897
2898 /*
2899 * thread_block_reason:
2900 *
2901 * Forces a reschedule, blocking the caller if a wait
2902 * has been asserted.
2903 *
2904 * If a continuation is specified, then thread_invoke will
2905 * attempt to discard the thread's kernel stack. When the
2906 * thread resumes, it will execute the continuation function
2907 * on a new kernel stack.
2908 */
2909 counter(mach_counter_t c_thread_block_calls = 0;)
2910
2911 wait_result_t
2912 thread_block_reason(
2913 thread_continue_t continuation,
2914 void *parameter,
2915 ast_t reason)
2916 {
2917 thread_t self = current_thread();
2918 processor_t processor;
2919 thread_t new_thread;
2920 spl_t s;
2921
2922 counter(++c_thread_block_calls);
2923
2924 s = splsched();
2925
2926 processor = current_processor();
2927
2928 /* If we're explicitly yielding, force a subsequent quantum */
2929 if (reason & AST_YIELD)
2930 processor->first_timeslice = FALSE;
2931
2932 /* We're handling all scheduling ASTs */
2933 ast_off(AST_SCHEDULING);
2934
2935 #if PROC_REF_DEBUG
2936 if ((continuation != NULL) && (self->task != kernel_task)) {
2937 if (uthread_get_proc_refcount(self->uthread) != 0) {
2938 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
2939 }
2940 }
2941 #endif
2942
2943 self->continuation = continuation;
2944 self->parameter = parameter;
2945
2946 if (self->state & ~(TH_RUN | TH_IDLE)) {
2947 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2948 MACHDBG_CODE(DBG_MACH_SCHED,MACH_BLOCK),
2949 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
2950 }
2951
2952 do {
2953 thread_lock(self);
2954 new_thread = thread_select(self, processor, &reason);
2955 thread_unlock(self);
2956 } while (!thread_invoke(self, new_thread, reason));
2957
2958 splx(s);
2959
2960 return (self->wait_result);
2961 }
2962
2963 /*
2964 * thread_block:
2965 *
2966 * Block the current thread if a wait has been asserted.
2967 */
2968 wait_result_t
2969 thread_block(
2970 thread_continue_t continuation)
2971 {
2972 return thread_block_reason(continuation, NULL, AST_NONE);
2973 }
2974
2975 wait_result_t
2976 thread_block_parameter(
2977 thread_continue_t continuation,
2978 void *parameter)
2979 {
2980 return thread_block_reason(continuation, parameter, AST_NONE);
2981 }
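/*
 * Illustrative usage (a hypothetical sketch, not taken from a real call
 * site): a typical synchronous wait asserts a wait and then blocks:
 *
 *	assert_wait((event_t)&example_object, THREAD_UNINT);
 *	wait_result_t wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * "example_object" is a placeholder. When a real continuation is supplied,
 * thread_block() does not return; the thread resumes in the continuation
 * on a fresh kernel stack (see thread_continue()).
 */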
2982
2983 /*
2984 * thread_run:
2985 *
2986 * Switch directly from the current thread to the
2987 * new thread, handing off our quantum if appropriate.
2988 *
2989 * New thread must be runnable, and not on a run queue.
2990 *
2991 * Called at splsched.
2992 */
2993 int
2994 thread_run(
2995 thread_t self,
2996 thread_continue_t continuation,
2997 void *parameter,
2998 thread_t new_thread)
2999 {
3000 ast_t reason = AST_HANDOFF;
3001
3002 self->continuation = continuation;
3003 self->parameter = parameter;
3004
3005 while (!thread_invoke(self, new_thread, reason)) {
3006 /* the handoff failed, so we have to fall back to the normal block path */
3007 processor_t processor = current_processor();
3008
3009 reason = AST_NONE;
3010
3011 thread_lock(self);
3012 new_thread = thread_select(self, processor, &reason);
3013 thread_unlock(self);
3014 }
3015
3016 return (self->wait_result);
3017 }
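/*
 * Hypothetical usage sketch: at splsched, with "target" runnable and not on
 * any run queue, a caller can hand off directly:
 *
 *	wait_result_t wr = thread_run(self, THREAD_CONTINUE_NULL, NULL, target);
 *
 * "target" is a placeholder name; the AST_HANDOFF reason lets it inherit
 * the remainder of self's quantum (see thread_dispatch()).
 */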
3018
3019 /*
3020 * thread_continue:
3021 *
3022 * Called at splsched when a thread first receives
3023 * a new stack after a continuation.
3024 */
3025 void
3026 thread_continue(
3027 thread_t thread)
3028 {
3029 thread_t self = current_thread();
3030 thread_continue_t continuation;
3031 void *parameter;
3032
3033 DTRACE_SCHED(on__cpu);
3034
3035 continuation = self->continuation;
3036 parameter = self->parameter;
3037
3038 #if KPERF
3039 kperf_on_cpu(self, continuation, NULL);
3040 #endif
3041
3042 thread_dispatch(thread, self);
3043
3044 self->continuation = self->parameter = NULL;
3045
3046 #if INTERRUPT_MASKED_DEBUG
3047 /* Reset interrupt-masked spin debugging timeout */
3048 ml_spin_debug_clear(self);
3049 #endif
3050
3051 if (thread != THREAD_NULL)
3052 (void)spllo();
3053
3054 TLOG(1, "thread_continue: calling call_continuation \n");
3055 call_continuation(continuation, parameter, self->wait_result);
3056 /*NOTREACHED*/
3057 }
3058
3059 void
3060 thread_quantum_init(thread_t thread)
3061 {
3062 if (thread->sched_mode == TH_MODE_REALTIME) {
3063 thread->quantum_remaining = thread->realtime.computation;
3064 } else {
3065 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
3066 }
3067 }
3068
3069 uint32_t
3070 sched_timeshare_initial_quantum_size(thread_t thread)
3071 {
3072 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG)
3073 return bg_quantum;
3074 else
3075 return std_quantum;
3076 }
3077
3078 /*
3079 * run_queue_init:
3080 *
3081 * Initialize a run queue before first use.
3082 */
3083 void
3084 run_queue_init(
3085 run_queue_t rq)
3086 {
3087 rq->highq = NOPRI;
3088 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++)
3089 rq->bitmap[i] = 0;
3090 rq->urgency = rq->count = 0;
3091 for (int i = 0; i < NRQS; i++)
3092 queue_init(&rq->queues[i]);
3093 }
3094
3095 /*
3096 * run_queue_dequeue:
3097 *
3098 * Perform a dequeue operation on a run queue,
3099 * and return the resulting thread.
3100 *
3101 * The run queue must be locked (see thread_run_queue_remove()
3102 * for more info), and not empty.
3103 */
3104 thread_t
3105 run_queue_dequeue(
3106 run_queue_t rq,
3107 integer_t options)
3108 {
3109 thread_t thread;
3110 queue_t queue = &rq->queues[rq->highq];
3111
3112 if (options & SCHED_HEADQ) {
3113 thread = qe_dequeue_head(queue, struct thread, runq_links);
3114 } else {
3115 thread = qe_dequeue_tail(queue, struct thread, runq_links);
3116 }
3117
3118 assert(thread != THREAD_NULL);
3119 assert_thread_magic(thread);
3120
3121 thread->runq = PROCESSOR_NULL;
3122 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3123 rq->count--;
3124 if (SCHED(priority_is_urgent)(rq->highq)) {
3125 rq->urgency--; assert(rq->urgency >= 0);
3126 }
3127 if (queue_empty(queue)) {
3128 bitmap_clear(rq->bitmap, rq->highq);
3129 rq->highq = bitmap_first(rq->bitmap, NRQS);
3130 }
3131
3132 return thread;
3133 }
3134
3135 /*
3136 * run_queue_enqueue:
3137 *
3138 * Perform an enqueue operation on a run queue. Returns TRUE if the
3139 * thread becomes the queue's new highest-priority entry (a hint that
3140 * the caller may need to preempt). The run queue must be locked (see
3141 * thread_run_queue_remove() for more info).
3142 */
3143 boolean_t
3144 run_queue_enqueue(
3145 run_queue_t rq,
3146 thread_t thread,
3147 integer_t options)
3148 {
3149 queue_t queue = &rq->queues[thread->sched_pri];
3150 boolean_t result = FALSE;
3151
3152 assert_thread_magic(thread);
3153
3154 if (queue_empty(queue)) {
3155 enqueue_tail(queue, &thread->runq_links);
3156
3157 rq_bitmap_set(rq->bitmap, thread->sched_pri);
3158 if (thread->sched_pri > rq->highq) {
3159 rq->highq = thread->sched_pri;
3160 result = TRUE;
3161 }
3162 } else {
3163 if (options & SCHED_TAILQ)
3164 enqueue_tail(queue, &thread->runq_links);
3165 else
3166 enqueue_head(queue, &thread->runq_links);
3167 }
3168 if (SCHED(priority_is_urgent)(thread->sched_pri))
3169 rq->urgency++;
3170 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3171 rq->count++;
3172
3173 return (result);
3174 }
3175
3176 /*
3177 * run_queue_remove:
3178 *
3179 * Remove a specific thread from a runqueue.
3180 *
3181 * The run queue must be locked.
3182 */
3183 void
3184 run_queue_remove(
3185 run_queue_t rq,
3186 thread_t thread)
3187 {
3188 assert(thread->runq != PROCESSOR_NULL);
3189 assert_thread_magic(thread);
3190
3191 remqueue(&thread->runq_links);
3192 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3193 rq->count--;
3194 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3195 rq->urgency--; assert(rq->urgency >= 0);
3196 }
3197
3198 if (queue_empty(&rq->queues[thread->sched_pri])) {
3199 /* update run queue status */
3200 bitmap_clear(rq->bitmap, thread->sched_pri);
3201 rq->highq = bitmap_first(rq->bitmap, NRQS);
3202 }
3203
3204 thread->runq = PROCESSOR_NULL;
3205 }
3206
3207 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3208 void
3209 sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
3210 {
3211 spl_t s;
3212 thread_t thread;
3213
3214 processor_set_t pset = &pset0;
3215
3216 s = splsched();
3217 rt_lock_lock(pset);
3218
3219 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3220 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3221 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3222 }
3223 }
3224
3225 rt_lock_unlock(pset);
3226 splx(s);
3227 }
3228
3229 int64_t
3230 sched_rtglobal_runq_count_sum(void)
3231 {
3232 return pset0.rt_runq.runq_stats.count_sum;
3233 }
3234
3235 /*
3236 * realtime_queue_insert:
3237 *
3238 * Enqueue a thread for realtime execution.
3239 */
3240 static boolean_t
3241 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
3242 {
3243 queue_t queue = &SCHED(rt_runq)(pset)->queue;
3244 uint64_t deadline = thread->realtime.deadline;
3245 boolean_t preempt = FALSE;
3246
3247 rt_lock_lock(pset);
3248
3249 if (queue_empty(queue)) {
3250 enqueue_tail(queue, &thread->runq_links);
3251 preempt = TRUE;
3252 } else {
3253 /* Insert into rt_runq in thread deadline order */
3254 queue_entry_t iter;
3255 qe_foreach(iter, queue) {
3256 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3257 assert_thread_magic(iter_thread);
3258
3259 if (deadline < iter_thread->realtime.deadline) {
3260 if (iter == queue_first(queue))
3261 preempt = TRUE;
3262 insque(&thread->runq_links, queue_prev(iter));
3263 break;
3264 } else if (iter == queue_last(queue)) {
3265 enqueue_tail(queue, &thread->runq_links);
3266 break;
3267 }
3268 }
3269 }
3270
3271 thread->runq = processor;
3272 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3273 rt_runq_count_incr(pset);
3274
3275 rt_lock_unlock(pset);
3276
3277 return (preempt);
3278 }
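/*
 * Worked example of the deadline-ordered insert above (illustrative
 * deadlines only): with the queue holding deadlines {100, 200, 300},
 * inserting 150 places the thread between 100 and 200 and does not set
 * "preempt"; inserting 50 places it at the head and sets "preempt", since
 * the new thread now has the earliest deadline in the pset.
 */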
3279
3280 /*
3281 * realtime_setrun:
3282 *
3283 * Dispatch a thread for realtime execution.
3284 *
3285 * Thread must be locked. Associated pset must
3286 * be locked, and is returned unlocked.
3287 */
3288 static void
3289 realtime_setrun(
3290 processor_t processor,
3291 thread_t thread)
3292 {
3293 processor_set_t pset = processor->processor_set;
3294 ast_t preempt;
3295
3296 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3297
3298 thread->chosen_processor = processor;
3299
3300 /* <rdar://problem/15102234> */
3301 assert(thread->bound_processor == PROCESSOR_NULL);
3302
3303 /*
3304 * Dispatch directly onto idle processor.
3305 */
3306 if ( (thread->bound_processor == processor)
3307 && processor->state == PROCESSOR_IDLE) {
3308 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3309
3310 pset->active_processor_count++;
3311 sched_update_pset_load_average(pset);
3312
3313 processor->next_thread = thread;
3314 processor_state_update_from_thread(processor, thread);
3315 processor->deadline = thread->realtime.deadline;
3316 processor->state = PROCESSOR_DISPATCHING;
3317
3318 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
3319 pset_unlock(pset);
3320 sched_ipi_perform(processor, ipi_type);
3321 return;
3322 }
3323
3324 if (processor->current_pri < BASEPRI_RTQUEUES)
3325 preempt = (AST_PREEMPT | AST_URGENT);
3326 else if (thread->realtime.deadline < processor->deadline)
3327 preempt = (AST_PREEMPT | AST_URGENT);
3328 else
3329 preempt = AST_NONE;
3330
3331 realtime_queue_insert(processor, pset, thread);
3332
3333 ipi_type = SCHED_IPI_NONE;
3334 if (preempt != AST_NONE) {
3335 if (processor->state == PROCESSOR_IDLE) {
3336 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3337
3338 pset->active_processor_count++;
3339 sched_update_pset_load_average(pset);
3340
3341 processor->next_thread = THREAD_NULL;
3342 processor_state_update_from_thread(processor, thread);
3343 processor->deadline = thread->realtime.deadline;
3344 processor->state = PROCESSOR_DISPATCHING;
3345 if (processor == current_processor()) {
3346 ast_on(preempt);
3347 } else {
3348 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
3349 }
3350 } else if (processor->state == PROCESSOR_DISPATCHING) {
3351 if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
3352 processor_state_update_from_thread(processor, thread);
3353 processor->deadline = thread->realtime.deadline;
3354 }
3355 } else {
3356 if (processor == current_processor()) {
3357 ast_on(preempt);
3358 } else {
3359 ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
3360 }
3361 }
3362 } else {
3363 /* Selected processor was too busy; just keep the thread enqueued and let other processors drain it naturally. */
3364 }
3365
3366 pset_unlock(pset);
3367 sched_ipi_perform(processor, ipi_type);
3368 }
3369
3370
3371 sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3372 __unused sched_ipi_event_t event)
3373 {
3374 #if defined(CONFIG_SCHED_DEFERRED_AST)
3375 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3376 return SCHED_IPI_DEFERRED;
3377 }
3378 #else /* CONFIG_SCHED_DEFERRED_AST */
3379 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
3380 #endif /* CONFIG_SCHED_DEFERRED_AST */
3381 return SCHED_IPI_NONE;
3382 }
3383
3384 sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3385 {
3386 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3387 assert(dst != NULL);
3388
3389 processor_set_t pset = dst->processor_set;
3390 if (current_processor() == dst) {
3391 return SCHED_IPI_NONE;
3392 }
3393
3394 if (bit_test(pset->pending_AST_cpu_mask, dst->cpu_id)) {
3395 return SCHED_IPI_NONE;
3396 }
3397
3398 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3399 switch(ipi_type) {
3400 case SCHED_IPI_NONE:
3401 return SCHED_IPI_NONE;
3402 #if defined(CONFIG_SCHED_DEFERRED_AST)
3403 case SCHED_IPI_DEFERRED:
3404 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3405 break;
3406 #endif /* CONFIG_SCHED_DEFERRED_AST */
3407 default:
3408 bit_set(pset->pending_AST_cpu_mask, dst->cpu_id);
3409 break;
3410 }
3411 return ipi_type;
3412 }
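/*
 * Typical pairing, drawn from the call sites in this file (a sketch): the
 * IPI type is chosen while the pset lock is held, and the IPI is actually
 * sent only after the lock is dropped:
 *
 *	ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
 *	pset_unlock(pset);
 *	sched_ipi_perform(processor, ipi_type);
 */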
3413
3414 sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3415 {
3416 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3417 boolean_t deferred_ipi_supported = false;
3418 processor_set_t pset = dst->processor_set;
3419
3420 #if defined(CONFIG_SCHED_DEFERRED_AST)
3421 deferred_ipi_supported = true;
3422 #endif /* CONFIG_SCHED_DEFERRED_AST */
3423
3424 switch(event) {
3425 case SCHED_IPI_EVENT_SPILL:
3426 case SCHED_IPI_EVENT_SMT_REBAL:
3427 case SCHED_IPI_EVENT_REBALANCE:
3428 case SCHED_IPI_EVENT_BOUND_THR:
3429 /*
3430 * The spill, SMT rebalance, rebalance and bound-thread
3431 * scenarios always use immediate IPIs.
3432 */
3433 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3434 break;
3435 case SCHED_IPI_EVENT_PREEMPT:
3436 /* In the preemption case, use immediate IPIs for RT threads */
3437 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3438 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3439 break;
3440 }
3441
3442 /*
3443 * For non-RT thread preemption:
3444 * if the core is active, use an immediate IPI;
3445 * if the core is idle, use a deferred IPI if supported, otherwise an immediate IPI.
3446 */
3447 if (deferred_ipi_supported && dst_idle) {
3448 return sched_ipi_deferred_policy(pset, dst, event);
3449 }
3450 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3451 break;
3452 default:
3453 panic("Unrecognized scheduler IPI event type %d", event);
3454 }
3455 assert(ipi_type != SCHED_IPI_NONE);
3456 return ipi_type;
3457 }
3458
3459 void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
3460 {
3461 switch (ipi) {
3462 case SCHED_IPI_NONE:
3463 break;
3464 case SCHED_IPI_IDLE:
3465 machine_signal_idle(dst);
3466 break;
3467 case SCHED_IPI_IMMEDIATE:
3468 cause_ast_check(dst);
3469 break;
3470 case SCHED_IPI_DEFERRED:
3471 machine_signal_idle_deferred(dst);
3472 break;
3473 default:
3474 panic("Unrecognized scheduler IPI type: %d", ipi);
3475 }
3476 }
3477
3478 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3479
3480 boolean_t
3481 priority_is_urgent(int priority)
3482 {
3483 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
3484 }
3485
3486 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3487
3488 /*
3489 * processor_setrun:
3490 *
3491 * Dispatch a thread for execution on a
3492 * processor.
3493 *
3494 * Thread must be locked. Associated pset must
3495 * be locked, and is returned unlocked.
3496 */
3497 static void
3498 processor_setrun(
3499 processor_t processor,
3500 thread_t thread,
3501 integer_t options)
3502 {
3503 processor_set_t pset = processor->processor_set;
3504 ast_t preempt;
3505 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
3506
3507 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3508
3509 thread->chosen_processor = processor;
3510
3511 /*
3512 * Dispatch directly onto idle processor.
3513 */
3514 if ( (SCHED(direct_dispatch_to_idle_processors) ||
3515 thread->bound_processor == processor)
3516 && processor->state == PROCESSOR_IDLE) {
3517
3518 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3519
3520 pset->active_processor_count++;
3521 sched_update_pset_load_average(pset);
3522
3523 processor->next_thread = thread;
3524 processor_state_update_from_thread(processor, thread);
3525 processor->deadline = UINT64_MAX;
3526 processor->state = PROCESSOR_DISPATCHING;
3527
3528 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
3529 pset_unlock(pset);
3530 sched_ipi_perform(processor, ipi_type);
3531 return;
3532 }
3533
3534 /*
3535 * Set preemption mode.
3536 */
3537 #if defined(CONFIG_SCHED_DEFERRED_AST)
3538 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3539 #endif
3540 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
3541 preempt = (AST_PREEMPT | AST_URGENT);
3542 else if(processor->active_thread && thread_eager_preemption(processor->active_thread))
3543 preempt = (AST_PREEMPT | AST_URGENT);
3544 else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3545 if(SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
3546 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3547 } else {
3548 preempt = AST_NONE;
3549 }
3550 } else
3551 preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE;
3552
3553 SCHED(processor_enqueue)(processor, thread, options);
3554 sched_update_pset_load_average(pset);
3555
3556 if (preempt != AST_NONE) {
3557 if (processor->state == PROCESSOR_IDLE) {
3558 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3559 pset->active_processor_count++;
3560 processor->next_thread = THREAD_NULL;
3561 processor_state_update_from_thread(processor, thread);
3562 processor->deadline = UINT64_MAX;
3563 processor->state = PROCESSOR_DISPATCHING;
3564 ipi_action = eExitIdle;
3565 } else if ( processor->state == PROCESSOR_DISPATCHING) {
3566 if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
3567 processor_state_update_from_thread(processor, thread);
3568 processor->deadline = UINT64_MAX;
3569 }
3570 } else if ( (processor->state == PROCESSOR_RUNNING ||
3571 processor->state == PROCESSOR_SHUTDOWN) &&
3572 (thread->sched_pri >= processor->current_pri)) {
3573 ipi_action = eInterruptRunning;
3574 }
3575 } else {
3576 /*
3577 * New thread is not important enough to preempt what is running, but
3578 * special processor states may need special handling
3579 */
3580 if (processor->state == PROCESSOR_SHUTDOWN &&
3581 thread->sched_pri >= processor->current_pri ) {
3582 ipi_action = eInterruptRunning;
3583 } else if (processor->state == PROCESSOR_IDLE) {
3584 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3585
3586 pset->active_processor_count++;
3587 // sched_update_pset_load_average(pset);
3588
3589 processor->next_thread = THREAD_NULL;
3590 processor_state_update_from_thread(processor, thread);
3591 processor->deadline = UINT64_MAX;
3592 processor->state = PROCESSOR_DISPATCHING;
3593
3594 ipi_action = eExitIdle;
3595 }
3596 }
3597
3598 if (ipi_action != eDoNothing) {
3599 if (processor == current_processor()) {
3600 if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE)
3601 ast_on(preempt);
3602 } else {
3603 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3604 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3605 }
3606 }
3607 pset_unlock(pset);
3608 sched_ipi_perform(processor, ipi_type);
3609 }
3610
3611 /*
3612 * choose_next_pset:
3613 *
3614 * Return the next sibling pset containing
3615 * available processors.
3616 *
3617 * Returns the original pset if none other is
3618 * suitable.
3619 */
3620 static processor_set_t
3621 choose_next_pset(
3622 processor_set_t pset)
3623 {
3624 processor_set_t nset = pset;
3625
3626 do {
3627 nset = next_pset(nset);
3628 } while (nset->online_processor_count < 1 && nset != pset);
3629
3630 return (nset);
3631 }
3632
3633 /*
3634 * choose_processor:
3635 *
3636 * Choose a processor for the thread, beginning at
3637 * the pset. Accepts an optional processor hint in
3638 * the pset.
3639 *
3640 * Returns a processor, possibly from a different pset.
3641 *
3642 * The thread must be locked. The pset must be locked,
3643 * and the resulting pset is locked on return.
3644 */
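/*
 * Search sketch (summarizing, not changing, the logic below): the hinted
 * processor is preferred when it is idle, or when a realtime thread can
 * preempt a non-realtime thread on it to regain it; otherwise the psets are
 * scanned while tracking candidates such as the lowest-priority running
 * processor, an unpaired primary, the furthest realtime deadline, and the
 * shortest run queue, so that the search only replaces the hint with
 * something strictly better.
 */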
3645 processor_t
3646 choose_processor(
3647 processor_set_t pset,
3648 processor_t processor,
3649 thread_t thread)
3650 {
3651 processor_set_t nset, cset = pset;
3652
3653 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
3654
3655 /*
3656 * Prefer the hinted processor, when appropriate.
3657 */
3658
3659 /* Fold last processor hint from secondary processor to its primary */
3660 if (processor != PROCESSOR_NULL) {
3661 processor = processor->processor_primary;
3662 }
3663
3664 /*
3665 * Only consult platform layer if pset is active, which
3666 * it may not be in some cases when a multi-set system
3667 * is going to sleep.
3668 */
3669 if (pset->online_processor_count) {
3670 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
3671 processor_t mc_processor = machine_choose_processor(pset, processor);
3672 if (mc_processor != PROCESSOR_NULL)
3673 processor = mc_processor->processor_primary;
3674 }
3675 }
3676
3677 /*
3678 * At this point, we may have a processor hint, and we may have
3679 * an initial starting pset. If the hint is not in the pset, or
3680 * if the hint is for a processor in an invalid state, discard
3681 * the hint.
3682 */
3683 if (processor != PROCESSOR_NULL) {
3684 if (processor->processor_set != pset) {
3685 processor = PROCESSOR_NULL;
3686 } else if (!processor->is_recommended) {
3687 processor = PROCESSOR_NULL;
3688 } else {
3689 switch (processor->state) {
3690 case PROCESSOR_START:
3691 case PROCESSOR_SHUTDOWN:
3692 case PROCESSOR_OFF_LINE:
3693 /*
3694 * Hint is for a processor that cannot support running new threads.
3695 */
3696 processor = PROCESSOR_NULL;
3697 break;
3698 case PROCESSOR_IDLE:
3699 /*
3700 * Hint is for an idle processor. Assume it is no worse than any other
3701 * idle processor. The platform layer had an opportunity to provide
3702 * the "least cost idle" processor above.
3703 */
3704 return (processor);
3705 case PROCESSOR_RUNNING:
3706 case PROCESSOR_DISPATCHING:
3707 /*
3708 * Hint is for an active CPU. This fast-path allows
3709 * realtime threads to preempt non-realtime threads
3710 * to regain their previous executing processor.
3711 */
3712 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
3713 (processor->current_pri < BASEPRI_RTQUEUES))
3714 return (processor);
3715
3716 /* Otherwise, use hint as part of search below */
3717 break;
3718 default:
3719 processor = PROCESSOR_NULL;
3720 break;
3721 }
3722 }
3723 }
3724
3725 /*
3726 * Iterate through the processor sets to locate
3727 * an appropriate processor. Seed results with
3728 * a last-processor hint, if available, so that
3729 * a search must find something strictly better
3730 * to replace it.
3731 *
3732 * A primary/secondary pair of SMT processors is
3733 * "unpaired" if the primary is busy but its
3734 * corresponding secondary is idle (so the physical
3735 * core has full use of its resources).
3736 */
3737
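/*
 * Candidates accumulated across the traversal below:
 * lp_* track the lowest-priority running processors (overall and among
 * unpaired primaries), lc_processor tracks the shallowest run queue,
 * and fd_processor tracks the realtime processor with the furthest
 * deadline.
 */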
3738 integer_t lowest_priority = MAXPRI + 1;
3739 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
3740 integer_t lowest_count = INT_MAX;
3741 uint64_t furthest_deadline = 1;
3742 processor_t lp_processor = PROCESSOR_NULL;
3743 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
3744 processor_t lp_unpaired_secondary_processor = PROCESSOR_NULL;
3745 processor_t lc_processor = PROCESSOR_NULL;
3746 processor_t fd_processor = PROCESSOR_NULL;
3747
3748 if (processor != PROCESSOR_NULL) {
3749 /* All other states should be enumerated above. */
3750 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
3751
3752 lowest_priority = processor->current_pri;
3753 lp_processor = processor;
3754
3755 if (processor->current_pri >= BASEPRI_RTQUEUES) {
3756 furthest_deadline = processor->deadline;
3757 fd_processor = processor;
3758 }
3759
3760 lowest_count = SCHED(processor_runq_count)(processor);
3761 lc_processor = processor;
3762 }
3763
3764 do {
3765
3766 /*
3767 * Choose an idle processor, in pset traversal order
3768 */
3769 qe_foreach_element(processor, &cset->idle_queue, processor_queue) {
3770 if (processor->is_recommended)
3771 return processor;
3772 }
3773
3774 /*
3775 * Otherwise, enumerate active and idle processors to find candidates
3776 * with lower priority/etc.
3777 */
3778
3779 qe_foreach_element(processor, &cset->active_queue, processor_queue) {
3780
3781 if (!processor->is_recommended) {
3782 continue;
3783 }
3784
3785 integer_t cpri = processor->current_pri;
3786 if (cpri < lowest_priority) {
3787 lowest_priority = cpri;
3788 lp_processor = processor;
3789 }
3790
3791 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
3792 furthest_deadline = processor->deadline;
3793 fd_processor = processor;
3794 }
3795
3796 integer_t ccount = SCHED(processor_runq_count)(processor);
3797 if (ccount < lowest_count) {
3798 lowest_count = ccount;
3799 lc_processor = processor;
3800 }
3801 }
3802
3803 /*
3804 * For SMT configs, these idle secondary processors must have an active primary;
3805 * otherwise the idle primary would have short-circuited the loop above.
3806 */
3807 qe_foreach_element(processor, &cset->idle_secondary_queue, processor_queue) {
3808
3809 if (!processor->is_recommended) {
3810 continue;
3811 }
3812
3813 processor_t cprimary = processor->processor_primary;
3814
3815 /* If the primary processor is offline or starting up, it's not a candidate for this path */
3816 if (cprimary->state == PROCESSOR_RUNNING || cprimary->state == PROCESSOR_DISPATCHING) {
3817 integer_t primary_pri = cprimary->current_pri;
3818
3819 if (primary_pri < lowest_unpaired_primary_priority) {
3820 lowest_unpaired_primary_priority = primary_pri;
3821 lp_unpaired_primary_processor = cprimary;
3822 lp_unpaired_secondary_processor = processor;
3823 }
3824 }
3825 }
3826
3827
3828 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3829
3830 /*
3831 * For realtime threads, the most important aspect is
3832 * scheduling latency, so we attempt to assign threads
3833 * to good preemption candidates (assuming an idle primary
3834 * processor was not available above).
3835 */
3836
3837 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3838 /* Move to end of active queue so that the next thread doesn't also pick it */
3839 re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
3840 return lp_unpaired_primary_processor;
3841 }
3842 if (thread->sched_pri > lowest_priority) {
3843 /* Move to end of active queue so that the next thread doesn't also pick it */
3844 re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
3845 return lp_processor;
3846 }
3847 if (thread->realtime.deadline < furthest_deadline)
3848 return fd_processor;
3849
3850 /*
3851 * If all primary and secondary CPUs are busy with realtime
3852 * threads whose deadlines are earlier than ours, move on to the
3853 * next pset.
3854 */
3855 }
3856 else {
3857
3858 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3859 /* Move to end of active queue so that the next thread doesn't also pick it */
3860 re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
3861 return lp_unpaired_primary_processor;
3862 }
3863 if (thread->sched_pri > lowest_priority) {
3864 /* Move to end of active queue so that the next thread doesn't also pick it */
3865 re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
3866 return lp_processor;
3867 }
3868
3869 /*
3870 * If all primary processors in this pset are running a higher
3871 * priority thread, move on to the next pset. Only when we have
3872 * exhausted this search do we fall back to other heuristics.
3873 */
3874 }
3875
3876 /*
3877 * Move onto the next processor set.
3878 */
3879 nset = next_pset(cset);
3880
3881 if (nset != pset) {
3882 pset_unlock(cset);
3883
3884 cset = nset;
3885 pset_lock(cset);
3886 }
3887 } while (nset != pset);
3888
3889 /*
3890 * Make sure that we pick a running processor,
3891 * and that the correct processor set is locked.
3892 * Since we may have unlocked the candidate processor's
3893 * pset, it may have changed state.
3894 *
3895 * All primary processors are running a higher priority
3896 * thread, so the only options left are enqueuing on
3897 * the secondary processor that would perturb the lowest-priority
3898 * primary, or on the least busy primary.
3899 */
3900 do {
3901
3902 /* lowest_priority is evaluated in the main loops above */
3903 if (lp_unpaired_secondary_processor != PROCESSOR_NULL) {
3904 processor = lp_unpaired_secondary_processor;
3905 lp_unpaired_secondary_processor = PROCESSOR_NULL;
3906 } else if (lc_processor != PROCESSOR_NULL) {
3907 processor = lc_processor;
3908 lc_processor = PROCESSOR_NULL;
3909 } else {
3910 /*
3911 * All processors are executing higher
3912 * priority threads, and the lowest_count
3913 * candidate was not usable
3914 */
3915 processor = master_processor;
3916 }
3917
3918 /*
3919 * Check that the correct processor set is
3920 * returned locked.
3921 */
3922 if (cset != processor->processor_set) {
3923 pset_unlock(cset);
3924 cset = processor->processor_set;
3925 pset_lock(cset);
3926 }
3927
3928 /*
3929 * We must verify that the chosen processor is still available.
3930 * master_processor is an exception, since we may need to preempt
3931 * a running thread on it during processor shutdown (for sleep),
3932 * and that thread needs to be enqueued on its runqueue to run
3933 * when the processor is restarted.
3934 */
3935 if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE))
3936 processor = PROCESSOR_NULL;
3937
3938 } while (processor == PROCESSOR_NULL);
3939
3940 if (processor->state == PROCESSOR_RUNNING) {
3941 re_queue_tail(&cset->active_queue, &processor->processor_queue);
3942 }
3943
3944 return (processor);
3945 }
3946
3947 /*
3948 * thread_setrun:
3949 *
3950 * Dispatch thread for execution, onto an idle
3951 * processor or run queue, and signal a preemption
3952 * as appropriate.
3953 *
3954 * Thread must be locked.
3955 */
3956 void
3957 thread_setrun(
3958 thread_t thread,
3959 integer_t options)
3960 {
3961 processor_t processor;
3962 processor_set_t pset;
3963
3964 assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
3965 assert(thread->runq == PROCESSOR_NULL);
3966
3967 /*
3968 * Update priority if needed.
3969 */
3970 if (SCHED(can_update_priority)(thread))
3971 SCHED(update_priority)(thread);
3972
3973 thread->sfi_class = sfi_thread_classify(thread);
3974
3975 assert(thread->runq == PROCESSOR_NULL);
3976
3977 #if __SMP__
3978 if (thread->bound_processor == PROCESSOR_NULL) {
3979 /*
3980 * Unbound case.
3981 */
3982 if (thread->affinity_set != AFFINITY_SET_NULL) {
3983 /*
3984 * Use affinity set policy hint.
3985 */
3986 pset = thread->affinity_set->aset_pset;
3987 pset_lock(pset);
3988
3989 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
3990 pset = processor->processor_set;
3991
3992 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
3993 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
3994 } else if (thread->last_processor != PROCESSOR_NULL) {
3995 /*
3996 * Simple (last processor) affinity case.
3997 */
3998 processor = thread->last_processor;
3999 pset = processor->processor_set;
4000 pset_lock(pset);
4001 processor = SCHED(choose_processor)(pset, processor, thread);
4002 pset = processor->processor_set;
4003
4004 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
4005 (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
4006 } else {
4007 /*
4008 * No Affinity case:
4009 *
4010 * Utilize a per-task hint to spread threads
4011 * among the available processor sets.
4012 */
4013 task_t task = thread->task;
4014
4015 pset = task->pset_hint;
4016 if (pset == PROCESSOR_SET_NULL)
4017 pset = current_processor()->processor_set;
4018
4019 pset = choose_next_pset(pset);
4020 pset_lock(pset);
4021
4022 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
4023 pset = processor->processor_set;
4024 task->pset_hint = pset;
4025
4026 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
4027 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4028 }
4029 } else {
4030 /*
4031 * Bound case:
4032 *
4033 * Unconditionally dispatch on the processor.
4034 */
4035 processor = thread->bound_processor;
4036 pset = processor->processor_set;
4037 pset_lock(pset);
4038
4039 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
4040 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
4041 }
4042 #else /* !__SMP__ */
4043 /* Only one processor to choose */
4044 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
4045 processor = master_processor;
4046 pset = processor->processor_set;
4047 pset_lock(pset);
4048 #endif /* !__SMP__ */
4049
4050 /*
4051 * Dispatch the thread on the chosen processor.
4052 * TODO: This should be based on sched_mode, not sched_pri
4053 */
4054 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4055 realtime_setrun(processor, thread);
4056 } else {
4057 processor_setrun(processor, thread, options);
4058 /* pset is now unlocked */
4059 if (thread->bound_processor == PROCESSOR_NULL) {
4060 SCHED(check_spill)(pset, thread);
4061 }
4062 }
4063 }
4064
4065 processor_set_t
4066 task_choose_pset(
4067 task_t task)
4068 {
4069 processor_set_t pset = task->pset_hint;
4070
4071 if (pset != PROCESSOR_SET_NULL)
4072 pset = choose_next_pset(pset);
4073
4074 return (pset);
4075 }
4076
4077 /*
4078 * Check for a preemption point in
4079 * the current context.
4080 *
4081 * Called at splsched with thread locked.
4082 */
4083 ast_t
4084 csw_check(
4085 processor_t processor,
4086 ast_t check_reason)
4087 {
4088 processor_set_t pset = processor->processor_set;
4089 ast_t result;
4090
4091 pset_lock(pset);
4092
4093 /* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */
4094 bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);
4095
4096 result = csw_check_locked(processor, pset, check_reason);
4097
4098 pset_unlock(pset);
4099
4100 return result;
4101 }
4102
4103 /*
4104 * Check for preemption at splsched with
4105 * pset and thread locked
4106 */
4107 ast_t
4108 csw_check_locked(
4109 processor_t processor,
4110 processor_set_t pset,
4111 ast_t check_reason)
4112 {
4113 ast_t result;
4114 thread_t thread = processor->active_thread;
4115
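/*
 * Pending realtime work always requests a preemption; it is urgent
 * unless this processor has finished its first timeslice and is
 * itself already running at realtime priority.
 */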
4116 if (processor->first_timeslice) {
4117 if (rt_runq_count(pset) > 0)
4118 return (check_reason | AST_PREEMPT | AST_URGENT);
4119 }
4120 else {
4121 if (rt_runq_count(pset) > 0) {
4122 if (BASEPRI_RTQUEUES > processor->current_pri)
4123 return (check_reason | AST_PREEMPT | AST_URGENT);
4124 else
4125 return (check_reason | AST_PREEMPT);
4126 }
4127 }
4128
4129 result = SCHED(processor_csw_check)(processor);
4130 if (result != AST_NONE)
4131 return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE));
4132
4133 #if __SMP__
4134
4135 /*
4136 * If the current thread is running on a processor that is no longer recommended,
4137 * gently (non-urgently) get it to a preemption point and block, at which point
4138 * thread_select() should try to idle the processor and re-dispatch the thread to a recommended processor.
4139 */
4140 if (!processor->is_recommended) {
4141 return (check_reason | AST_PREEMPT);
4142 }
4143
4144 /*
4145 * Same for avoid-processor
4146 *
4147 * TODO: Should these set AST_REBALANCE?
4148 */
4149 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
4150 return (check_reason | AST_PREEMPT);
4151 }
4152
4153 /*
4154 * Even though we could continue executing on this processor, a
4155 * secondary SMT core should try to shed load to another primary core.
4156 *
4157 * TODO: Should this do the same check that thread_select does? i.e.
4158 * if no bound threads target this processor, and idle primaries exist, preempt
4159 * The case of RT threads existing is already taken care of above
4160 * Consider Capri in this scenario.
4161 *
4162 * if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue))
4163 *
4164 * TODO: Alternatively - check if only primary is idle, or check if primary's pri is lower than mine.
4165 */
4166
4167 if (processor->current_pri < BASEPRI_RTQUEUES &&
4168 processor->processor_primary != processor)
4169 return (check_reason | AST_PREEMPT);
4170 #endif
4171
4172 if (thread->state & TH_SUSP)
4173 return (check_reason | AST_PREEMPT);
4174
4175 #if CONFIG_SCHED_SFI
4176 /*
4177 * The current thread may not need to be preempted, but it may need
4178 * an SFI wait.
4179 */
4180 result = sfi_thread_needs_ast(thread, NULL);
4181 if (result != AST_NONE)
4182 return (check_reason | result);
4183 #endif
4184
4185 return (AST_NONE);
4186 }
4187
4188 /*
4189 * set_sched_pri:
4190 *
4191 * Set the scheduled priority of the specified thread.
4192 *
4193 * This may cause the thread to change queues.
4194 *
4195 * Thread must be locked.
4196 */
4197 void
4198 set_sched_pri(
4199 thread_t thread,
4200 int new_priority)
4201 {
4202 thread_t cthread = current_thread();
4203 boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE;
4204 int curgency, nurgency;
4205 uint64_t urgency_param1, urgency_param2;
4206 boolean_t removed_from_runq = FALSE;
4207
4208 int old_priority = thread->sched_pri;
4209
4210 /* If we're already at this priority, no need to mess with the runqueue */
4211 if (new_priority == old_priority)
4212 return;
4213
4214 if (is_current_thread) {
4215 assert(thread->runq == PROCESSOR_NULL);
4216 curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4217 } else {
4218 removed_from_runq = thread_run_queue_remove(thread);
4219 }
4220
4221 thread->sched_pri = new_priority;
4222
4223 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
4224 (uintptr_t)thread_tid(thread),
4225 thread->base_pri,
4226 thread->sched_pri,
4227 0, /* eventually, 'reason' */
4228 0);
4229
4230 if (is_current_thread) {
4231 nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4232 /*
4233 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4234 * class alterations from user space to occur relatively infrequently, hence
4235 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4236 * inheritance is expected to involve priority changes.
4237 */
4238 uint64_t ctime = mach_approximate_time();
4239 if (nurgency != curgency) {
4240 thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
4241 }
4242 machine_thread_going_on_core(thread, nurgency, 0, 0, ctime);
4243 }
4244
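/*
 * If the thread was pulled off a run queue above, requeue it at its new
 * priority; otherwise, if it is running, re-evaluate whether the change
 * warrants a preemption on its processor.
 */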
4245 if (removed_from_runq)
4246 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4247 else if (thread->state & TH_RUN) {
4248 processor_t processor = thread->last_processor;
4249
4250 if (is_current_thread) {
4251 processor_state_update_from_thread(processor, thread);
4252
4253 /*
4254 * When dropping in priority, check if the thread no longer belongs on core.
4255 * If a thread raises its own priority, don't aggressively rebalance it.
4256 * <rdar://problem/31699165>
4257 */
4258 if (new_priority < old_priority) {
4259 ast_t preempt;
4260
4261 if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
4262 ast_on(preempt);
4263 }
4264 } else if (processor != PROCESSOR_NULL && processor->active_thread == thread) {
4265 cause_ast_check(processor);
4266 }
4267 }
4268 }
4269
4270 /*
4271 * thread_run_queue_remove_for_handoff
4272 *
4273 * Pull a thread or its (recursive) push target out of the runqueue
4274 * so that it is ready for thread_run()
4275 *
4276 * Called at splsched
4277 *
4278 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4279 * This may be different than the thread that was passed in.
4280 */
4281 thread_t
4282 thread_run_queue_remove_for_handoff(thread_t thread) {
4283
4284 thread_t pulled_thread = THREAD_NULL;
4285
4286 thread_lock(thread);
4287
4288 /*
4289 * Check that the thread is not bound
4290 * to a different processor, and that realtime
4291 * is not involved.
4292 *
4293 * Next, pull it off its run queue. If it
4294 * doesn't come, it's not eligible.
4295 */
4296
4297 processor_t processor = current_processor();
4298 if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
4299 (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
4300
4301 if (thread_run_queue_remove(thread))
4302 pulled_thread = thread;
4303 }
4304
4305 thread_unlock(thread);
4306
4307 return pulled_thread;
4308 }
4309
4310 /*
4311 * thread_run_queue_remove:
4312 *
4313 * Remove a thread from its current run queue and
4314 * return TRUE if successful.
4315 *
4316 * Thread must be locked.
4317 *
4318 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4319 * run queues because the caller locked the thread. Otherwise
4320 * the thread is on a run queue, but could be chosen for dispatch
4321 * and removed by another processor under a different lock, which
4322 * will set thread->runq to PROCESSOR_NULL.
4323 *
4324 * Hence the thread select path must not rely on anything that could
4325 * be changed under the thread lock after calling this function,
4326 * most importantly thread->sched_pri.
4327 */
4328 boolean_t
4329 thread_run_queue_remove(
4330 thread_t thread)
4331 {
4332 boolean_t removed = FALSE;
4333 processor_t processor = thread->runq;
4334
4335 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) {
4336 /* Thread isn't runnable */
4337 assert(thread->runq == PROCESSOR_NULL);
4338 return FALSE;
4339 }
4340
4341 if (processor == PROCESSOR_NULL) {
4342 /*
4343 * The thread is either not on the runq,
4344 * or is in the midst of being removed from the runq.
4345 *
4346 * runq is set to NULL under the pset lock, not the thread
4347 * lock, so the thread may still be in the process of being dequeued
4348 * from the runq. It will wait in thread_invoke() for the thread lock to be
4349 * dropped.
4350 */
4351
4352 return FALSE;
4353 }
4354
4355 if (thread->sched_pri < BASEPRI_RTQUEUES) {
4356 return SCHED(processor_queue_remove)(processor, thread);
4357 }
4358
4359 processor_set_t pset = processor->processor_set;
4360
4361 rt_lock_lock(pset);
4362
4363 if (thread->runq != PROCESSOR_NULL) {
4364 /*
4365 * Thread is on the RT run queue and we have a lock on
4366 * that run queue.
4367 */
4368
4369 remqueue(&thread->runq_links);
4370 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
4371 rt_runq_count_decr(pset);
4372
4373 thread->runq = PROCESSOR_NULL;
4374
4375 removed = TRUE;
4376 }
4377
4378 rt_lock_unlock(pset);
4379
4380 return (removed);
4381 }
4382
4383 /*
4384 * Put the thread back where it goes after a thread_run_queue_remove
4385 *
4386 * Thread must have been removed under the same thread lock hold
4387 *
4388 * thread locked, at splsched
4389 */
4390 void
4391 thread_run_queue_reinsert(thread_t thread, integer_t options)
4392 {
4393 assert(thread->runq == PROCESSOR_NULL);
4394 assert(thread->state & (TH_RUN));
4395
4396 thread_setrun(thread, options);
4397 }
4398
4399 void
4400 sys_override_cpu_throttle(int flag)
4401 {
4402 if (flag == CPU_THROTTLE_ENABLE)
4403 cpu_throttle_enabled = 1;
4404 if (flag == CPU_THROTTLE_DISABLE)
4405 cpu_throttle_enabled = 0;
4406 }
4407
4408 int
4409 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
4410 {
4411 if (thread == NULL || (thread->state & TH_IDLE)) {
4412 *arg1 = 0;
4413 *arg2 = 0;
4414
4415 return (THREAD_URGENCY_NONE);
4416 } else if (thread->sched_mode == TH_MODE_REALTIME) {
4417 *arg1 = thread->realtime.period;
4418 *arg2 = thread->realtime.deadline;
4419
4420 return (THREAD_URGENCY_REAL_TIME);
4421 } else if (cpu_throttle_enabled &&
4422 ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
4423 /*
4424 * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted
4425 */
4426 *arg1 = thread->sched_pri;
4427 *arg2 = thread->base_pri;
4428
4429 return (THREAD_URGENCY_BACKGROUND);
4430 } else {
4431 /* For otherwise unclassified threads, report throughput QoS
4432 * parameters
4433 */
4434 *arg1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
4435 *arg2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
4436
4437 return (THREAD_URGENCY_NORMAL);
4438 }
4439 }
4440
4441 perfcontrol_class_t
4442 thread_get_perfcontrol_class(thread_t thread)
4443 {
4444 /* Special case handling */
4445 if (thread->state & TH_IDLE)
4446 return PERFCONTROL_CLASS_IDLE;
4447 if (thread->task == kernel_task)
4448 return PERFCONTROL_CLASS_KERNEL;
4449 if (thread->sched_mode == TH_MODE_REALTIME)
4450 return PERFCONTROL_CLASS_REALTIME;
4451
4452 /* perfcontrol_class based on base_pri */
4453 if (thread->base_pri <= MAXPRI_THROTTLE)
4454 return PERFCONTROL_CLASS_BACKGROUND;
4455 else if (thread->base_pri <= BASEPRI_UTILITY)
4456 return PERFCONTROL_CLASS_UTILITY;
4457 else if (thread->base_pri <= BASEPRI_DEFAULT)
4458 return PERFCONTROL_CLASS_NONUI;
4459 else if (thread->base_pri <= BASEPRI_FOREGROUND)
4460 return PERFCONTROL_CLASS_UI;
4461 else
4462 return PERFCONTROL_CLASS_ABOVEUI;
4463 }
4464
4465 /*
4466 * This is the processor idle loop, which just looks for other threads
4467 * to execute. Dedicated processor idle threads invoke this with THREAD_NULL;
4468 * other callers may supply a current thread to idle in place without an asserted wait state.
4469 *
4470 * Returns the next thread to execute if one was dispatched directly.
4471 */
4472
4473 #if 0
4474 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
4475 #else
4476 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
4477 #endif
4478
4479 thread_t
4480 processor_idle(
4481 thread_t thread,
4482 processor_t processor)
4483 {
4484 processor_set_t pset = processor->processor_set;
4485 thread_t new_thread;
4486 int state;
4487 (void)splsched();
4488
4489 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4490 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START,
4491 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
4492
4493 SCHED_STATS_CPU_IDLE_START(processor);
4494
4495 timer_switch(&PROCESSOR_DATA(processor, system_state),
4496 mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
4497 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
4498
4499 while (1) {
4500 /*
4501 * Ensure that updates to my processor and pset state,
4502 * made by the IPI source processor before sending the IPI,
4503 * are visible on this processor now (even though we don't
4504 * take the pset lock yet).
4505 */
4506 atomic_thread_fence(memory_order_acquire);
4507
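/*
 * Exit the idle loop if this processor has been dispatched or shut down,
 * a remote AST is pending, or there is work this processor may be
 * expected to pick up (realtime work when recommended, or bound threads
 * when not recommended).
 */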
4508 if (processor->state != PROCESSOR_IDLE)
4509 break;
4510 if (bit_test(pset->pending_AST_cpu_mask, processor->cpu_id))
4511 break;
4512 #if defined(CONFIG_SCHED_DEFERRED_AST)
4513 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id))
4514 break;
4515 #endif
4516 if (processor->is_recommended) {
4517 if (rt_runq_count(pset))
4518 break;
4519 } else {
4520 if (SCHED(processor_bound_count)(processor))
4521 break;
4522 }
4523
4524 #if CONFIG_SCHED_IDLE_IN_PLACE
4525 if (thread != THREAD_NULL) {
4526 /* Did the idle-in-place thread wake up? */
4527 if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active)
4528 break;
4529 }
4530 #endif
4531
4532 IDLE_KERNEL_DEBUG_CONSTANT(
4533 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
4534
4535 machine_track_platform_idle(TRUE);
4536
4537 machine_idle();
4538
4539 machine_track_platform_idle(FALSE);
4540
4541 (void)splsched();
4542
4543 IDLE_KERNEL_DEBUG_CONSTANT(
4544 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
4545
4546 if (!SCHED(processor_queue_empty)(processor)) {
4547 /* Secondary SMT processors respond to directed wakeups
4548 * exclusively. Some platforms induce 'spurious' SMT wakeups.
4549 */
4550 if (processor->processor_primary == processor)
4551 break;
4552 }
4553 }
4554
4555 timer_switch(&PROCESSOR_DATA(processor, idle_state),
4556 mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
4557 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
4558
4559 pset_lock(pset);
4560
4561 /* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */
4562 bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);
4563 #if defined(CONFIG_SCHED_DEFERRED_AST)
4564 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
4565 #endif
4566
4567 state = processor->state;
4568 if (state == PROCESSOR_DISPATCHING) {
4569 /*
4570 * Common case -- cpu dispatched.
4571 */
4572 new_thread = processor->next_thread;
4573 processor->next_thread = THREAD_NULL;
4574 processor->state = PROCESSOR_RUNNING;
4575
4576 if ((new_thread != THREAD_NULL) && (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
4577 (rt_runq_count(pset) > 0)) ) {
4578 /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
4579 processor_state_update_idle(processor);
4580 processor->deadline = UINT64_MAX;
4581
4582 pset_unlock(pset);
4583
4584 thread_lock(new_thread);
4585 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq_count(pset), 0, 0);
4586 thread_setrun(new_thread, SCHED_HEADQ);
4587 thread_unlock(new_thread);
4588
4589 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4590 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4591 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4592
4593 return (THREAD_NULL);
4594 }
4595
4596 sched_update_pset_load_average(pset);
4597
4598 pset_unlock(pset);
4599
4600 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4601 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4602 (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
4603
4604 return (new_thread);
4605
4606 } else if (state == PROCESSOR_IDLE) {
4607 re_queue_tail(&pset->active_queue, &processor->processor_queue);
4608
4609 pset->active_processor_count++;
4610 sched_update_pset_load_average(pset);
4611
4612 processor->state = PROCESSOR_RUNNING;
4613 processor_state_update_idle(processor);
4614 processor->deadline = UINT64_MAX;
4615
4616 } else if (state == PROCESSOR_SHUTDOWN) {
4617 /*
4618 * Going off-line. Force a
4619 * reschedule.
4620 */
4621 if ((new_thread = processor->next_thread) != THREAD_NULL) {
4622 processor->next_thread = THREAD_NULL;
4623 processor_state_update_idle(processor);
4624 processor->deadline = UINT64_MAX;
4625
4626 pset_unlock(pset);
4627
4628 thread_lock(new_thread);
4629 thread_setrun(new_thread, SCHED_HEADQ);
4630 thread_unlock(new_thread);
4631
4632 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4633 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4634 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4635
4636 return (THREAD_NULL);
4637 }
4638 }
4639
4640 pset_unlock(pset);
4641
4642 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4643 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4644 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4645
4646 return (THREAD_NULL);
4647 }
4648
4649 /*
4650 * Each processor has a dedicated thread which
4651 * executes the idle loop when there is no suitable
4652 * previous context.
4653 */
4654 void
4655 idle_thread(void)
4656 {
4657 processor_t processor = current_processor();
4658 thread_t new_thread;
4659
4660 new_thread = processor_idle(THREAD_NULL, processor);
4661 if (new_thread != THREAD_NULL) {
4662 thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
4663 /*NOTREACHED*/
4664 }
4665
4666 thread_block((thread_continue_t)idle_thread);
4667 /*NOTREACHED*/
4668 }
4669
4670 kern_return_t
4671 idle_thread_create(
4672 processor_t processor)
4673 {
4674 kern_return_t result;
4675 thread_t thread;
4676 spl_t s;
4677 char name[MAXTHREADNAMESIZE];
4678
4679 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
4680 if (result != KERN_SUCCESS)
4681 return (result);
4682
4683 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
4684 thread_set_thread_name(thread, name);
4685
4686 s = splsched();
4687 thread_lock(thread);
4688 thread->bound_processor = processor;
4689 processor->idle_thread = thread;
4690 thread->sched_pri = thread->base_pri = IDLEPRI;
4691 thread->state = (TH_RUN | TH_IDLE);
4692 thread->options |= TH_OPT_IDLE_THREAD;
4693 thread_unlock(thread);
4694 splx(s);
4695
4696 thread_deallocate(thread);
4697
4698 return (KERN_SUCCESS);
4699 }
4700
4701 /*
4702 * sched_startup:
4703 *
4704 * Kicks off scheduler services.
4705 *
4706 * Called at splsched.
4707 */
4708 void
4709 sched_startup(void)
4710 {
4711 kern_return_t result;
4712 thread_t thread;
4713
4714 simple_lock_init(&sched_vm_group_list_lock, 0);
4715
4716 #if __arm__ || __arm64__
4717 simple_lock_init(&sched_recommended_cores_lock, 0);
4718 #endif /* __arm__ || __arm64__ */
4719
4720 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
4721 (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
4722 if (result != KERN_SUCCESS)
4723 panic("sched_startup");
4724
4725 thread_deallocate(thread);
4726
4727 assert_thread_magic(thread);
4728
4729 /*
4730 * Yield to the sched_init_thread once, to
4731 * initialize our own thread after being switched
4732 * back to.
4733 *
4734 * The current thread is the only other thread
4735 * active at this point.
4736 */
4737 thread_block(THREAD_CONTINUE_NULL);
4738 }
4739
4740 #if __arm64__
4741 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
4742 #endif /* __arm64__ */
4743
4744
4745 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4746
4747 static volatile uint64_t sched_maintenance_deadline;
4748 static uint64_t sched_tick_last_abstime;
4749 static uint64_t sched_tick_delta;
4750 uint64_t sched_tick_max_delta;
4751
4752
4753 /*
4754 * sched_timeshare_maintenance_continue:
4755 *
4756 * Perform periodic bookkeeping functions about ten
4757 * times per second.
4758 */
4759 void
4760 sched_timeshare_maintenance_continue(void)
4761 {
4762 uint64_t sched_tick_ctime, late_time;
4763
4764 struct sched_update_scan_context scan_context = {
4765 .earliest_bg_make_runnable_time = UINT64_MAX,
4766 .earliest_normal_make_runnable_time = UINT64_MAX,
4767 .earliest_rt_make_runnable_time = UINT64_MAX
4768 };
4769
4770 sched_tick_ctime = mach_absolute_time();
4771
4772 if (__improbable(sched_tick_last_abstime == 0)) {
4773 sched_tick_last_abstime = sched_tick_ctime;
4774 late_time = 0;
4775 sched_tick_delta = 1;
4776 } else {
4777 late_time = sched_tick_ctime - sched_tick_last_abstime;
4778 sched_tick_delta = late_time / sched_tick_interval;
4779 /* Ensure a delta of at least 1, since the interval could be slightly
4780 * smaller than the sched_tick_interval due to dispatch
4781 * latencies.
4782 */
4783 sched_tick_delta = MAX(sched_tick_delta, 1);
4784
4785 /* In the event that interrupt latencies or platform
4786 * idle events advanced the timebase through periods
4787 * where no threads were dispatched, cap the
4788 * "tick delta" at SCHED_TICK_MAX_DELTA
4789 * iterations.
4790 */
4791 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
4792
4793 sched_tick_last_abstime = sched_tick_ctime;
4794 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
4795 }
4796
4797 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_START,
4798 sched_tick_delta, late_time, 0, 0, 0);
4799
4800 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
4801 * This could be greater than 1 if there were substantial intervals during
4802 * which all processors were idle, which is rare in practice.
4803 */
4804
4805 sched_tick += sched_tick_delta;
4806
4807 update_vm_info();
4808
4809 /*
4810 * Compute various averages.
4811 */
4812 compute_averages(sched_tick_delta);
4813
4814 /*
4815 * Scan the run queues for threads which
4816 * may need to be updated, and find the earliest runnable thread on the runqueue
4817 * to report its latency.
4818 */
4819 SCHED(thread_update_scan)(&scan_context);
4820
4821 SCHED(rt_runq_scan)(&scan_context);
4822
4823 uint64_t ctime = mach_absolute_time();
4824
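/*
 * Report, per scheduling band, how long the earliest still-runnable
 * thread observed by the scans above has been waiting for a processor.
 */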
4825 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
4826 ctime - scan_context.earliest_bg_make_runnable_time : 0;
4827
4828 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
4829 ctime - scan_context.earliest_normal_make_runnable_time : 0;
4830
4831 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
4832 ctime - scan_context.earliest_rt_make_runnable_time : 0;
4833
4834 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
4835
4836 /*
4837 * Check to see if the special sched VM group needs attention.
4838 */
4839 sched_vm_group_maintenance();
4840
4841 #if __arm__ || __arm64__
4842 /* Check to see if the recommended cores failsafe is active */
4843 sched_recommended_cores_maintenance();
4844 #endif /* __arm__ || __arm64__ */
4845
4846
4847 #if DEBUG || DEVELOPMENT
4848 #if __x86_64__
4849 #include <i386/misc_protos.h>
4850 /* Check for long-duration interrupts */
4851 mp_interrupt_watchdog();
4852 #endif /* __x86_64__ */
4853 #endif /* DEBUG || DEVELOPMENT */
4854
4855 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
4856 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
4857 sched_pri_shifts[TH_BUCKET_SHARE_UT], 0, 0);
4858
4859 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
4860 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
4861 /*NOTREACHED*/
4862 }
4863
4864 static uint64_t sched_maintenance_wakeups;
4865
4866 /*
4867 * Determine if the set of routines formerly driven by a maintenance timer
4868 * must be invoked, based on a deadline comparison. Signals the scheduler
4869 * maintenance thread on deadline expiration. Must be invoked at an interval
4870 * lower than the "sched_tick_interval", currently accomplished by
4871 * invocation via the quantum expiration timer and at context switch time.
4872 * Performance matters: this routine reuses a timestamp approximating the
4873 * current absolute time received from the caller, and should perform
4874 * no more than a comparison against the deadline in the common case.
4875 */
4876 void
4877 sched_timeshare_consider_maintenance(uint64_t ctime) {
4878 uint64_t ndeadline, deadline = sched_maintenance_deadline;
4879
4880 if (__improbable(ctime >= deadline)) {
4881 if (__improbable(current_thread() == sched_maintenance_thread))
4882 return;
4883 OSMemoryBarrier();
4884
4885 ndeadline = ctime + sched_tick_interval;
4886
4887 if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
4888 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
4889 sched_maintenance_wakeups++;
4890 }
4891 }
4892
4893 #if __arm64__
4894 uint64_t perf_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline, memory_order_relaxed);
4895
4896 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
4897 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
4898 if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline, &perf_deadline, 0,
4899 memory_order_relaxed, memory_order_relaxed)) {
4900 machine_perfcontrol_deadline_passed(perf_deadline);
4901 }
4902 }
4903 #endif /* __arm64__ */
4904
4905 }
4906
4907 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
4908
4909 void
4910 sched_init_thread(void (*continuation)(void))
4911 {
4912 thread_block(THREAD_CONTINUE_NULL);
4913
4914 thread_t thread = current_thread();
4915
4916 thread_set_thread_name(thread, "sched_maintenance_thread");
4917
4918 sched_maintenance_thread = thread;
4919
4920 continuation();
4921
4922 /*NOTREACHED*/
4923 }
4924
4925 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4926
4927 /*
4928 * thread_update_scan / runq_scan:
4929 *
4930 * Scan the run queues to account for timesharing threads
4931 * which need to be updated.
4932 *
4933 * Scanner runs in two passes. Pass one squirrels likely
4934 * threads away in an array, pass two does the update.
4935 *
4936 * This is necessary because the run queue is locked for
4937 * the candidate scan, but the thread is locked for the update.
4938 *
4939 * Array should be sized to make forward progress, without
4940 * disabling preemption for long periods.
4941 */
4942
4943 #define THREAD_UPDATE_SIZE 128
4944
4945 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
4946 static uint32_t thread_update_count = 0;
4947
4948 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
4949 boolean_t
4950 thread_update_add_thread(thread_t thread)
4951 {
4952 if (thread_update_count == THREAD_UPDATE_SIZE)
4953 return (FALSE);
4954
4955 thread_update_array[thread_update_count++] = thread;
4956 thread_reference_internal(thread);
4957 return (TRUE);
4958 }
4959
4960 void
4961 thread_update_process_threads(void)
4962 {
4963 assert(thread_update_count <= THREAD_UPDATE_SIZE);
4964
4965 for (uint32_t i = 0 ; i < thread_update_count ; i++) {
4966 thread_t thread = thread_update_array[i];
4967 assert_thread_magic(thread);
4968 thread_update_array[i] = THREAD_NULL;
4969
4970 spl_t s = splsched();
4971 thread_lock(thread);
4972 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
4973 SCHED(update_priority)(thread);
4974 }
4975 thread_unlock(thread);
4976 splx(s);
4977
4978 thread_deallocate(thread);
4979 }
4980
4981 thread_update_count = 0;
4982 }
4983
4984 /*
4985 * Scan a runq for candidate threads.
4986 *
4987 * Returns TRUE if retry is needed.
4988 */
4989 boolean_t
4990 runq_scan(
4991 run_queue_t runq,
4992 sched_update_scan_context_t scan_context)
4993 {
4994 int count = runq->count;
4995 int queue_index;
4996
4997 assert(count >= 0);
4998
4999 if (count == 0)
5000 return FALSE;
5001
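/* Walk only the priority levels whose queues are non-empty, per the runq bitmap */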
5002 for (queue_index = bitmap_first(runq->bitmap, NRQS);
5003 queue_index >= 0;
5004 queue_index = bitmap_next(runq->bitmap, queue_index)) {
5005
5006 thread_t thread;
5007 queue_t queue = &runq->queues[queue_index];
5008
5009 qe_foreach_element(thread, queue, runq_links) {
5010 assert(count > 0);
5011 assert_thread_magic(thread);
5012
5013 if (thread->sched_stamp != sched_tick &&
5014 thread->sched_mode == TH_MODE_TIMESHARE) {
5015 if (thread_update_add_thread(thread) == FALSE)
5016 return TRUE;
5017 }
5018
5019 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5020 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5021 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5022 }
5023 } else {
5024 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5025 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5026 }
5027 }
5028 count--;
5029 }
5030 }
5031
5032 return FALSE;
5033 }
5034
5035 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5036
5037 boolean_t
5038 thread_eager_preemption(thread_t thread)
5039 {
5040 return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
5041 }
5042
5043 void
5044 thread_set_eager_preempt(thread_t thread)
5045 {
5046 spl_t x;
5047 processor_t p;
5048 ast_t ast = AST_NONE;
5049
5050 x = splsched();
5051 p = current_processor();
5052
5053 thread_lock(thread);
5054 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5055
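/*
 * If the target is the current thread, take the preemption check
 * immediately; otherwise nudge the processor it last ran on so it
 * notices the eager-preempt flag.
 */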
5056 if (thread == current_thread()) {
5057
5058 ast = csw_check(p, AST_NONE);
5059 thread_unlock(thread);
5060 if (ast != AST_NONE) {
5061 (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
5062 }
5063 } else {
5064 p = thread->last_processor;
5065
5066 if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
5067 p->active_thread == thread) {
5068 cause_ast_check(p);
5069 }
5070
5071 thread_unlock(thread);
5072 }
5073
5074 splx(x);
5075 }
5076
5077 void
5078 thread_clear_eager_preempt(thread_t thread)
5079 {
5080 spl_t x;
5081
5082 x = splsched();
5083 thread_lock(thread);
5084
5085 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5086
5087 thread_unlock(thread);
5088 splx(x);
5089 }
5090
5091 /*
5092 * Scheduling statistics
5093 */
5094 void
5095 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5096 {
5097 struct processor_sched_statistics *stats;
5098 boolean_t to_realtime = FALSE;
5099
5100 stats = &processor->processor_data.sched_stats;
5101 stats->csw_count++;
5102
5103 if (otherpri >= BASEPRI_REALTIME) {
5104 stats->rt_sched_count++;
5105 to_realtime = TRUE;
5106 }
5107
5108 if ((reasons & AST_PREEMPT) != 0) {
5109 stats->preempt_count++;
5110
5111 if (selfpri >= BASEPRI_REALTIME) {
5112 stats->preempted_rt_count++;
5113 }
5114
5115 if (to_realtime) {
5116 stats->preempted_by_rt_count++;
5117 }
5118
5119 }
5120 }
5121
5122 void
5123 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
5124 {
5125 uint64_t timestamp = mach_absolute_time();
5126
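/* Time-weighted occupancy: credit the previous depth for the interval since the last change */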
5127 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5128 stats->last_change_timestamp = timestamp;
5129 }
5130
5131 /*
5132 * For calls from assembly code
5133 */
5134 #undef thread_wakeup
5135 void
5136 thread_wakeup(
5137 event_t x);
5138
5139 void
5140 thread_wakeup(
5141 event_t x)
5142 {
5143 thread_wakeup_with_result(x, THREAD_AWAKENED);
5144 }
5145
5146 boolean_t
5147 preemption_enabled(void)
5148 {
5149 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
5150 }
5151
5152 static void
5153 sched_timer_deadline_tracking_init(void) {
5154 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5155 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5156 }
5157
5158 #if __arm__ || __arm64__
5159
5160 uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5161 uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
5162 boolean_t perfcontrol_failsafe_active = FALSE;
5163
5164 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5165 uint64_t perfcontrol_failsafe_activation_time;
5166 uint64_t perfcontrol_failsafe_deactivation_time;
5167
5168 /* data covering who likely caused it and how long they ran */
5169 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5170 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
5171 int perfcontrol_failsafe_pid;
5172 uint64_t perfcontrol_failsafe_tid;
5173 uint64_t perfcontrol_failsafe_thread_timer_at_start;
5174 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
5175 uint32_t perfcontrol_failsafe_recommended_at_trigger;
5176
5177 /*
5178 * Perf controller calls here to update the recommended core bitmask.
5179 * If the failsafe is active, we don't immediately apply the new value.
5180 * Instead, we store the new request and use it after the failsafe deactivates.
5181 *
5182 * If the failsafe is not active, immediately apply the update.
5183 *
5184 * No scheduler locks are held, no other locks are held that scheduler might depend on,
5185 * interrupts are enabled
5186 *
5187 * The prototype is currently in osfmk/arm/machine_routines.h
5188 */
5189 void
5190 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
5191 {
5192 assert(preemption_enabled());
5193
5194 spl_t s = splsched();
5195 simple_lock(&sched_recommended_cores_lock);
5196
5197 perfcontrol_requested_recommended_cores = recommended_cores;
5198 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
5199
5200 if (perfcontrol_failsafe_active == FALSE)
5201 sched_update_recommended_cores(perfcontrol_requested_recommended_cores);
5202 else
5203 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5204 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5205 perfcontrol_requested_recommended_cores,
5206 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5207
5208 simple_unlock(&sched_recommended_cores_lock);
5209 splx(s);
5210 }
5211
5212 /*
5213 * Consider whether we need to activate the recommended cores failsafe
5214 *
5215 * Called from quantum timer interrupt context of a realtime thread
5216 * No scheduler locks are held, interrupts are disabled
5217 */
5218 void
5219 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
5220 {
5221 /*
5222 * Check if a realtime thread is starving the system
5223 * and bringing up non-recommended cores would help
5224 *
5225 * TODO: Is this the correct check for recommended == possible cores?
5226 * TODO: Validate the checks without the relevant lock are OK.
5227 */
5228
5229 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
5230 /* keep track of how long the responsible thread runs */
5231
5232 simple_lock(&sched_recommended_cores_lock);
5233
5234 if (perfcontrol_failsafe_active == TRUE &&
5235 cur_thread->thread_id == perfcontrol_failsafe_tid) {
5236 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
5237 timer_grab(&cur_thread->system_timer);
5238 }
5239
5240 simple_unlock(&sched_recommended_cores_lock);
5241
5242 /* we're already trying to solve the problem, so bail */
5243 return;
5244 }
5245
5246 /* The failsafe won't help if there are no more processors to enable */
5247 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count))
5248 return;
5249
5250 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
5251
5252 /* Use the maintenance thread as our canary in the coal mine */
5253 thread_t m_thread = sched_maintenance_thread;
5254
5255 /* If it doesn't look bad, nothing to see here */
5256 if (__probable(m_thread->last_made_runnable_time >= too_long_ago))
5257 return;
5258
5259 /* It looks bad, take the lock to be sure */
5260 thread_lock(m_thread);
5261
5262 if (m_thread->runq == PROCESSOR_NULL ||
5263 (m_thread->state & (TH_RUN|TH_WAIT)) != TH_RUN ||
5264 m_thread->last_made_runnable_time >= too_long_ago) {
5265 /*
5266 * Maintenance thread is either on cpu or blocked, and
5267 * therefore wouldn't benefit from more cores
5268 */
5269 thread_unlock(m_thread);
5270 return;
5271 }
5272
5273 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
5274
5275 thread_unlock(m_thread);
5276
5277 /*
5278 * There are cores disabled at perfcontrol's recommendation, but the
5279 * system is so overloaded that the maintenance thread can't run.
5280 * That likely means that perfcontrol can't run either, so it can't fix
5281 * the recommendation. We have to kick in a failsafe to keep from starving.
5282 *
5283 * When the maintenance thread has been starved for too long,
5284 * ignore the recommendation from perfcontrol and light up all the cores.
5285 *
5286 * TODO: Consider weird states like boot, sleep, or debugger
5287 */
5288
5289 simple_lock(&sched_recommended_cores_lock);
5290
5291 if (perfcontrol_failsafe_active == TRUE) {
5292 simple_unlock(&sched_recommended_cores_lock);
5293 return;
5294 }
5295
5296 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5297 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
5298 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
5299
5300 perfcontrol_failsafe_active = TRUE;
5301 perfcontrol_failsafe_activation_time = mach_absolute_time();
5302 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
5303 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
5304
5305 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
5306 task_t task = cur_thread->task;
5307 perfcontrol_failsafe_pid = task_pid(task);
5308 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
5309
5310 perfcontrol_failsafe_tid = cur_thread->thread_id;
5311
5312 /* Blame the thread for time it has run recently */
5313 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
5314
5315 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
5316
5317 /* Compute the start time of the bad behavior in terms of the thread's on core time */
5318 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
5319 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
5320
5321 /* Ignore the previously recommended core configuration */
5322 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5323
5324 simple_unlock(&sched_recommended_cores_lock);
5325 }
5326
5327 /*
5328 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
5329 *
5330 * Runs in the context of the maintenance thread, no locks held
5331 */
5332 static void
5333 sched_recommended_cores_maintenance(void)
5334 {
5335 /* Common case - no failsafe, nothing to be done here */
5336 if (__probable(perfcontrol_failsafe_active == FALSE))
5337 return;
5338
5339 uint64_t ctime = mach_absolute_time();
5340
5341 boolean_t print_diagnostic = FALSE;
5342 char p_name[FAILSAFE_NAME_LEN] = "";
5343
5344 spl_t s = splsched();
5345 simple_lock(&sched_recommended_cores_lock);
5346
5347 /* Check again, under the lock, to avoid races */
5348 if (perfcontrol_failsafe_active == FALSE)
5349 goto out;
5350
5351 /*
5352 * Ensure that the other cores get another few ticks to run some threads
5353 * If we don't have this hysteresis, the maintenance thread is the first
5354 * to run, and then it immediately kills the other cores
5355 */
5356 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold)
5357 goto out;
5358
5359 /* Capture some diagnostic state under the lock so we can print it out later */
5360
5361 int pid = perfcontrol_failsafe_pid;
5362 uint64_t tid = perfcontrol_failsafe_tid;
5363
5364 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
5365 perfcontrol_failsafe_thread_timer_at_start;
5366 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
5367 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
5368 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
5369 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
5370
5371 print_diagnostic = TRUE;
5372
5373 /* Deactivate the failsafe and reinstate the requested recommendation settings */
5374
5375 perfcontrol_failsafe_deactivation_time = ctime;
5376 perfcontrol_failsafe_active = FALSE;
5377
5378 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5379 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
5380 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
5381
5382 sched_update_recommended_cores(perfcontrol_requested_recommended_cores);
5383
5384 out:
5385 simple_unlock(&sched_recommended_cores_lock);
5386 splx(s);
5387
5388 if (print_diagnostic) {
5389 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
5390
5391 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
5392 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
5393
5394 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
5395 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
5396
5397 printf("recommended core failsafe kicked in for %lld ms "
5398 "likely due to %s[%d] thread 0x%llx spending "
5399 "%lld ms on cpu at realtime priority - "
5400 "new recommendation: 0x%x -> 0x%x\n",
5401 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
5402 rec_cores_before, rec_cores_after);
5403 }
5404 }
5405
5406 /*
5407 * Apply a new recommended cores mask to the processors it affects
5408 * Runs after considering failsafes and such
5409 *
5410 * Iterate over processors and update their ->is_recommended field.
5411 * If a processor is running, we let it drain out at its next
5412 * quantum expiration or blocking point. If a processor is idle, there
5413 * may be more work for it to do, so IPI it.
5414 *
5415 * interrupts disabled, sched_recommended_cores_lock is held
5416 */
5417 static void
5418 sched_update_recommended_cores(uint32_t recommended_cores)
5419 {
5420 processor_set_t pset, nset;
5421 processor_t processor;
5422 uint64_t needs_exit_idle_mask = 0x0;
5423
5424 processor = processor_list;
5425 pset = processor->processor_set;
5426
5427 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5428 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
5429 recommended_cores, perfcontrol_failsafe_active, 0, 0, 0);
5430
5431 if (__builtin_popcount(recommended_cores) == 0) {
5432 recommended_cores |= 0x1U; /* add boot processor or we hang */
5433 }
5434
5435 /* First set recommended cores */
5436 pset_lock(pset);
5437 do {
5438
5439 nset = processor->processor_set;
5440 if (nset != pset) {
5441 pset_unlock(pset);
5442 pset = nset;
5443 pset_lock(pset);
5444 }
5445
5446 pset->recommended_bitmask = recommended_cores;
5447
5448 if (recommended_cores & (1ULL << processor->cpu_id)) {
5449 processor->is_recommended = TRUE;
5450
5451 if (processor->state == PROCESSOR_IDLE) {
5452 if (processor->processor_primary == processor) {
5453 re_queue_head(&pset->idle_queue, &processor->processor_queue);
5454 } else {
5455 re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
5456 }
5457 if (processor != current_processor()) {
5458 needs_exit_idle_mask |= (1ULL << processor->cpu_id);
5459 }
5460 }
5461 }
5462 } while ((processor = processor->processor_list) != NULL);
5463 pset_unlock(pset);
5464
5465 /* Now shutdown not recommended cores */
5466 processor = processor_list;
5467 pset = processor->processor_set;
5468
5469 pset_lock(pset);
5470 do {
5471
5472 nset = processor->processor_set;
5473 if (nset != pset) {
5474 pset_unlock(pset);
5475 pset = nset;
5476 pset_lock(pset);
5477 }
5478
5479 if (!(recommended_cores & (1ULL << processor->cpu_id))) {
5480 processor->is_recommended = FALSE;
5481 if (processor->state == PROCESSOR_IDLE) {
5482 re_queue_head(&pset->unused_queue, &processor->processor_queue);
5483 }
5484 SCHED(processor_queue_shutdown)(processor);
5485 /* processor_queue_shutdown() returns with the pset unlocked */
5486
5487 SCHED(rt_queue_shutdown)(processor);
5488
5489 pset_lock(pset);
5490 }
5491 } while ((processor = processor->processor_list) != NULL);
5492 pset_unlock(pset);
5493
5494 /* Issue all pending IPIs now that the pset lock has been dropped */
5495 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
5496 processor = processor_array[cpuid];
5497 machine_signal_idle(processor);
5498 }
5499
5500 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5501 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
5502 needs_exit_idle_mask, 0, 0, 0, 0);
5503 }
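
/*
 * Illustrative sketch (not compiled): one way a caller in this file could
 * drive sched_update_recommended_cores(). The locking and spl calls mirror
 * the failsafe code above; example_apply_recommendation() and the 0x3 mask
 * are hypothetical and exist only to make the calling convention concrete.
 */
#if 0
static void
example_apply_recommendation(void)
{
	spl_t s = splsched();				/* interrupts must be disabled */
	simple_lock(&sched_recommended_cores_lock);	/* recommendation state is protected by this lock */

	/* bit N of the mask corresponds to cpu_id N; 0x3 recommends CPUs 0 and 1 */
	sched_update_recommended_cores(0x3);

	simple_unlock(&sched_recommended_cores_lock);
	splx(s);
}
#endif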
5504 #endif /* __arm__ || __arm64__ */
5505
5506 void thread_set_options(uint32_t thopt) {
5507 spl_t x;
5508 thread_t t = current_thread();
5509
5510 x = splsched();
5511 thread_lock(t);
5512
5513 t->options |= thopt;
5514
5515 thread_unlock(t);
5516 splx(x);
5517 }
5518
5519 void thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint) {
5520 thread->pending_block_hint = block_hint;
5521 }
5522
5523 uint32_t qos_max_parallelism(int qos, uint64_t options)
5524 {
5525 return SCHED(qos_max_parallelism)(qos, options);
5526 }
5527
5528 uint32_t sched_qos_max_parallelism(__unused int qos, uint64_t options)
5529 {
5530 host_basic_info_data_t hinfo;
5531 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
5532 /* Query the machine layer for core information */
5533 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
5534 (host_info_t)&hinfo, &count);
5535 assert(kret == KERN_SUCCESS);
5536
5537 /* We do not want multiple realtime threads running on the
5538 * same physical core, even on SMT-capable machines.
5539 */
5540 if (options & QOS_PARALLELISM_REALTIME) {
5541 return hinfo.physical_cpu;
5542 }
5543
5544 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
5545 return hinfo.logical_cpu;
5546 } else {
5547 return hinfo.physical_cpu;
5548 }
5549 }
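
/*
 * Illustrative sketch (not compiled): querying the parallelism width for a
 * QoS class via qos_max_parallelism(). The QOS_PARALLELISM_* options are the
 * ones handled above; the QoS constants and variable names are examples only.
 */
#if 0
static void
example_query_parallelism(void)
{
	/* Realtime work: at most one thread per physical core, even with SMT */
	uint32_t rt_width = qos_max_parallelism(THREAD_QOS_USER_INTERACTIVE,
	    QOS_PARALLELISM_REALTIME);

	/* Throughput-oriented work: may count logical (SMT) CPUs instead */
	uint32_t util_width = qos_max_parallelism(THREAD_QOS_UTILITY,
	    QOS_PARALLELISM_COUNT_LOGICAL);

	(void)rt_width;
	(void)util_width;
}
#endif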
5550
5551 #if __arm64__
5552
5553 /*
5554 * Set up a new callback deadline, replacing any existing one
5555 *
5556 * Returns TRUE if an old (nonzero) deadline was cancelled, FALSE otherwise
5557 */
5558 boolean_t
5559 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
5560 {
5561 /*
5562 * Exchange the old deadline for the new one; if the old deadline was
5563 * nonzero, a pending callback was cancelled, otherwise there was nothing to cancel
5564 */
5565
5566 uint64_t old_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline,
5567 memory_order_relaxed);
5568
5569
5570 while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline,
5571 &old_deadline, new_deadline,
5572 memory_order_relaxed, memory_order_relaxed));
5573
5574
5575 /* old_deadline now holds the value the exchange observed, which may differ from the initial load if another updater raced */
5576
5577 return (old_deadline != 0) ? TRUE : FALSE;
5578 }
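
/*
 * Illustrative sketch (not compiled): arming the perfcontrol callback roughly
 * 10ms in the future and later cancelling it with a zero deadline. The local
 * variables are hypothetical; sched_perfcontrol_update_callback_deadline() is
 * the routine above and nanoseconds_to_absolutetime() comes from kern/clock.h.
 */
#if 0
static void
example_arm_perfcontrol_callback(void)
{
	uint64_t interval, deadline;

	nanoseconds_to_absolutetime(10 * NSEC_PER_MSEC, &interval);
	deadline = mach_absolute_time() + interval;

	/* TRUE means an already-armed (nonzero) deadline was replaced */
	boolean_t replaced = sched_perfcontrol_update_callback_deadline(deadline);

	/* ... later: cancel the pending callback by writing a zero deadline */
	boolean_t was_armed = sched_perfcontrol_update_callback_deadline(0);

	(void)replaced;
	(void)was_armed;
}
#endif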
5579
5580 #endif /* __arm64__ */
5581
5582 int
5583 sched_get_pset_load_average(processor_set_t pset)
5584 {
5585 return pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
5586 }
5587
5588 void
5589 sched_update_pset_load_average(processor_set_t pset)
5590 {
5591 #if DEBUG
5592 queue_entry_t iter;
5593 int count = 0;
5594 qe_foreach(iter, &pset->active_queue) {
5595 count++;
5596 }
5597 assertf(count == pset->active_processor_count, "count %d pset->active_processor_count %d\n", count, pset->active_processor_count);
5598 #endif
5599
5600 int load = ((pset->active_processor_count + pset->pset_runq.count + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
5601 int new_load_average = (pset->load_average + load) >> 1;
5602
5603 pset->load_average = new_load_average;
5604
5605 #if (DEVELOPMENT || DEBUG)
5606 #endif
5607 }
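
/*
 * Worked example (illustrative; it assumes PSET_LOAD_NUMERATOR_SHIFT == 16 and
 * PSET_LOAD_FRACTIONAL_SHIFT == 4 -- see their definitions for the real values):
 *
 * With active_processor_count == 2, pset_runq.count == 1 and
 * rt_runq_count(pset) == 0, the instantaneous load is 3 << 16. If the previous
 * load_average was 1 << 16, the new average is ((1 << 16) + (3 << 16)) >> 1,
 * i.e. 2 << 16: each update halves the weight of history, giving a simple
 * exponentially weighted moving average.
 *
 * sched_get_pset_load_average() then shifts down by (16 - 4) == 12 and returns
 * 2 << 4 == 32, i.e. a load of 2.0 expressed with 4 fractional bits.
 */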