1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: sched_prim.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1986
62 *
63 * Scheduling primitives
64 *
65 */
66
67 #include <debug.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/machine.h>
71 #include <mach/policy.h>
72 #include <mach/sync_policy.h>
73 #include <mach/thread_act.h>
74
75 #include <machine/machine_routines.h>
76 #include <machine/sched_param.h>
77 #include <machine/machine_cpu.h>
78 #include <machine/machlimits.h>
79 #include <machine/atomic.h>
80
81 #ifdef CONFIG_MACH_APPROXIMATE_TIME
82 #include <machine/commpage.h>
83 #endif
84
85 #include <kern/kern_types.h>
86 #include <kern/backtrace.h>
87 #include <kern/clock.h>
88 #include <kern/counters.h>
89 #include <kern/cpu_number.h>
90 #include <kern/cpu_data.h>
91 #include <kern/smp.h>
92 #include <kern/debug.h>
93 #include <kern/macro_help.h>
94 #include <kern/machine.h>
95 #include <kern/misc_protos.h>
96 #if MONOTONIC
97 #include <kern/monotonic.h>
98 #endif /* MONOTONIC */
99 #include <kern/processor.h>
100 #include <kern/queue.h>
101 #include <kern/sched.h>
102 #include <kern/sched_prim.h>
103 #include <kern/sfi.h>
104 #include <kern/syscall_subr.h>
105 #include <kern/task.h>
106 #include <kern/thread.h>
107 #include <kern/ledger.h>
108 #include <kern/timer_queue.h>
109 #include <kern/waitq.h>
110 #include <kern/policy_internal.h>
111
112 #include <vm/pmap.h>
113 #include <vm/vm_kern.h>
114 #include <vm/vm_map.h>
115 #include <vm/vm_pageout.h>
116
117 #include <mach/sdt.h>
118 #include <mach/mach_host.h>
119 #include <mach/host_info.h>
120
121 #include <sys/kdebug.h>
122 #include <kperf/kperf.h>
123 #include <kern/kpc.h>
124 #include <san/kasan.h>
125 #include <kern/pms.h>
126 #include <kern/host.h>
127 #include <stdatomic.h>
128
129 int rt_runq_count(processor_set_t pset)
130 {
131 return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed);
132 }
133
134 void rt_runq_count_incr(processor_set_t pset)
135 {
136 atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
137 }
138
139 void rt_runq_count_decr(processor_set_t pset)
140 {
141 atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed);
142 }
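/*
 * Note: the RT run queue count is maintained with relaxed atomics because
 * callers such as thread_select() sample it opportunistically without holding
 * the RT lock and then re-check it under rt_lock_lock() before dequeueing
 * anything (a reading of the code below, not a formal memory-ordering claim).
 */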
143
144 #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */
145 int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
146
147 #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */
148 int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
149
150 #define MAX_UNSAFE_QUANTA 800
151 int max_unsafe_quanta = MAX_UNSAFE_QUANTA;
152
153 #define MAX_POLL_QUANTA 2
154 int max_poll_quanta = MAX_POLL_QUANTA;
155
156 #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */
157 int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT;
158
159 uint64_t max_poll_computation;
160
161 uint64_t max_unsafe_computation;
162 uint64_t sched_safe_duration;
163
164 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
165
166 uint32_t std_quantum;
167 uint32_t min_std_quantum;
168 uint32_t bg_quantum;
169
170 uint32_t std_quantum_us;
171 uint32_t bg_quantum_us;
172
173 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
174
175 uint32_t thread_depress_time;
176 uint32_t default_timeshare_computation;
177 uint32_t default_timeshare_constraint;
178
179 uint32_t max_rt_quantum;
180 uint32_t min_rt_quantum;
181
182 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
183
184 unsigned sched_tick;
185 uint32_t sched_tick_interval;
186
187 uint32_t sched_pri_shifts[TH_BUCKET_MAX];
188 uint32_t sched_fixed_shift;
189
190 uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */
191
192 /* Allow foreground to decay past default to resolve inversions */
193 #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2)
194 int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
195
196 /* Defaults for timer deadline profiling */
197 #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <=
198 * 2ms */
199 #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines
200 * <= 5ms */
201
202 uint64_t timer_deadline_tracking_bin_1;
203 uint64_t timer_deadline_tracking_bin_2;
204
205 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
206
207 thread_t sched_maintenance_thread;
208
209 #if __arm__ || __arm64__
210 /* interrupts disabled lock to guard recommended cores state */
211 decl_simple_lock_data(static,sched_recommended_cores_lock);
212 static void sched_recommended_cores_maintenance(void);
213 static void sched_update_recommended_cores(uint32_t recommended_cores);
214
215 uint64_t perfcontrol_failsafe_starvation_threshold;
216 extern char *proc_name_address(struct proc *p);
217
218 #endif /* __arm__ || __arm64__ */
219
220 uint64_t sched_one_second_interval;
221
222 /* Forwards */
223
224 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
225
226 static void load_shift_init(void);
227 static void preempt_pri_init(void);
228
229 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
230
231 #if CONFIG_SCHED_IDLE_IN_PLACE
232 static thread_t thread_select_idle(
233 thread_t thread,
234 processor_t processor);
235 #endif
236
237 thread_t processor_idle(
238 thread_t thread,
239 processor_t processor);
240
241 ast_t
242 csw_check_locked( processor_t processor,
243 processor_set_t pset,
244 ast_t check_reason);
245
246 static void processor_setrun(
247 processor_t processor,
248 thread_t thread,
249 integer_t options);
250
251 static void
252 sched_realtime_timebase_init(void);
253
254 static void
255 sched_timer_deadline_tracking_init(void);
256
257 #if DEBUG
258 extern int debug_task;
259 #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args)
260 #else
261 #define TLOG(a, fmt, args...) do {} while (0)
262 #endif
263
264 static processor_t
265 thread_bind_internal(
266 thread_t thread,
267 processor_t processor);
268
269 static void
270 sched_vm_group_maintenance(void);
271
272 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
273 int8_t sched_load_shifts[NRQS];
274 bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS)];
275 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
276
277 const struct sched_dispatch_table *sched_current_dispatch = NULL;
278
279 /*
280 * Statically allocate a buffer to hold the longest possible
281 * scheduler description string, as currently implemented.
282 * scheduler description string. bsd/kern/kern_sysctl.c has a corresponding
283 * definition used to export it to userspace via sysctl(3). If either version
284 * changes, update the other.
285 *
286 * Note that in addition to being an upper bound on the strings
287 * in the kernel, it's also an exact parameter to PE_get_default(),
288 * which interrogates the device tree on some platforms. That
289 * API requires the caller know the exact size of the device tree
290 * property, so we need both a legacy size (32) and the current size
291 * (48) to deal with old and new device trees. The device tree property
292 * is similarly padded to a fixed size so that the same kernel image
293 * can run on multiple devices with different schedulers configured
294 * in the device tree.
295 */
296 char sched_string[SCHED_STRING_MAX_LENGTH];
297
298 uint32_t sched_debug_flags = SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS;
299
300 /* Global flag which indicates whether Background Stepper Context is enabled */
301 static int cpu_throttle_enabled = 1;
302
303 #if DEBUG
304
305 /* Since using the indirect function dispatch table has a negative impact on
306 * context switch performance, only allow DEBUG kernels to use that mechanism.
307 */
308 static void
309 sched_init_override(void)
310 {
311 char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' };
312
313 /* Check for runtime selection of the scheduler algorithm */
314 if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) {
315 sched_arg[0] = '\0';
316 }
317 if (strlen(sched_arg) > 0) {
318 if (0) {
319 /* Allow pattern below */
320 #if defined(CONFIG_SCHED_TRADITIONAL)
321 } else if (0 == strcmp(sched_arg, sched_traditional_dispatch.sched_name)) {
322 sched_current_dispatch = &sched_traditional_dispatch;
323 } else if (0 == strcmp(sched_arg, sched_traditional_with_pset_runqueue_dispatch.sched_name)) {
324 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
325 #endif
326 #if defined(CONFIG_SCHED_MULTIQ)
327 } else if (0 == strcmp(sched_arg, sched_multiq_dispatch.sched_name)) {
328 sched_current_dispatch = &sched_multiq_dispatch;
329 } else if (0 == strcmp(sched_arg, sched_dualq_dispatch.sched_name)) {
330 sched_current_dispatch = &sched_dualq_dispatch;
331 #endif
332 } else {
333 #if defined(CONFIG_SCHED_TRADITIONAL)
334 printf("Unrecognized scheduler algorithm: %s\n", sched_arg);
335 printf("Scheduler: Using instead: %s\n", sched_traditional_with_pset_runqueue_dispatch.sched_name);
336 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
337 #else
338 panic("Unrecognized scheduler algorithm: %s", sched_arg);
339 #endif
340 }
341 kprintf("Scheduler: Runtime selection of %s\n", SCHED(sched_name));
342 } else {
343 #if defined(CONFIG_SCHED_MULTIQ)
344 sched_current_dispatch = &sched_multiq_dispatch;
345 #elif defined(CONFIG_SCHED_TRADITIONAL)
346 sched_current_dispatch = &sched_traditional_with_pset_runqueue_dispatch;
347 #else
348 #error No default scheduler implementation
349 #endif
350 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
351 }
352 }
353
354 #endif /* DEBUG */
355
356 void
357 sched_init(void)
358 {
359 #if DEBUG
360 sched_init_override();
361 #else /* DEBUG */
362 kprintf("Scheduler: Default of %s\n", SCHED(sched_name));
363 #endif /* DEBUG */
364
365 if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) {
366 /* No boot-args, check in device tree */
367 if (!PE_get_default("kern.sched_pri_decay_limit",
368 &sched_pri_decay_band_limit,
369 sizeof(sched_pri_decay_band_limit))) {
370 /* Allow decay all the way to normal limits */
371 sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
372 }
373 }
374
375 kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit);
376
377 if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) {
378 kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags);
379 }
380 strlcpy(sched_string, SCHED(sched_name), sizeof(sched_string));
381
382 SCHED(init)();
383 SCHED(rt_init)(&pset0);
384 sched_timer_deadline_tracking_init();
385
386 SCHED(pset_init)(&pset0);
387 SCHED(processor_init)(master_processor);
388 }
389
390 void
391 sched_timebase_init(void)
392 {
393 uint64_t abstime;
394
395 clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime);
396 sched_one_second_interval = abstime;
397
398 SCHED(timebase_init)();
399 sched_realtime_timebase_init();
400 }
401
402 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
403
404 void
405 sched_timeshare_init(void)
406 {
407 /*
408 * Calculate the timeslicing quantum
409 * in us.
410 */
411 if (default_preemption_rate < 1)
412 default_preemption_rate = DEFAULT_PREEMPTION_RATE;
413 std_quantum_us = (1000 * 1000) / default_preemption_rate;
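/* e.g. the default preemption rate of 100 Hz yields a 10000 us (10 ms) quantum */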
414
415 printf("standard timeslicing quantum is %d us\n", std_quantum_us);
416
417 if (default_bg_preemption_rate < 1)
418 default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
419 bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;
420
421 printf("standard background quantum is %d us\n", bg_quantum_us);
422
423 load_shift_init();
424 preempt_pri_init();
425 sched_tick = 0;
426 }
427
428 void
429 sched_timeshare_timebase_init(void)
430 {
431 uint64_t abstime;
432 uint32_t shift;
433
434 /* standard timeslicing quantum */
435 clock_interval_to_absolutetime_interval(
436 std_quantum_us, NSEC_PER_USEC, &abstime);
437 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
438 std_quantum = (uint32_t)abstime;
439
440 /* smallest remaining quantum (250 us) */
441 clock_interval_to_absolutetime_interval(250, NSEC_PER_USEC, &abstime);
442 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
443 min_std_quantum = (uint32_t)abstime;
444
445 /* quantum for background tasks */
446 clock_interval_to_absolutetime_interval(
447 bg_quantum_us, NSEC_PER_USEC, &abstime);
448 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
449 bg_quantum = (uint32_t)abstime;
450
451 /* scheduler tick interval */
452 clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
453 NSEC_PER_USEC, &abstime);
454 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
455 sched_tick_interval = (uint32_t)abstime;
456
457 /*
458 * Compute conversion factor from usage to
459 * timesharing priorities with 5/8 ** n aging.
460 */
461 abstime = (abstime * 5) / 3;
462 for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift)
463 abstime >>= 1;
464 sched_fixed_shift = shift;
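/*
 * In other words, sched_fixed_shift is the smallest shift for which
 * (sched_tick_interval * 5 / 3) >> shift is no greater than BASEPRI_DEFAULT;
 * it is presumably consumed later when scaling measured CPU usage down into
 * priority-decay units.
 */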
465
466 for (uint32_t i = 0 ; i < TH_BUCKET_MAX ; i++)
467 sched_pri_shifts[i] = INT8_MAX;
468
469 max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum;
470 sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum;
471
472 max_poll_computation = ((uint64_t)max_poll_quanta) * std_quantum;
473 thread_depress_time = 1 * std_quantum;
474 default_timeshare_computation = std_quantum / 2;
475 default_timeshare_constraint = std_quantum;
476
477 #if __arm__ || __arm64__
478 perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval);
479 #endif /* __arm__ || __arm64__ */
480 }
481
482 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
483
484 void
485 pset_rt_init(processor_set_t pset)
486 {
487 rt_lock_init(pset);
488
489 pset->rt_runq.count = 0;
490 queue_init(&pset->rt_runq.queue);
491 memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats);
492 }
493
494 rt_queue_t
495 sched_rtglobal_runq(processor_set_t pset)
496 {
497 (void)pset;
498
499 return &pset0.rt_runq;
500 }
501
502 void
503 sched_rtglobal_init(processor_set_t pset)
504 {
505 if (pset == &pset0) {
506 return pset_rt_init(pset);
507 }
508
509 /* Only pset0 rt_runq is used, so make it easy to detect
510 * buggy accesses to others.
511 */
512 memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq);
513 }
514
515 void
516 sched_rtglobal_queue_shutdown(processor_t processor)
517 {
518 (void)processor;
519 }
520
521 static void
522 sched_realtime_timebase_init(void)
523 {
524 uint64_t abstime;
525
526 /* smallest rt computation (50 us) */
527 clock_interval_to_absolutetime_interval(50, NSEC_PER_USEC, &abstime);
528 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
529 min_rt_quantum = (uint32_t)abstime;
530
531 /* maximum rt computation (50 ms) */
532 clock_interval_to_absolutetime_interval(
533 50, 1000*NSEC_PER_USEC, &abstime);
534 assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
535 max_rt_quantum = (uint32_t)abstime;
536
537 }
538
539 void
540 sched_check_spill(processor_set_t pset, thread_t thread)
541 {
542 (void)pset;
543 (void)thread;
544
545 return;
546 }
547
548 bool
549 sched_thread_should_yield(processor_t processor, thread_t thread)
550 {
551 (void)thread;
552
553 return (!SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0);
554 }
555
556 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
557
558 /*
559 * Set up values for timeshare
560 * loading factors.
561 */
562 static void
563 load_shift_init(void)
564 {
565 int8_t k, *p = sched_load_shifts;
566 uint32_t i, j;
567
568 uint32_t sched_decay_penalty = 1;
569
570 if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof (sched_decay_penalty))) {
571 kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty);
572 }
573
574 if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof (sched_decay_usage_age_factor))) {
575 kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor);
576 }
577
578 if (sched_decay_penalty == 0) {
579 /*
580 * There is no penalty for timeshare threads for using too much
581 * CPU, so set all load shifts to INT8_MIN. Even under high load,
582 * sched_pri_shift will be >INT8_MAX, and there will be no
583 * penalty applied to threads (nor will sched_usage be updated per
584 * thread).
585 */
586 for (i = 0; i < NRQS; i++) {
587 sched_load_shifts[i] = INT8_MIN;
588 }
589
590 return;
591 }
592
593 *p++ = INT8_MIN; *p++ = 0;
594
595 /*
596 * For a given system load "i", the per-thread priority
597 * penalty per quantum of CPU usage is ~2^k priority
598 * levels. "sched_decay_penalty" can cause more
599 * array entries to be filled with smaller "k" values
600 */
601 for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
602 for (j <<= 1; (i < j) && (i < NRQS); ++i)
603 *p++ = k;
604 }
605 }
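/*
 * Rough sketch of the resulting table with the default sched_decay_penalty of 1:
 * indexes 2-3 hold 1, 4-7 hold 2, 8-15 hold 3, and so on (k grows by one each
 * time the load doubles). Per the comment above, a quantum of CPU usage at a
 * load of roughly 8 runnable threads then costs on the order of 2^3 priority
 * levels of decay.
 */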
606
607 static void
608 preempt_pri_init(void)
609 {
610 bitmap_t *p = sched_preempt_pri;
611
612 for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i)
613 bitmap_set(p, i);
614
615 for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i)
616 bitmap_set(p, i);
617 }
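/*
 * The resulting bitmap covers the foreground band up to (but not including)
 * MINPRI_KERNEL, plus BASEPRI_PREEMPT through MAXPRI; it is presumably
 * consulted when deciding whether a newly runnable thread at one of these
 * priorities should preempt immediately.
 */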
618
619 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
620
621 /*
622 * Thread wait timer expiration.
623 */
624 void
625 thread_timer_expire(
626 void *p0,
627 __unused void *p1)
628 {
629 thread_t thread = p0;
630 spl_t s;
631
632 assert_thread_magic(thread);
633
634 s = splsched();
635 thread_lock(thread);
636 if (--thread->wait_timer_active == 0) {
637 if (thread->wait_timer_is_set) {
638 thread->wait_timer_is_set = FALSE;
639 clear_wait_internal(thread, THREAD_TIMED_OUT);
640 }
641 }
642 thread_unlock(thread);
643 splx(s);
644 }
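/*
 * wait_timer_active behaves like a reference count on the outstanding timer
 * callout: thread_unblock() decrements it only when timer_call_cancel()
 * succeeds; otherwise this expiration handler performs the final decrement
 * before clearing the wait.
 */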
645
646 /*
647 * thread_unblock:
648 *
649 * Unblock thread on wake up.
650 *
651 * Returns TRUE if the thread should now be placed on the runqueue.
652 *
653 * Thread must be locked.
654 *
655 * Called at splsched().
656 */
657 boolean_t
658 thread_unblock(
659 thread_t thread,
660 wait_result_t wresult)
661 {
662 boolean_t ready_for_runq = FALSE;
663 thread_t cthread = current_thread();
664 uint32_t new_run_count;
665
666 /*
667 * Set wait_result.
668 */
669 thread->wait_result = wresult;
670
671 /*
672 * Cancel pending wait timer.
673 */
674 if (thread->wait_timer_is_set) {
675 if (timer_call_cancel(&thread->wait_timer))
676 thread->wait_timer_active--;
677 thread->wait_timer_is_set = FALSE;
678 }
679
680 /*
681 * Update scheduling state: not waiting,
682 * set running.
683 */
684 thread->state &= ~(TH_WAIT|TH_UNINT);
685
686 if (!(thread->state & TH_RUN)) {
687 thread->state |= TH_RUN;
688 thread->last_made_runnable_time = thread->last_basepri_change_time = mach_approximate_time();
689
690 ready_for_runq = TRUE;
691
692 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
693
694 /* Update the runnable thread count */
695 new_run_count = sched_run_incr(thread);
696 } else {
697 /*
698 * Either the thread is idling in place on another processor,
699 * or it hasn't finished context switching yet.
700 */
701 #if CONFIG_SCHED_IDLE_IN_PLACE
702 if (thread->state & TH_IDLE) {
703 processor_t processor = thread->last_processor;
704
705 if (processor != current_processor())
706 machine_signal_idle(processor);
707 }
708 #else
709 assert((thread->state & TH_IDLE) == 0);
710 #endif
711 /*
712 * The run count is only dropped after the context switch completes
713 * and the thread is still waiting, so we should not run_incr here
714 */
715 new_run_count = sched_run_buckets[TH_BUCKET_RUN];
716 }
717
718
719 /*
720 * Calculate deadline for real-time threads.
721 */
722 if (thread->sched_mode == TH_MODE_REALTIME) {
723 uint64_t ctime;
724
725 ctime = mach_absolute_time();
726 thread->realtime.deadline = thread->realtime.constraint + ctime;
727 }
728
729 /*
730 * Clear old quantum, fail-safe computation, etc.
731 */
732 thread->quantum_remaining = 0;
733 thread->computation_metered = 0;
734 thread->reason = AST_NONE;
735 thread->block_hint = kThreadWaitNone;
736
737 /* Obtain power-relevant interrupt and "platform-idle exit" statistics.
738 * We also account for "double hop" thread signaling via
739 * the thread callout infrastructure.
740 * DRK: consider removing the callout wakeup counters in the future;
741 * they're present for verification at the moment.
742 */
743 boolean_t aticontext, pidle;
744 ml_get_power_state(&aticontext, &pidle);
745
746 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
747 ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
748 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
749
750 uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd);
751
752 if (ttd) {
753 if (ttd <= timer_deadline_tracking_bin_1)
754 thread->thread_timer_wakeups_bin_1++;
755 else
756 if (ttd <= timer_deadline_tracking_bin_2)
757 thread->thread_timer_wakeups_bin_2++;
758 }
759
760 if (pidle) {
761 ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
762 }
763
764 } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) {
765 if (cthread->callout_woken_from_icontext) {
766 ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
767 thread->thread_callout_interrupt_wakeups++;
768 if (cthread->callout_woken_from_platform_idle) {
769 ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
770 thread->thread_callout_platform_idle_wakeups++;
771 }
772
773 cthread->callout_woke_thread = TRUE;
774 }
775 }
776
777 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
778 thread->callout_woken_from_icontext = aticontext;
779 thread->callout_woken_from_platform_idle = pidle;
780 thread->callout_woke_thread = FALSE;
781 }
782
783 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
784 MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE,
785 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result,
786 sched_run_buckets[TH_BUCKET_RUN], 0);
787
788 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
789
790 return (ready_for_runq);
791 }
792
793 /*
794 * Routine: thread_go
795 * Purpose:
796 * Unblock and dispatch thread.
797 * Conditions:
798 * thread lock held, IPC locks may be held.
799 * thread must have been pulled from wait queue under same lock hold.
800 * thread must have been waiting
801 * Returns:
802 * KERN_SUCCESS - Thread was set running
803 *
804 * TODO: This should return void
805 */
806 kern_return_t
807 thread_go(
808 thread_t thread,
809 wait_result_t wresult)
810 {
811 assert_thread_magic(thread);
812
813 assert(thread->at_safe_point == FALSE);
814 assert(thread->wait_event == NO_EVENT64);
815 assert(thread->waitq == NULL);
816
817 assert(!(thread->state & (TH_TERMINATE|TH_TERMINATE2)));
818 assert(thread->state & TH_WAIT);
819
820
821 if (thread_unblock(thread, wresult)) {
822 #if SCHED_TRACE_THREAD_WAKEUPS
823 backtrace(&thread->thread_wakeup_bt[0],
824 (sizeof(thread->thread_wakeup_bt)/sizeof(uintptr_t)));
825 #endif
826 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
827 }
828
829 return (KERN_SUCCESS);
830 }
831
832 /*
833 * Routine: thread_mark_wait_locked
834 * Purpose:
835 * Mark a thread as waiting. If, given the circumstances,
836 * it doesn't want to wait (i.e. already aborted), then
837 * indicate that in the return value.
838 * Conditions:
839 * at splsched() and thread is locked.
840 */
841 __private_extern__
842 wait_result_t
843 thread_mark_wait_locked(
844 thread_t thread,
845 wait_interrupt_t interruptible)
846 {
847 boolean_t at_safe_point;
848
849 assert(!(thread->state & (TH_WAIT|TH_IDLE|TH_UNINT|TH_TERMINATE2)));
850
851 /*
852 * The thread may have certain types of interrupts/aborts masked
853 * off. Even if the wait location says these types of interrupts
854 * are OK, we have to honor mask settings (outer-scoped code may
855 * not be able to handle aborts at the moment).
856 */
857 if (interruptible > (thread->options & TH_OPT_INTMASK))
858 interruptible = thread->options & TH_OPT_INTMASK;
859
860 at_safe_point = (interruptible == THREAD_ABORTSAFE);
861
862 if ( interruptible == THREAD_UNINT ||
863 !(thread->sched_flags & TH_SFLAG_ABORT) ||
864 (!at_safe_point &&
865 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
866
867 if ( !(thread->state & TH_TERMINATE))
868 DTRACE_SCHED(sleep);
869
870 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
871 thread->at_safe_point = at_safe_point;
872
873 /* TODO: pass this through assert_wait instead, have
874 * assert_wait just take a struct as an argument */
875 assert(!thread->block_hint);
876 thread->block_hint = thread->pending_block_hint;
877 thread->pending_block_hint = kThreadWaitNone;
878
879 return (thread->wait_result = THREAD_WAITING);
880 }
881 else
882 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
883 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
884 thread->pending_block_hint = kThreadWaitNone;
885
886 return (thread->wait_result = THREAD_INTERRUPTED);
887 }
888
889 /*
890 * Routine: thread_interrupt_level
891 * Purpose:
892 * Set the maximum interruptible state for the
893 * current thread. The effective value of any
894 * interruptible flag passed into assert_wait
895 * will never exceed this.
896 *
897 * Useful for code that must not be interrupted,
898 * but which calls code that doesn't know that.
899 * Returns:
900 * The old interrupt level for the thread.
901 */
902 __private_extern__
903 wait_interrupt_t
904 thread_interrupt_level(
905 wait_interrupt_t new_level)
906 {
907 thread_t thread = current_thread();
908 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
909
910 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
911
912 return result;
913 }
914
915 /*
916 * assert_wait:
917 *
918 * Assert that the current thread is about to go to
919 * sleep until the specified event occurs.
920 */
921 wait_result_t
922 assert_wait(
923 event_t event,
924 wait_interrupt_t interruptible)
925 {
926 if (__improbable(event == NO_EVENT))
927 panic("%s() called with NO_EVENT", __func__);
928
929 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
930 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
931 VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0);
932
933 struct waitq *waitq;
934 waitq = global_eventq(event);
935 return waitq_assert_wait64(waitq, CAST_EVENT64_T(event), interruptible, TIMEOUT_WAIT_FOREVER);
936 }
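/*
 * Sketch of the typical wait pattern built on this primitive (the object,
 * its lock and its flag are illustrative, not a real API):
 *
 *	assert_wait((event_t)&object->flag, THREAD_UNINT);
 *	unlock(object);				// must not block while holding the lock
 *	thread_block(THREAD_CONTINUE_NULL);	// sleeps until thread_wakeup(&object->flag)
 */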
937
938 /*
939 * assert_wait_queue:
940 *
941 * Return the global waitq for the specified event
942 */
943 struct waitq *
944 assert_wait_queue(
945 event_t event)
946 {
947 return global_eventq(event);
948 }
949
950 wait_result_t
951 assert_wait_timeout(
952 event_t event,
953 wait_interrupt_t interruptible,
954 uint32_t interval,
955 uint32_t scale_factor)
956 {
957 thread_t thread = current_thread();
958 wait_result_t wresult;
959 uint64_t deadline;
960 spl_t s;
961
962 if (__improbable(event == NO_EVENT))
963 panic("%s() called with NO_EVENT", __func__);
964
965 struct waitq *waitq;
966 waitq = global_eventq(event);
967
968 s = splsched();
969 waitq_lock(waitq);
970
971 clock_interval_to_deadline(interval, scale_factor, &deadline);
972
973 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
974 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
975 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
976
977 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
978 interruptible,
979 TIMEOUT_URGENCY_SYS_NORMAL,
980 deadline, TIMEOUT_NO_LEEWAY,
981 thread);
982
983 waitq_unlock(waitq);
984 splx(s);
985 return wresult;
986 }
987
988 wait_result_t
989 assert_wait_timeout_with_leeway(
990 event_t event,
991 wait_interrupt_t interruptible,
992 wait_timeout_urgency_t urgency,
993 uint32_t interval,
994 uint32_t leeway,
995 uint32_t scale_factor)
996 {
997 thread_t thread = current_thread();
998 wait_result_t wresult;
999 uint64_t deadline;
1000 uint64_t abstime;
1001 uint64_t slop;
1002 uint64_t now;
1003 spl_t s;
1004
1005 if (__improbable(event == NO_EVENT))
1006 panic("%s() called with NO_EVENT", __func__);
1007
1008 now = mach_absolute_time();
1009 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1010 deadline = now + abstime;
1011
1012 clock_interval_to_absolutetime_interval(leeway, scale_factor, &slop);
1013
1014 struct waitq *waitq;
1015 waitq = global_eventq(event);
1016
1017 s = splsched();
1018 waitq_lock(waitq);
1019
1020 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1021 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
1022 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1023
1024 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1025 interruptible,
1026 urgency, deadline, slop,
1027 thread);
1028
1029 waitq_unlock(waitq);
1030 splx(s);
1031 return wresult;
1032 }
1033
1034 wait_result_t
1035 assert_wait_deadline(
1036 event_t event,
1037 wait_interrupt_t interruptible,
1038 uint64_t deadline)
1039 {
1040 thread_t thread = current_thread();
1041 wait_result_t wresult;
1042 spl_t s;
1043
1044 if (__improbable(event == NO_EVENT))
1045 panic("%s() called with NO_EVENT", __func__);
1046
1047 struct waitq *waitq;
1048 waitq = global_eventq(event);
1049
1050 s = splsched();
1051 waitq_lock(waitq);
1052
1053 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1054 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
1055 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1056
1057 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1058 interruptible,
1059 TIMEOUT_URGENCY_SYS_NORMAL, deadline,
1060 TIMEOUT_NO_LEEWAY, thread);
1061 waitq_unlock(waitq);
1062 splx(s);
1063 return wresult;
1064 }
1065
1066 wait_result_t
1067 assert_wait_deadline_with_leeway(
1068 event_t event,
1069 wait_interrupt_t interruptible,
1070 wait_timeout_urgency_t urgency,
1071 uint64_t deadline,
1072 uint64_t leeway)
1073 {
1074 thread_t thread = current_thread();
1075 wait_result_t wresult;
1076 spl_t s;
1077
1078 if (__improbable(event == NO_EVENT))
1079 panic("%s() called with NO_EVENT", __func__);
1080
1081 struct waitq *waitq;
1082 waitq = global_eventq(event);
1083
1084 s = splsched();
1085 waitq_lock(waitq);
1086
1087 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
1088 MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE,
1089 VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0);
1090
1091 wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
1092 interruptible,
1093 urgency, deadline, leeway,
1094 thread);
1095 waitq_unlock(waitq);
1096 splx(s);
1097 return wresult;
1098 }
1099
1100 /*
1101 * thread_isoncpu:
1102 *
1103 * Return TRUE if a thread is running on a processor such that an AST
1104 * is needed to pull it out of userspace execution, or if executing in
1105 * the kernel, bring to a context switch boundary that would cause
1106 * thread state to be serialized in the thread PCB.
1107 *
1108 * Thread locked, returns the same way. While locked, fields
1109 * like "state" cannot change. "runq" can change only from set to unset.
1110 */
1111 static inline boolean_t
1112 thread_isoncpu(thread_t thread)
1113 {
1114 /* Not running or runnable */
1115 if (!(thread->state & TH_RUN))
1116 return (FALSE);
1117
1118 /* Waiting on a runqueue, not currently running */
1119 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1120 if (thread->runq != PROCESSOR_NULL)
1121 return (FALSE);
1122
1123 /*
1124 * Thread does not have a stack yet
1125 * It could be on the stack alloc queue or preparing to be invoked
1126 */
1127 if (!thread->kernel_stack)
1128 return (FALSE);
1129
1130 /*
1131 * Thread must be running on a processor, or
1132 * about to run, or just did run. In all these
1133 * cases, an AST to the processor is needed
1134 * to guarantee that the thread is kicked out
1135 * of userspace and the processor has
1136 * context switched (and saved register state).
1137 */
1138 return (TRUE);
1139 }
1140
1141 /*
1142 * thread_stop:
1143 *
1144 * Force a preemption point for a thread and wait
1145 * for it to stop running on a CPU. If a stronger
1146 * guarantee is requested, wait until no longer
1147 * runnable. Arbitrates access among
1148 * multiple stop requests. (released by unstop)
1149 *
1150 * The thread must enter a wait state and stop via a
1151 * separate means.
1152 *
1153 * Returns FALSE if interrupted.
1154 */
1155 boolean_t
1156 thread_stop(
1157 thread_t thread,
1158 boolean_t until_not_runnable)
1159 {
1160 wait_result_t wresult;
1161 spl_t s = splsched();
1162 boolean_t oncpu;
1163
1164 wake_lock(thread);
1165 thread_lock(thread);
1166
1167 while (thread->state & TH_SUSP) {
1168 thread->wake_active = TRUE;
1169 thread_unlock(thread);
1170
1171 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1172 wake_unlock(thread);
1173 splx(s);
1174
1175 if (wresult == THREAD_WAITING)
1176 wresult = thread_block(THREAD_CONTINUE_NULL);
1177
1178 if (wresult != THREAD_AWAKENED)
1179 return (FALSE);
1180
1181 s = splsched();
1182 wake_lock(thread);
1183 thread_lock(thread);
1184 }
1185
1186 thread->state |= TH_SUSP;
1187
1188 while ((oncpu = thread_isoncpu(thread)) ||
1189 (until_not_runnable && (thread->state & TH_RUN))) {
1190 processor_t processor;
1191
1192 if (oncpu) {
1193 assert(thread->state & TH_RUN);
1194 processor = thread->chosen_processor;
1195 cause_ast_check(processor);
1196 }
1197
1198 thread->wake_active = TRUE;
1199 thread_unlock(thread);
1200
1201 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1202 wake_unlock(thread);
1203 splx(s);
1204
1205 if (wresult == THREAD_WAITING)
1206 wresult = thread_block(THREAD_CONTINUE_NULL);
1207
1208 if (wresult != THREAD_AWAKENED) {
1209 thread_unstop(thread);
1210 return (FALSE);
1211 }
1212
1213 s = splsched();
1214 wake_lock(thread);
1215 thread_lock(thread);
1216 }
1217
1218 thread_unlock(thread);
1219 wake_unlock(thread);
1220 splx(s);
1221
1222 /*
1223 * We return with the thread unlocked. To prevent it from
1224 * transitioning to a runnable state (or from TH_RUN to
1225 * being on the CPU), the caller must ensure the thread
1226 * is stopped via an external means (such as an AST)
1227 */
1228
1229 return (TRUE);
1230 }
1231
1232 /*
1233 * thread_unstop:
1234 *
1235 * Release a previous stop request and set
1236 * the thread running if appropriate.
1237 *
1238 * Use only after a successful stop operation.
1239 */
1240 void
1241 thread_unstop(
1242 thread_t thread)
1243 {
1244 spl_t s = splsched();
1245
1246 wake_lock(thread);
1247 thread_lock(thread);
1248
1249 assert((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) != TH_SUSP);
1250
1251 if (thread->state & TH_SUSP) {
1252 thread->state &= ~TH_SUSP;
1253
1254 if (thread->wake_active) {
1255 thread->wake_active = FALSE;
1256 thread_unlock(thread);
1257
1258 thread_wakeup(&thread->wake_active);
1259 wake_unlock(thread);
1260 splx(s);
1261
1262 return;
1263 }
1264 }
1265
1266 thread_unlock(thread);
1267 wake_unlock(thread);
1268 splx(s);
1269 }
1270
1271 /*
1272 * thread_wait:
1273 *
1274 * Wait for a thread to stop running. (non-interruptible)
1275 *
1276 */
1277 void
1278 thread_wait(
1279 thread_t thread,
1280 boolean_t until_not_runnable)
1281 {
1282 wait_result_t wresult;
1283 boolean_t oncpu;
1284 processor_t processor;
1285 spl_t s = splsched();
1286
1287 wake_lock(thread);
1288 thread_lock(thread);
1289
1290 /*
1291 * Wait until not running on a CPU. If stronger requirement
1292 * desired, wait until not runnable. Assumption: if thread is
1293 * on CPU, then TH_RUN is set, so we're not waiting in any case
1294 * where the original, pure "TH_RUN" check would have let us
1295 * finish.
1296 */
1297 while ((oncpu = thread_isoncpu(thread)) ||
1298 (until_not_runnable && (thread->state & TH_RUN))) {
1299
1300 if (oncpu) {
1301 assert(thread->state & TH_RUN);
1302 processor = thread->chosen_processor;
1303 cause_ast_check(processor);
1304 }
1305
1306 thread->wake_active = TRUE;
1307 thread_unlock(thread);
1308
1309 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1310 wake_unlock(thread);
1311 splx(s);
1312
1313 if (wresult == THREAD_WAITING)
1314 thread_block(THREAD_CONTINUE_NULL);
1315
1316 s = splsched();
1317 wake_lock(thread);
1318 thread_lock(thread);
1319 }
1320
1321 thread_unlock(thread);
1322 wake_unlock(thread);
1323 splx(s);
1324 }
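/*
 * Both thread_stop() and thread_wait() above use &thread->wake_active as the
 * wait event and set wake_active before sleeping; the sleeper is released by
 * whoever observes wake_active and issues thread_wakeup(&thread->wake_active),
 * as thread_unstop() does.
 */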
1325
1326 /*
1327 * Routine: clear_wait_internal
1328 *
1329 * Clear the wait condition for the specified thread.
1330 * Start the thread executing if that is appropriate.
1331 * Arguments:
1332 * thread thread to awaken
1333 * result Wakeup result the thread should see
1334 * Conditions:
1335 * At splsched
1336 * the thread is locked.
1337 * Returns:
1338 * KERN_SUCCESS thread was rousted out of a wait
1339 * KERN_FAILURE thread was waiting but could not be rousted
1340 * KERN_NOT_WAITING thread was not waiting
1341 */
1342 __private_extern__ kern_return_t
1343 clear_wait_internal(
1344 thread_t thread,
1345 wait_result_t wresult)
1346 {
1347 uint32_t i = LockTimeOutUsec;
1348 struct waitq *waitq = thread->waitq;
1349
1350 do {
1351 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1352 return (KERN_FAILURE);
1353
1354 if (waitq != NULL) {
1355 if (!waitq_pull_thread_locked(waitq, thread)) {
1356 thread_unlock(thread);
1357 delay(1);
1358 if (i > 0 && !machine_timeout_suspended())
1359 i--;
1360 thread_lock(thread);
1361 if (waitq != thread->waitq)
1362 return KERN_NOT_WAITING;
1363 continue;
1364 }
1365 }
1366
1367 /* TODO: Can we instead assert TH_TERMINATE is not set? */
1368 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT)
1369 return (thread_go(thread, wresult));
1370 else
1371 return (KERN_NOT_WAITING);
1372 } while (i > 0);
1373
1374 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1375 thread, waitq, cpu_number());
1376
1377 return (KERN_FAILURE);
1378 }
1379
1380
1381 /*
1382 * clear_wait:
1383 *
1384 * Clear the wait condition for the specified thread. Start the thread
1385 * executing if that is appropriate.
1386 *
1387 * parameters:
1388 * thread thread to awaken
1389 * result Wakeup result the thread should see
1390 */
1391 kern_return_t
1392 clear_wait(
1393 thread_t thread,
1394 wait_result_t result)
1395 {
1396 kern_return_t ret;
1397 spl_t s;
1398
1399 s = splsched();
1400 thread_lock(thread);
1401 ret = clear_wait_internal(thread, result);
1402 thread_unlock(thread);
1403 splx(s);
1404 return ret;
1405 }
1406
1407
1408 /*
1409 * thread_wakeup_prim:
1410 *
1411 * Common routine for thread_wakeup, thread_wakeup_with_result,
1412 * and thread_wakeup_one.
1413 *
1414 */
1415 kern_return_t
1416 thread_wakeup_prim(
1417 event_t event,
1418 boolean_t one_thread,
1419 wait_result_t result)
1420 {
1421 if (__improbable(event == NO_EVENT))
1422 panic("%s() called with NO_EVENT", __func__);
1423
1424 struct waitq *wq = global_eventq(event);
1425
1426 if (one_thread)
1427 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1428 else
1429 return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
1430 }
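/*
 * (thread_wakeup(), thread_wakeup_with_result() and thread_wakeup_one(),
 * referenced in the comment above, are thin wrappers, presumably macros in
 * sched_prim.h, that fill in the one_thread/result arguments for this routine.)
 */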
1431
1432 /*
1433 * Wakeup a specified thread if and only if it's waiting for this event
1434 */
1435 kern_return_t
1436 thread_wakeup_thread(
1437 event_t event,
1438 thread_t thread)
1439 {
1440 if (__improbable(event == NO_EVENT))
1441 panic("%s() called with NO_EVENT", __func__);
1442
1443 if (__improbable(thread == THREAD_NULL))
1444 panic("%s() called with THREAD_NULL", __func__);
1445
1446 struct waitq *wq = global_eventq(event);
1447
1448 return waitq_wakeup64_thread(wq, CAST_EVENT64_T(event), thread, THREAD_AWAKENED);
1449 }
1450
1451 /*
1452 * Wakeup a thread waiting on an event and promote it to a priority.
1453 *
1454 * Requires woken thread to un-promote itself when done.
1455 */
1456 kern_return_t
1457 thread_wakeup_one_with_pri(
1458 event_t event,
1459 int priority)
1460 {
1461 if (__improbable(event == NO_EVENT))
1462 panic("%s() called with NO_EVENT", __func__);
1463
1464 struct waitq *wq = global_eventq(event);
1465
1466 return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1467 }
1468
1469 /*
1470 * Wakeup a thread waiting on an event,
1471 * promote it to a priority,
1472 * and return a reference to the woken thread.
1473 *
1474 * Requires woken thread to un-promote itself when done.
1475 */
1476 thread_t
1477 thread_wakeup_identify(event_t event,
1478 int priority)
1479 {
1480 if (__improbable(event == NO_EVENT))
1481 panic("%s() called with NO_EVENT", __func__);
1482
1483 struct waitq *wq = global_eventq(event);
1484
1485 return waitq_wakeup64_identify(wq, CAST_EVENT64_T(event), THREAD_AWAKENED, priority);
1486 }
1487
1488 /*
1489 * thread_bind:
1490 *
1491 * Force the current thread to execute on the specified processor.
1492 * Takes effect after the next thread_block().
1493 *
1494 * Returns the previous binding. PROCESSOR_NULL means
1495 * not bound.
1496 *
1497 * XXX - DO NOT export this to users - XXX
1498 */
1499 processor_t
1500 thread_bind(
1501 processor_t processor)
1502 {
1503 thread_t self = current_thread();
1504 processor_t prev;
1505 spl_t s;
1506
1507 s = splsched();
1508 thread_lock(self);
1509
1510 prev = thread_bind_internal(self, processor);
1511
1512 thread_unlock(self);
1513 splx(s);
1514
1515 return (prev);
1516 }
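/*
 * Illustrative use, mirroring thread_vm_bind_group_add() below: bind, then
 * block to migrate onto the chosen processor, and later undo the binding
 * with PROCESSOR_NULL:
 *
 *	thread_bind(master_processor);
 *	thread_block(THREAD_CONTINUE_NULL);	// switch over to the bound processor
 *	...
 *	thread_bind(PROCESSOR_NULL);		// drop the binding again
 */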
1517
1518 /*
1519 * thread_bind_internal:
1520 *
1521 * If the specified thread is not the current thread, and it is currently
1522 * running on another CPU, a remote AST must be sent to that CPU to cause
1523 * the thread to migrate to its bound processor. Otherwise, the migration
1524 * will occur at the next quantum expiration or blocking point.
1525 *
1526 * When the thread is the current thread, an explicit thread_block() should
1527 * be used to force the current processor to context switch away and
1528 * let the thread migrate to the bound processor.
1529 *
1530 * Thread must be locked, and at splsched.
1531 */
1532
1533 static processor_t
1534 thread_bind_internal(
1535 thread_t thread,
1536 processor_t processor)
1537 {
1538 processor_t prev;
1539
1540 /* <rdar://problem/15102234> */
1541 assert(thread->sched_pri < BASEPRI_RTQUEUES);
1542 /* A thread can't be bound if it's sitting on a (potentially incorrect) runqueue */
1543 assert(thread->runq == PROCESSOR_NULL);
1544
1545 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_THREAD_BIND), thread_tid(thread), processor ? (uintptr_t)processor->cpu_id : (uintptr_t)-1, 0, 0, 0);
1546
1547 prev = thread->bound_processor;
1548 thread->bound_processor = processor;
1549
1550 return (prev);
1551 }
1552
1553 /*
1554 * thread_vm_bind_group_add:
1555 *
1556 * The "VM bind group" is a special mechanism to mark a collection
1557 * of threads from the VM subsystem that, in general, should be scheduled
1558 * with only one CPU of parallelism. To accomplish this, we initially
1559 * bind all the threads to the master processor, which has the effect
1560 * that only one of the threads in the group can execute at once, including
1561 * preempting threads in the group that are a lower priority. Future
1562 * mechanisms may use more dynamic mechanisms to prevent the collection
1563 * of VM threads from using more CPU time than desired.
1564 *
1565 * The current implementation can result in priority inversions where
1566 * compute-bound priority 95 or realtime threads that happen to have
1567 * landed on the master processor prevent the VM threads from running.
1568 * When this situation is detected, we unbind the threads for one
1569 * scheduler tick to allow the scheduler to run the threads on
1570 * additional CPUs, before restoring the binding (assuming high latency
1571 * is no longer a problem).
1572 */
1573
1574 /*
1575 * The current max is provisioned for:
1576 * vm_compressor_swap_trigger_thread (92)
1577 * 2 x vm_pageout_iothread_internal (92) when vm_restricted_to_single_processor==TRUE
1578 * vm_pageout_continue (92)
1579 * memorystatus_thread (95)
1580 */
1581 #define MAX_VM_BIND_GROUP_COUNT (5)
1582 decl_simple_lock_data(static,sched_vm_group_list_lock);
1583 static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT];
1584 static int sched_vm_group_thread_count;
1585 static boolean_t sched_vm_group_temporarily_unbound = FALSE;
1586
1587 void
1588 thread_vm_bind_group_add(void)
1589 {
1590 thread_t self = current_thread();
1591
1592 thread_reference_internal(self);
1593 self->options |= TH_OPT_SCHED_VM_GROUP;
1594
1595 simple_lock(&sched_vm_group_list_lock);
1596 assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT);
1597 sched_vm_group_thread_list[sched_vm_group_thread_count++] = self;
1598 simple_unlock(&sched_vm_group_list_lock);
1599
1600 thread_bind(master_processor);
1601
1602 /* Switch to bound processor if not already there */
1603 thread_block(THREAD_CONTINUE_NULL);
1604 }
1605
1606 static void
1607 sched_vm_group_maintenance(void)
1608 {
1609 uint64_t ctime = mach_absolute_time();
1610 uint64_t longtime = ctime - sched_tick_interval;
1611 int i;
1612 spl_t s;
1613 boolean_t high_latency_observed = FALSE;
1614 boolean_t runnable_and_not_on_runq_observed = FALSE;
1615 boolean_t bind_target_changed = FALSE;
1616 processor_t bind_target = PROCESSOR_NULL;
1617
1618 /* Make sure nobody attempts to add new threads while we are enumerating them */
1619 simple_lock(&sched_vm_group_list_lock);
1620
1621 s = splsched();
1622
1623 for (i=0; i < sched_vm_group_thread_count; i++) {
1624 thread_t thread = sched_vm_group_thread_list[i];
1625 assert(thread != THREAD_NULL);
1626 thread_lock(thread);
1627 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) {
1628 if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) {
1629 high_latency_observed = TRUE;
1630 } else if (thread->runq == PROCESSOR_NULL) {
1631 /* There are some cases where a thread may be transitioning that also fall into this case */
1632 runnable_and_not_on_runq_observed = TRUE;
1633 }
1634 }
1635 thread_unlock(thread);
1636
1637 if (high_latency_observed && runnable_and_not_on_runq_observed) {
1638 /* All the things we are looking for are true, stop looking */
1639 break;
1640 }
1641 }
1642
1643 splx(s);
1644
1645 if (sched_vm_group_temporarily_unbound) {
1646 /* If we turned off binding, make sure everything is OK before rebinding */
1647 if (!high_latency_observed) {
1648 /* rebind */
1649 bind_target_changed = TRUE;
1650 bind_target = master_processor;
1651 sched_vm_group_temporarily_unbound = FALSE; /* might be reset to TRUE if change cannot be completed */
1652 }
1653 } else {
1654 /*
1655 * Check if we're in a bad state, which is defined by high
1656 * latency with no core currently executing a thread. If a
1657 * single thread is making progress on a CPU, that means the
1658 * binding concept to reduce parallelism is working as
1659 * designed.
1660 */
1661 if (high_latency_observed && !runnable_and_not_on_runq_observed) {
1662 /* unbind */
1663 bind_target_changed = TRUE;
1664 bind_target = PROCESSOR_NULL;
1665 sched_vm_group_temporarily_unbound = TRUE;
1666 }
1667 }
1668
1669 if (bind_target_changed) {
1670 s = splsched();
1671 for (i=0; i < sched_vm_group_thread_count; i++) {
1672 thread_t thread = sched_vm_group_thread_list[i];
1673 boolean_t removed;
1674 assert(thread != THREAD_NULL);
1675
1676 thread_lock(thread);
1677 removed = thread_run_queue_remove(thread);
1678 if (removed || ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT)) {
1679 thread_bind_internal(thread, bind_target);
1680 } else {
1681 /*
1682 * Thread was in the middle of being context-switched-to,
1683 * or was in the process of blocking. To avoid switching the bind
1684 * state out mid-flight, defer the change if possible.
1685 */
1686 if (bind_target == PROCESSOR_NULL) {
1687 thread_bind_internal(thread, bind_target);
1688 } else {
1689 sched_vm_group_temporarily_unbound = TRUE; /* next pass will try again */
1690 }
1691 }
1692
1693 if (removed) {
1694 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
1695 }
1696 thread_unlock(thread);
1697 }
1698 splx(s);
1699 }
1700
1701 simple_unlock(&sched_vm_group_list_lock);
1702 }
1703
1704 /* Invoked prior to idle entry to determine if, on SMT capable processors, an SMT
1705 * rebalancing opportunity exists when a core is (instantaneously) idle, but
1706 * other SMT-capable cores may be over-committed. TODO: some possible negatives:
1707 * - IPI thrash if this core does not remain idle following the load balancing ASTs
1708 * - Idle "thrash", when IPI issue is followed by idle entry/core power down
1709 * followed by a wakeup shortly thereafter.
1710 */
1711
1712 #if (DEVELOPMENT || DEBUG)
1713 int sched_smt_balance = 1;
1714 #endif
1715
1716 #if __SMP__
1717 /* Invoked with pset locked, returns with pset unlocked */
1718 void
1719 sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) {
1720 processor_t ast_processor = NULL;
1721
1722 #if (DEVELOPMENT || DEBUG)
1723 if (__improbable(sched_smt_balance == 0))
1724 goto smt_balance_exit;
1725 #endif
1726
1727 assert(cprocessor == current_processor());
1728 if (cprocessor->is_SMT == FALSE)
1729 goto smt_balance_exit;
1730
1731 processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary;
1732
1733 /* Determine if both this processor and its sibling are idle,
1734 * indicating an SMT rebalancing opportunity.
1735 */
1736 if (sib_processor->state != PROCESSOR_IDLE)
1737 goto smt_balance_exit;
1738
1739 processor_t sprocessor;
1740
1741 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
1742 qe_foreach_element(sprocessor, &cpset->active_queue, processor_queue) {
1743 if ((sprocessor->state == PROCESSOR_RUNNING) &&
1744 (sprocessor->processor_primary != sprocessor) &&
1745 (sprocessor->processor_primary->state == PROCESSOR_RUNNING) &&
1746 (sprocessor->current_pri < BASEPRI_RTQUEUES)) {
1747
1748 ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL);
1749 if (ipi_type != SCHED_IPI_NONE) {
1750 assert(sprocessor != cprocessor);
1751 ast_processor = sprocessor;
1752 break;
1753 }
1754 }
1755 }
1756
1757 smt_balance_exit:
1758 pset_unlock(cpset);
1759
1760 if (ast_processor) {
1761 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_SMT_BALANCE), ast_processor->cpu_id, ast_processor->state, ast_processor->processor_primary->state, 0, 0);
1762 sched_ipi_perform(ast_processor, ipi_type);
1763 }
1764 }
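/*
 * Summary of the scan above: with this core about to idle and its SMT sibling
 * already idle, look for a secondary processor that is running a non-realtime
 * thread while its primary is also busy, and nudge it with a rebalance IPI so
 * that its work can migrate over to an idle primary.
 */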
1765 #else
1766 /* Invoked with pset locked, returns with pset unlocked */
1767 void
1768 sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset)
1769 {
1770 pset_unlock(cpset);
1771 }
1772 #endif /* __SMP__ */
1773
1774 /*
1775 * thread_select:
1776 *
1777 * Select a new thread for the current processor to execute.
1778 *
1779 * May select the current thread, which must be locked.
1780 */
1781 static thread_t
1782 thread_select(thread_t thread,
1783 processor_t processor,
1784 ast_t *reason)
1785 {
1786 processor_set_t pset = processor->processor_set;
1787 thread_t new_thread = THREAD_NULL;
1788
1789 assert(processor == current_processor());
1790 assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
1791
1792 do {
1793 /*
1794 * Update the priority.
1795 */
1796 if (SCHED(can_update_priority)(thread))
1797 SCHED(update_priority)(thread);
1798
1799 processor_state_update_from_thread(processor, thread);
1800
1801 pset_lock(pset);
1802
1803 assert(processor->state != PROCESSOR_OFF_LINE);
1804
1805 if (!processor->is_recommended) {
1806 /*
1807 * The performance controller has provided a hint to not dispatch more threads,
1808 * unless they are bound to us (and thus we are the only option).
1809 */
1810 if (!SCHED(processor_bound_count)(processor)) {
1811 goto idle;
1812 }
1813 } else if (processor->processor_primary != processor) {
1814 /*
1815 * Should this secondary SMT processor attempt to find work? For pset runqueue systems,
1816 * we should look for work only under the same conditions that choose_processor()
1817 * would have assigned work, which is when all primary processors have been assigned work.
1818 *
1819 * An exception is that bound threads are dispatched to a processor without going through
1820 * choose_processor(), so in those cases we should continue trying to dequeue work.
1821 */
1822 if (!SCHED(processor_bound_count)(processor) &&
1823 !queue_empty(&pset->idle_queue) && !rt_runq_count(pset)) {
1824 goto idle;
1825 }
1826 }
1827
1828 /*
1829 * Test to see if the current thread should continue
1830 * to run on this processor. Must not be attempting to wait, and not
1831 * bound to a different processor, nor be in the wrong
1832 * processor set, nor be forced to context switch by TH_SUSP.
1833 *
1834 * Note that there are never any RT threads in the regular runqueue.
1835 *
1836 * This code is very insanely tricky.
1837 */
1838
1839 /* i.e. not waiting, not TH_SUSP'ed */
1840 boolean_t still_running = ((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN);
1841
1842 /*
1843 * Threads running on secondary SMT processors are forced to context switch. Don't rebalance realtime threads.
1844 * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors'
1845 */
1846 boolean_t needs_smt_rebalance = (thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor);
1847
1848 boolean_t affinity_mismatch = (thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset);
1849
1850 boolean_t bound_elsewhere = (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor);
1851
1852 boolean_t avoid_processor = (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread));
1853
1854 if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) {
1855 /*
1856 * This thread is eligible to keep running on this processor.
1857 *
1858 * RT threads with un-expired quantum stay on processor,
1859 * unless there's a valid RT thread with an earlier deadline.
1860 */
1861 if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) {
1862 if (rt_runq_count(pset) > 0) {
1863
1864 rt_lock_lock(pset);
1865
1866 if (rt_runq_count(pset) > 0) {
1867
1868 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1869
1870 if (next_rt->realtime.deadline < processor->deadline &&
1871 (next_rt->bound_processor == PROCESSOR_NULL ||
1872 next_rt->bound_processor == processor)) {
1873 /* The next RT thread is better, so pick it off the runqueue. */
1874 goto pick_new_rt_thread;
1875 }
1876 }
1877
1878 rt_lock_unlock(pset);
1879 }
1880
1881 /* This is still the best RT thread to run. */
1882 processor->deadline = thread->realtime.deadline;
1883
1884 sched_update_pset_load_average(pset);
1885 pset_unlock(pset);
1886
1887 return (thread);
1888 }
1889
1890 if ((rt_runq_count(pset) == 0) &&
1891 SCHED(processor_queue_has_priority)(processor, thread->sched_pri, TRUE) == FALSE) {
1892 /* This thread is still the highest priority runnable (non-idle) thread */
1893 processor->deadline = UINT64_MAX;
1894
1895 sched_update_pset_load_average(pset);
1896 pset_unlock(pset);
1897
1898 return (thread);
1899 }
1900 } else {
1901 /*
1902 * This processor must context switch.
1903 * If it's due to a rebalance, we should aggressively find this thread a new home.
1904 */
1905 if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor)
1906 *reason |= AST_REBALANCE;
1907 }
1908
1909 /* OK, so we're not going to run the current thread. Look at the RT queue. */
1910 if (rt_runq_count(pset) > 0) {
1911
1912 rt_lock_lock(pset);
1913
1914 if (rt_runq_count(pset) > 0) {
1915 thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1916
1917 if (__probable((next_rt->bound_processor == PROCESSOR_NULL ||
1918 (next_rt->bound_processor == processor)))) {
1919 pick_new_rt_thread:
1920 new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links);
1921
1922 new_thread->runq = PROCESSOR_NULL;
1923 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
1924 rt_runq_count_decr(pset);
1925
1926 processor->deadline = new_thread->realtime.deadline;
1927
1928 rt_lock_unlock(pset);
1929 sched_update_pset_load_average(pset);
1930 pset_unlock(pset);
1931
1932 return (new_thread);
1933 }
1934 }
1935
1936 rt_lock_unlock(pset);
1937 }
1938
1939 processor->deadline = UINT64_MAX;
1940
1941 /* No RT threads, so let's look at the regular threads. */
1942 if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) {
1943 sched_update_pset_load_average(pset);
1944 pset_unlock(pset);
1945 return (new_thread);
1946 }
1947
1948 #if __SMP__
1949 if (SCHED(steal_thread_enabled)) {
1950 /*
1951 * No runnable threads, attempt to steal
1952 * from other processors. Returns with pset lock dropped.
1953 */
1954
1955 if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) {
1956 return (new_thread);
1957 }
1958
1959 /*
1960 * If other threads have appeared, shortcut
1961 * around again.
1962 */
1963 if (!SCHED(processor_queue_empty)(processor) || rt_runq_count(pset) > 0)
1964 continue;
1965
1966 pset_lock(pset);
1967 }
1968 #endif
1969
1970 idle:
1971 /*
1972 * Nothing is runnable, so set this processor idle if it
1973 * was running.
1974 */
1975 if (processor->state == PROCESSOR_RUNNING) {
1976 processor->state = PROCESSOR_IDLE;
1977
1978 if (!processor->is_recommended) {
1979 re_queue_head(&pset->unused_queue, &processor->processor_queue);
1980 } else if (processor->processor_primary == processor) {
1981 re_queue_head(&pset->idle_queue, &processor->processor_queue);
1982 } else {
1983 re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
1984 }
1985
1986 pset->active_processor_count--;
1987 sched_update_pset_load_average(pset);
1988 }
1989
1990 #if __SMP__
1991 /* Invoked with pset locked, returns with pset unlocked */
1992 SCHED(processor_balance)(processor, pset);
1993 #else
1994 pset_unlock(pset);
1995 #endif
1996
1997 #if CONFIG_SCHED_IDLE_IN_PLACE
1998 /*
1999 * Choose idle thread if fast idle is not possible.
2000 */
2001 if (processor->processor_primary != processor)
2002 return (processor->idle_thread);
2003
2004 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
2005 return (processor->idle_thread);
2006
2007 /*
2008 * Perform idling activities directly without a
2009 * context switch. Return dispatched thread,
2010 * else check again for a runnable thread.
2011 */
2012 new_thread = thread_select_idle(thread, processor);
2013
2014 #else /* !CONFIG_SCHED_IDLE_IN_PLACE */
2015
2016 /*
2017 * Do a full context switch to idle so that the current
2018 * thread can start running on another processor without
2019 * waiting for the fast-idled processor to wake up.
2020 */
2021 new_thread = processor->idle_thread;
2022
2023 #endif /* !CONFIG_SCHED_IDLE_IN_PLACE */
2024
2025 } while (new_thread == THREAD_NULL);
2026
2027 return (new_thread);
2028 }
2029
2030 #if CONFIG_SCHED_IDLE_IN_PLACE
2031 /*
2032 * thread_select_idle:
2033 *
2034 * Idle the processor using the current thread context.
2035 *
2036 * Called with thread locked, then dropped and relocked.
2037 */
2038 static thread_t
2039 thread_select_idle(
2040 thread_t thread,
2041 processor_t processor)
2042 {
2043 thread_t new_thread;
2044 uint64_t arg1, arg2;
2045 int urgency;
2046
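/*
 * Take the thread out of the run counts and mark it idle;
 * this is undone at the end of this function once idling completes.
 */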
2047 sched_run_decr(thread);
2048
2049 thread->state |= TH_IDLE;
2050 processor_state_update_idle(processor);
2051
2052 /* Reload precise timing global policy to thread-local policy */
2053 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2054
2055 thread_unlock(thread);
2056
2057 /*
2058 * Switch execution timing to processor idle thread.
2059 */
2060 processor->last_dispatch = mach_absolute_time();
2061
2062 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2063 commpage_update_mach_approximate_time(processor->last_dispatch);
2064 #endif
2065
2066 thread->last_run_time = processor->last_dispatch;
2067 thread_timer_event(processor->last_dispatch, &processor->idle_thread->system_timer);
2068 PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer;
2069
2070
2071 /*
2072 * Cancel the quantum timer while idling.
2073 */
2074 timer_call_quantum_timer_cancel(&processor->quantum_timer);
2075 processor->first_timeslice = FALSE;
2076
2077 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2078
2079 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, NULL);
2080
2081 /*
2082 * Enable interrupts and perform idling activities. No
2083 * preemption due to TH_IDLE being set.
2084 */
2085 spllo(); new_thread = processor_idle(thread, processor);
2086
2087 /*
2088 * Return at splsched.
2089 */
2090 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
2091
2092 thread_lock(thread);
2093
2094 /*
2095 * If awakened, switch to thread timer and start a new quantum.
2096 * Otherwise skip; we will context switch to another thread or return here.
2097 */
2098 if (!(thread->state & TH_WAIT)) {
2099 processor->last_dispatch = mach_absolute_time();
2100 thread_timer_event(processor->last_dispatch, &thread->system_timer);
2101 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2102 thread_quantum_init(thread);
2103 processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
2104 timer_call_quantum_timer_enter(&processor->quantum_timer,
2105 thread, processor->quantum_end, processor->last_dispatch);
2106 processor->first_timeslice = TRUE;
2107
2108 thread->computation_epoch = processor->last_dispatch;
2109 }
2110
2111 thread->state &= ~TH_IDLE;
2112
2113 urgency = thread_get_urgency(thread, &arg1, &arg2);
2114
2115 thread_tell_urgency(urgency, arg1, arg2, 0, new_thread);
2116
2117 sched_run_incr(thread);
2118
2119 return (new_thread);
2120 }
2121 #endif /* CONFIG_SCHED_IDLE_IN_PLACE */
2122
2123 /*
2124 * thread_invoke
2125 *
2126 * Called at splsched with neither thread locked.
2127 *
2128 * Perform a context switch and start executing the new thread.
2129 *
2130 * Returns FALSE when the context switch didn't happen.
2131 * The reference to the new thread is still consumed.
2132 *
2133 * "self" is what is currently running on the processor,
2134 * "thread" is the new thread to context switch to
2135 * (which may be the same thread in some cases)
2136 */
2137 static boolean_t
2138 thread_invoke(
2139 thread_t self,
2140 thread_t thread,
2141 ast_t reason)
2142 {
2143 if (__improbable(get_preemption_level() != 0)) {
2144 int pl = get_preemption_level();
2145 panic("thread_invoke: preemption_level %d, possible cause: %s",
2146 pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" :
2147 "blocking while holding a spinlock, or within interrupt context"));
2148 }
2149
2150 thread_continue_t continuation = self->continuation;
2151 void *parameter = self->parameter;
2152 processor_t processor;
2153
2154 uint64_t ctime = mach_absolute_time();
2155
2156 #ifdef CONFIG_MACH_APPROXIMATE_TIME
2157 commpage_update_mach_approximate_time(ctime);
2158 #endif
2159
2160 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2161 if ((thread->state & TH_IDLE) == 0)
2162 sched_timeshare_consider_maintenance(ctime);
2163 #endif
2164
2165 #if MONOTONIC
2166 mt_sched_update(self);
2167 #endif /* MONOTONIC */
2168
2169 assert_thread_magic(self);
2170 assert(self == current_thread());
2171 assert(self->runq == PROCESSOR_NULL);
2172 assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN);
2173
2174 thread_lock(thread);
2175
2176 assert_thread_magic(thread);
2177 assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
2178 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2179 assert(thread->runq == PROCESSOR_NULL);
2180
2181 /* Reload precise timing global policy to thread-local policy */
2182 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2183
2184 /* Update SFI class based on other factors */
2185 thread->sfi_class = sfi_thread_classify(thread);
2186
2187 /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */
2188 thread->same_pri_latency = ctime - thread->last_basepri_change_time;
2189 /*
2190 * In case a base_pri update happened between the timestamp and
2191 * taking the thread lock
2192 */
2193 if (ctime <= thread->last_basepri_change_time)
2194 thread->same_pri_latency = ctime - thread->last_made_runnable_time;
2195
2196 /* Allow realtime threads to hang onto a stack. */
2197 if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack)
2198 self->reserved_stack = self->kernel_stack;
2199
2200 /* Prepare for spin debugging */
2201 #if INTERRUPT_MASKED_DEBUG
2202 ml_spin_debug_clear(thread);
2203 #endif
2204
2205 if (continuation != NULL) {
2206 if (!thread->kernel_stack) {
2207 /*
2208 * If we are using a privileged stack,
2209 * check to see whether we can exchange it with
2210 * that of the other thread.
2211 */
2212 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
2213 goto need_stack;
2214
2215 /*
2216 * Context switch by performing a stack handoff.
2217 */
2218 continuation = thread->continuation;
2219 parameter = thread->parameter;
2220
2221 processor = current_processor();
2222 processor->active_thread = thread;
2223 processor_state_update_from_thread(processor, thread);
2224
2225 if (thread->last_processor != processor && thread->last_processor != NULL) {
2226 if (thread->last_processor->processor_set != processor->processor_set)
2227 thread->ps_switch++;
2228 thread->p_switch++;
2229 }
2230 thread->last_processor = processor;
2231 thread->c_switch++;
2232 ast_context(thread);
2233
2234 thread_unlock(thread);
2235
2236 self->reason = reason;
2237
2238 processor->last_dispatch = ctime;
2239 self->last_run_time = ctime;
2240 thread_timer_event(ctime, &thread->system_timer);
2241 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2242
2243 /*
2244 * Since non-precise user/kernel time doesn't update the state timer
2245 * during privilege transitions, synthesize an event now.
2246 */
2247 if (!thread->precise_user_kernel_time) {
2248 timer_switch(PROCESSOR_DATA(processor, current_state),
2249 ctime,
2250 PROCESSOR_DATA(processor, current_state));
2251 }
2252
2253 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2254 MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE,
2255 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2256
2257 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2258 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
2259 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2260 }
2261
2262 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2263
2264 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2265
2266 TLOG(1, "thread_invoke: calling stack_handoff\n");
2267 stack_handoff(self, thread);
2268
2269 /* 'self' is now off core */
2270 assert(thread == current_thread());
2271
2272 DTRACE_SCHED(on__cpu);
2273
2274 #if KPERF
2275 kperf_on_cpu(thread, continuation, NULL);
2276 #endif /* KPERF */
2277
2278 #if KASAN
2279 kasan_unpoison_fakestack(self);
2280 kasan_unpoison_stack(thread->kernel_stack, kernel_stack_size);
2281 #endif
2282
2283 thread_dispatch(self, thread);
2284
2285 thread->continuation = thread->parameter = NULL;
2286
2287 counter(c_thread_invoke_hits++);
2288
2289 (void) spllo();
2290
2291 assert(continuation);
2292 call_continuation(continuation, parameter, thread->wait_result);
2293 /*NOTREACHED*/
2294 }
2295 else if (thread == self) {
2296 /* same thread but with continuation */
2297 ast_context(self);
2298 counter(++c_thread_invoke_same);
2299
2300 thread_unlock(self);
2301
2302 #if KPERF
2303 kperf_on_cpu(thread, continuation, NULL);
2304 #endif /* KPERF */
2305
2306 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2307 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2308 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2309
2310 #if KASAN
2311 kasan_unpoison_fakestack(self);
2312 kasan_unpoison_stack(self->kernel_stack, kernel_stack_size);
2313 #endif
2314
2315 self->continuation = self->parameter = NULL;
2316
2317 (void) spllo();
2318
2319 call_continuation(continuation, parameter, self->wait_result);
2320 /*NOTREACHED*/
2321 }
2322 } else {
2323 /*
2324 * Check that the other thread has a stack
2325 */
2326 if (!thread->kernel_stack) {
2327 need_stack:
2328 if (!stack_alloc_try(thread)) {
2329 counter(c_thread_invoke_misses++);
2330 thread_unlock(thread);
2331 thread_stack_enqueue(thread);
2332 return (FALSE);
2333 }
2334 } else if (thread == self) {
2335 ast_context(self);
2336 counter(++c_thread_invoke_same);
2337 thread_unlock(self);
2338
2339 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2340 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2341 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2342
2343 return (TRUE);
2344 }
2345 }
2346
2347 /*
2348 * Context switch by full context save.
2349 */
2350 processor = current_processor();
2351 processor->active_thread = thread;
2352 processor_state_update_from_thread(processor, thread);
2353
2354 if (thread->last_processor != processor && thread->last_processor != NULL) {
2355 if (thread->last_processor->processor_set != processor->processor_set)
2356 thread->ps_switch++;
2357 thread->p_switch++;
2358 }
2359 thread->last_processor = processor;
2360 thread->c_switch++;
2361 ast_context(thread);
2362
2363 thread_unlock(thread);
2364
2365 counter(c_thread_invoke_csw++);
2366
2367 self->reason = reason;
2368
2369 processor->last_dispatch = ctime;
2370 self->last_run_time = ctime;
2371 thread_timer_event(ctime, &thread->system_timer);
2372 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2373
2374 /*
2375 * Since non-precise user/kernel time doesn't update the state timer
2376 * during privilege transitions, synthesize an event now.
2377 */
2378 if (!thread->precise_user_kernel_time) {
2379 timer_switch(PROCESSOR_DATA(processor, current_state),
2380 ctime,
2381 PROCESSOR_DATA(processor, current_state));
2382 }
2383
2384 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2385 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
2386 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2387
2388 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2389 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE,
2390 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2391 }
2392
2393 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2394
2395 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2396
2397 /*
2398 * This is where we actually switch register context,
2399 * and address space if required. We will next run
2400 * as a result of a subsequent context switch.
2401 *
2402 * Once registers are switched and the processor is running "thread",
2403 * the stack variables and non-volatile registers will contain whatever
2404 * was there the last time that thread blocked. No local variables should
2405 * be used after this point, except for the special case of "thread", which
2406 * the platform layer returns as the previous thread running on the processor
2407 * via the function call ABI as a return register, and "self", which may have
2408 * been stored on the stack or a non-volatile register, but a stale idea of
2409 * what was on the CPU is newly-accurate because that thread is again
2410 * running on the CPU.
2411 */
2412 assert(continuation == self->continuation);
2413 thread = machine_switch_context(self, continuation, thread);
2414 assert(self == current_thread());
2415 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2416
2417 DTRACE_SCHED(on__cpu);
2418
2419 #if KPERF
2420 kperf_on_cpu(self, NULL, __builtin_frame_address(0));
2421 #endif /* KPERF */
2422
2423 /*
2424 * We have been resumed and are set to run.
2425 */
2426 thread_dispatch(thread, self);
2427
2428 if (continuation) {
2429 self->continuation = self->parameter = NULL;
2430
2431 (void) spllo();
2432
2433 call_continuation(continuation, parameter, self->wait_result);
2434 /*NOTREACHED*/
2435 }
2436
2437 return (TRUE);
2438 }
2439
2440 #if defined(CONFIG_SCHED_DEFERRED_AST)
2441 /*
2442 * pset_cancel_deferred_dispatch:
2443 *
2444 * Cancels all ASTs that we can cancel for the given processor set
2445 * if the current processor is running the last runnable thread in the
2446 * system.
2447 *
2448 * This function assumes the current thread is runnable. This must
2449 * be called with the pset unlocked.
2450 */
2451 static void
2452 pset_cancel_deferred_dispatch(
2453 processor_set_t pset,
2454 processor_t processor)
2455 {
2456 processor_t active_processor = NULL;
2457 uint32_t sampled_sched_run_count;
2458
2459 pset_lock(pset);
2460 sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN];
2461
2462 /*
2463 * If we have emptied the run queue, and our current thread is runnable, we
2464 * should tell any processors that are still DISPATCHING that they will
2465 * probably not have any work to do. In the event that there are no
2466 * pending signals that we can cancel, this is also uninteresting.
2467 *
2468 * In the unlikely event that another thread becomes runnable while we are
2469 * doing this (sched_run_count is atomically updated, not guarded), the
2470 * codepath making it runnable SHOULD (a dangerous word) need the pset lock
2471 * in order to dispatch it to a processor in our pset. So, the other
2472 * codepath will wait while we squash all cancelable ASTs, get the pset
2473 * lock, and then dispatch the freshly runnable thread. So this should be
2474 * correct (we won't accidentally have a runnable thread that hasn't been
2475 * dispatched to an idle processor), if not ideal (we may be restarting the
2476 * dispatch process, which could have some overhead).
2477 *
2478 */
2479 if ((sampled_sched_run_count == 1) &&
2480 (pset->pending_deferred_AST_cpu_mask)) {
2481 qe_foreach_element_safe(active_processor, &pset->active_queue, processor_queue) {
2482 /*
2483 * If a processor is DISPATCHING, it could be because of
2484 * a cancelable signal.
2485 *
2486 * IF the processor is not our
2487 * current processor (the current processor should not
2488 * be DISPATCHING, so this is a bit paranoid), AND there
2489 * is a cancelable signal pending on the processor, AND
2490 * there is no non-cancelable signal pending (as there is
2491 * no point trying to backtrack on bringing the processor
2492 * up if a signal we cannot cancel is outstanding), THEN
2493 * it should make sense to roll back the processor state
2494 * to the IDLE state.
2495 *
2496 * If the racy nature of this approach (as the signal
2497 * will be arbitrated by hardware, and can fire as we
2498 * roll back state) results in the core responding
2499 * despite being pushed back to the IDLE state, it
2500 * should be no different than if the core took some
2501 * interrupt while IDLE.
2502 */
2503 if ((active_processor->state == PROCESSOR_DISPATCHING) &&
2504 (bit_test(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id)) &&
2505 (!bit_test(pset->pending_AST_cpu_mask, active_processor->cpu_id)) &&
2506 (active_processor != processor)) {
2507 /*
2508 * Squash all of the processor state back to some
2509 * reasonable facsimile of PROCESSOR_IDLE.
2510 *
2511 * TODO: What queue policy do we actually want here?
2512 * We want to promote selection of a good processor
2513 * to run on. Do we want to enqueue at the head?
2514 * The tail? At the (relative) old position in the
2515 * queue? Or something else entirely?
2516 */
2517 if (!active_processor->is_recommended) {
2518 re_queue_head(&pset->unused_queue, &active_processor->processor_queue);
2519 } else if (active_processor->processor_primary == active_processor) {
2520 re_queue_head(&pset->idle_queue, &active_processor->processor_queue);
2521 } else {
2522 re_queue_head(&pset->idle_secondary_queue, &active_processor->processor_queue);
2523 }
2524
2525 pset->active_processor_count--;
2526 sched_update_pset_load_average(pset);
2527
2528 assert(active_processor->next_thread == THREAD_NULL);
2529 processor_state_update_idle(active_processor);
2530 active_processor->deadline = UINT64_MAX;
2531 active_processor->state = PROCESSOR_IDLE;
2532 bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id);
2533 machine_signal_idle_cancel(active_processor);
2534 }
2535
2536 }
2537 }
2538
2539 pset_unlock(pset);
2540 }
2541 #else
2542 /* We don't support deferred ASTs; everything is candycanes and sunshine. */
2543 #endif
2544
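/*
 * thread_csw_callout:
 *
 * Notify the performance controller about a context switch (or entry
 * to idle), including the same-priority latency of the incoming thread.
 */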
2545 static void
2546 thread_csw_callout(
2547 thread_t old,
2548 thread_t new,
2549 uint64_t timestamp)
2550 {
2551 perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH;
2552 uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency;
2553 machine_switch_perfcontrol_context(event, timestamp, 0,
2554 same_pri_latency, old, new);
2555 }
2556
2557
2558 /*
2559 * thread_dispatch:
2560 *
2561 * Handle threads at context switch. Re-dispatch other thread
2562 * if still running, otherwise update run state and perform
2563 * special actions. Update quantum for other thread and begin
2564 * the quantum for ourselves.
2565 *
2566 * "thread" is the old thread that we have switched away from.
2567 * "self" is the new current thread that we have context switched to
2568 *
2569 * Called at splsched.
2570 */
2571 void
2572 thread_dispatch(
2573 thread_t thread,
2574 thread_t self)
2575 {
2576 processor_t processor = self->last_processor;
2577
2578 assert(processor == current_processor());
2579 assert(self == current_thread());
2580 assert(thread != self);
2581
2582 if (thread != THREAD_NULL) {
2583 /*
2584 * Do the perfcontrol callout for context switch.
2585 * The reason we do this here is:
2586 * - thread_dispatch() is called from various places that are not
2587 * the direct context switch path, e.g. processor shutdown.
2588 * So adding the callout here covers all those cases.
2589 * - We want this callout as early as possible to be close
2590 * to the timestamp taken in thread_invoke()
2591 * - We want to avoid holding the thread lock while doing the
2592 * callout
2593 * - We do not want to make the callout if "thread" is NULL.
2594 */
2595 thread_csw_callout(thread, self, processor->last_dispatch);
2596
2597 /*
2598 * If blocked at a continuation, discard
2599 * the stack.
2600 */
2601 if (thread->continuation != NULL && thread->kernel_stack != 0)
2602 stack_free(thread);
2603
2604 if (thread->state & TH_IDLE) {
2605 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2606 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2607 (uintptr_t)thread_tid(thread), 0, thread->state,
2608 sched_run_buckets[TH_BUCKET_RUN], 0);
2609 } else {
2610 int64_t consumed;
2611 int64_t remainder = 0;
2612
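/*
 * remainder is the unused portion of the old thread's quantum;
 * consumed is the portion actually used, which is billed below.
 */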
2613 if (processor->quantum_end > processor->last_dispatch)
2614 remainder = processor->quantum_end -
2615 processor->last_dispatch;
2616
2617 consumed = thread->quantum_remaining - remainder;
2618
2619 if ((thread->reason & AST_LEDGER) == 0) {
2620 /*
2621 * Bill CPU time to both the task and
2622 * the individual thread.
2623 */
2624 ledger_credit(thread->t_ledger,
2625 task_ledgers.cpu_time, consumed);
2626 ledger_credit(thread->t_threadledger,
2627 thread_ledgers.cpu_time, consumed);
2628 if (thread->t_bankledger) {
2629 ledger_credit(thread->t_bankledger,
2630 bank_ledgers.cpu_time,
2631 (consumed - thread->t_deduct_bank_ledger_time));
2632
2633 }
2634 thread->t_deduct_bank_ledger_time = 0;
2635 }
2636
2637 wake_lock(thread);
2638 thread_lock(thread);
2639
2640 /*
2641 * Apply a priority floor if the thread holds a kernel resource.
2642 * Do this before checking starting_pri to avoid overpenalizing
2643 * repeated rwlock blockers.
2644 */
2645 if (__improbable(thread->rwlock_count != 0))
2646 lck_rw_set_promotion_locked(thread);
2647
2648 boolean_t keep_quantum = processor->first_timeslice;
2649
2650 /*
2651 * Treat a thread which has dropped priority since it got on core
2652 * as having expired its quantum.
2653 */
2654 if (processor->starting_pri > thread->sched_pri)
2655 keep_quantum = FALSE;
2656
2657 /* Compute remainder of current quantum. */
2658 if (keep_quantum &&
2659 processor->quantum_end > processor->last_dispatch)
2660 thread->quantum_remaining = (uint32_t)remainder;
2661 else
2662 thread->quantum_remaining = 0;
2663
2664 if (thread->sched_mode == TH_MODE_REALTIME) {
2665 /*
2666 * Cancel the deadline if the thread has
2667 * consumed the entire quantum.
2668 */
2669 if (thread->quantum_remaining == 0) {
2670 thread->realtime.deadline = UINT64_MAX;
2671 }
2672 } else {
2673 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
2674 /*
2675 * For non-realtime threads treat a tiny
2676 * remaining quantum as an expired quantum
2677 * but include what's left next time.
2678 */
2679 if (thread->quantum_remaining < min_std_quantum) {
2680 thread->reason |= AST_QUANTUM;
2681 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2682 }
2683 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
2684 }
2685
2686 /*
2687 * If we are doing a direct handoff then
2688 * take the remainder of the quantum.
2689 */
2690 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
2691 self->quantum_remaining = thread->quantum_remaining;
2692 thread->reason |= AST_QUANTUM;
2693 thread->quantum_remaining = 0;
2694 } else {
2695 #if defined(CONFIG_SCHED_MULTIQ)
2696 if (SCHED(sched_groups_enabled) &&
2697 thread->sched_group == self->sched_group) {
2698 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2699 MACHDBG_CODE(DBG_MACH_SCHED, MACH_QUANTUM_HANDOFF),
2700 self->reason, (uintptr_t)thread_tid(thread),
2701 self->quantum_remaining, thread->quantum_remaining, 0);
2702
2703 self->quantum_remaining = thread->quantum_remaining;
2704 thread->quantum_remaining = 0;
2705 /* Don't set AST_QUANTUM here - old thread might still want to preempt someone else */
2706 }
2707 #endif /* defined(CONFIG_SCHED_MULTIQ) */
2708 }
2709
2710 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2711
2712 if (!(thread->state & TH_WAIT)) {
2713 /*
2714 * Still runnable.
2715 */
2716 thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch;
2717
2718 machine_thread_going_off_core(thread, FALSE, processor->last_dispatch);
2719
2720 ast_t reason = thread->reason;
2721 sched_options_t options = SCHED_NONE;
2722
2723 if (reason & AST_REBALANCE) {
2724 options |= SCHED_REBALANCE;
2725 if (reason & AST_QUANTUM) {
2726 /* Having gone to the trouble of forcing this thread off a less preferred core,
2727 * we should force the preferable core to reschedule immediately to give this
2728 * thread a chance to run instead of just sitting on the run queue where
2729 * it may just be stolen back by the idle core we just forced it off.
2730 * But only do this at the end of a quantum to prevent cascading effects.
2731 */
2732 options |= SCHED_PREEMPT;
2733 }
2734 }
2735
2736 if (reason & AST_QUANTUM)
2737 options |= SCHED_TAILQ;
2738 else if (reason & AST_PREEMPT)
2739 options |= SCHED_HEADQ;
2740 else
2741 options |= (SCHED_PREEMPT | SCHED_TAILQ);
2742
2743 thread_setrun(thread, options);
2744
2745 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2746 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2747 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2748 sched_run_buckets[TH_BUCKET_RUN], 0);
2749
2750 if (thread->wake_active) {
2751 thread->wake_active = FALSE;
2752 thread_unlock(thread);
2753
2754 thread_wakeup(&thread->wake_active);
2755 } else {
2756 thread_unlock(thread);
2757 }
2758
2759 wake_unlock(thread);
2760 } else {
2761 /*
2762 * Waiting.
2763 */
2764 boolean_t should_terminate = FALSE;
2765 uint32_t new_run_count;
2766
2767 /* Only the first call to thread_dispatch
2768 * after explicit termination should add
2769 * the thread to the termination queue
2770 */
2771 if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
2772 should_terminate = TRUE;
2773 thread->state |= TH_TERMINATE2;
2774 }
2775
2776 thread->state &= ~TH_RUN;
2777 thread->last_made_runnable_time = thread->last_basepri_change_time = THREAD_NOT_RUNNABLE;
2778 thread->chosen_processor = PROCESSOR_NULL;
2779
2780 new_run_count = sched_run_decr(thread);
2781
2782 #if CONFIG_SCHED_SFI
2783 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
2784 if (thread->reason & AST_SFI) {
2785 thread->wait_sfi_begin_time = processor->last_dispatch;
2786 }
2787 }
2788 #endif
2789
2790 machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch);
2791
2792 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2793 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
2794 (uintptr_t)thread_tid(thread), thread->reason, thread->state,
2795 new_run_count, 0);
2796
2797 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2798
2799 if (thread->wake_active) {
2800 thread->wake_active = FALSE;
2801 thread_unlock(thread);
2802
2803 thread_wakeup(&thread->wake_active);
2804 } else {
2805 thread_unlock(thread);
2806 }
2807
2808 wake_unlock(thread);
2809
2810 if (should_terminate)
2811 thread_terminate_enqueue(thread);
2812 }
2813 }
2814 }
2815
2816 int urgency = THREAD_URGENCY_NONE;
2817 uint64_t latency = 0;
2818
2819 /* Update (new) current thread and reprogram quantum timer */
2820 thread_lock(self);
2821
2822 if (!(self->state & TH_IDLE)) {
2823 uint64_t arg1, arg2;
2824
2825 #if CONFIG_SCHED_SFI
2826 ast_t new_ast;
2827
2828 new_ast = sfi_thread_needs_ast(self, NULL);
2829
2830 if (new_ast != AST_NONE) {
2831 ast_on(new_ast);
2832 }
2833 #endif
2834
2835 assertf(processor->last_dispatch >= self->last_made_runnable_time,
2836 "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx",
2837 processor->last_dispatch, self->last_made_runnable_time);
2838
2839 assert(self->last_made_runnable_time <= self->last_basepri_change_time);
2840
2841 latency = processor->last_dispatch - self->last_made_runnable_time;
2842 assert(latency >= self->same_pri_latency);
2843
2844 urgency = thread_get_urgency(self, &arg1, &arg2);
2845
2846 thread_tell_urgency(urgency, arg1, arg2, latency, self);
2847
2848 /*
2849 * Get a new quantum if none remaining.
2850 */
2851 if (self->quantum_remaining == 0) {
2852 thread_quantum_init(self);
2853 }
2854
2855 /*
2856 * Set up quantum timer and timeslice.
2857 */
2858 processor->quantum_end = processor->last_dispatch + self->quantum_remaining;
2859 timer_call_quantum_timer_enter(&processor->quantum_timer, self,
2860 processor->quantum_end, processor->last_dispatch);
2861
2862 processor->first_timeslice = TRUE;
2863 } else {
2864 timer_call_quantum_timer_cancel(&processor->quantum_timer);
2865 processor->first_timeslice = FALSE;
2866
2867 thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
2868 }
2869
2870 assert(self->block_hint == kThreadWaitNone);
2871 self->computation_epoch = processor->last_dispatch;
2872 self->reason = AST_NONE;
2873 processor->starting_pri = self->sched_pri;
2874
2875 thread_unlock(self);
2876
2877 machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency,
2878 processor->last_dispatch);
2879
2880 #if defined(CONFIG_SCHED_DEFERRED_AST)
2881 /*
2882 * TODO: Can we state that redispatching our old thread is also
2883 * uninteresting?
2884 */
2885 if ((((volatile uint32_t)sched_run_buckets[TH_BUCKET_RUN]) == 1) &&
2886 !(self->state & TH_IDLE)) {
2887 pset_cancel_deferred_dispatch(processor->processor_set, processor);
2888 }
2889 #endif
2890
2891 }
2892
2893 /*
2894 * thread_block_reason:
2895 *
2896 * Forces a reschedule, blocking the caller if a wait
2897 * has been asserted.
2898 *
2899 * If a continuation is specified, then thread_invoke will
2900 * attempt to discard the thread's kernel stack. When the
2901 * thread resumes, it will execute the continuation function
2902 * on a new kernel stack.
2903 */
2904 counter(mach_counter_t c_thread_block_calls = 0;)
2905
2906 wait_result_t
2907 thread_block_reason(
2908 thread_continue_t continuation,
2909 void *parameter,
2910 ast_t reason)
2911 {
2912 thread_t self = current_thread();
2913 processor_t processor;
2914 thread_t new_thread;
2915 spl_t s;
2916
2917 counter(++c_thread_block_calls);
2918
2919 s = splsched();
2920
2921 processor = current_processor();
2922
2923 /* If we're explicitly yielding, force a subsequent quantum */
2924 if (reason & AST_YIELD)
2925 processor->first_timeslice = FALSE;
2926
2927 /* We're handling all scheduling AST's */
2928 ast_off(AST_SCHEDULING);
2929
2930 #if PROC_REF_DEBUG
2931 if ((continuation != NULL) && (self->task != kernel_task)) {
2932 if (uthread_get_proc_refcount(self->uthread) != 0) {
2933 panic("thread_block_reason with continuation uthread %p with uu_proc_refcount != 0", self->uthread);
2934 }
2935 }
2936 #endif
2937
2938 self->continuation = continuation;
2939 self->parameter = parameter;
2940
2941 if (self->state & ~(TH_RUN | TH_IDLE)) {
2942 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2943 MACHDBG_CODE(DBG_MACH_SCHED,MACH_BLOCK),
2944 reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0);
2945 }
2946
2947 do {
2948 thread_lock(self);
2949 new_thread = thread_select(self, processor, &reason);
2950 thread_unlock(self);
2951 } while (!thread_invoke(self, new_thread, reason));
2952
2953 splx(s);
2954
2955 return (self->wait_result);
2956 }
2957
2958 /*
2959 * thread_block:
2960 *
2961 * Block the current thread if a wait has been asserted.
2962 */
2963 wait_result_t
2964 thread_block(
2965 thread_continue_t continuation)
2966 {
2967 return thread_block_reason(continuation, NULL, AST_NONE);
2968 }
2969
2970 wait_result_t
2971 thread_block_parameter(
2972 thread_continue_t continuation,
2973 void *parameter)
2974 {
2975 return thread_block_reason(continuation, parameter, AST_NONE);
2976 }
2977
2978 /*
2979 * thread_run:
2980 *
2981 * Switch directly from the current thread to the
2982 * new thread, handing off our quantum if appropriate.
2983 *
2984 * New thread must be runnable, and not on a run queue.
2985 *
2986 * Called at splsched.
2987 */
2988 int
2989 thread_run(
2990 thread_t self,
2991 thread_continue_t continuation,
2992 void *parameter,
2993 thread_t new_thread)
2994 {
2995 ast_t reason = AST_HANDOFF;
2996
2997 self->continuation = continuation;
2998 self->parameter = parameter;
2999
3000 while (!thread_invoke(self, new_thread, reason)) {
3001 /* the handoff failed, so we have to fall back to the normal block path */
3002 processor_t processor = current_processor();
3003
3004 reason = AST_NONE;
3005
3006 thread_lock(self);
3007 new_thread = thread_select(self, processor, &reason);
3008 thread_unlock(self);
3009 }
3010
3011 return (self->wait_result);
3012 }
3013
3014 /*
3015 * thread_continue:
3016 *
3017 * Called at splsched when a thread first receives
3018 * a new stack after a continuation.
3019 */
3020 void
3021 thread_continue(
3022 thread_t thread)
3023 {
3024 thread_t self = current_thread();
3025 thread_continue_t continuation;
3026 void *parameter;
3027
3028 DTRACE_SCHED(on__cpu);
3029
3030 continuation = self->continuation;
3031 parameter = self->parameter;
3032
3033 #if KPERF
3034 kperf_on_cpu(self, continuation, NULL);
3035 #endif
3036
3037 thread_dispatch(thread, self);
3038
3039 self->continuation = self->parameter = NULL;
3040
3041 #if INTERRUPT_MASKED_DEBUG
3042 /* Reset interrupt-masked spin debugging timeout */
3043 ml_spin_debug_clear(self);
3044 #endif
3045
3046 if (thread != THREAD_NULL)
3047 (void)spllo();
3048
3049 TLOG(1, "thread_continue: calling call_continuation \n");
3050 call_continuation(continuation, parameter, self->wait_result);
3051 /*NOTREACHED*/
3052 }
3053
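/*
 * thread_quantum_init:
 *
 * Reset the thread's remaining quantum: realtime threads receive their
 * requested computation time, others the policy's initial quantum.
 */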
3054 void
3055 thread_quantum_init(thread_t thread)
3056 {
3057 if (thread->sched_mode == TH_MODE_REALTIME) {
3058 thread->quantum_remaining = thread->realtime.computation;
3059 } else {
3060 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
3061 }
3062 }
3063
3064 uint32_t
3065 sched_timeshare_initial_quantum_size(thread_t thread)
3066 {
3067 if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG)
3068 return bg_quantum;
3069 else
3070 return std_quantum;
3071 }
3072
3073 /*
3074 * run_queue_init:
3075 *
3076 * Initialize a run queue before first use.
3077 */
3078 void
3079 run_queue_init(
3080 run_queue_t rq)
3081 {
3082 rq->highq = NOPRI;
3083 for (u_int i = 0; i < BITMAP_LEN(NRQS); i++)
3084 rq->bitmap[i] = 0;
3085 rq->urgency = rq->count = 0;
3086 for (int i = 0; i < NRQS; i++)
3087 queue_init(&rq->queues[i]);
3088 }
3089
3090 /*
3091 * run_queue_dequeue:
3092 *
3093 * Perform a dequeue operation on a run queue,
3094 * and return the resulting thread.
3095 *
3096 * The run queue must be locked (see thread_run_queue_remove()
3097 * for more info), and not empty.
3098 */
3099 thread_t
3100 run_queue_dequeue(
3101 run_queue_t rq,
3102 integer_t options)
3103 {
3104 thread_t thread;
3105 queue_t queue = &rq->queues[rq->highq];
3106
3107 if (options & SCHED_HEADQ) {
3108 thread = qe_dequeue_head(queue, struct thread, runq_links);
3109 } else {
3110 thread = qe_dequeue_tail(queue, struct thread, runq_links);
3111 }
3112
3113 assert(thread != THREAD_NULL);
3114 assert_thread_magic(thread);
3115
3116 thread->runq = PROCESSOR_NULL;
3117 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3118 rq->count--;
3119 if (SCHED(priority_is_urgent)(rq->highq)) {
3120 rq->urgency--; assert(rq->urgency >= 0);
3121 }
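/* If that priority level is now empty, find the new highest occupied level */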
3122 if (queue_empty(queue)) {
3123 bitmap_clear(rq->bitmap, rq->highq);
3124 rq->highq = bitmap_first(rq->bitmap, NRQS);
3125 }
3126
3127 return thread;
3128 }
3129
3130 /*
3131 * run_queue_enqueue:
3132 *
3133 * Perform an enqueue operation on a run queue.
3134 *
3135 * The run queue must be locked (see thread_run_queue_remove()
3136 * for more info).
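 *
 * Returns TRUE if the enqueued thread became the new highest-priority
 * entry in the run queue.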
3137 */
3138 boolean_t
3139 run_queue_enqueue(
3140 run_queue_t rq,
3141 thread_t thread,
3142 integer_t options)
3143 {
3144 queue_t queue = &rq->queues[thread->sched_pri];
3145 boolean_t result = FALSE;
3146
3147 assert_thread_magic(thread);
3148
3149 if (queue_empty(queue)) {
3150 enqueue_tail(queue, &thread->runq_links);
3151
3152 rq_bitmap_set(rq->bitmap, thread->sched_pri);
3153 if (thread->sched_pri > rq->highq) {
3154 rq->highq = thread->sched_pri;
3155 result = TRUE;
3156 }
3157 } else {
3158 if (options & SCHED_TAILQ)
3159 enqueue_tail(queue, &thread->runq_links);
3160 else
3161 enqueue_head(queue, &thread->runq_links);
3162 }
3163 if (SCHED(priority_is_urgent)(thread->sched_pri))
3164 rq->urgency++;
3165 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3166 rq->count++;
3167
3168 return (result);
3169 }
3170
3171 /*
3172 * run_queue_remove:
3173 *
3174 * Remove a specific thread from a runqueue.
3175 *
3176 * The run queue must be locked.
3177 */
3178 void
3179 run_queue_remove(
3180 run_queue_t rq,
3181 thread_t thread)
3182 {
3183 assert(thread->runq != PROCESSOR_NULL);
3184 assert_thread_magic(thread);
3185
3186 remqueue(&thread->runq_links);
3187 SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
3188 rq->count--;
3189 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3190 rq->urgency--; assert(rq->urgency >= 0);
3191 }
3192
3193 if (queue_empty(&rq->queues[thread->sched_pri])) {
3194 /* update run queue status */
3195 bitmap_clear(rq->bitmap, thread->sched_pri);
3196 rq->highq = bitmap_first(rq->bitmap, NRQS);
3197 }
3198
3199 thread->runq = PROCESSOR_NULL;
3200 }
3201
3202 /* Assumes RT lock is not held, and acquires splsched/rt_lock itself */
3203 void
3204 sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context)
3205 {
3206 spl_t s;
3207 thread_t thread;
3208
3209 processor_set_t pset = &pset0;
3210
3211 s = splsched();
3212 rt_lock_lock(pset);
3213
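/* Track the earliest make-runnable time among queued realtime threads */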
3214 qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) {
3215 if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) {
3216 scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time;
3217 }
3218 }
3219
3220 rt_lock_unlock(pset);
3221 splx(s);
3222 }
3223
3224 int64_t
3225 sched_rtglobal_runq_count_sum(void)
3226 {
3227 return pset0.rt_runq.runq_stats.count_sum;
3228 }
3229
3230 /*
3231 * realtime_queue_insert:
3232 *
3233 * Enqueue a thread for realtime execution.
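 *
 * The queue is kept in deadline order. Returns TRUE when the thread
 * is inserted at the head of the queue (earliest deadline).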
3234 */
3235 static boolean_t
3236 realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thread)
3237 {
3238 queue_t queue = &SCHED(rt_runq)(pset)->queue;
3239 uint64_t deadline = thread->realtime.deadline;
3240 boolean_t preempt = FALSE;
3241
3242 rt_lock_lock(pset);
3243
3244 if (queue_empty(queue)) {
3245 enqueue_tail(queue, &thread->runq_links);
3246 preempt = TRUE;
3247 } else {
3248 /* Insert into rt_runq in thread deadline order */
3249 queue_entry_t iter;
3250 qe_foreach(iter, queue) {
3251 thread_t iter_thread = qe_element(iter, struct thread, runq_links);
3252 assert_thread_magic(iter_thread);
3253
3254 if (deadline < iter_thread->realtime.deadline) {
3255 if (iter == queue_first(queue))
3256 preempt = TRUE;
3257 insque(&thread->runq_links, queue_prev(iter));
3258 break;
3259 } else if (iter == queue_last(queue)) {
3260 enqueue_tail(queue, &thread->runq_links);
3261 break;
3262 }
3263 }
3264 }
3265
3266 thread->runq = processor;
3267 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
3268 rt_runq_count_incr(pset);
3269
3270 rt_lock_unlock(pset);
3271
3272 return (preempt);
3273 }
3274
3275 /*
3276 * realtime_setrun:
3277 *
3278 * Dispatch a thread for realtime execution.
3279 *
3280 * Thread must be locked. Associated pset must
3281 * be locked, and is returned unlocked.
3282 */
3283 static void
3284 realtime_setrun(
3285 processor_t processor,
3286 thread_t thread)
3287 {
3288 processor_set_t pset = processor->processor_set;
3289 ast_t preempt;
3290
3291 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3292
3293 thread->chosen_processor = processor;
3294
3295 /* <rdar://problem/15102234> */
3296 assert(thread->bound_processor == PROCESSOR_NULL);
3297
3298 /*
3299 * Dispatch directly onto idle processor.
3300 */
3301 if ( (thread->bound_processor == processor)
3302 && processor->state == PROCESSOR_IDLE) {
3303 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3304
3305 pset->active_processor_count++;
3306 sched_update_pset_load_average(pset);
3307
3308 processor->next_thread = thread;
3309 processor_state_update_from_thread(processor, thread);
3310 processor->deadline = thread->realtime.deadline;
3311 processor->state = PROCESSOR_DISPATCHING;
3312
3313 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
3314 pset_unlock(pset);
3315 sched_ipi_perform(processor, ipi_type);
3316 return;
3317 }
3318
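/*
 * Preempt urgently if the processor is running non-realtime work, or
 * if the new thread's deadline is earlier than the processor's current
 * realtime deadline.
 */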
3319 if (processor->current_pri < BASEPRI_RTQUEUES)
3320 preempt = (AST_PREEMPT | AST_URGENT);
3321 else if (thread->realtime.deadline < processor->deadline)
3322 preempt = (AST_PREEMPT | AST_URGENT);
3323 else
3324 preempt = AST_NONE;
3325
3326 realtime_queue_insert(processor, pset, thread);
3327
3328 ipi_type = SCHED_IPI_NONE;
3329 if (preempt != AST_NONE) {
3330 if (processor->state == PROCESSOR_IDLE) {
3331 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3332
3333 pset->active_processor_count++;
3334 sched_update_pset_load_average(pset);
3335
3336 processor->next_thread = THREAD_NULL;
3337 processor_state_update_from_thread(processor, thread);
3338 processor->deadline = thread->realtime.deadline;
3339 processor->state = PROCESSOR_DISPATCHING;
3340 if (processor == current_processor()) {
3341 ast_on(preempt);
3342 } else {
3343 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT);
3344 }
3345 } else if (processor->state == PROCESSOR_DISPATCHING) {
3346 if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
3347 processor_state_update_from_thread(processor, thread);
3348 processor->deadline = thread->realtime.deadline;
3349 }
3350 } else {
3351 if (processor == current_processor()) {
3352 ast_on(preempt);
3353 } else {
3354 ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT);
3355 }
3356 }
3357 } else {
3358 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
3359 }
3360
3361 pset_unlock(pset);
3362 sched_ipi_perform(processor, ipi_type);
3363 }
3364
3365
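/*
 * sched_ipi_deferred_policy:
 *
 * Decide whether a deferred IPI may be used for the destination CPU;
 * returns SCHED_IPI_NONE if a deferred AST is already pending there.
 */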
3366 sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset, processor_t dst,
3367 __unused sched_ipi_event_t event)
3368 {
3369 #if defined(CONFIG_SCHED_DEFERRED_AST)
3370 if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) {
3371 return SCHED_IPI_DEFERRED;
3372 }
3373 #else /* CONFIG_SCHED_DEFERRED_AST */
3374 panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id);
3375 #endif /* CONFIG_SCHED_DEFERRED_AST */
3376 return SCHED_IPI_NONE;
3377 }
3378
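/*
 * sched_ipi_action:
 *
 * Choose the IPI type for the destination processor via the scheduler's
 * ipi_policy, and set the matching pending-AST bit so duplicate IPIs are
 * not sent. The caller delivers the IPI with sched_ipi_perform() after
 * dropping the pset lock.
 */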
3379 sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3380 {
3381 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3382 assert(dst != NULL);
3383
3384 processor_set_t pset = dst->processor_set;
3385 if (current_processor() == dst) {
3386 return SCHED_IPI_NONE;
3387 }
3388
3389 if (bit_test(pset->pending_AST_cpu_mask, dst->cpu_id)) {
3390 return SCHED_IPI_NONE;
3391 }
3392
3393 ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event);
3394 switch(ipi_type) {
3395 case SCHED_IPI_NONE:
3396 return SCHED_IPI_NONE;
3397 #if defined(CONFIG_SCHED_DEFERRED_AST)
3398 case SCHED_IPI_DEFERRED:
3399 bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id);
3400 break;
3401 #endif /* CONFIG_SCHED_DEFERRED_AST */
3402 default:
3403 bit_set(pset->pending_AST_cpu_mask, dst->cpu_id);
3404 break;
3405 }
3406 return ipi_type;
3407 }
3408
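/*
 * sched_ipi_policy:
 *
 * Default IPI policy: immediate (or idle) IPIs for spill, SMT rebalance,
 * rebalance and bound-thread events; deferred IPIs, when supported, for
 * non-realtime preemption of an idle core.
 */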
3409 sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event)
3410 {
3411 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3412 boolean_t deferred_ipi_supported = false;
3413 processor_set_t pset = dst->processor_set;
3414
3415 #if defined(CONFIG_SCHED_DEFERRED_AST)
3416 deferred_ipi_supported = true;
3417 #endif /* CONFIG_SCHED_DEFERRED_AST */
3418
3419 switch(event) {
3420 case SCHED_IPI_EVENT_SPILL:
3421 case SCHED_IPI_EVENT_SMT_REBAL:
3422 case SCHED_IPI_EVENT_REBALANCE:
3423 case SCHED_IPI_EVENT_BOUND_THR:
3424 /*
3425 * The spill, SMT rebalance, rebalance and the bound thread
3426 * scenarios use immediate IPIs always.
3427 */
3428 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3429 break;
3430 case SCHED_IPI_EVENT_PREEMPT:
3431 /* In the preemption case, use immediate IPIs for RT threads */
3432 if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) {
3433 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3434 break;
3435 }
3436
3437 /*
3438 * For non-RT thread preemption:
3439 * If the core is active, use immediate IPIs.
3440 * If the core is idle, use deferred IPIs if supported; otherwise an immediate IPI.
3441 */
3442 if (deferred_ipi_supported && dst_idle) {
3443 return sched_ipi_deferred_policy(pset, dst, event);
3444 }
3445 ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE;
3446 break;
3447 default:
3448 panic("Unrecognized scheduler IPI event type %d", event);
3449 }
3450 assert(ipi_type != SCHED_IPI_NONE);
3451 return ipi_type;
3452 }
3453
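/*
 * sched_ipi_perform:
 *
 * Deliver the previously selected IPI to the destination processor.
 */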
3454 void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
3455 {
3456 switch (ipi) {
3457 case SCHED_IPI_NONE:
3458 break;
3459 case SCHED_IPI_IDLE:
3460 machine_signal_idle(dst);
3461 break;
3462 case SCHED_IPI_IMMEDIATE:
3463 cause_ast_check(dst);
3464 break;
3465 case SCHED_IPI_DEFERRED:
3466 machine_signal_idle_deferred(dst);
3467 break;
3468 default:
3469 panic("Unrecognized scheduler IPI type: %d", ipi);
3470 }
3471 }
3472
3473 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
3474
3475 boolean_t
3476 priority_is_urgent(int priority)
3477 {
3478 return bitmap_test(sched_preempt_pri, priority) ? TRUE : FALSE;
3479 }
3480
3481 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
3482
3483 /*
3484 * processor_setrun:
3485 *
3486 * Dispatch a thread for execution on a
3487 * processor.
3488 *
3489 * Thread must be locked. Associated pset must
3490 * be locked, and is returned unlocked.
3491 */
3492 static void
3493 processor_setrun(
3494 processor_t processor,
3495 thread_t thread,
3496 integer_t options)
3497 {
3498 processor_set_t pset = processor->processor_set;
3499 ast_t preempt;
3500 enum { eExitIdle, eInterruptRunning, eDoNothing } ipi_action = eDoNothing;
3501
3502 sched_ipi_type_t ipi_type = SCHED_IPI_NONE;
3503
3504 thread->chosen_processor = processor;
3505
3506 /*
3507 * Dispatch directly onto idle processor.
3508 */
3509 if ( (SCHED(direct_dispatch_to_idle_processors) ||
3510 thread->bound_processor == processor)
3511 && processor->state == PROCESSOR_IDLE) {
3512
3513 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3514
3515 pset->active_processor_count++;
3516 sched_update_pset_load_average(pset);
3517
3518 processor->next_thread = thread;
3519 processor_state_update_from_thread(processor, thread);
3520 processor->deadline = UINT64_MAX;
3521 processor->state = PROCESSOR_DISPATCHING;
3522
3523 ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR);
3524 pset_unlock(pset);
3525 sched_ipi_perform(processor, ipi_type);
3526 return;
3527 }
3528
3529 /*
3530 * Set preemption mode.
3531 */
3532 #if defined(CONFIG_SCHED_DEFERRED_AST)
3533 /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */
3534 #endif
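/*
 * Urgent preemption when the incoming thread has an urgent priority above
 * the running priority, or when the running thread has asked for eager
 * preemption. Timeshare threads running below their base priority only
 * preempt when their base priority is urgent; otherwise preemption
 * follows the SCHED_PREEMPT option.
 */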
3535 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
3536 preempt = (AST_PREEMPT | AST_URGENT);
3537 else if (processor->active_thread && thread_eager_preemption(processor->active_thread))
3538 preempt = (AST_PREEMPT | AST_URGENT);
3539 else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
3540 if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
3541 preempt = (options & SCHED_PREEMPT) ? AST_PREEMPT : AST_NONE;
3542 } else {
3543 preempt = AST_NONE;
3544 }
3545 } else
3546 preempt = (options & SCHED_PREEMPT) ? AST_PREEMPT : AST_NONE;
3547
3548 SCHED(processor_enqueue)(processor, thread, options);
3549 sched_update_pset_load_average(pset);
3550
3551 if (preempt != AST_NONE) {
3552 if (processor->state == PROCESSOR_IDLE) {
3553 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3554 pset->active_processor_count++;
3555 processor->next_thread = THREAD_NULL;
3556 processor_state_update_from_thread(processor, thread);
3557 processor->deadline = UINT64_MAX;
3558 processor->state = PROCESSOR_DISPATCHING;
3559 ipi_action = eExitIdle;
3560 } else if ( processor->state == PROCESSOR_DISPATCHING) {
3561 if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
3562 processor_state_update_from_thread(processor, thread);
3563 processor->deadline = UINT64_MAX;
3564 }
3565 } else if ( (processor->state == PROCESSOR_RUNNING ||
3566 processor->state == PROCESSOR_SHUTDOWN) &&
3567 (thread->sched_pri >= processor->current_pri)) {
3568 ipi_action = eInterruptRunning;
3569 }
3570 } else {
3571 /*
3572 * New thread is not important enough to preempt what is running, but
3573 * special processor states may need special handling
3574 */
3575 if (processor->state == PROCESSOR_SHUTDOWN &&
3576 thread->sched_pri >= processor->current_pri ) {
3577 ipi_action = eInterruptRunning;
3578 } else if (processor->state == PROCESSOR_IDLE) {
3579 re_queue_tail(&pset->active_queue, &processor->processor_queue);
3580
3581 pset->active_processor_count++;
3582 // sched_update_pset_load_average(pset);
3583
3584 processor->next_thread = THREAD_NULL;
3585 processor_state_update_from_thread(processor, thread);
3586 processor->deadline = UINT64_MAX;
3587 processor->state = PROCESSOR_DISPATCHING;
3588
3589 ipi_action = eExitIdle;
3590 }
3591 }
3592
3593 if (ipi_action != eDoNothing) {
3594 if (processor == current_processor()) {
3595 if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE)
3596 ast_on(preempt);
3597 } else {
3598 sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT;
3599 ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event);
3600 }
3601 }
3602 pset_unlock(pset);
3603 sched_ipi_perform(processor, ipi_type);
3604 }
3605
3606 /*
3607 * choose_next_pset:
3608 *
3609 * Return the next sibling pset containing
3610 * available processors.
3611 *
3612 * Returns the original pset if none other is
3613 * suitable.
3614 */
3615 static processor_set_t
3616 choose_next_pset(
3617 processor_set_t pset)
3618 {
3619 processor_set_t nset = pset;
3620
3621 do {
3622 nset = next_pset(nset);
3623 } while (nset->online_processor_count < 1 && nset != pset);
3624
3625 return (nset);
3626 }
3627
3628 /*
3629 * choose_processor:
3630 *
3631 * Choose a processor for the thread, beginning at
3632 * the pset. Accepts an optional processor hint in
3633 * the pset.
3634 *
3635 * Returns a processor, possibly from a different pset.
3636 *
3637 * The thread must be locked. The pset must be locked,
3638 * and the resulting pset is locked on return.
3639 */
3640 processor_t
3641 choose_processor(
3642 processor_set_t pset,
3643 processor_t processor,
3644 thread_t thread)
3645 {
3646 processor_set_t nset, cset = pset;
3647
3648 assert(thread->sched_pri <= BASEPRI_RTQUEUES);
3649
3650 /*
3651 * Prefer the hinted processor, when appropriate.
3652 */
3653
3654 /* Fold last processor hint from secondary processor to its primary */
3655 if (processor != PROCESSOR_NULL) {
3656 processor = processor->processor_primary;
3657 }
3658
3659 /*
3660 * Only consult platform layer if pset is active, which
3661 * it may not be in some cases when a multi-set system
3662 * is going to sleep.
3663 */
3664 if (pset->online_processor_count) {
3665 if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) {
3666 processor_t mc_processor = machine_choose_processor(pset, processor);
3667 if (mc_processor != PROCESSOR_NULL)
3668 processor = mc_processor->processor_primary;
3669 }
3670 }
3671
3672 /*
3673 * At this point, we may have a processor hint, and we may have
3674 * an initial starting pset. If the hint is not in the pset, or
3675 * if the hint is for a processor in an invalid state, discard
3676 * the hint.
3677 */
3678 if (processor != PROCESSOR_NULL) {
3679 if (processor->processor_set != pset) {
3680 processor = PROCESSOR_NULL;
3681 } else if (!processor->is_recommended) {
3682 processor = PROCESSOR_NULL;
3683 } else {
3684 switch (processor->state) {
3685 case PROCESSOR_START:
3686 case PROCESSOR_SHUTDOWN:
3687 case PROCESSOR_OFF_LINE:
3688 /*
3689 * Hint is for a processor that cannot support running new threads.
3690 */
3691 processor = PROCESSOR_NULL;
3692 break;
3693 case PROCESSOR_IDLE:
3694 /*
3695 * Hint is for an idle processor. Assume it is no worse than any other
3696 * idle processor. The platform layer had an opportunity to provide
3697 * the "least cost idle" processor above.
3698 */
3699 return (processor);
3700 case PROCESSOR_RUNNING:
3701 case PROCESSOR_DISPATCHING:
3702 /*
3703 * Hint is for an active CPU. This fast-path allows
3704 * realtime threads to preempt non-realtime threads
3705 * to regain their previous executing processor.
3706 */
3707 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
3708 (processor->current_pri < BASEPRI_RTQUEUES))
3709 return (processor);
3710
3711 /* Otherwise, use hint as part of search below */
3712 break;
3713 default:
3714 processor = PROCESSOR_NULL;
3715 break;
3716 }
3717 }
3718 }
3719
3720 /*
3721 * Iterate through the processor sets to locate
3722 * an appropriate processor. Seed results with
3723 * a last-processor hint, if available, so that
3724 * a search must find something strictly better
3725 * to replace it.
3726 *
3727 * A primary/secondary pair of SMT processors is
3728 * "unpaired" if the primary is busy but its
3729 * corresponding secondary is idle (so the physical
3730 * core has full use of its resources).
3731 */
3732
3733 integer_t lowest_priority = MAXPRI + 1;
3734 integer_t lowest_unpaired_primary_priority = MAXPRI + 1;
3735 integer_t lowest_count = INT_MAX;
3736 uint64_t furthest_deadline = 1;
3737 processor_t lp_processor = PROCESSOR_NULL;
3738 processor_t lp_unpaired_primary_processor = PROCESSOR_NULL;
3739 processor_t lp_unpaired_secondary_processor = PROCESSOR_NULL;
3740 processor_t lc_processor = PROCESSOR_NULL;
3741 processor_t fd_processor = PROCESSOR_NULL;
3742
3743 if (processor != PROCESSOR_NULL) {
3744 /* All other states should be enumerated above. */
3745 assert(processor->state == PROCESSOR_RUNNING || processor->state == PROCESSOR_DISPATCHING);
3746
3747 lowest_priority = processor->current_pri;
3748 lp_processor = processor;
3749
3750 if (processor->current_pri >= BASEPRI_RTQUEUES) {
3751 furthest_deadline = processor->deadline;
3752 fd_processor = processor;
3753 }
3754
3755 lowest_count = SCHED(processor_runq_count)(processor);
3756 lc_processor = processor;
3757 }
3758
3759 do {
3760
3761 /*
3762 * Choose an idle processor, in pset traversal order
3763 */
3764 qe_foreach_element(processor, &cset->idle_queue, processor_queue) {
3765 if (processor->is_recommended)
3766 return processor;
3767 }
3768
3769 /*
3770 * Otherwise, enumerate active and idle processors to find candidates
3771 * with lower priority/etc.
3772 */
3773
3774 qe_foreach_element(processor, &cset->active_queue, processor_queue) {
3775
3776 if (!processor->is_recommended) {
3777 continue;
3778 }
3779
3780 integer_t cpri = processor->current_pri;
3781 if (cpri < lowest_priority) {
3782 lowest_priority = cpri;
3783 lp_processor = processor;
3784 }
3785
3786 if ((cpri >= BASEPRI_RTQUEUES) && (processor->deadline > furthest_deadline)) {
3787 furthest_deadline = processor->deadline;
3788 fd_processor = processor;
3789 }
3790
3791 integer_t ccount = SCHED(processor_runq_count)(processor);
3792 if (ccount < lowest_count) {
3793 lowest_count = ccount;
3794 lc_processor = processor;
3795 }
3796 }
3797
3798 /*
3799 * For SMT configs, these idle secondary processors must have an active primary.
3800 * Otherwise the idle primary would have short-circuited the loop above.
3801 */
3802 qe_foreach_element(processor, &cset->idle_secondary_queue, processor_queue) {
3803
3804 if (!processor->is_recommended) {
3805 continue;
3806 }
3807
3808 processor_t cprimary = processor->processor_primary;
3809
3810 /* If the primary processor is offline or starting up, it's not a candidate for this path */
3811 if (cprimary->state == PROCESSOR_RUNNING || cprimary->state == PROCESSOR_DISPATCHING) {
3812 integer_t primary_pri = cprimary->current_pri;
3813
3814 if (primary_pri < lowest_unpaired_primary_priority) {
3815 lowest_unpaired_primary_priority = primary_pri;
3816 lp_unpaired_primary_processor = cprimary;
3817 lp_unpaired_secondary_processor = processor;
3818 }
3819 }
3820 }
3821
3822
3823 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3824
3825 /*
3826 * For realtime threads, the most important aspect is
3827 * scheduling latency, so we attempt to assign threads
3828 * to good preemption candidates (assuming an idle primary
3829 * processor was not available above).
3830 */
3831
3832 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3833 /* Move to end of active queue so that the next thread doesn't also pick it */
3834 re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
3835 return lp_unpaired_primary_processor;
3836 }
3837 if (thread->sched_pri > lowest_priority) {
3838 /* Move to end of active queue so that the next thread doesn't also pick it */
3839 re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
3840 return lp_processor;
3841 }
3842 if (thread->realtime.deadline < furthest_deadline)
3843 return fd_processor;
3844
3845 /*
3846 * If all primary and secondary CPUs are busy with realtime
3847 * threads with deadlines earlier than us, move on to next
3848 * pset.
3849 */
3850 }
3851 else {
3852
3853 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3854 /* Move to end of active queue so that the next thread doesn't also pick it */
3855 re_queue_tail(&cset->active_queue, &lp_unpaired_primary_processor->processor_queue);
3856 return lp_unpaired_primary_processor;
3857 }
3858 if (thread->sched_pri > lowest_priority) {
3859 /* Move to end of active queue so that the next thread doesn't also pick it */
3860 re_queue_tail(&cset->active_queue, &lp_processor->processor_queue);
3861 return lp_processor;
3862 }
3863
3864 /*
3865 * If all primary processors in this pset are running a higher
3866 * priority thread, move on to next pset. Only when we have
3867 * exhausted this search do we fall back to other heuristics.
3868 */
3869 }
3870
3871 /*
3872 * Move onto the next processor set.
3873 */
3874 nset = next_pset(cset);
3875
3876 if (nset != pset) {
3877 pset_unlock(cset);
3878
3879 cset = nset;
3880 pset_lock(cset);
3881 }
3882 } while (nset != pset);
3883
3884 /*
3885 * Make sure that we pick a running processor,
3886 * and that the correct processor set is locked.
3887 * Since we may have unlocked the candidate processor's
3888 * pset, it may have changed state.
3889 *
3890 * All primary processors are running a higher priority
3891 * thread, so the only options left are enqueuing on
3892 * the secondary processor that would perturb the lowest-priority
3893 * primary, or the least busy primary.
3894 */
3895 do {
3896
3897 /* lowest_priority is evaluated in the main loops above */
3898 if (lp_unpaired_secondary_processor != PROCESSOR_NULL) {
3899 processor = lp_unpaired_secondary_processor;
3900 lp_unpaired_secondary_processor = PROCESSOR_NULL;
3901 } else if (lc_processor != PROCESSOR_NULL) {
3902 processor = lc_processor;
3903 lc_processor = PROCESSOR_NULL;
3904 } else {
3905 /*
3906 * All processors are executing higher
3907 * priority threads, and the lowest_count
3908 * candidate was not usable
3909 */
3910 processor = master_processor;
3911 }
3912
3913 /*
3914 * Check that the correct processor set is
3915 * returned locked.
3916 */
3917 if (cset != processor->processor_set) {
3918 pset_unlock(cset);
3919 cset = processor->processor_set;
3920 pset_lock(cset);
3921 }
3922
3923 /*
3924 * We must verify that the chosen processor is still available.
3925 * master_processor is an exception, since we may need to preempt
3926 * a running thread on it during processor shutdown (for sleep),
3927 * and that thread needs to be enqueued on its runqueue to run
3928 * when the processor is restarted.
3929 */
3930 if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE))
3931 processor = PROCESSOR_NULL;
3932
3933 } while (processor == PROCESSOR_NULL);
3934
3935 if (processor->state == PROCESSOR_RUNNING) {
3936 re_queue_tail(&cset->active_queue, &processor->processor_queue);
3937 }
3938
3939 return (processor);
3940 }
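/*
 * To summarize the search above: an idle, recommended processor found in
 * pset traversal order wins outright; otherwise the thread preempts the
 * lowest-priority "unpaired" primary, then the lowest-priority running
 * processor, then (for realtime threads) the processor with the furthest
 * deadline.  Only after every pset has been visited does the search fall
 * back to an idle secondary, the least-loaded candidate, or ultimately
 * master_processor.
 */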
3941
3942 /*
3943 * thread_setrun:
3944 *
3945 * Dispatch thread for execution, onto an idle
3946 * processor or run queue, and signal a preemption
3947 * as appropriate.
3948 *
3949 * Thread must be locked.
3950 */
3951 void
3952 thread_setrun(
3953 thread_t thread,
3954 integer_t options)
3955 {
3956 processor_t processor;
3957 processor_set_t pset;
3958
3959 assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN);
3960 assert(thread->runq == PROCESSOR_NULL);
3961
3962 /*
3963 * Update priority if needed.
3964 */
3965 if (SCHED(can_update_priority)(thread))
3966 SCHED(update_priority)(thread);
3967
3968 thread->sfi_class = sfi_thread_classify(thread);
3969
3970 assert(thread->runq == PROCESSOR_NULL);
3971
3972 #if __SMP__
3973 if (thread->bound_processor == PROCESSOR_NULL) {
3974 /*
3975 * Unbound case.
3976 */
3977 if (thread->affinity_set != AFFINITY_SET_NULL) {
3978 /*
3979 * Use affinity set policy hint.
3980 */
3981 pset = thread->affinity_set->aset_pset;
3982 pset_lock(pset);
3983
3984 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
3985 pset = processor->processor_set;
3986
3987 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
3988 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
3989 } else if (thread->last_processor != PROCESSOR_NULL) {
3990 /*
3991 * Simple (last processor) affinity case.
3992 */
3993 processor = thread->last_processor;
3994 pset = processor->processor_set;
3995 pset_lock(pset);
3996 processor = SCHED(choose_processor)(pset, processor, thread);
3997 pset = processor->processor_set;
3998
3999 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
4000 (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
4001 } else {
4002 /*
4003 * No Affinity case:
4004 *
4005 * Utilize a per-task hint to spread threads
4006 * among the available processor sets.
4007 */
4008 task_t task = thread->task;
4009
4010 pset = task->pset_hint;
4011 if (pset == PROCESSOR_SET_NULL)
4012 pset = current_processor()->processor_set;
4013
4014 pset = choose_next_pset(pset);
4015 pset_lock(pset);
4016
4017 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
4018 pset = processor->processor_set;
4019 task->pset_hint = pset;
4020
4021 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
4022 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4023 }
4024 } else {
4025 /*
4026 * Bound case:
4027 *
4028 * Unconditionally dispatch on the processor.
4029 */
4030 processor = thread->bound_processor;
4031 pset = processor->processor_set;
4032 pset_lock(pset);
4033
4034 SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE,
4035 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
4036 }
4037 #else /* !__SMP__ */
4038 /* Only one processor to choose */
4039 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor);
4040 processor = master_processor;
4041 pset = processor->processor_set;
4042 pset_lock(pset);
4043 #endif /* !__SMP__ */
4044
4045 /*
4046 * Dispatch the thread on the chosen processor.
4047 * TODO: This should be based on sched_mode, not sched_pri
4048 */
4049 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
4050 realtime_setrun(processor, thread);
4051 } else {
4052 processor_setrun(processor, thread, options);
4053 /* pset is now unlocked */
4054 if (thread->bound_processor == PROCESSOR_NULL) {
4055 SCHED(check_spill)(pset, thread);
4056 }
4057 }
4058 }
4059
4060 processor_set_t
4061 task_choose_pset(
4062 task_t task)
4063 {
4064 processor_set_t pset = task->pset_hint;
4065
4066 if (pset != PROCESSOR_SET_NULL)
4067 pset = choose_next_pset(pset);
4068
4069 return (pset);
4070 }
4071
4072 /*
4073 * Check for a preemption point in
4074 * the current context.
4075 *
4076 * Called at splsched with thread locked.
4077 */
4078 ast_t
4079 csw_check(
4080 processor_t processor,
4081 ast_t check_reason)
4082 {
4083 processor_set_t pset = processor->processor_set;
4084 ast_t result;
4085
4086 pset_lock(pset);
4087
4088 /* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */
4089 bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);
4090
4091 result = csw_check_locked(processor, pset, check_reason);
4092
4093 pset_unlock(pset);
4094
4095 return result;
4096 }
4097
4098 /*
4099 * Check for preemption at splsched with
4100 * pset and thread locked
4101 */
4102 ast_t
4103 csw_check_locked(
4104 processor_t processor,
4105 processor_set_t pset,
4106 ast_t check_reason)
4107 {
4108 ast_t result;
4109 thread_t thread = processor->active_thread;
4110
4111 if (processor->first_timeslice) {
4112 if (rt_runq_count(pset) > 0)
4113 return (check_reason | AST_PREEMPT | AST_URGENT);
4114 }
4115 else {
4116 if (rt_runq_count(pset) > 0) {
4117 if (BASEPRI_RTQUEUES > processor->current_pri)
4118 return (check_reason | AST_PREEMPT | AST_URGENT);
4119 else
4120 return (check_reason | AST_PREEMPT);
4121 }
4122 }
4123
4124 result = SCHED(processor_csw_check)(processor);
4125 if (result != AST_NONE)
4126 return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE));
4127
4128 #if __SMP__
4129
4130 /*
4131 * If the current thread is running on a processor that is no longer recommended, gently
4132 * (non-urgently) get to a point and then block, at which point thread_select() should
4133 * try to idle the processor and re-dispatch the thread to a recommended processor.
4134 */
4135 if (!processor->is_recommended) {
4136 return (check_reason | AST_PREEMPT);
4137 }
4138
4139 /*
4140 * Same for avoid-processor
4141 *
4142 * TODO: Should these set AST_REBALANCE?
4143 */
4144 if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) {
4145 return (check_reason | AST_PREEMPT);
4146 }
4147
4148 /*
4149 * Even though we could continue executing on this processor, a
4150 * secondary SMT core should try to shed load to another primary core.
4151 *
4152 * TODO: Should this do the same check that thread_select does? i.e.
4153 * if no bound threads target this processor, and idle primaries exist, preempt.
4154 * The case of RT threads existing is already taken care of above.
4155 * Consider Capri in this scenario.
4156 *
4157 * if (!SCHED(processor_bound_count)(processor) && !queue_empty(&pset->idle_queue))
4158 *
4159 * TODO: Alternatively - check if only primary is idle, or check if primary's pri is lower than mine.
4160 */
4161
4162 if (processor->current_pri < BASEPRI_RTQUEUES &&
4163 processor->processor_primary != processor)
4164 return (check_reason | AST_PREEMPT);
4165 #endif
4166
4167 if (thread->state & TH_SUSP)
4168 return (check_reason | AST_PREEMPT);
4169
4170 #if CONFIG_SCHED_SFI
4171 /*
4172 * The current thread may not need to be preempted, but may need
4173 * an SFI wait?
4174 */
4175 result = sfi_thread_needs_ast(thread, NULL);
4176 if (result != AST_NONE)
4177 return (check_reason | result);
4178 #endif
4179
4180 return (AST_NONE);
4181 }
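/*
 * The checks above are applied in order: realtime work on the pset, the
 * per-scheduler run queue check, de-recommended or explicitly avoided
 * processors, secondary SMT load shedding, thread suspension, and finally a
 * possible SFI wait.  The first condition that matches determines the
 * returned AST bits.
 */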
4182
4183 /*
4184 * set_sched_pri:
4185 *
4186 * Set the scheduled priority of the specified thread.
4187 *
4188 * This may cause the thread to change queues.
4189 *
4190 * Thread must be locked.
4191 */
4192 void
4193 set_sched_pri(
4194 thread_t thread,
4195 int new_priority)
4196 {
4197 thread_t cthread = current_thread();
4198 boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE;
4199 int curgency, nurgency;
4200 uint64_t urgency_param1, urgency_param2;
4201 boolean_t removed_from_runq = FALSE;
4202
4203 int old_priority = thread->sched_pri;
4204
4205 /* If we're already at this priority, no need to mess with the runqueue */
4206 if (new_priority == old_priority)
4207 return;
4208
4209 if (is_current_thread) {
4210 assert(thread->runq == PROCESSOR_NULL);
4211 curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4212 } else {
4213 removed_from_runq = thread_run_queue_remove(thread);
4214 }
4215
4216 thread->sched_pri = new_priority;
4217
4218 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
4219 (uintptr_t)thread_tid(thread),
4220 thread->base_pri,
4221 thread->sched_pri,
4222 0, /* eventually, 'reason' */
4223 0);
4224
4225 if (is_current_thread) {
4226 nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4227 /*
4228 * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS
4229 * class alterations from user space to occur relatively infrequently, hence
4230 * those are lazily handled. QoS classes have distinct priority bands, and QoS
4231 * inheritance is expected to involve priority changes.
4232 */
4233 uint64_t ctime = mach_approximate_time();
4234 if (nurgency != curgency) {
4235 thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
4236 }
4237 machine_thread_going_on_core(thread, nurgency, 0, 0, ctime);
4238 }
4239
4240 if (removed_from_runq)
4241 thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ);
4242 else if (thread->state & TH_RUN) {
4243 processor_t processor = thread->last_processor;
4244
4245 if (is_current_thread) {
4246 processor_state_update_from_thread(processor, thread);
4247
4248 /*
4249 * When dropping in priority, check if the thread no longer belongs on core.
4250 * If a thread raises its own priority, don't aggressively rebalance it.
4251 * <rdar://problem/31699165>
4252 */
4253 if (new_priority < old_priority) {
4254 ast_t preempt;
4255
4256 if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
4257 ast_on(preempt);
4258 }
4259 } else if (processor != PROCESSOR_NULL && processor->active_thread == thread) {
4260 cause_ast_check(processor);
4261 }
4262 }
4263 }
4264
4265 /*
4266 * thread_run_queue_remove_for_handoff
4267 *
4268 * Pull a thread or its (recursive) push target out of the runqueue
4269 * so that it is ready for thread_run()
4270 *
4271 * Called at splsched
4272 *
4273 * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled.
4274 * This may be different than the thread that was passed in.
4275 */
4276 thread_t
4277 thread_run_queue_remove_for_handoff(thread_t thread) {
4278
4279 thread_t pulled_thread = THREAD_NULL;
4280
4281 thread_lock(thread);
4282
4283 /*
4284 * Check that the thread is not bound
4285 * to a different processor, and that realtime
4286 * is not involved.
4287 *
4288 * Next, pull it off its run queue. If it
4289 * doesn't come, it's not eligible.
4290 */
4291
4292 processor_t processor = current_processor();
4293 if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES &&
4294 (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) {
4295
4296 if (thread_run_queue_remove(thread))
4297 pulled_thread = thread;
4298 }
4299
4300 thread_unlock(thread);
4301
4302 return pulled_thread;
4303 }
4304
4305 /*
4306 * thread_run_queue_remove:
4307 *
4308 * Remove a thread from its current run queue and
4309 * return TRUE if successful.
4310 *
4311 * Thread must be locked.
4312 *
4313 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4314 * run queues because the caller locked the thread. Otherwise
4315 * the thread is on a run queue, but could be chosen for dispatch
4316 * and removed by another processor under a different lock, which
4317 * will set thread->runq to PROCESSOR_NULL.
4318 *
4319 * Hence the thread select path must not rely on anything that could
4320 * be changed under the thread lock after calling this function,
4321 * most importantly thread->sched_pri.
4322 */
4323 boolean_t
4324 thread_run_queue_remove(
4325 thread_t thread)
4326 {
4327 boolean_t removed = FALSE;
4328 processor_t processor = thread->runq;
4329
4330 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) {
4331 /* Thread isn't runnable */
4332 assert(thread->runq == PROCESSOR_NULL);
4333 return FALSE;
4334 }
4335
4336 if (processor == PROCESSOR_NULL) {
4337 /*
4338 * The thread is either not on the runq,
4339 * or is in the midst of being removed from the runq.
4340 *
4341 * runq is set to NULL under the pset lock, not the thread
4342 * lock, so the thread may still be in the process of being dequeued
4343 * from the runq. It will wait in invoke for the thread lock to be
4344 * dropped.
4345 */
4346
4347 return FALSE;
4348 }
4349
4350 if (thread->sched_pri < BASEPRI_RTQUEUES) {
4351 return SCHED(processor_queue_remove)(processor, thread);
4352 }
4353
4354 processor_set_t pset = processor->processor_set;
4355
4356 rt_lock_lock(pset);
4357
4358 if (thread->runq != PROCESSOR_NULL) {
4359 /*
4360 * Thread is on the RT run queue and we have a lock on
4361 * that run queue.
4362 */
4363
4364 remqueue(&thread->runq_links);
4365 SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset));
4366 rt_runq_count_decr(pset);
4367
4368 thread->runq = PROCESSOR_NULL;
4369
4370 removed = TRUE;
4371 }
4372
4373 rt_lock_unlock(pset);
4374
4375 return (removed);
4376 }
4377
4378 /*
4379 * Put the thread back where it goes after a thread_run_queue_remove
4380 *
4381 * Thread must have been removed while holding the same thread lock
4382 *
4383 * thread locked, at splsched
4384 */
4385 void
4386 thread_run_queue_reinsert(thread_t thread, integer_t options)
4387 {
4388 assert(thread->runq == PROCESSOR_NULL);
4389 assert(thread->state & (TH_RUN));
4390
4391 thread_setrun(thread, options);
4392 }
4393
4394 void
4395 sys_override_cpu_throttle(int flag)
4396 {
4397 if (flag == CPU_THROTTLE_ENABLE)
4398 cpu_throttle_enabled = 1;
4399 if (flag == CPU_THROTTLE_DISABLE)
4400 cpu_throttle_enabled = 0;
4401 }
4402
4403 int
4404 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
4405 {
4406 if (thread == NULL || (thread->state & TH_IDLE)) {
4407 *arg1 = 0;
4408 *arg2 = 0;
4409
4410 return (THREAD_URGENCY_NONE);
4411 } else if (thread->sched_mode == TH_MODE_REALTIME) {
4412 *arg1 = thread->realtime.period;
4413 *arg2 = thread->realtime.deadline;
4414
4415 return (THREAD_URGENCY_REAL_TIME);
4416 } else if (cpu_throttle_enabled &&
4417 ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
4418 /*
4419 * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted
4420 */
4421 *arg1 = thread->sched_pri;
4422 *arg2 = thread->base_pri;
4423
4424 return (THREAD_URGENCY_BACKGROUND);
4425 } else {
4426 /* For otherwise unclassified threads, report throughput QoS
4427 * parameters
4428 */
4429 *arg1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS);
4430 *arg2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS);
4431
4432 return (THREAD_URGENCY_NORMAL);
4433 }
4434 }
4435
4436 perfcontrol_class_t
4437 thread_get_perfcontrol_class(thread_t thread)
4438 {
4439 /* Special case handling */
4440 if (thread->state & TH_IDLE)
4441 return PERFCONTROL_CLASS_IDLE;
4442 if (thread->task == kernel_task)
4443 return PERFCONTROL_CLASS_KERNEL;
4444 if (thread->sched_mode == TH_MODE_REALTIME)
4445 return PERFCONTROL_CLASS_REALTIME;
4446
4447 /* perfcontrol_class based on base_pri */
4448 if (thread->base_pri <= MAXPRI_THROTTLE)
4449 return PERFCONTROL_CLASS_BACKGROUND;
4450 else if (thread->base_pri <= BASEPRI_UTILITY)
4451 return PERFCONTROL_CLASS_UTILITY;
4452 else if (thread->base_pri <= BASEPRI_DEFAULT)
4453 return PERFCONTROL_CLASS_NONUI;
4454 else if (thread->base_pri <= BASEPRI_FOREGROUND)
4455 return PERFCONTROL_CLASS_UI;
4456 else
4457 return PERFCONTROL_CLASS_ABOVEUI;
4458 }
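/*
 * The base_pri comparisons above classify a non-special thread by the lowest
 * ceiling (MAXPRI_THROTTLE, BASEPRI_UTILITY, BASEPRI_DEFAULT,
 * BASEPRI_FOREGROUND) that its base priority does not exceed, mapping to
 * BACKGROUND, UTILITY, NONUI, UI, and ABOVEUI respectively.
 */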
4459
4460 /*
4461 * This is the processor idle loop, which just looks for other threads
4462 * to execute. Processor idle threads invoke this without supplying a
4463 * current thread; a current thread may also be supplied to be idled in place without an asserted wait state.
4464 *
4465 * Returns the next thread to execute if dispatched directly.
4466 */
4467
4468 #if 0
4469 #define IDLE_KERNEL_DEBUG_CONSTANT(...) KERNEL_DEBUG_CONSTANT(__VA_ARGS__)
4470 #else
4471 #define IDLE_KERNEL_DEBUG_CONSTANT(...) do { } while(0)
4472 #endif
4473
4474 thread_t
4475 processor_idle(
4476 thread_t thread,
4477 processor_t processor)
4478 {
4479 processor_set_t pset = processor->processor_set;
4480 thread_t new_thread;
4481 int state;
4482 (void)splsched();
4483
4484 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4485 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START,
4486 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
4487
4488 SCHED_STATS_CPU_IDLE_START(processor);
4489
4490 timer_switch(&PROCESSOR_DATA(processor, system_state),
4491 mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
4492 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
4493
4494 while (1) {
4495 /*
4496 * Ensure that updates to my processor and pset state,
4497 * made by the IPI source processor before sending the IPI,
4498 * are visible on this processor now (even though we don't
4499 * take the pset lock yet).
4500 */
4501 atomic_thread_fence(memory_order_acquire);
4502
4503 if (processor->state != PROCESSOR_IDLE)
4504 break;
4505 if (bit_test(pset->pending_AST_cpu_mask, processor->cpu_id))
4506 break;
4507 #if defined(CONFIG_SCHED_DEFERRED_AST)
4508 if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id))
4509 break;
4510 #endif
4511 if (processor->is_recommended) {
4512 if (rt_runq_count(pset))
4513 break;
4514 } else {
4515 if (SCHED(processor_bound_count)(processor))
4516 break;
4517 }
4518
4519 #if CONFIG_SCHED_IDLE_IN_PLACE
4520 if (thread != THREAD_NULL) {
4521 /* Did the idle-in-place thread wake up? */
4522 if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active)
4523 break;
4524 }
4525 #endif
4526
4527 IDLE_KERNEL_DEBUG_CONSTANT(
4528 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0);
4529
4530 machine_track_platform_idle(TRUE);
4531
4532 machine_idle();
4533
4534 machine_track_platform_idle(FALSE);
4535
4536 (void)splsched();
4537
4538 IDLE_KERNEL_DEBUG_CONSTANT(
4539 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0);
4540
4541 if (!SCHED(processor_queue_empty)(processor)) {
4542 /* Secondary SMT processors respond to directed wakeups
4543 * exclusively. Some platforms induce 'spurious' SMT wakeups.
4544 */
4545 if (processor->processor_primary == processor)
4546 break;
4547 }
4548 }
4549
4550 timer_switch(&PROCESSOR_DATA(processor, idle_state),
4551 mach_absolute_time(), &PROCESSOR_DATA(processor, system_state));
4552 PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
4553
4554 pset_lock(pset);
4555
4556 /* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */
4557 bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id);
4558 #if defined(CONFIG_SCHED_DEFERRED_AST)
4559 bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id);
4560 #endif
4561
4562 state = processor->state;
4563 if (state == PROCESSOR_DISPATCHING) {
4564 /*
4565 * Common case -- cpu dispatched.
4566 */
4567 new_thread = processor->next_thread;
4568 processor->next_thread = THREAD_NULL;
4569 processor->state = PROCESSOR_RUNNING;
4570
4571 if ((new_thread != THREAD_NULL) && (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) ||
4572 (rt_runq_count(pset) > 0)) ) {
4573 /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
4574 processor_state_update_idle(processor);
4575 processor->deadline = UINT64_MAX;
4576
4577 pset_unlock(pset);
4578
4579 thread_lock(new_thread);
4580 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq_count(pset), 0, 0);
4581 thread_setrun(new_thread, SCHED_HEADQ);
4582 thread_unlock(new_thread);
4583
4584 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4585 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4586 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4587
4588 return (THREAD_NULL);
4589 }
4590
4591 sched_update_pset_load_average(pset);
4592
4593 pset_unlock(pset);
4594
4595 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4596 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4597 (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
4598
4599 return (new_thread);
4600
4601 } else if (state == PROCESSOR_IDLE) {
4602 re_queue_tail(&pset->active_queue, &processor->processor_queue);
4603
4604 pset->active_processor_count++;
4605 sched_update_pset_load_average(pset);
4606
4607 processor->state = PROCESSOR_RUNNING;
4608 processor_state_update_idle(processor);
4609 processor->deadline = UINT64_MAX;
4610
4611 } else if (state == PROCESSOR_SHUTDOWN) {
4612 /*
4613 * Going off-line. Force a
4614 * reschedule.
4615 */
4616 if ((new_thread = processor->next_thread) != THREAD_NULL) {
4617 processor->next_thread = THREAD_NULL;
4618 processor_state_update_idle(processor);
4619 processor->deadline = UINT64_MAX;
4620
4621 pset_unlock(pset);
4622
4623 thread_lock(new_thread);
4624 thread_setrun(new_thread, SCHED_HEADQ);
4625 thread_unlock(new_thread);
4626
4627 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4628 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4629 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4630
4631 return (THREAD_NULL);
4632 }
4633 }
4634
4635 pset_unlock(pset);
4636
4637 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4638 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END,
4639 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4640
4641 return (THREAD_NULL);
4642 }
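/*
 * The loop above exits when the processor leaves PROCESSOR_IDLE, when a
 * pending AST is posted for this cpu, when realtime work (on a recommended
 * processor) or bound work (on a de-recommended one) appears, or, for
 * idle-in-place, when the supplied thread wakes up.  The
 * PROCESSOR_DISPATCHING case then either returns the handed-off thread or,
 * if higher-priority work arrived in the meantime, re-dispatches that thread
 * via thread_setrun() and returns THREAD_NULL so the idle thread blocks and
 * selects again.
 */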
4643
4644 /*
4645 * Each processor has a dedicated thread which
4646 * executes the idle loop when there is no suitable
4647 * previous context.
4648 */
4649 void
4650 idle_thread(void)
4651 {
4652 processor_t processor = current_processor();
4653 thread_t new_thread;
4654
4655 new_thread = processor_idle(THREAD_NULL, processor);
4656 if (new_thread != THREAD_NULL) {
4657 thread_run(processor->idle_thread, (thread_continue_t)idle_thread, NULL, new_thread);
4658 /*NOTREACHED*/
4659 }
4660
4661 thread_block((thread_continue_t)idle_thread);
4662 /*NOTREACHED*/
4663 }
4664
4665 kern_return_t
4666 idle_thread_create(
4667 processor_t processor)
4668 {
4669 kern_return_t result;
4670 thread_t thread;
4671 spl_t s;
4672 char name[MAXTHREADNAMESIZE];
4673
4674 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
4675 if (result != KERN_SUCCESS)
4676 return (result);
4677
4678 snprintf(name, sizeof(name), "idle #%d", processor->cpu_id);
4679 thread_set_thread_name(thread, name);
4680
4681 s = splsched();
4682 thread_lock(thread);
4683 thread->bound_processor = processor;
4684 processor->idle_thread = thread;
4685 thread->sched_pri = thread->base_pri = IDLEPRI;
4686 thread->state = (TH_RUN | TH_IDLE);
4687 thread->options |= TH_OPT_IDLE_THREAD;
4688 thread_unlock(thread);
4689 splx(s);
4690
4691 thread_deallocate(thread);
4692
4693 return (KERN_SUCCESS);
4694 }
4695
4696 /*
4697 * sched_startup:
4698 *
4699 * Kicks off scheduler services.
4700 *
4701 * Called at splsched.
4702 */
4703 void
4704 sched_startup(void)
4705 {
4706 kern_return_t result;
4707 thread_t thread;
4708
4709 simple_lock_init(&sched_vm_group_list_lock, 0);
4710
4711 #if __arm__ || __arm64__
4712 simple_lock_init(&sched_recommended_cores_lock, 0);
4713 #endif /* __arm__ || __arm64__ */
4714
4715 result = kernel_thread_start_priority((thread_continue_t)sched_init_thread,
4716 (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
4717 if (result != KERN_SUCCESS)
4718 panic("sched_startup");
4719
4720 thread_deallocate(thread);
4721
4722 assert_thread_magic(thread);
4723
4724 /*
4725 * Yield to the sched_init_thread once, to
4726 * initialize our own thread after being switched
4727 * back to.
4728 *
4729 * The current thread is the only other thread
4730 * active at this point.
4731 */
4732 thread_block(THREAD_CONTINUE_NULL);
4733 }
4734
4735 #if __arm64__
4736 static _Atomic uint64_t sched_perfcontrol_callback_deadline;
4737 #endif /* __arm64__ */
4738
4739
4740 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4741
4742 static volatile uint64_t sched_maintenance_deadline;
4743 static uint64_t sched_tick_last_abstime;
4744 static uint64_t sched_tick_delta;
4745 uint64_t sched_tick_max_delta;
4746
4747
4748 /*
4749 * sched_timeshare_maintenance_continue:
4750 *
4751 * Perform periodic bookkeeping functions about ten
4752 * times per second.
4753 */
4754 void
4755 sched_timeshare_maintenance_continue(void)
4756 {
4757 uint64_t sched_tick_ctime, late_time;
4758
4759 struct sched_update_scan_context scan_context = {
4760 .earliest_bg_make_runnable_time = UINT64_MAX,
4761 .earliest_normal_make_runnable_time = UINT64_MAX,
4762 .earliest_rt_make_runnable_time = UINT64_MAX
4763 };
4764
4765 sched_tick_ctime = mach_absolute_time();
4766
4767 if (__improbable(sched_tick_last_abstime == 0)) {
4768 sched_tick_last_abstime = sched_tick_ctime;
4769 late_time = 0;
4770 sched_tick_delta = 1;
4771 } else {
4772 late_time = sched_tick_ctime - sched_tick_last_abstime;
4773 sched_tick_delta = late_time / sched_tick_interval;
4774 /* Ensure a delta of at least 1, since the interval could be slightly
4775 * smaller than the sched_tick_interval due to dispatch
4776 * latencies.
4777 */
4778 sched_tick_delta = MAX(sched_tick_delta, 1);
4779
4780 /* In the event that interrupt latencies or platform
4781 * idle events which advanced the timebase resulted
4782 * in periods where no threads were dispatched,
4783 * cap the maximum "tick delta" at SCHED_TICK_MAX_DELTA
4784 * iterations.
4785 */
4786 sched_tick_delta = MIN(sched_tick_delta, SCHED_TICK_MAX_DELTA);
4787
4788 sched_tick_last_abstime = sched_tick_ctime;
4789 sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta);
4790 }
4791
4792 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_START,
4793 sched_tick_delta, late_time, 0, 0, 0);
4794
4795 /* Add a number of pseudo-ticks corresponding to the elapsed interval.
4796 * This could be greater than 1 if substantial intervals occur where
4797 * all processors are idle, which is rare in practice.
4798 */
4799
4800 sched_tick += sched_tick_delta;
4801
4802 update_vm_info();
4803
4804 /*
4805 * Compute various averages.
4806 */
4807 compute_averages(sched_tick_delta);
4808
4809 /*
4810 * Scan the run queues for threads which
4811 * may need to be updated, and find the earliest runnable thread on the runqueue
4812 * to report its latency.
4813 */
4814 SCHED(thread_update_scan)(&scan_context);
4815
4816 SCHED(rt_runq_scan)(&scan_context);
4817
4818 uint64_t ctime = mach_absolute_time();
4819
4820 uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ?
4821 ctime - scan_context.earliest_bg_make_runnable_time : 0;
4822
4823 uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ?
4824 ctime - scan_context.earliest_normal_make_runnable_time : 0;
4825
4826 uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ?
4827 ctime - scan_context.earliest_rt_make_runnable_time : 0;
4828
4829 machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency);
4830
4831 /*
4832 * Check to see if the special sched VM group needs attention.
4833 */
4834 sched_vm_group_maintenance();
4835
4836 #if __arm__ || __arm64__
4837 /* Check to see if the recommended cores failsafe is active */
4838 sched_recommended_cores_maintenance();
4839 #endif /* __arm__ || __arm64__ */
4840
4841
4842 #if DEBUG || DEVELOPMENT
4843 #if __x86_64__
4844 #include <i386/misc_protos.h>
4845 /* Check for long-duration interrupts */
4846 mp_interrupt_watchdog();
4847 #endif /* __x86_64__ */
4848 #endif /* DEBUG || DEVELOPMENT */
4849
4850 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END,
4851 sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG],
4852 sched_pri_shifts[TH_BUCKET_SHARE_UT], 0, 0);
4853
4854 assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT);
4855 thread_block((thread_continue_t)sched_timeshare_maintenance_continue);
4856 /*NOTREACHED*/
4857 }
4858
4859 static uint64_t sched_maintenance_wakeups;
4860
4861 /*
4862 * Determine if the set of routines formerly driven by a maintenance timer
4863 * must be invoked, based on a deadline comparison. Signals the scheduler
4864 * maintenance thread on deadline expiration. Must be invoked at an interval
4865 * lower than the "sched_tick_interval", currently accomplished by
4866 * invocation via the quantum expiration timer and at context switch time.
4867 * Performance matters: this routine reuses a timestamp approximating the
4868 * current absolute time received from the caller, and should perform
4869 * no more than a comparison against the deadline in the common case.
4870 */
4871 void
4872 sched_timeshare_consider_maintenance(uint64_t ctime) {
4873 uint64_t ndeadline, deadline = sched_maintenance_deadline;
4874
4875 if (__improbable(ctime >= deadline)) {
4876 if (__improbable(current_thread() == sched_maintenance_thread))
4877 return;
4878 OSMemoryBarrier();
4879
4880 ndeadline = ctime + sched_tick_interval;
4881
4882 if (__probable(__sync_bool_compare_and_swap(&sched_maintenance_deadline, deadline, ndeadline))) {
4883 thread_wakeup((event_t)sched_timeshare_maintenance_continue);
4884 sched_maintenance_wakeups++;
4885 }
4886 }
4887
4888 #if __arm64__
4889 uint64_t perf_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline, memory_order_relaxed);
4890
4891 if (__improbable(perf_deadline && ctime >= perf_deadline)) {
4892 /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. */
4893 if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline, &perf_deadline, 0,
4894 memory_order_relaxed, memory_order_relaxed)) {
4895 machine_perfcontrol_deadline_passed(perf_deadline);
4896 }
4897 }
4898 #endif /* __arm64__ */
4899
4900 }
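/*
 * The compare-and-swap on sched_maintenance_deadline ensures that when many
 * processors cross the deadline at about the same time, only the one that
 * successfully advances the deadline issues the wakeup, so the maintenance
 * thread is not flooded with redundant thread_wakeup() calls.
 */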
4901
4902 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
4903
4904 void
4905 sched_init_thread(void (*continuation)(void))
4906 {
4907 thread_block(THREAD_CONTINUE_NULL);
4908
4909 thread_t thread = current_thread();
4910
4911 thread_set_thread_name(thread, "sched_maintenance_thread");
4912
4913 sched_maintenance_thread = thread;
4914
4915 continuation();
4916
4917 /*NOTREACHED*/
4918 }
4919
4920 #if defined(CONFIG_SCHED_TIMESHARE_CORE)
4921
4922 /*
4923 * thread_update_scan / runq_scan:
4924 *
4925 * Scan the run queues to account for timesharing threads
4926 * which need to be updated.
4927 *
4928 * Scanner runs in two passes. Pass one squirrels likely
4929 * threads away in an array, pass two does the update.
4930 *
4931 * This is necessary because the run queue is locked for
4932 * the candidate scan, but the thread is locked for the update.
4933 *
4934 * Array should be sized to make forward progress, without
4935 * disabling preemption for long periods.
4936 */
4937
4938 #define THREAD_UPDATE_SIZE 128
4939
4940 static thread_t thread_update_array[THREAD_UPDATE_SIZE];
4941 static uint32_t thread_update_count = 0;
4942
4943 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
4944 boolean_t
4945 thread_update_add_thread(thread_t thread)
4946 {
4947 if (thread_update_count == THREAD_UPDATE_SIZE)
4948 return (FALSE);
4949
4950 thread_update_array[thread_update_count++] = thread;
4951 thread_reference_internal(thread);
4952 return (TRUE);
4953 }
4954
4955 void
4956 thread_update_process_threads(void)
4957 {
4958 assert(thread_update_count <= THREAD_UPDATE_SIZE);
4959
4960 for (uint32_t i = 0 ; i < thread_update_count ; i++) {
4961 thread_t thread = thread_update_array[i];
4962 assert_thread_magic(thread);
4963 thread_update_array[i] = THREAD_NULL;
4964
4965 spl_t s = splsched();
4966 thread_lock(thread);
4967 if (!(thread->state & (TH_WAIT)) && thread->sched_stamp != sched_tick) {
4968 SCHED(update_priority)(thread);
4969 }
4970 thread_unlock(thread);
4971 splx(s);
4972
4973 thread_deallocate(thread);
4974 }
4975
4976 thread_update_count = 0;
4977 }
4978
4979 /*
4980 * Scan a runq for candidate threads.
4981 *
4982 * Returns TRUE if retry is needed.
4983 */
4984 boolean_t
4985 runq_scan(
4986 run_queue_t runq,
4987 sched_update_scan_context_t scan_context)
4988 {
4989 int count = runq->count;
4990 int queue_index;
4991
4992 assert(count >= 0);
4993
4994 if (count == 0)
4995 return FALSE;
4996
4997 for (queue_index = bitmap_first(runq->bitmap, NRQS);
4998 queue_index >= 0;
4999 queue_index = bitmap_next(runq->bitmap, queue_index)) {
5000
5001 thread_t thread;
5002 queue_t queue = &runq->queues[queue_index];
5003
5004 qe_foreach_element(thread, queue, runq_links) {
5005 assert(count > 0);
5006 assert_thread_magic(thread);
5007
5008 if (thread->sched_stamp != sched_tick &&
5009 thread->sched_mode == TH_MODE_TIMESHARE) {
5010 if (thread_update_add_thread(thread) == FALSE)
5011 return TRUE;
5012 }
5013
5014 if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) {
5015 if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) {
5016 scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time;
5017 }
5018 } else {
5019 if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) {
5020 scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time;
5021 }
5022 }
5023 count--;
5024 }
5025 }
5026
5027 return FALSE;
5028 }
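/*
 * runq_scan() walks only the occupied priority levels (via the run queue
 * bitmap) and stops early once thread_update_add_thread() reports that the
 * THREAD_UPDATE_SIZE array is full; the caller is then expected to drain the
 * batch with thread_update_process_threads() and retry the scan until no
 * more candidates remain.
 */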
5029
5030 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
5031
5032 boolean_t
5033 thread_eager_preemption(thread_t thread)
5034 {
5035 return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
5036 }
5037
5038 void
5039 thread_set_eager_preempt(thread_t thread)
5040 {
5041 spl_t x;
5042 processor_t p;
5043 ast_t ast = AST_NONE;
5044
5045 x = splsched();
5046 p = current_processor();
5047
5048 thread_lock(thread);
5049 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5050
5051 if (thread == current_thread()) {
5052
5053 ast = csw_check(p, AST_NONE);
5054 thread_unlock(thread);
5055 if (ast != AST_NONE) {
5056 (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
5057 }
5058 } else {
5059 p = thread->last_processor;
5060
5061 if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
5062 p->active_thread == thread) {
5063 cause_ast_check(p);
5064 }
5065
5066 thread_unlock(thread);
5067 }
5068
5069 splx(x);
5070 }
5071
5072 void
5073 thread_clear_eager_preempt(thread_t thread)
5074 {
5075 spl_t x;
5076
5077 x = splsched();
5078 thread_lock(thread);
5079
5080 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5081
5082 thread_unlock(thread);
5083 splx(x);
5084 }
5085
5086 /*
5087 * Scheduling statistics
5088 */
5089 void
5090 sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri)
5091 {
5092 struct processor_sched_statistics *stats;
5093 boolean_t to_realtime = FALSE;
5094
5095 stats = &processor->processor_data.sched_stats;
5096 stats->csw_count++;
5097
5098 if (otherpri >= BASEPRI_REALTIME) {
5099 stats->rt_sched_count++;
5100 to_realtime = TRUE;
5101 }
5102
5103 if ((reasons & AST_PREEMPT) != 0) {
5104 stats->preempt_count++;
5105
5106 if (selfpri >= BASEPRI_REALTIME) {
5107 stats->preempted_rt_count++;
5108 }
5109
5110 if (to_realtime) {
5111 stats->preempted_by_rt_count++;
5112 }
5113
5114 }
5115 }
5116
5117 void
5118 sched_stats_handle_runq_change(struct runq_stats *stats, int old_count)
5119 {
5120 uint64_t timestamp = mach_absolute_time();
5121
5122 stats->count_sum += (timestamp - stats->last_change_timestamp) * old_count;
5123 stats->last_change_timestamp = timestamp;
5124 }
5125
5126 /*
5127 * For calls from assembly code
5128 */
5129 #undef thread_wakeup
5130 void
5131 thread_wakeup(
5132 event_t x);
5133
5134 void
5135 thread_wakeup(
5136 event_t x)
5137 {
5138 thread_wakeup_with_result(x, THREAD_AWAKENED);
5139 }
5140
5141 boolean_t
5142 preemption_enabled(void)
5143 {
5144 return (get_preemption_level() == 0 && ml_get_interrupts_enabled());
5145 }
5146
5147 static void
5148 sched_timer_deadline_tracking_init(void) {
5149 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1);
5150 nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2);
5151 }
5152
5153 #if __arm__ || __arm64__
5154
5155 uint32_t perfcontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED;
5156 uint32_t perfcontrol_requested_recommended_core_count = MAX_CPUS;
5157 boolean_t perfcontrol_failsafe_active = FALSE;
5158
5159 uint64_t perfcontrol_failsafe_maintenance_runnable_time;
5160 uint64_t perfcontrol_failsafe_activation_time;
5161 uint64_t perfcontrol_failsafe_deactivation_time;
5162
5163 /* data covering who likely caused it and how long they ran */
5164 #define FAILSAFE_NAME_LEN 33 /* (2*MAXCOMLEN)+1 from size of p_name */
5165 char perfcontrol_failsafe_name[FAILSAFE_NAME_LEN];
5166 int perfcontrol_failsafe_pid;
5167 uint64_t perfcontrol_failsafe_tid;
5168 uint64_t perfcontrol_failsafe_thread_timer_at_start;
5169 uint64_t perfcontrol_failsafe_thread_timer_last_seen;
5170 uint32_t perfcontrol_failsafe_recommended_at_trigger;
5171
5172 /*
5173 * Perf controller calls here to update the recommended core bitmask.
5174 * If the failsafe is active, we don't immediately apply the new value.
5175 * Instead, we store the new request and use it after the failsafe deactivates.
5176 *
5177 * If the failsafe is not active, immediately apply the update.
5178 *
5179 * No scheduler locks are held, no other locks are held that scheduler might depend on,
5180 * interrupts are enabled
5181 *
5182 * currently the prototype is in osfmk/arm/machine_routines.h
5183 */
5184 void
5185 sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores)
5186 {
5187 assert(preemption_enabled());
5188
5189 spl_t s = splsched();
5190 simple_lock(&sched_recommended_cores_lock);
5191
5192 perfcontrol_requested_recommended_cores = recommended_cores;
5193 perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores);
5194
5195 if (perfcontrol_failsafe_active == FALSE)
5196 sched_update_recommended_cores(perfcontrol_requested_recommended_cores);
5197 else
5198 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5199 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE,
5200 perfcontrol_requested_recommended_cores,
5201 sched_maintenance_thread->last_made_runnable_time, 0, 0, 0);
5202
5203 simple_unlock(&sched_recommended_cores_lock);
5204 splx(s);
5205 }
5206
5207 /*
5208 * Consider whether we need to activate the recommended cores failsafe
5209 *
5210 * Called from quantum timer interrupt context of a realtime thread
5211 * No scheduler locks are held, interrupts are disabled
5212 */
5213 void
5214 sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread)
5215 {
5216 /*
5217 * Check whether a realtime thread is starving the system
5218 * and whether bringing up non-recommended cores would help.
5219 *
5220 * TODO: Is this the correct check for recommended == possible cores?
5221 * TODO: Validate the checks without the relevant lock are OK.
5222 */
5223
5224 if (__improbable(perfcontrol_failsafe_active == TRUE)) {
5225 /* keep track of how long the responsible thread runs */
5226
5227 simple_lock(&sched_recommended_cores_lock);
5228
5229 if (perfcontrol_failsafe_active == TRUE &&
5230 cur_thread->thread_id == perfcontrol_failsafe_tid) {
5231 perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) +
5232 timer_grab(&cur_thread->system_timer);
5233 }
5234
5235 simple_unlock(&sched_recommended_cores_lock);
5236
5237 /* we're already trying to solve the problem, so bail */
5238 return;
5239 }
5240
5241 /* The failsafe won't help if there are no more processors to enable */
5242 if (__probable(perfcontrol_requested_recommended_core_count >= processor_count))
5243 return;
5244
5245 uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold;
5246
5247 /* Use the maintenance thread as our canary in the coal mine */
5248 thread_t m_thread = sched_maintenance_thread;
5249
5250 /* If it doesn't look bad, nothing to see here */
5251 if (__probable(m_thread->last_made_runnable_time >= too_long_ago))
5252 return;
5253
5254 /* It looks bad, take the lock to be sure */
5255 thread_lock(m_thread);
5256
5257 if (m_thread->runq == PROCESSOR_NULL ||
5258 (m_thread->state & (TH_RUN|TH_WAIT)) != TH_RUN ||
5259 m_thread->last_made_runnable_time >= too_long_ago) {
5260 /*
5261 * Maintenance thread is either on cpu or blocked, and
5262 * therefore wouldn't benefit from more cores
5263 */
5264 thread_unlock(m_thread);
5265 return;
5266 }
5267
5268 uint64_t maintenance_runnable_time = m_thread->last_made_runnable_time;
5269
5270 thread_unlock(m_thread);
5271
5272 /*
5273 * There are cores disabled at perfcontrol's recommendation, but the
5274 * system is so overloaded that the maintenance thread can't run.
5275 * That likely means that perfcontrol can't run either, so it can't fix
5276 * the recommendation. We have to kick in a failsafe to keep from starving.
5277 *
5278 * When the maintenance thread has been starved for too long,
5279 * ignore the recommendation from perfcontrol and light up all the cores.
5280 *
5281 * TODO: Consider weird states like boot, sleep, or debugger
5282 */
5283
5284 simple_lock(&sched_recommended_cores_lock);
5285
5286 if (perfcontrol_failsafe_active == TRUE) {
5287 simple_unlock(&sched_recommended_cores_lock);
5288 return;
5289 }
5290
5291 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5292 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START,
5293 perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0);
5294
5295 perfcontrol_failsafe_active = TRUE;
5296 perfcontrol_failsafe_activation_time = mach_absolute_time();
5297 perfcontrol_failsafe_maintenance_runnable_time = maintenance_runnable_time;
5298 perfcontrol_failsafe_recommended_at_trigger = perfcontrol_requested_recommended_cores;
5299
5300 /* Capture some data about who screwed up (assuming that the thread on core is at fault) */
5301 task_t task = cur_thread->task;
5302 perfcontrol_failsafe_pid = task_pid(task);
5303 strlcpy(perfcontrol_failsafe_name, proc_name_address(task->bsd_info), sizeof(perfcontrol_failsafe_name));
5304
5305 perfcontrol_failsafe_tid = cur_thread->thread_id;
5306
5307 /* Blame the thread for time it has run recently */
5308 uint64_t recent_computation = (ctime - cur_thread->computation_epoch) + cur_thread->computation_metered;
5309
5310 uint64_t last_seen = timer_grab(&cur_thread->user_timer) + timer_grab(&cur_thread->system_timer);
5311
5312 /* Compute the start time of the bad behavior in terms of the thread's on core time */
5313 perfcontrol_failsafe_thread_timer_at_start = last_seen - recent_computation;
5314 perfcontrol_failsafe_thread_timer_last_seen = last_seen;
5315
5316 /* Ignore the previously recommended core configuration */
5317 sched_update_recommended_cores(ALL_CORES_RECOMMENDED);
5318
5319 simple_unlock(&sched_recommended_cores_lock);
5320 }
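/*
 * In other words, the failsafe trigger is indirect: rather than measuring
 * realtime load directly, it watches whether the maintenance thread has been
 * runnable but unable to run for longer than
 * perfcontrol_failsafe_starvation_threshold, and treats that as evidence
 * that the recommended-core set is too small to make forward progress.
 */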
5321
5322 /*
5323 * Now that our bacon has been saved by the failsafe, consider whether to turn it off
5324 *
5325 * Runs in the context of the maintenance thread, no locks held
5326 */
5327 static void
5328 sched_recommended_cores_maintenance(void)
5329 {
5330 /* Common case - no failsafe, nothing to be done here */
5331 if (__probable(perfcontrol_failsafe_active == FALSE))
5332 return;
5333
5334 uint64_t ctime = mach_absolute_time();
5335
5336 boolean_t print_diagnostic = FALSE;
5337 char p_name[FAILSAFE_NAME_LEN] = "";
5338
5339 spl_t s = splsched();
5340 simple_lock(&sched_recommended_cores_lock);
5341
5342 /* Check again, under the lock, to avoid races */
5343 if (perfcontrol_failsafe_active == FALSE)
5344 goto out;
5345
5346 /*
5347 * Ensure that the other cores get another few ticks to run some threads.
5348 * If we don't have this hysteresis, the maintenance thread is the first
5349 * to run, and then it immediately kills the other cores.
5350 */
5351 if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold)
5352 goto out;
5353
5354 /* Capture some diagnostic state under the lock so we can print it out later */
5355
5356 int pid = perfcontrol_failsafe_pid;
5357 uint64_t tid = perfcontrol_failsafe_tid;
5358
5359 uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen -
5360 perfcontrol_failsafe_thread_timer_at_start;
5361 uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger;
5362 uint32_t rec_cores_after = perfcontrol_requested_recommended_cores;
5363 uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time;
5364 strlcpy(p_name, perfcontrol_failsafe_name, sizeof(p_name));
5365
5366 print_diagnostic = TRUE;
5367
5368 /* Deactivate the failsafe and reinstate the requested recommendation settings */
5369
5370 perfcontrol_failsafe_deactivation_time = ctime;
5371 perfcontrol_failsafe_active = FALSE;
5372
5373 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5374 MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END,
5375 perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0);
5376
5377 sched_update_recommended_cores(perfcontrol_requested_recommended_cores);
5378
5379 out:
5380 simple_unlock(&sched_recommended_cores_lock);
5381 splx(s);
5382
5383 if (print_diagnostic) {
5384 uint64_t failsafe_duration_ms = 0, thread_usage_ms = 0;
5385
5386 absolutetime_to_nanoseconds(failsafe_duration, &failsafe_duration_ms);
5387 failsafe_duration_ms = failsafe_duration_ms / NSEC_PER_MSEC;
5388
5389 absolutetime_to_nanoseconds(thread_usage, &thread_usage_ms);
5390 thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC;
5391
5392 printf("recommended core failsafe kicked in for %lld ms "
5393 "likely due to %s[%d] thread 0x%llx spending "
5394 "%lld ms on cpu at realtime priority - "
5395 "new recommendation: 0x%x -> 0x%x\n",
5396 failsafe_duration_ms, p_name, pid, tid, thread_usage_ms,
5397 rec_cores_before, rec_cores_after);
5398 }
5399 }
5400
5401 /*
5402 * Apply a new recommended cores mask to the processors it affects.
5403 * Runs after considering failsafes and such.
5404 *
5405 * Iterate over processors and update their ->is_recommended field.
5406 * If a processor is running, we let it drain out at its next
5407 * quantum expiration or blocking point. If a processor is idle, there
5408 * may be more work for it to do, so IPI it.
5409 *
5410 * interrupts disabled, sched_recommended_cores_lock is held
5411 */
5412 static void
5413 sched_update_recommended_cores(uint32_t recommended_cores)
5414 {
5415 processor_set_t pset, nset;
5416 processor_t processor;
5417 uint64_t needs_exit_idle_mask = 0x0;
5418
5419 processor = processor_list;
5420 pset = processor->processor_set;
5421
5422 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5423 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START,
5424 recommended_cores, perfcontrol_failsafe_active, 0, 0, 0);
5425
5426 if (__builtin_popcount(recommended_cores) == 0) {
5427 recommended_cores |= 0x1U; /* add boot processor or we hang */
5428 }
5429
5430 /* First set recommended cores */
5431 pset_lock(pset);
5432 do {
5433
5434 nset = processor->processor_set;
5435 if (nset != pset) {
5436 pset_unlock(pset);
5437 pset = nset;
5438 pset_lock(pset);
5439 }
5440
5441 pset->recommended_bitmask = recommended_cores;
5442
5443 if (recommended_cores & (1ULL << processor->cpu_id)) {
5444 processor->is_recommended = TRUE;
5445
5446 if (processor->state == PROCESSOR_IDLE) {
5447 if (processor->processor_primary == processor) {
5448 re_queue_head(&pset->idle_queue, &processor->processor_queue);
5449 } else {
5450 re_queue_head(&pset->idle_secondary_queue, &processor->processor_queue);
5451 }
5452 if (processor != current_processor()) {
5453 needs_exit_idle_mask |= (1ULL << processor->cpu_id);
5454 }
5455 }
5456 }
5457 } while ((processor = processor->processor_list) != NULL);
5458 pset_unlock(pset);
5459
5460 /* Now shut down the non-recommended cores */
5461 processor = processor_list;
5462 pset = processor->processor_set;
5463
5464 pset_lock(pset);
5465 do {
5466
5467 nset = processor->processor_set;
5468 if (nset != pset) {
5469 pset_unlock(pset);
5470 pset = nset;
5471 pset_lock(pset);
5472 }
5473
5474 if (!(recommended_cores & (1ULL << processor->cpu_id))) {
5475 processor->is_recommended = FALSE;
5476 if (processor->state == PROCESSOR_IDLE) {
5477 re_queue_head(&pset->unused_queue, &processor->processor_queue);
5478 }
5479 SCHED(processor_queue_shutdown)(processor);
5480 /* pset unlocked */
5481
5482 SCHED(rt_queue_shutdown)(processor);
5483
5484 pset_lock(pset);
5485 }
5486 } while ((processor = processor->processor_list) != NULL);
5487 pset_unlock(pset);
5488
5489 /* Issue all pending IPIs now that the pset lock has been dropped */
5490 for (int cpuid = lsb_first(needs_exit_idle_mask); cpuid >= 0; cpuid = lsb_next(needs_exit_idle_mask, cpuid)) {
5491 processor = processor_array[cpuid];
5492 machine_signal_idle(processor);
5493 }
5494
5495 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5496 MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END,
5497 needs_exit_idle_mask, 0, 0, 0, 0);
5498 }
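/*
 * Editorial sketch, not part of the original source: the recommended-cores
 * mask used by the function above is a plain bitmask indexed by cpu_id,
 * walked with lsb_first()/lsb_next() when issuing the deferred IPIs. The
 * compiled-out helper below ("count_recommended" is a hypothetical name)
 * shows the same bit-walking idiom on its own.
 */
#if 0
static int
count_recommended(uint64_t recommended_cores)
{
	int count = 0;

	/* visit each set bit, i.e. each recommended cpu_id, in ascending order */
	for (int cpuid = lsb_first(recommended_cores); cpuid >= 0;
	    cpuid = lsb_next(recommended_cores, cpuid)) {
		count++;
	}

	return count;
}
#endif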
5499 #endif /* __arm__ || __arm64__ */
5500
5501 void thread_set_options(uint32_t thopt) {
5502 spl_t x;
5503 thread_t t = current_thread();
5504
5505 x = splsched();
5506 thread_lock(t);
5507
5508 t->options |= thopt;
5509
5510 thread_unlock(t);
5511 splx(x);
5512 }
5513
5514 void thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint) {
5515 thread->pending_block_hint = block_hint;
5516 }
5517
5518 uint32_t qos_max_parallelism(int qos, uint64_t options)
5519 {
5520 return SCHED(qos_max_parallelism)(qos, options);
5521 }
5522
5523 uint32_t sched_qos_max_parallelism(__unused int qos, uint64_t options)
5524 {
5525 host_basic_info_data_t hinfo;
5526 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
5527 /* Query the machine layer for core information */
5528 __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
5529 (host_info_t)&hinfo, &count);
5530 assert(kret == KERN_SUCCESS);
5531
5532 /* We do not want multiple realtime threads running on the
5533 * same physical core, even on SMT-capable machines.
5534 */
5535 if (options & QOS_PARALLELISM_REALTIME) {
5536 return hinfo.physical_cpu;
5537 }
5538
5539 if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
5540 return hinfo.logical_cpu;
5541 } else {
5542 return hinfo.physical_cpu;
5543 }
5544 }
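/*
 * Editorial sketch, not part of the original source: a caller could use
 * qos_max_parallelism() above to size a pool of worker threads. The flags
 * shown are the ones the function handles; "pick_worker_count" is a
 * hypothetical, compiled-out caller.
 */
#if 0
static uint32_t
pick_worker_count(int qos, boolean_t realtime)
{
	uint64_t options = realtime ? QOS_PARALLELISM_REALTIME :
	    QOS_PARALLELISM_COUNT_LOGICAL;

	/* realtime callers get one thread per physical core;
	 * other callers may use every logical (SMT) CPU */
	return qos_max_parallelism(qos, options);
}
#endif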
5545
5546 #if __arm64__
5547
5548 /*
5549 * Set up a new timer, or replace the old timer with a new one
5550 *
5551 * Returns TRUE if the old timer was canceled, FALSE if it was not
5552 */
5553 boolean_t
5554 sched_perfcontrol_update_callback_deadline(uint64_t new_deadline)
5555 {
5556 /*
5557 * Exchange the old deadline for the new deadline. If the old deadline
5558 * was nonzero, an armed callback was cancelled; otherwise it was not.
5559 */
5560
5561 uint64_t old_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline,
5562 memory_order_relaxed);
5563
5564
5565 while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline,
5566 &old_deadline, new_deadline,
5567 memory_order_relaxed, memory_order_relaxed));
5568
5569
5570 /* old_deadline now holds the value actually replaced, which may differ from the initial load if another update raced with this one */
5571
5572 return (old_deadline != 0) ? TRUE : FALSE;
5573 }
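/*
 * Editorial note, not part of the original source: the compare-exchange loop
 * above amounts to an unconditional atomic swap. The compiled-out variant
 * below expresses the same behavior with __c11_atomic_exchange, assuming the
 * deadline word stays _Atomic and relaxed ordering remains sufficient; the
 * "_alt" name is hypothetical.
 */
#if 0
static boolean_t
sched_perfcontrol_update_callback_deadline_alt(uint64_t new_deadline)
{
	/* atomically install the new deadline and fetch whatever was there */
	uint64_t old_deadline = __c11_atomic_exchange(
	    &sched_perfcontrol_callback_deadline, new_deadline,
	    memory_order_relaxed);

	/* a nonzero previous value means an armed callback was cancelled */
	return (old_deadline != 0) ? TRUE : FALSE;
}
#endif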
5574
5575 #endif /* __arm64__ */
5576
5577 int
5578 sched_get_pset_load_average(processor_set_t pset)
5579 {
5580 return pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
5581 }
5582
5583 void
5584 sched_update_pset_load_average(processor_set_t pset)
5585 {
5586 #if DEBUG
5587 queue_entry_t iter;
5588 int count = 0;
5589 qe_foreach(iter, &pset->active_queue) {
5590 count++;
5591 }
5592 assertf(count == pset->active_processor_count, "count %d pset->active_processor_count %d\n", count, pset->active_processor_count);
5593 #endif
5594
5595 int load = ((pset->active_processor_count + pset->pset_runq.count + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT);
5596 int new_load_average = (pset->load_average + load) >> 1;
5597
5598 pset->load_average = new_load_average;
5599
5600 #if (DEVELOPMENT || DEBUG)
5601 #endif
5602 }
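/*
 * Editorial sketch, not part of the original source: the pset load average
 * above is a fixed-point exponentially weighted moving average. Each update
 * scales the instantaneous active/runnable count up by
 * PSET_LOAD_NUMERATOR_SHIFT and blends it 50/50 with the previous value;
 * sched_get_pset_load_average() then shifts the result back down to
 * PSET_LOAD_FRACTIONAL_SHIFT precision. The compiled-out toy version of the
 * update step below uses the hypothetical name "toy_pset_ewma".
 */
#if 0
static int
toy_pset_ewma(int prev_load_average, int current_count)
{
	/* scale the instantaneous count into the fixed-point domain */
	int load = current_count << PSET_LOAD_NUMERATOR_SHIFT;

	/* average with the previous value: each update halves the old weight */
	return (prev_load_average + load) >> 1;
}
#endif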