/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _KERN_SCHED_AMP_COMMON_H_
#define _KERN_SCHED_AMP_COMMON_H_
/* Routine to initialize processor sets on AMP platforms */
void sched_amp_init(void);
/*
 * The AMP scheduler uses spill/steal/rebalance logic to make sure the most appropriate threads
 * are scheduled on the P/E clusters. Here are the definitions of those terms:
 *
 * - Spill: Spill threads from an overcommitted P-cluster onto the E-cluster. This is needed to make sure
 *   that high priority P-recommended threads experience low scheduling latency in the presence of
 *   lots of P-recommended threads.
 *
 * - Steal: From an E-core, steal a thread from the P-cluster to provide low scheduling latency for
 *   P-recommended threads.
 *
 * - Rebalance: Once a P-core goes idle, check if the E-cores are running any P-recommended threads and
 *   bring them back to run on their recommended cluster type.
 */
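/*
 * Illustrative sketch only (not part of this interface): one plausible way the
 * spill hooks declared below could be composed when a P-recommended thread
 * becomes runnable on a busy P-cluster. The E-cluster pset handle, the priority
 * parameter, and the call site are assumptions for illustration; the real flow
 * lives in the AMP scheduler implementation (sched_amp_common.c).
 */
#if 0 /* example only, never compiled */
static void
example_spill_on_enqueue(processor_set_t pcluster, processor_set_t ecluster,
    thread_t thread, int thread_pri)
{
	/* If the P-cluster is overcommitted for this thread... */
	if (should_spill_to_ecores(pcluster, thread)) {
		/* ...ask the E-cluster to pick up a runnable thread at this priority. */
		pset_signal_spill(ecluster, thread_pri);
	}
}
#endif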
int sched_amp_spill_threshold(processor_set_t pset);
void pset_signal_spill(processor_set_t pset, int spilled_thread_priority);
bool pset_should_accept_spilled_thread(processor_set_t pset, int spilled_thread_priority);
bool should_spill_to_ecores(processor_set_t nset, thread_t thread);
void sched_amp_check_spill(processor_set_t pset, thread_t thread);
int sched_amp_steal_threshold(processor_set_t pset, bool spill_pending);
bool sched_amp_steal_thread_enabled(processor_set_t pset);
void sched_amp_balance(processor_t cprocessor, processor_set_t cpset);
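/*
 * Illustrative sketch only (not part of this interface): per the rebalance
 * description above, a P-core that is about to go idle would invoke
 * sched_amp_balance() so that P-recommended threads currently running on
 * E-cores can be pulled back to their recommended cluster. The idle-path call
 * site below is an assumption for illustration.
 */
#if 0 /* example only, never compiled */
static void
example_pcore_going_idle(processor_t processor)
{
	/* Check the E-cores for misplaced P-recommended threads and signal them back. */
	sched_amp_balance(processor, processor->processor_set);
}
#endif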
sched_ipi_type_t sched_amp_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event);
/* AMP realtime runq management */
rt_queue_t sched_amp_rt_runq(processor_set_t pset);
void sched_amp_rt_init(processor_set_t pset);
void sched_amp_rt_queue_shutdown(processor_t processor);
void sched_amp_rt_runq_scan(sched_update_scan_context_t scan_context);
int64_t sched_amp_rt_runq_count_sum(void);
uint32_t sched_amp_qos_max_parallelism(int qos, uint64_t options);
void sched_amp_bounce_thread_group_from_ecores(processor_set_t pset, struct thread_group *tg);
pset_node_t sched_amp_choose_node(thread_t thread);
#endif /* _KERN_SCHED_AMP_COMMON_H_ */