/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	processor.h:	Processor and processor-related definitions.
 */

#ifndef _KERN_PROCESSOR_H_
#define _KERN_PROCESSOR_H_

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>

#include <sys/cdefs.h>

#ifdef MACH_KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <kern/ast.h>
#include <kern/cpu_number.h>
#include <kern/smp.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_urgency.h>
#include <mach/sfi_class.h>
#include <kern/processor_data.h>
#include <kern/cpu_quiesce.h>
#include <kern/sched_clutch.h>
#include <kern/assert.h>
#include <machine/limits.h>

/*
 * Processor state is accessed by locking the scheduling lock
 * for the assigned processor set.
 *
 *           -------------------- SHUTDOWN
 *          /                     ^     ^
 *        _/                      |      \
 *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
 *         \_________________^   ^ ^______/           /
 *                                \__________________/
 *
 * Most of these state transitions are externally driven as a
 * directive (for instance telling an IDLE processor to start
 * coming out of the idle state to run a thread). However these
 * are typically paired with a handshake by the processor itself
 * to indicate that it has completed a transition of indeterminate
 * length (for example, the DISPATCHING->RUNNING or START->RUNNING
 * transitions must occur on the processor itself).
 *
 * The boot processor has some special cases, and skips the START state,
 * since it has already bootstrapped and is ready to context switch threads.
 *
 * When a processor is in DISPATCHING or RUNNING state, the current_pri,
 * current_thmode, and deadline fields should be set, so that other
 * processors can evaluate if it is an appropriate candidate for preemption.
 */
#if defined(CONFIG_SCHED_DEFERRED_AST)
/*
 *           -------------------- SHUTDOWN
 *          /                     ^     ^
 *        _/                      |      \
 *  OFF_LINE ---> START ---> RUNNING ---> IDLE ---> DISPATCHING
 *         \_________________^   ^ ^______/ ^_____ /  /
 *                                \__________________/
 *
 * A DISPATCHING processor may be put back into IDLE, if another
 * processor determines that the target processor will have nothing to do
 * upon reaching the RUNNING state.  This is racy, but if the target
 * responds and becomes RUNNING, it will not break the processor state
 * machine.
 *
 * This change allows us to cancel an outstanding signal/AST on a processor
 * (if such an operation is supported through hardware or software), and
 * push the processor back into the IDLE state as a power optimization.
 */
#endif

typedef enum {
        PROCESSOR_OFF_LINE      = 0,    /* Not available */
        PROCESSOR_SHUTDOWN      = 1,    /* Going off-line */
        PROCESSOR_START         = 2,    /* Being started */
        PROCESSOR_UNUSED        = 3,    /* Formerly Inactive (unavailable) */
        PROCESSOR_IDLE          = 4,    /* Idle (available) */
        PROCESSOR_DISPATCHING   = 5,    /* Dispatching (idle -> active) */
        PROCESSOR_RUNNING       = 6,    /* Normal execution */
        PROCESSOR_STATE_LEN     = (PROCESSOR_RUNNING + 1)
} processor_state_t;

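/*
 * Illustrative sketch (not lifted from any single call site): an externally
 * driven wakeup takes the pset scheduling lock, moves an IDLE processor to
 * DISPATCHING and signals it; the target processor later completes the
 * handshake on its own by moving itself to RUNNING.  The signalling step is
 * machine dependent and elided here; pset_update_processor_state() is
 * defined later in this header.
 *
 *	pset_lock(pset);
 *	if (processor->state == PROCESSOR_IDLE) {
 *		pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING);
 *		(send a scheduling IPI to processor->cpu_id)
 *	}
 *	pset_unlock(pset);
 *
 *	Later, on the target processor itself:
 *
 *	pset_lock(pset);
 *	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
 *	pset_unlock(pset);
 */
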
typedef enum {
        PSET_SMP,
#if __AMP__
        PSET_AMP_E,
        PSET_AMP_P,
#endif
} pset_cluster_type_t;

typedef bitmap_t cpumap_t;

struct processor_set {
        int                     online_processor_count;
        int                     load_average;

        int                     cpu_set_low, cpu_set_hi;
        int                     cpu_set_count;
        int                     last_chosen;
        cpumap_t                cpu_bitmask;
        cpumap_t                recommended_bitmask;
        cpumap_t                cpu_state_map[PROCESSOR_STATE_LEN];
        cpumap_t                primary_map;
#define SCHED_PSET_TLOCK (1)
#if __SMP__
#if defined(SCHED_PSET_TLOCK)
        /* TODO: reorder struct for temporal cache locality */
        __attribute__((aligned(128))) lck_ticket_t      sched_lock;
#else /* SCHED_PSET_TLOCK */
        __attribute__((aligned(128))) lck_spin_t        sched_lock;     /* lock for above */
#endif /* SCHED_PSET_TLOCK */
#endif

#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
        struct run_queue        pset_runq;      /* runq for this processor set */
#endif
        struct rt_queue         rt_runq;        /* realtime runq for this processor set */
#if CONFIG_SCHED_CLUTCH
        struct sched_clutch_root pset_clutch_root; /* clutch hierarchy root */
#endif /* CONFIG_SCHED_CLUTCH */

#if defined(CONFIG_SCHED_TRADITIONAL)
        int                     pset_runq_bound_count;
        /* # of threads in runq bound to any processor in pset */
#endif

        /* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */
        cpumap_t                pending_AST_URGENT_cpu_mask;
        cpumap_t                pending_AST_PREEMPT_cpu_mask;
#if defined(CONFIG_SCHED_DEFERRED_AST)
        /*
         * A separate mask, for ASTs that we may be able to cancel.  This is dependent on
         * some level of support for requesting an AST on a processor, and then quashing
         * that request later.
         *
         * The purpose of this field (and the associated codepaths) is to infer when we
         * no longer need a processor that is DISPATCHING to come up, and to prevent it
         * from coming out of IDLE if possible.  This should serve to decrease the number
         * of spurious ASTs in the system, and let processors spend longer periods in
         * IDLE.
         */
        cpumap_t                pending_deferred_AST_cpu_mask;
#endif
        cpumap_t                pending_spill_cpu_mask;

        struct ipc_port        *pset_self;              /* port for operations */
        struct ipc_port        *pset_name_self;         /* port for information */

        processor_set_t         pset_list;              /* chain of associated psets */
        pset_node_t             node;
        uint32_t                pset_cluster_id;
        pset_cluster_type_t     pset_cluster_type;
};

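/*
 * Sketch of how the cpumap_t fields above are typically consulted (with the
 * pset scheduling lock held): each map is a bitmap indexed by cpu_id, so
 * membership and per-state queries reduce to bit tests, for example:
 *
 *	bool idle        = bit_test(pset->cpu_state_map[PROCESSOR_IDLE], processor->cpu_id);
 *	bool recommended = bit_test(pset->recommended_bitmask, processor->cpu_id);
 *	bool urgent_ast  = bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id);
 */
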
extern struct processor_set pset0;

struct pset_node {
        processor_set_t         psets;                  /* list of associated psets */
        uint32_t                pset_count;             /* count of associated psets */

        pset_node_t             nodes;                  /* list of associated subnodes */
        pset_node_t             node_list;              /* chain of associated nodes */

        pset_node_t             parent;
};

extern struct pset_node pset_node0;

extern queue_head_t tasks, terminated_tasks, threads, corpse_tasks; /* Terminated tasks are ONLY for stackshot */
extern int tasks_count, terminated_tasks_count, threads_count;
decl_lck_mtx_data(extern, tasks_threads_lock);
decl_lck_mtx_data(extern, tasks_corpse_lock);

struct processor {
        processor_state_t       state;                  /* See above */
        bool                    is_SMT;
        bool                    is_recommended;
        struct thread          *active_thread;          /* thread running on processor */
        struct thread          *idle_thread;            /* this processor's idle thread. */
        struct thread          *startup_thread;

        processor_set_t         processor_set;          /* assigned set */

        int                     current_pri;            /* priority of current thread */
        sfi_class_id_t          current_sfi_class;      /* SFI class of current thread */
        perfcontrol_class_t     current_perfctl_class;  /* Perfcontrol class for current thread */
        pset_cluster_type_t     current_recommended_pset_type; /* Cluster type recommended for current thread */
        thread_urgency_t        current_urgency;        /* cached urgency of current thread */
        bool                    current_is_NO_SMT;      /* cached TH_SFLAG_NO_SMT of current thread */
        bool                    current_is_bound;       /* current thread is bound to this processor */

        int                     starting_pri;           /* priority of current thread as it was when scheduled */
        int                     cpu_id;                 /* platform numeric id */
        cpu_quiescent_state_t   cpu_quiesce_state;
        uint64_t                cpu_quiesce_last_checkin;

        timer_call_data_t       quantum_timer;          /* timer for quantum expiration */
        uint64_t                quantum_end;            /* time when current quantum ends */
        uint64_t                last_dispatch;          /* time of last dispatch */

        uint64_t                kperf_last_sample_time; /* time of last kperf sample */

        uint64_t                deadline;               /* current deadline */
        bool                    first_timeslice;        /* has the quantum expired since context switch */
        bool                    processor_offlined;     /* has the processor been explicitly processor_offline'ed */
        bool                    must_idle;              /* needs to be forced idle as next selected thread is allowed on this processor */

        processor_t             processor_primary;      /* pointer to primary processor for
                                                         * secondary SMT processors, or a pointer
                                                         * to ourselves for primaries or non-SMT */
        processor_t             processor_secondary;

#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ)
        struct run_queue        runq;                   /* runq for this processor */
#endif

#if defined(CONFIG_SCHED_TRADITIONAL)
        int                     runq_bound_count;       /* # of threads bound to this processor */
#endif
#if defined(CONFIG_SCHED_GRRR)
        struct grrr_run_queue   grrr_runq;              /* Group Ratio Round-Robin runq */
#endif
        struct ipc_port        *processor_self;         /* port for operations */

        processor_t             processor_list;         /* all existing processors */
        processor_data_t        processor_data;         /* per-processor data */
};

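/*
 * A primary processor points processor_primary at itself (see the field
 * comment above), so a minimal primary check - the helper name here is
 * illustrative, not part of this interface - looks like:
 *
 *	static inline bool
 *	processor_is_smt_primary(processor_t processor)
 *	{
 *		return processor->processor_primary == processor;
 *	}
 */
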
extern processor_t      processor_list;
decl_simple_lock_data(extern, processor_list_lock);

#define MAX_SCHED_CPUS          64 /* Maximum number of CPUs supported by the scheduler.  bits.h:bitmap_*() macros need to be used to support greater than 64 */
extern processor_t      processor_array[MAX_SCHED_CPUS]; /* array indexed by cpuid */

extern uint32_t         processor_avail_count;
extern uint32_t         processor_avail_count_user;

extern processor_t      master_processor;

extern boolean_t        sched_stats_active;

extern processor_t      current_processor(void);

/* Lock macros, always acquired and released with interrupts disabled (splsched()) */

extern lck_grp_t pset_lck_grp;

#if __SMP__
#if defined(SCHED_PSET_TLOCK)
#define pset_lock_init(p)       lck_ticket_init(&(p)->sched_lock)
#define pset_lock(p)            lck_ticket_lock(&(p)->sched_lock)
#define pset_unlock(p)          lck_ticket_unlock(&(p)->sched_lock)
#define pset_assert_locked(p)   lck_ticket_assert_owned(&(p)->sched_lock)
#else /* SCHED_PSET_TLOCK */
#define pset_lock_init(p)       lck_spin_init(&(p)->sched_lock, &pset_lck_grp, NULL)
#define pset_lock(p)            lck_spin_lock_grp(&(p)->sched_lock, &pset_lck_grp)
#define pset_unlock(p)          lck_spin_unlock(&(p)->sched_lock)
#define pset_assert_locked(p)   LCK_SPIN_ASSERT(&(p)->sched_lock, LCK_ASSERT_OWNED)
#endif /* !SCHED_PSET_TLOCK */

#define rt_lock_lock(p)         simple_lock(&SCHED(rt_runq)(p)->rt_lock, &pset_lck_grp)
#define rt_lock_unlock(p)       simple_unlock(&SCHED(rt_runq)(p)->rt_lock)
#define rt_lock_init(p)         simple_lock_init(&SCHED(rt_runq)(p)->rt_lock, 0)
#else
#define pset_lock(p)            do { (void)p; } while(0)
#define pset_unlock(p)          do { (void)p; } while(0)
#define pset_lock_init(p)       do { (void)p; } while(0)
#define pset_assert_locked(p)   do { (void)p; } while(0)

#define rt_lock_lock(p)         do { (void)p; } while(0)
#define rt_lock_unlock(p)       do { (void)p; } while(0)
#define rt_lock_init(p)         do { (void)p; } while(0)
#endif /* SMP */

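/*
 * Typical usage sketch for the macros above: the pset lock is taken with
 * interrupts disabled via splsched()/splx(), which come from the machine
 * layer rather than this header:
 *
 *	spl_t s = splsched();
 *	pset_lock(pset);
 *	(examine or update pset / processor scheduling state)
 *	pset_unlock(pset);
 *	splx(s);
 */
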
extern void             processor_bootstrap(void);

extern void             processor_init(
        processor_t             processor,
        int                     cpu_id,
        processor_set_t         processor_set);

extern void             processor_set_primary(
        processor_t             processor,
        processor_t             primary);

extern kern_return_t    processor_shutdown(
        processor_t             processor);

extern kern_return_t    processor_start_from_user(
        processor_t             processor);
extern kern_return_t    processor_exit_from_user(
        processor_t             processor);

kern_return_t
sched_processor_enable(processor_t processor, boolean_t enable);

extern void             processor_queue_shutdown(
        processor_t             processor);

extern processor_set_t  processor_pset(
        processor_t             processor);

extern pset_node_t      pset_node_root(void);

extern processor_set_t  pset_create(
        pset_node_t             node);

extern void             pset_init(
        processor_set_t         pset,
        pset_node_t             node);

extern processor_set_t  pset_find(
        uint32_t                cluster_id,
        processor_set_t         default_pset);

extern kern_return_t    processor_info_count(
        processor_flavor_t      flavor,
        mach_msg_type_number_t *count);

#define pset_deallocate(x)
#define pset_reference(x)

extern void             machine_run_count(
        uint32_t                count);

extern processor_t      machine_choose_processor(
        processor_set_t         pset,
        processor_t             processor);

#define next_pset(p)    (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets)

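/*
 * next_pset() walks a node's pset chain circularly, falling back to
 * node->psets at the end of the list, so a full traversal starting from an
 * arbitrary pset follows this sketch:
 *
 *	processor_set_t nset = pset;
 *	do {
 *		(consider nset)
 *		nset = next_pset(nset);
 *	} while (nset != pset);
 */
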
#define PSET_THING_TASK         0
#define PSET_THING_THREAD       1

extern kern_return_t    processor_set_things(
        processor_set_t         pset,
        void                  **thing_list,
        mach_msg_type_number_t *count,
        int                     type);

extern pset_cluster_type_t recommended_pset_type(thread_t thread);

/* Returns true if any CPU in the pset is currently recommended for scheduling. */
inline static bool
pset_is_recommended(processor_set_t pset)
{
        return (pset->recommended_bitmask & pset->cpu_bitmask) != 0;
}

extern void processor_state_update_idle(processor_t processor);
extern void processor_state_update_from_thread(processor_t processor, thread_t thread);
extern void processor_state_update_explicit(processor_t processor, int pri,
    sfi_class_id_t sfi_class, pset_cluster_type_t pset_type,
    perfcontrol_class_t perfctl_class, thread_urgency_t urgency);

#define PSET_LOAD_NUMERATOR_SHIFT   16
#define PSET_LOAD_FRACTIONAL_SHIFT   4

inline static int
sched_get_pset_load_average(processor_set_t pset)
{
        return pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT);
}
extern void sched_update_pset_load_average(processor_set_t pset);

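/*
 * load_average is stored as a fixed-point value with PSET_LOAD_NUMERATOR_SHIFT
 * (16) fractional bits; the getter above shifts right by 16 - 4 = 12, so the
 * caller sees the value with PSET_LOAD_FRACTIONAL_SHIFT (4) fractional bits.
 * For example, a stored load_average of (3 << 16), i.e. a load of 3.0, reads
 * back as 3 << 4 == 48:
 *
 *	pset->load_average = 3 << PSET_LOAD_NUMERATOR_SHIFT;
 *	assert(sched_get_pset_load_average(pset) == (3 << PSET_LOAD_FRACTIONAL_SHIFT));
 */
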
inline static void
pset_update_processor_state(processor_set_t pset, processor_t processor, uint new_state)
{
        pset_assert_locked(pset);

        uint old_state = processor->state;
        uint cpuid = processor->cpu_id;

        assert(processor->processor_set == pset);
        assert(bit_test(pset->cpu_bitmask, cpuid));

        assert(old_state < PROCESSOR_STATE_LEN);
        assert(new_state < PROCESSOR_STATE_LEN);

        processor->state = new_state;

        bit_clear(pset->cpu_state_map[old_state], cpuid);
        bit_set(pset->cpu_state_map[new_state], cpuid);

        if ((old_state == PROCESSOR_RUNNING) || (new_state == PROCESSOR_RUNNING)) {
                sched_update_pset_load_average(pset);
                if (new_state == PROCESSOR_RUNNING) {
                        assert(processor == current_processor());
                }
        }
}

#else   /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

extern void             pset_deallocate(
        processor_set_t         pset);

extern void             pset_reference(
        processor_set_t         pset);

__END_DECLS

#endif  /* MACH_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE
__BEGIN_DECLS
extern unsigned int     processor_count;
extern processor_t      cpu_to_processor(int cpu);

extern kern_return_t    enable_smt_processors(bool enable);

extern boolean_t        processor_in_panic_context(processor_t processor);
__END_DECLS

#endif /* KERNEL_PRIVATE */

#endif  /* _KERN_PROCESSOR_H_ */