]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/processor.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / osfmk / kern / processor.c
CommitLineData
1c79356b 1/*
cb323159 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
0a7de745 31/*
1c79356b
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
1c79356b
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
1c79356b
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
1c79356b
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
1c79356b
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59/*
60 * processor.c: processor and processor_set manipulation routines.
61 */
62
1c79356b
A
63#include <mach/boolean.h>
64#include <mach/policy.h>
91447636 65#include <mach/processor.h>
1c79356b
A
66#include <mach/processor_info.h>
67#include <mach/vm_param.h>
68#include <kern/cpu_number.h>
69#include <kern/host.h>
70#include <kern/machine.h>
71#include <kern/misc_protos.h>
72#include <kern/processor.h>
73#include <kern/sched.h>
74#include <kern/task.h>
75#include <kern/thread.h>
76#include <kern/ipc_host.h>
77#include <kern/ipc_tt.h>
78#include <ipc/ipc_port.h>
79#include <kern/kalloc.h>
80
3e170ce0
A
81#include <security/mac_mach_internal.h>
82
d9a64523
A
83#if defined(CONFIG_XNUPOST)
84
85#include <tests/xnupost.h>
86
87#endif /* CONFIG_XNUPOST */
5ba3f43e 88
1c79356b
A
89/*
90 * Exported interface
91 */
92#include <mach/mach_host_server.h>
91447636 93#include <mach/processor_set_server.h>
1c79356b 94
0a7de745
A
/* The bootstrap (default) processor set and its node. */
struct processor_set pset0;
struct pset_node pset_node0;
/* Protects the pset list hanging off each pset_node. */
decl_simple_lock_data(static, pset_node_lock);

lck_grp_t pset_lck_grp;

/* Global task/thread bookkeeping, protected by tasks_threads_lock. */
queue_head_t tasks;
queue_head_t terminated_tasks;  /* To be used ONLY for stackshot. */
queue_head_t corpse_tasks;
int tasks_count;
int terminated_tasks_count;
queue_head_t threads;
int threads_count;
decl_lck_mtx_data(, tasks_threads_lock);
decl_lck_mtx_data(, tasks_corpse_lock);

/* Singly-linked list of all processors, protected by processor_list_lock. */
processor_t processor_list;
unsigned int processor_count;
static processor_t processor_list_tail;
decl_simple_lock_data(, processor_list_lock);

uint32_t processor_avail_count;
uint32_t processor_avail_count_user;

processor_t master_processor;
int master_cpu = 0;
boolean_t sched_stats_active = FALSE;

/* Dense cpu_id -> processor lookup, populated in processor_init(). */
processor_t processor_array[MAX_SCHED_CPUS] = { 0 };
5ba3f43e 124
d9a64523
A
#if defined(CONFIG_XNUPOST)
kern_return_t ipi_test(void);
extern void arm64_ipi_test(void);

/*
 * XNUPOST self-test: binds the calling thread to each processor in turn
 * and exercises the arm64 IPI machinery from that CPU.
 * Returns KERN_SUCCESS unconditionally; results are reported via T_PASS.
 */
kern_return_t
ipi_test()
{
#if __arm64__
	processor_t p;

	/* Walk every registered processor, running the IPI test locally on each. */
	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		/* Block so the bind takes effect before running on cpu p. */
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running IPI test on cpu %d\n", p->cpu_id);
		arm64_ipi_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running IPI tests");
#else
	T_PASS("Unsupported platform. Not running IPI tests");

#endif /* __arm64__ */

	return KERN_SUCCESS;
}
#endif /* defined(CONFIG_XNUPOST) */
5ba3f43e 155
0a7de745 156int sched_enable_smt = 1;
5ba3f43e 157
1c79356b 158void
91447636 159processor_bootstrap(void)
1c79356b 160{
0a7de745 161 lck_grp_init(&pset_lck_grp, "pset", LCK_GRP_ATTR_NULL);
55e303ae 162
2d21ac55 163 simple_lock_init(&pset_node_lock, 0);
55e303ae 164
0a7de745
A
165 pset_node0.psets = &pset0;
166 pset_init(&pset0, &pset_node0);
167
2d21ac55 168 queue_init(&tasks);
6d2010ae 169 queue_init(&terminated_tasks);
2d21ac55 170 queue_init(&threads);
39037602 171 queue_init(&corpse_tasks);
1c79356b 172
2d21ac55 173 simple_lock_init(&processor_list_lock, 0);
1c79356b 174
2d21ac55 175 master_processor = cpu_to_processor(master_cpu);
1c79356b 176
2d21ac55 177 processor_init(master_processor, master_cpu, &pset0);
1c79356b
A
178}
179
/*
 * Initialize the given processor for the cpu
 * indicated by cpu_id, and assign to the
 * specified processor set.
 */
void
processor_init(
	processor_t processor,
	int cpu_id,
	processor_set_t pset)
{
	spl_t s;

	if (processor != master_processor) {
		/* Scheduler state for master_processor initialized in sched_init() */
		SCHED(processor_init)(processor);
	}

	assert(cpu_id < MAX_SCHED_CPUS);

	/* Per-processor scheduling state: offline, idle, no threads yet. */
	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->startup_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor_state_update_idle(processor);
	processor->starting_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_offlined = false;
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = false;
	processor->is_recommended = true;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;
	processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_NONE;
	processor->cpu_quiesce_last_checkin = 0;
	processor->must_idle = false;

	/* Publish this cpu into the pset's bitmaps under the pset lock. */
	s = splsched();
	pset_lock(pset);
	bit_set(pset->cpu_bitmask, cpu_id);
	bit_set(pset->recommended_bitmask, cpu_id);
	bit_set(pset->primary_map, cpu_id);
	bit_set(pset->cpu_state_map[PROCESSOR_OFF_LINE], cpu_id);
	if (pset->cpu_set_count++ == 0) {
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	} else {
		/* Maintain the [low, hi] cpu_id range for this pset. */
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	/* Append to the global processor list and id->processor array. */
	simple_lock(&processor_list_lock, LCK_GRP_NULL);
	if (processor_list == NULL) {
		processor_list = processor;
	} else {
		processor_list_tail->processor_list = processor;
	}
	processor_list_tail = processor;
	processor_count++;
	processor_array[cpu_id] = processor;
	simple_unlock(&processor_list_lock);
}
1c79356b 248
/*
 * Record the SMT relationship between a (possibly secondary) processor
 * and its primary. When primary != processor, the two are linked as a
 * 2-way SMT pair and the secondary is removed from its pset's primary map.
 */
void
processor_set_primary(
	processor_t processor,
	processor_t primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust primary point for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link primary to secondary, assumes a 2-way SMT model
		 * We'll need to move to a queue if any future architecture
		 * requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = TRUE;
		processor->is_SMT = TRUE;

		processor_set_t pset = processor->processor_set;
		spl_t s = splsched();
		pset_lock(pset);
		/* The secondary is no longer a primary within its pset. */
		bit_clear(pset->primary_map, processor->cpu_id);
		pset_unlock(pset);
		splx(s);
	}
}
278
2d21ac55
A
/* Return the processor set the given processor is currently assigned to. */
processor_set_t
processor_pset(
	processor_t processor)
{
	return processor->processor_set;
}
285
5ba3f43e
A
/*
 * Reset the processor's "currently running" snapshot fields to their
 * idle defaults (used at init and whenever the processor goes idle).
 */
void
processor_state_update_idle(processor_t processor)
{
	processor->current_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->current_recommended_pset_type = PSET_SMP;
	processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE;
	processor->current_urgency = THREAD_URGENCY_NONE;
	processor->current_is_NO_SMT = false;
	processor->current_is_bound = false;
}
297
/*
 * Refresh the processor's "currently running" snapshot fields from the
 * thread that is about to run (or is running) on it.
 */
void
processor_state_update_from_thread(processor_t processor, thread_t thread)
{
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class;
	processor->current_recommended_pset_type = recommended_pset_type(thread);
	processor->current_perfctl_class = thread_get_perfcontrol_class(thread);
	processor->current_urgency = thread_get_urgency(thread, NULL, NULL);
#if DEBUG || DEVELOPMENT
	/* Debug/development kernels also honor the per-task NO_SMT flag. */
	processor->current_is_NO_SMT = (thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT);
#else
	processor->current_is_NO_SMT = (thread->sched_flags & TH_SFLAG_NO_SMT);
#endif
	processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL;
}
313
/*
 * Set the processor's "currently running" snapshot fields from explicit
 * values rather than from a thread (callers supply each field directly).
 */
void
processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class,
    pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class, thread_urgency_t urgency)
{
	processor->current_pri = pri;
	processor->current_sfi_class = sfi_class;
	processor->current_recommended_pset_type = pset_type;
	processor->current_perfctl_class = perfctl_class;
	processor->current_urgency = urgency;
}
324
2d21ac55
A
/* Return the root of the pset node tree (always pset_node0). */
pset_node_t
pset_node_root(void)
{
	return &pset_node0;
}
330
2d21ac55
A
/*
 * Allocate and initialize a new processor set on the given node, and
 * append it to the node's pset list. If the active scheduler does not
 * support multiple psets, the master processor's pset is returned
 * instead. Returns PROCESSOR_SET_NULL if allocation fails.
 */
processor_set_t
pset_create(
	pset_node_t node)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE) {
		return processor_pset(master_processor);
	}

	processor_set_t *prev, pset = kalloc(sizeof(*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock, LCK_GRP_NULL);

		/* Walk to the tail of the node's pset list and splice in. */
		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL) {
			prev = &(*prev)->pset_list;
		}

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return pset;
}
359
5ba3f43e
A
/*
 * Find processor set in specified node with specified cluster_id.
 * Returns default_pset if not found.
 */
processor_set_t
pset_find(
	uint32_t cluster_id,
	processor_set_t default_pset)
{
	simple_lock(&pset_node_lock, LCK_GRP_NULL);
	pset_node_t node = &pset_node0;
	processor_set_t pset = NULL;

	/* Search every node's pset list for a matching cluster id. */
	do {
		pset = node->psets;
		while (pset != NULL) {
			if (pset->pset_cluster_id == cluster_id) {
				break;
			}
			pset = pset->pset_list;
		}
	} while ((node = node->node_list) != NULL);
	simple_unlock(&pset_node_lock);
	if (pset == NULL) {
		return default_pset;
	}
	return pset;
}
388
/*
 * Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t pset,
	pset_node_t node)
{
	if (pset != &pset0) {
		/* Scheduler state for pset0 initialized in sched_init() */
		SCHED(pset_init)(pset);
		SCHED(rt_init)(pset);
	}

	pset->online_processor_count = 0;
	pset->load_average = 0;
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->last_chosen = -1;
	/* All cpu membership / recommendation bitmaps start empty. */
	pset->cpu_bitmask = 0;
	pset->recommended_bitmask = 0;
	pset->primary_map = 0;
	for (uint i = 0; i < PROCESSOR_STATE_LEN; i++) {
		pset->cpu_state_map[i] = 0;
	}
	pset->pending_AST_URGENT_cpu_mask = 0;
	pset->pending_AST_PREEMPT_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset->pending_spill_cpu_mask = 0;
	pset_lock_init(pset);
	/* IPC ports are created lazily (see ipc_pset_init paths). */
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;

	pset->pset_cluster_type = PSET_SMP;
	pset->pset_cluster_id = 0;

	/* Account this pset on its node. */
	simple_lock(&pset_node_lock, LCK_GRP_NULL);
	node->pset_count++;
	simple_unlock(&pset_node_lock);
}
432
1c79356b
A
433kern_return_t
434processor_info_count(
0a7de745
A
435 processor_flavor_t flavor,
436 mach_msg_type_number_t *count)
1c79356b 437{
1c79356b
A
438 switch (flavor) {
439 case PROCESSOR_BASIC_INFO:
440 *count = PROCESSOR_BASIC_INFO_COUNT;
91447636
A
441 break;
442
1c79356b
A
443 case PROCESSOR_CPU_LOAD_INFO:
444 *count = PROCESSOR_CPU_LOAD_INFO_COUNT;
91447636
A
445 break;
446
1c79356b 447 default:
0a7de745 448 return cpu_info_count(flavor, count);
1c79356b 449 }
91447636 450
0a7de745 451 return KERN_SUCCESS;
1c79356b
A
452}
453
454
/*
 * Return information about a processor for the given flavor.
 * PROCESSOR_BASIC_INFO and PROCESSOR_CPU_LOAD_INFO are handled here;
 * everything else is delegated to the machine-dependent cpu_info().
 * On success, *host is set to the real host port's address.
 */
kern_return_t
processor_info(
	processor_t processor,
	processor_flavor_t flavor,
	host_t *host,
	processor_info_t info,
	mach_msg_type_number_t *count)
{
	int cpu_id, state;
	kern_return_t result;

	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	cpu_id = processor->cpu_id;

	switch (flavor) {
	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/* On x86_64, a de-recommended cpu is also reported as not running. */
		if (state == PROCESSOR_OFF_LINE
#if defined(__x86_64__)
		    || !processor->is_recommended
#endif
		    ) {
			basic_info->running = FALSE;
		} else {
			basic_info->running = TRUE;
		}
		basic_info->slot_num = cpu_id;
		if (processor == master_processor) {
			basic_info->is_master = TRUE;
		} else {
			basic_info->is_master = FALSE;
		}

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t cpu_load_info;
		timer_t idle_state;
		uint64_t idle_time_snapshot1, idle_time_snapshot2;
		uint64_t idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			    (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			    (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			/* Without precise timing, report combined time as user. */
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
			    timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
		    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		/* The kernel does not track "nice" time separately. */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return KERN_SUCCESS;
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS) {
			*host = &realhost;
		}

		return result;
	}
}
592
/*
 * Bring a processor online. For the master processor this simply binds
 * to it and calls cpu_start(). For others, the processor transitions
 * OFF_LINE -> START, its idle and startup threads are created if
 * needed, its IPC port is initialized, and cpu_start() is invoked.
 * On any failure the processor is returned to PROCESSOR_OFF_LINE.
 * Returns KERN_FAILURE if the processor was not OFF_LINE.
 */
kern_return_t
processor_start(
	processor_t processor)
{
	processor_set_t pset;
	thread_t thread;
	kern_return_t result;
	spl_t s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (processor == master_processor) {
		processor_t prev;

		/* Run on the master processor itself while starting it. */
		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return result;
	}

	bool scheduler_disable = false;

	if ((processor->processor_primary != processor) && (sched_enable_smt == 0)) {
		if (cpu_can_exit(processor->cpu_id)) {
			return KERN_SUCCESS;
		}
		/*
		 * This secondary SMT processor must start in order to service interrupts,
		 * so instead it will be disabled at the scheduler level.
		 */
		scheduler_disable = true;
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return KERN_FAILURE;
	}

	pset_update_processor_state(pset, processor, PROCESSOR_START);
	pset_unlock(pset);
	splx(s);

	/*
	 * Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* Roll the state back to OFF_LINE on failure. */
			s = splsched();
			pset_lock(pset);
			pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
			pset_unlock(pset);
			splx(s);

			return result;
		}
	}

	/*
	 * If there is no active thread, the processor
	 * has never been started. Create a dedicated
	 * start up thread.
	 */
	if (processor->active_thread == THREAD_NULL &&
	    processor->startup_thread == THREAD_NULL) {
		result = kernel_thread_create(processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
			pset_unlock(pset);
			splx(s);

			return result;
		}

		/* Bind the startup thread to this processor and make it runnable. */
		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->startup_thread = thread;
		thread->state = TH_RUN;
		thread->last_made_runnable_time = mach_absolute_time();
		thread_unlock(thread);
		splx(s);

		/* processor->startup_thread keeps its own reference. */
		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL) {
		ipc_processor_init(processor);
	}

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
		pset_unlock(pset);
		splx(s);

		return result;
	}
	if (scheduler_disable) {
		assert(processor->processor_primary != processor);
		sched_processor_enable(processor, FALSE);
	}

	ipc_processor_enable(processor);

	return KERN_SUCCESS;
}
715
0a7de745 716
1c79356b
A
717kern_return_t
718processor_exit(
0a7de745
A
719 processor_t processor)
720{
721 if (processor == PROCESSOR_NULL) {
722 return KERN_INVALID_ARGUMENT;
723 }
724
725 return processor_shutdown(processor);
726}
727
728
729kern_return_t
730processor_start_from_user(
731 processor_t processor)
732{
733 kern_return_t ret;
734
735 if (processor == PROCESSOR_NULL) {
736 return KERN_INVALID_ARGUMENT;
737 }
738
739 if (!cpu_can_exit(processor->cpu_id)) {
740 ret = sched_processor_enable(processor, TRUE);
741 } else {
742 ret = processor_start(processor);
743 }
744
745 return ret;
746}
747
748kern_return_t
749processor_exit_from_user(
750 processor_t processor)
751{
752 kern_return_t ret;
753
754 if (processor == PROCESSOR_NULL) {
755 return KERN_INVALID_ARGUMENT;
756 }
757
758 if (!cpu_can_exit(processor->cpu_id)) {
759 ret = sched_processor_enable(processor, FALSE);
760 } else {
761 ret = processor_shutdown(processor);
762 }
763
764 return ret;
765}
766
/*
 * Enable or disable all secondary SMT processors, then verify via
 * host_info() that the logical cpu count landed where expected.
 * Returns KERN_INVALID_ARGUMENT on non-SMT systems, KERN_FAILURE if
 * the post-condition check fails, or the host_info() error.
 */
kern_return_t
enable_smt_processors(bool enable)
{
	if (machine_info.logical_cpu_max == machine_info.physical_cpu_max) {
		/* Not an SMT system */
		return KERN_INVALID_ARGUMENT;
	}

	int ncpus = machine_info.logical_cpu_max;

	/* cpu 0 is the boot processor; start at 1 and toggle only secondaries. */
	for (int i = 1; i < ncpus; i++) {
		processor_t processor = processor_array[i];

		if (processor->processor_primary != processor) {
			if (enable) {
				processor_start_from_user(processor);
			} else { /* Disable */
				processor_exit_from_user(processor);
			}
		}
	}

#define BSD_HOST 1
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	kern_return_t kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	if (kret != KERN_SUCCESS) {
		return kret;
	}

	/* After enabling, every logical cpu should be online. */
	if (enable && (hinfo.logical_cpu != hinfo.logical_cpu_max)) {
		return KERN_FAILURE;
	}

	/* After disabling, only the physical (primary) cpus should remain. */
	if (!enable && (hinfo.logical_cpu != hinfo.physical_cpu)) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
807
808kern_return_t
809processor_control(
0a7de745
A
810 processor_t processor,
811 processor_info_t info,
812 mach_msg_type_number_t count)
1c79356b 813{
0a7de745
A
814 if (processor == PROCESSOR_NULL) {
815 return KERN_INVALID_ARGUMENT;
816 }
1c79356b 817
0a7de745 818 return cpu_control(processor->cpu_id, info, count);
1c79356b 819}
0a7de745 820
1c79356b
A
/*
 * Creation of user-visible processor sets is not supported;
 * always fails.
 */
kern_return_t
processor_set_create(
	__unused host_t host,
	__unused processor_set_t *new_set,
	__unused processor_set_t *new_name)
{
	return KERN_FAILURE;
}
829
/*
 * Destruction of processor sets is not supported; always fails.
 */
kern_return_t
processor_set_destroy(
	__unused processor_set_t pset)
{
	return KERN_FAILURE;
}
836
837kern_return_t
838processor_get_assignment(
0a7de745
A
839 processor_t processor,
840 processor_set_t *pset)
1c79356b 841{
2d21ac55 842 int state;
1c79356b 843
0a7de745
A
844 if (processor == PROCESSOR_NULL) {
845 return KERN_INVALID_ARGUMENT;
846 }
316670eb 847
1c79356b 848 state = processor->state;
0a7de745
A
849 if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE) {
850 return KERN_FAILURE;
851 }
1c79356b 852
2d21ac55
A
853 *pset = &pset0;
854
0a7de745 855 return KERN_SUCCESS;
1c79356b
A
856}
857
/*
 * Return information about a processor set for the requested flavor.
 * Basic info reports the available processor count and default policy;
 * the policy default/limit flavors return fixed legacy values (the
 * Mach policy interfaces are otherwise vestigial). On success *host is
 * set to the real host; on an unknown flavor it is set to HOST_NULL.
 */
kern_return_t
processor_set_info(
	processor_set_t pset,
	int flavor,
	host_t *host,
	processor_set_info_t info,
	mach_msg_type_number_t *count)
{
	if (pset == PROCESSOR_SET_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		processor_set_basic_info_t basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (processor_set_basic_info_t) info;
#if defined(__x86_64__)
		/* x86_64 reports the user-visible count (excludes cpus hidden from user). */
		basic_info->processor_count = processor_avail_count_user;
#else
		basic_info->processor_count = processor_avail_count;
#endif
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		policy_timeshare_base_t ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT) {
			return KERN_FAILURE;
		}

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		policy_fifo_base_t fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT) {
			return KERN_FAILURE;
		}

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		policy_rr_base_t rr_base;

		if (*count < POLICY_RR_BASE_COUNT) {
			return KERN_FAILURE;
		}

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		policy_timeshare_limit_t ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT) {
			return KERN_FAILURE;
		}

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		policy_fifo_limit_t fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT) {
			return KERN_FAILURE;
		}

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		policy_rr_limit_t rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT) {
			return KERN_FAILURE;
		}

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		int *enabled;

		if (*count < (sizeof(*enabled) / sizeof(int))) {
			return KERN_FAILURE;
		}

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled) / sizeof(int);
		*host = &realhost;
		return KERN_SUCCESS;
	}


	*host = HOST_NULL;
	return KERN_INVALID_ARGUMENT;
}
986
/*
 * processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 *	Only the default set (&pset0) and PROCESSOR_SET_LOAD_INFO
 *	are supported.
 */
kern_return_t
processor_set_statistics(
	processor_set_t pset,
	int flavor,
	processor_set_info_t info,
	mach_msg_type_number_t *count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_PROCESSOR_SET;
	}

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		processor_set_load_info_t load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (processor_set_load_info_t) info;

		/* Snapshot global scheduler averages and task/thread counts. */
		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	return KERN_INVALID_ARGUMENT;
}
1024
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 *
 *	Unimplemented: legacy pset scheduling control — unconditionally
 *	rejected (all parameters ignored).
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t        pset,
	__unused int                    max_priority,
	__unused boolean_t              change_threads)
{
	return KERN_INVALID_ARGUMENT;
}
1040
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 *
 *	Unimplemented: legacy pset scheduling control — unconditionally
 *	rejected (all parameters ignored).
 */

kern_return_t
processor_set_policy_enable(
	__unused processor_set_t        pset,
	__unused int                    policy)
{
	return KERN_INVALID_ARGUMENT;
}
1054
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 *
 *	Unimplemented: legacy pset scheduling control — unconditionally
 *	rejected (all parameters ignored).
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t        pset,
	__unused int                    policy,
	__unused boolean_t              change_threads)
{
	return KERN_INVALID_ARGUMENT;
}
1069
1c79356b
A
1070/*
1071 * processor_set_things:
1072 *
1073 * Common internals for processor_set_{threads,tasks}
1074 */
1075kern_return_t
1076processor_set_things(
0a7de745 1077 processor_set_t pset,
3e170ce0
A
1078 void **thing_list,
1079 mach_msg_type_number_t *count,
1080 int type)
1c79356b 1081{
39037602 1082 unsigned int i;
3e170ce0
A
1083 task_t task;
1084 thread_t thread;
1085
1086 task_t *task_list;
1087 unsigned int actual_tasks;
1088 vm_size_t task_size, task_size_needed;
1c79356b 1089
3e170ce0
A
1090 thread_t *thread_list;
1091 unsigned int actual_threads;
1092 vm_size_t thread_size, thread_size_needed;
1093
1094 void *addr, *newaddr;
1c79356b 1095 vm_size_t size, size_needed;
1c79356b 1096
0a7de745
A
1097 if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
1098 return KERN_INVALID_ARGUMENT;
1099 }
1c79356b 1100
3e170ce0
A
1101 task_size = 0;
1102 task_size_needed = 0;
1103 task_list = NULL;
1104 actual_tasks = 0;
1105
1106 thread_size = 0;
1107 thread_size_needed = 0;
1108 thread_list = NULL;
1109 actual_threads = 0;
1c79356b
A
1110
1111 for (;;) {
b0d623f7 1112 lck_mtx_lock(&tasks_threads_lock);
1c79356b 1113
1c79356b 1114 /* do we have the memory we need? */
0a7de745 1115 if (type == PSET_THING_THREAD) {
3e170ce0 1116 thread_size_needed = threads_count * sizeof(void *);
0a7de745 1117 }
3e170ce0
A
1118#if !CONFIG_MACF
1119 else
1120#endif
0a7de745 1121 task_size_needed = tasks_count * sizeof(void *);
1c79356b 1122
3e170ce0 1123 if (task_size_needed <= task_size &&
0a7de745 1124 thread_size_needed <= thread_size) {
1c79356b 1125 break;
0a7de745 1126 }
1c79356b 1127
2d21ac55 1128 /* unlock and allocate more memory */
b0d623f7 1129 lck_mtx_unlock(&tasks_threads_lock);
1c79356b 1130
3e170ce0
A
1131 /* grow task array */
1132 if (task_size_needed > task_size) {
0a7de745 1133 if (task_size != 0) {
3e170ce0 1134 kfree(task_list, task_size);
0a7de745 1135 }
1c79356b 1136
3e170ce0
A
1137 assert(task_size_needed > 0);
1138 task_size = task_size_needed;
1c79356b 1139
3e170ce0
A
1140 task_list = (task_t *)kalloc(task_size);
1141 if (task_list == NULL) {
0a7de745 1142 if (thread_size != 0) {
3e170ce0 1143 kfree(thread_list, thread_size);
0a7de745
A
1144 }
1145 return KERN_RESOURCE_SHORTAGE;
3e170ce0
A
1146 }
1147 }
1c79356b 1148
3e170ce0
A
1149 /* grow thread array */
1150 if (thread_size_needed > thread_size) {
0a7de745 1151 if (thread_size != 0) {
3e170ce0 1152 kfree(thread_list, thread_size);
0a7de745 1153 }
1c79356b 1154
3e170ce0
A
1155 assert(thread_size_needed > 0);
1156 thread_size = thread_size_needed;
1157
1158 thread_list = (thread_t *)kalloc(thread_size);
1159 if (thread_list == 0) {
0a7de745 1160 if (task_size != 0) {
3e170ce0 1161 kfree(task_list, task_size);
0a7de745
A
1162 }
1163 return KERN_RESOURCE_SHORTAGE;
3e170ce0
A
1164 }
1165 }
1166 }
91447636 1167
3e170ce0 1168 /* OK, have memory and the list locked */
2d21ac55 1169
3e170ce0
A
1170 /* If we need it, get the thread list */
1171 if (type == PSET_THING_THREAD) {
1172 for (thread = (thread_t)queue_first(&threads);
0a7de745
A
1173 !queue_end(&threads, (queue_entry_t)thread);
1174 thread = (thread_t)queue_next(&thread->threads)) {
3e170ce0
A
1175#if defined(SECURE_KERNEL)
1176 if (thread->task != kernel_task) {
1177#endif
0a7de745
A
1178 thread_reference_internal(thread);
1179 thread_list[actual_threads++] = thread;
3e170ce0 1180#if defined(SECURE_KERNEL)
0a7de745 1181 }
3e170ce0
A
1182#endif
1183 }
1184 }
1185#if !CONFIG_MACF
0a7de745 1186 else {
3e170ce0 1187#endif
0a7de745
A
1188 /* get a list of the tasks */
1189 for (task = (task_t)queue_first(&tasks);
1190 !queue_end(&tasks, (queue_entry_t)task);
1191 task = (task_t)queue_next(&task->tasks)) {
2d21ac55 1192#if defined(SECURE_KERNEL)
0a7de745 1193 if (task != kernel_task) {
2d21ac55 1194#endif
0a7de745
A
1195 task_reference_internal(task);
1196 task_list[actual_tasks++] = task;
2d21ac55 1197#if defined(SECURE_KERNEL)
0a7de745 1198 }
2d21ac55 1199#endif
91447636 1200 }
0a7de745
A
1201#if !CONFIG_MACF
1202}
3e170ce0 1203#endif
2d21ac55 1204
b0d623f7 1205 lck_mtx_unlock(&tasks_threads_lock);
1c79356b 1206
3e170ce0 1207#if CONFIG_MACF
39037602
A
1208 unsigned int j, used;
1209
3e170ce0
A
1210 /* for each task, make sure we are allowed to examine it */
1211 for (i = used = 0; i < actual_tasks; i++) {
1212 if (mac_task_check_expose_task(task_list[i])) {
1213 task_deallocate(task_list[i]);
1214 continue;
1215 }
1216 task_list[used++] = task_list[i];
91447636 1217 }
3e170ce0
A
1218 actual_tasks = used;
1219 task_size_needed = actual_tasks * sizeof(void *);
91447636 1220
3e170ce0 1221 if (type == PSET_THING_THREAD) {
3e170ce0
A
1222 /* for each thread (if any), make sure it's task is in the allowed list */
1223 for (i = used = 0; i < actual_threads; i++) {
1224 boolean_t found_task = FALSE;
1c79356b 1225
3e170ce0
A
1226 task = thread_list[i]->task;
1227 for (j = 0; j < actual_tasks; j++) {
1228 if (task_list[j] == task) {
1229 found_task = TRUE;
1c79356b 1230 break;
1c79356b 1231 }
1c79356b 1232 }
0a7de745 1233 if (found_task) {
3e170ce0 1234 thread_list[used++] = thread_list[i];
0a7de745 1235 } else {
3e170ce0 1236 thread_deallocate(thread_list[i]);
0a7de745 1237 }
1c79356b 1238 }
3e170ce0
A
1239 actual_threads = used;
1240 thread_size_needed = actual_threads * sizeof(void *);
1241
1242 /* done with the task list */
0a7de745 1243 for (i = 0; i < actual_tasks; i++) {
3e170ce0 1244 task_deallocate(task_list[i]);
0a7de745 1245 }
3e170ce0
A
1246 kfree(task_list, task_size);
1247 task_size = 0;
1248 actual_tasks = 0;
1249 task_list = NULL;
1250 }
1251#endif
1c79356b 1252
3e170ce0
A
1253 if (type == PSET_THING_THREAD) {
1254 if (actual_threads == 0) {
1255 /* no threads available to return */
1256 assert(task_size == 0);
0a7de745 1257 if (thread_size != 0) {
3e170ce0 1258 kfree(thread_list, thread_size);
0a7de745 1259 }
3e170ce0
A
1260 *thing_list = NULL;
1261 *count = 0;
1262 return KERN_SUCCESS;
91447636 1263 }
3e170ce0
A
1264 size_needed = actual_threads * sizeof(void *);
1265 size = thread_size;
1266 addr = thread_list;
1267 } else {
1268 if (actual_tasks == 0) {
1269 /* no tasks available to return */
1270 assert(thread_size == 0);
0a7de745 1271 if (task_size != 0) {
3e170ce0 1272 kfree(task_list, task_size);
0a7de745 1273 }
3e170ce0
A
1274 *thing_list = NULL;
1275 *count = 0;
1276 return KERN_SUCCESS;
0a7de745 1277 }
3e170ce0
A
1278 size_needed = actual_tasks * sizeof(void *);
1279 size = task_size;
1280 addr = task_list;
1281 }
1c79356b 1282
3e170ce0
A
1283 /* if we allocated too much, must copy */
1284 if (size_needed < size) {
1285 newaddr = kalloc(size_needed);
1286 if (newaddr == 0) {
1287 for (i = 0; i < actual_tasks; i++) {
0a7de745 1288 if (type == PSET_THING_THREAD) {
3e170ce0 1289 thread_deallocate(thread_list[i]);
0a7de745 1290 } else {
3e170ce0 1291 task_deallocate(task_list[i]);
0a7de745 1292 }
3e170ce0 1293 }
0a7de745 1294 if (size) {
3e170ce0 1295 kfree(addr, size);
0a7de745
A
1296 }
1297 return KERN_RESOURCE_SHORTAGE;
91447636 1298 }
2d21ac55 1299
3e170ce0
A
1300 bcopy((void *) addr, (void *) newaddr, size_needed);
1301 kfree(addr, size);
1302
1303 addr = newaddr;
1304 size = size_needed;
1c79356b
A
1305 }
1306
3e170ce0
A
1307 *thing_list = (void **)addr;
1308 *count = (unsigned int)size / sizeof(void *);
1309
0a7de745 1310 return KERN_SUCCESS;
1c79356b
A
1311}
1312
1313
1314/*
1315 * processor_set_tasks:
1316 *
1317 * List all tasks in the processor set.
1318 */
1319kern_return_t
1320processor_set_tasks(
0a7de745
A
1321 processor_set_t pset,
1322 task_array_t *task_list,
1323 mach_msg_type_number_t *count)
1c79356b 1324{
3e170ce0
A
1325 kern_return_t ret;
1326 mach_msg_type_number_t i;
1327
1328 ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK);
0a7de745 1329 if (ret != KERN_SUCCESS) {
3e170ce0 1330 return ret;
0a7de745 1331 }
3e170ce0
A
1332
1333 /* do the conversion that Mig should handle */
0a7de745 1334 for (i = 0; i < *count; i++) {
3e170ce0 1335 (*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);
0a7de745 1336 }
3e170ce0 1337 return KERN_SUCCESS;
1c79356b
A
1338}
1339
1340/*
1341 * processor_set_threads:
1342 *
1343 * List all threads in the processor set.
1344 */
2d21ac55
A
1345#if defined(SECURE_KERNEL)
1346kern_return_t
1347processor_set_threads(
0a7de745
A
1348 __unused processor_set_t pset,
1349 __unused thread_array_t *thread_list,
1350 __unused mach_msg_type_number_t *count)
2d21ac55 1351{
0a7de745 1352 return KERN_FAILURE;
2d21ac55 1353}
5ba3f43e
A
1354#elif defined(CONFIG_EMBEDDED)
1355kern_return_t
1356processor_set_threads(
0a7de745
A
1357 __unused processor_set_t pset,
1358 __unused thread_array_t *thread_list,
1359 __unused mach_msg_type_number_t *count)
5ba3f43e 1360{
0a7de745 1361 return KERN_NOT_SUPPORTED;
5ba3f43e 1362}
2d21ac55 1363#else
1c79356b
A
1364kern_return_t
1365processor_set_threads(
0a7de745
A
1366 processor_set_t pset,
1367 thread_array_t *thread_list,
1368 mach_msg_type_number_t *count)
1c79356b 1369{
3e170ce0
A
1370 kern_return_t ret;
1371 mach_msg_type_number_t i;
1372
1373 ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD);
0a7de745 1374 if (ret != KERN_SUCCESS) {
3e170ce0 1375 return ret;
0a7de745 1376 }
3e170ce0
A
1377
1378 /* do the conversion that Mig should handle */
0a7de745 1379 for (i = 0; i < *count; i++) {
3e170ce0 1380 (*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);
0a7de745 1381 }
3e170ce0 1382 return KERN_SUCCESS;
1c79356b 1383}
2d21ac55 1384#endif
1c79356b
A
1385
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 *
 *	Unimplemented: legacy pset scheduling control — unconditionally
 *	rejected (all parameters ignored).
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t        pset,
	__unused int                    flavor,
	__unused processor_set_info_t   policy_info,
	__unused mach_msg_type_number_t count,
	__unused boolean_t              change)
{
	return KERN_INVALID_ARGUMENT;
}
2d21ac55
A
1403
/*
 * pset_deallocate: exported no-op.  The #undef strips the macro of the
 * same name (presumably defined in the headers — confirm there) so a
 * real out-of-line symbol is emitted for external callers.  Nothing to
 * release: this file only ever exposes the single static pset0.
 */
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
	__unused processor_set_t        pset)
{
	return;
}
1412
/*
 * pset_reference: exported no-op.  The #undef strips the macro of the
 * same name (presumably defined in the headers — confirm there) so a
 * real out-of-line symbol is emitted for external callers.  No
 * reference counting is performed on the single static pset0.
 */
#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
	__unused processor_set_t        pset)
{
	return;
}
5ba3f43e 1421
cb323159
A
1422
#if CONFIG_SCHED_CLUTCH

/*
 * The clutch scheduler decides the recommendation of a thread based
 * on its thread group's properties and recommendations. The only thread
 * level property it looks at is the bucket for the thread to implement
 * the policy of not running Utility & BG buckets on the P-cores. Any
 * other policy being added to this routine might need to be reflected
 * in places such as sched_clutch_hierarchy_thread_pset() &
 * sched_clutch_migrate_thread_group() which rely on getting the recommendations
 * right.
 *
 * Note: The current implementation does not support TH_SFLAG_ECORE_ONLY &
 * TH_SFLAG_PCORE_ONLY flags which are used for debugging utilities. A similar
 * version of that functionality can be implemented by putting these flags
 * on a thread group instead of individual thread basis.
 *
 */
pset_cluster_type_t
recommended_pset_type(thread_t thread)
{
	/* Uniform (SMP) recommendation for every thread in this build. */
	(void)thread;
	return PSET_SMP;
}

#else /* CONFIG_SCHED_CLUTCH */

/*
 * Non-clutch build: same uniform SMP recommendation; the thread
 * argument is deliberately ignored.
 */
pset_cluster_type_t
recommended_pset_type(thread_t thread)
{
	(void)thread;
	return PSET_SMP;
}

#endif /* CONFIG_SCHED_CLUTCH */