/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/timer.h>

#include <kperf/kperf.h>

#include <ipc/ipc_port.h>

#include <security/mac_mach_internal.h>

#if defined(CONFIG_XNUPOST)
#include <tests/xnupost.h>
#endif /* CONFIG_XNUPOST */

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
struct processor_set    pset0;
struct pset_node        pset_node0;

static SIMPLE_LOCK_DECLARE(pset_node_lock, 0);

LCK_GRP_DECLARE(pset_lck_grp, "pset");

queue_head_t            tasks;
queue_head_t            terminated_tasks;       /* To be used ONLY for stackshot. */
queue_head_t            corpse_tasks;
int                     tasks_count;
int                     terminated_tasks_count;
queue_head_t            threads;
queue_head_t            terminated_threads;
int                     threads_count;
int                     terminated_threads_count;
LCK_GRP_DECLARE(task_lck_grp, "task");
LCK_ATTR_DECLARE(task_lck_attr, 0, 0);
LCK_MTX_DECLARE_ATTR(tasks_threads_lock, &task_lck_grp, &task_lck_attr);
LCK_MTX_DECLARE_ATTR(tasks_corpse_lock, &task_lck_grp, &task_lck_attr);

processor_t             processor_list;
unsigned int            processor_count;
static processor_t      processor_list_tail;
SIMPLE_LOCK_DECLARE(processor_list_lock, 0);

uint32_t                processor_avail_count;
uint32_t                processor_avail_count_user;
uint32_t                primary_processor_avail_count;
uint32_t                primary_processor_avail_count_user;

int                     master_cpu = 0;

struct processor        PERCPU_DATA(processor);
processor_t             processor_array[MAX_SCHED_CPUS] = { 0 };
processor_set_t         pset_array[MAX_PSETS] = { 0 };
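/*
 * Each processor carries a small set of "running timers" (quantum
 * expiration, kperf sampling).  This table maps each RUNNING_TIMER_*
 * slot to its expiration handler; processor_init() below arms a
 * timer_call for every slot.
 */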
static timer_call_func_t running_timer_funcs[] = {
	[RUNNING_TIMER_QUANTUM] = thread_quantum_expire,
	[RUNNING_TIMER_KPERF] = kperf_timer_expire,
};
static_assert(sizeof(running_timer_funcs) / sizeof(running_timer_funcs[0])
    == RUNNING_TIMER_MAX, "missing running timer function");
#if defined(CONFIG_XNUPOST)
kern_return_t ipi_test(void);
extern void arm64_ipi_test(void);

kern_return_t
ipi_test()
{
#if __arm64__
	processor_t p;

	for (p = processor_list; p != NULL; p = p->processor_list) {
		/* bind to the processor under test, then run the test there */
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running IPI test on cpu %d\n", p->cpu_id);
		arm64_ipi_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running IPI tests");
#else /* __arm64__ */
	T_PASS("Unsupported platform. Not running IPI tests");
#endif /* __arm64__ */

	return KERN_SUCCESS;
}
#endif /* defined(CONFIG_XNUPOST) */
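/*
 * Non-zero when secondary SMT processors may be brought fully online by
 * the scheduler; when zero, processor_start() below still boots them
 * (they must run to service interrupts) but leaves them disabled at the
 * scheduler level.
 */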
int sched_enable_smt = 1;
void
processor_bootstrap(void)
{
	pset_node0.psets = &pset0;
	pset_init(&pset0, &pset_node0);

	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);
	queue_init(&terminated_threads);
	queue_init(&corpse_tasks);

	processor_init(master_processor, master_cpu, &pset0);
}
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t            processor,
	int                    cpu_id,
	processor_set_t        pset)
{
	spl_t           s;

	assert(cpu_id < MAX_SCHED_CPUS);
	processor->cpu_id = cpu_id;

	if (processor != master_processor) {
		/* Scheduler state for master_processor initialized in sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->startup_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor_state_update_idle(processor);
	processor->starting_pri = MINPRI;
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_offlined = false;
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = false;
	processor->is_recommended = true;
	processor->processor_self = IP_NULL;
	processor->processor_list = NULL;
	processor->must_idle = false;
	processor->running_timers_active = false;

	for (int i = 0; i < RUNNING_TIMER_MAX; i++) {
		timer_call_setup(&processor->running_timers[i],
		    running_timer_funcs[i], processor);
		running_timer_clear(processor, i);
	}

	timer_init(&processor->idle_state);
	timer_init(&processor->system_state);
	timer_init(&processor->user_state);

	s = splsched();
	pset_lock(pset);
	bit_set(pset->cpu_bitmask, cpu_id);
	bit_set(pset->recommended_bitmask, cpu_id);
	bit_set(pset->primary_map, cpu_id);
	bit_set(pset->cpu_state_map[PROCESSOR_OFF_LINE], cpu_id);
	if (pset->cpu_set_count++ == 0) {
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	} else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock, LCK_GRP_NULL);
	if (processor_list == NULL) {
		processor_list = processor;
	} else {
		processor_list_tail->processor_list = processor;
	}
	processor_list_tail = processor;
	processor_count++;
	processor_array[cpu_id] = processor;
	simple_unlock(&processor_list_lock);
}
bool system_is_SMT = false;

void
processor_set_primary(
	processor_t             processor,
	processor_t             primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust primary point for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link primary to secondary, assumes a 2-way SMT model
		 * We'll need to move to a queue if any future architecture
		 * requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = TRUE;
		processor->is_SMT = TRUE;

		if (!system_is_SMT) {
			system_is_SMT = true;
		}

		processor_set_t pset = processor->processor_set;
		spl_t s = splsched();
		pset_lock(pset);
		if (!pset->is_SMT) {
			pset->is_SMT = true;
		}
		bit_clear(pset->primary_map, processor->cpu_id);
		pset_unlock(pset);
		splx(s);
	}
}

processor_set_t
processor_pset(
	processor_t     processor)
{
	return processor->processor_set;
}
#if CONFIG_SCHED_EDGE

cluster_type_t
pset_type_for_id(uint32_t cluster_id)
{
	return pset_array[cluster_id]->pset_type;
}
/*
 * Processor foreign threads
 *
 * With the Edge scheduler, each pset maintains a bitmap of processors running threads
 * which are foreign to the pset/cluster. A thread is defined as foreign for a cluster
 * if it's of a different type than its preferred cluster type (E/P). The bitmap should
 * be updated every time a new thread is assigned to run on a processor.
 *
 * This bitmap allows the Edge scheduler to quickly find CPUs running foreign threads
 * for rebalancing.
 */
static void
processor_state_update_running_foreign(processor_t processor, thread_t thread)
{
	cluster_type_t current_processor_type = pset_type_for_id(processor->processor_set->pset_cluster_id);
	cluster_type_t thread_type = pset_type_for_id(sched_edge_thread_preferred_cluster(thread));

	/* Update the bitmap for the pset only for unbounded non-RT threads. */
	if ((processor->current_pri < BASEPRI_RTQUEUES) && (thread->bound_processor == PROCESSOR_NULL) && (current_processor_type != thread_type)) {
		bit_set(processor->processor_set->cpu_running_foreign, processor->cpu_id);
	} else {
		bit_clear(processor->processor_set->cpu_running_foreign, processor->cpu_id);
	}
}
#else /* CONFIG_SCHED_EDGE */
static void
processor_state_update_running_foreign(__unused processor_t processor, __unused thread_t thread)
{
}
#endif /* CONFIG_SCHED_EDGE */
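/*
 * Illustrative sketch (not from the original source): a consumer such as
 * the Edge scheduler's rebalancing path can walk a pset's foreign CPUs
 * with the usual bitmap iteration helpers, along the lines of:
 *
 *	for (int cpuid = lsb_first(pset->cpu_running_foreign); cpuid >= 0;
 *	    cpuid = lsb_next(pset->cpu_running_foreign, cpuid)) {
 *		// consider processor_array[cpuid] for rebalancing
 *	}
 */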
void
processor_state_update_idle(processor_t processor)
{
	processor->current_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->current_recommended_pset_type = PSET_SMP;
#if CONFIG_THREAD_GROUPS
	processor->current_thread_group = NULL;
#endif
	processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE;
	processor->current_urgency = THREAD_URGENCY_NONE;
	processor->current_is_NO_SMT = false;
	processor->current_is_bound = false;
	processor->current_is_eagerpreempt = false;
	os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], TH_BUCKET_SCHED_MAX, relaxed);
}
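/*
 * Publish the scheduling-relevant state of an on-core thread into its
 * processor, so that other CPUs (pset scans, AST/IPI targeting, SFI)
 * can inspect what is running without taking the thread's lock.
 */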
void
processor_state_update_from_thread(processor_t processor, thread_t thread)
{
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class;
	processor->current_recommended_pset_type = recommended_pset_type(thread);
	processor_state_update_running_foreign(processor, thread);
	/* Since idle and bound threads are not tracked by the edge scheduler, ignore when those threads go on-core */
	sched_bucket_t bucket = ((thread->state & TH_IDLE) || (thread->bound_processor != PROCESSOR_NULL)) ? TH_BUCKET_SCHED_MAX : thread->th_sched_bucket;
	os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], bucket, relaxed);

#if CONFIG_THREAD_GROUPS
	processor->current_thread_group = thread_group_get(thread);
#endif
	processor->current_perfctl_class = thread_get_perfcontrol_class(thread);
	processor->current_urgency = thread_get_urgency(thread, NULL, NULL);
	processor->current_is_NO_SMT = thread_no_smt(thread);
	processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL;
	processor->current_is_eagerpreempt = thread_is_eager_preempt(thread);
}

void
processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class,
    pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class, thread_urgency_t urgency, sched_bucket_t bucket)
{
	processor->current_pri = pri;
	processor->current_sfi_class = sfi_class;
	processor->current_recommended_pset_type = pset_type;
	processor->current_perfctl_class = perfctl_class;
	processor->current_urgency = urgency;
	os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], bucket, relaxed);
}
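/*
 * The explicit variant above is presumably for callers that must publish
 * values without a thread in hand (e.g. around idle and dispatch
 * transitions); the caller supplies every field directly.
 */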
processor_set_t
pset_create(
	pset_node_t node)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE) {
		return processor_pset(master_processor);
	}

	processor_set_t *prev, pset = zalloc_permanent_type(struct processor_set);

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock, LCK_GRP_NULL);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL) {
			prev = &(*prev)->pset_list;
		}
		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return pset;
}
/*
 *	Find processor set with specified cluster_id.
 *	Returns default_pset if not found.
 */
processor_set_t
pset_find(
	uint32_t cluster_id,
	processor_set_t default_pset)
{
	simple_lock(&pset_node_lock, LCK_GRP_NULL);
	pset_node_t node = &pset_node0;
	processor_set_t pset = NULL;

	do {
		pset = node->psets;
		while (pset != NULL) {
			if (pset->pset_cluster_id == cluster_id) {
				break;
			}
			pset = pset->pset_list;
		}
	} while (pset == NULL && (node = node->node_list) != NULL);
	simple_unlock(&pset_node_lock);
	if (pset == NULL) {
		pset = default_pset;
	}
	return pset;
}
#if !defined(RC_HIDE_XNU_FIRESTORM) && (MAX_CPU_CLUSTERS > 2)

/*
 * Find the first processor_set for the given pset_cluster_type.
 * Should be removed with rdar://57340304, as it's only
 * useful for the workaround described in rdar://57306691.
 */
processor_set_t
pset_find_first_by_cluster_type(
	pset_cluster_type_t pset_cluster_type)
{
	simple_lock(&pset_node_lock, LCK_GRP_NULL);
	pset_node_t node = &pset_node0;
	processor_set_t pset = NULL;

	do {
		pset = node->psets;
		while (pset != NULL) {
			if (pset->pset_cluster_type == pset_cluster_type) {
				break;
			}
			pset = pset->pset_list;
		}
	} while (pset == NULL && (node = node->node_list) != NULL);
	simple_unlock(&pset_node_lock);
	return pset;
}

#endif /* !defined(RC_HIDE_XNU_FIRESTORM) && (MAX_CPU_CLUSTERS > 2) */
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t         pset,
	pset_node_t             node)
{
	static uint32_t pset_count = 0;

	if (pset != &pset0) {
		/*
		 * Scheduler runqueue initialization for non-boot psets.
		 * This initialization for pset0 happens in sched_init().
		 */
		SCHED(pset_init)(pset);
		SCHED(rt_init)(pset);
	}

	pset->online_processor_count = 0;
	pset->load_average = 0;
	bzero(&pset->pset_load_average, sizeof(pset->pset_load_average));
#if CONFIG_SCHED_EDGE
	bzero(&pset->pset_execution_time, sizeof(pset->pset_execution_time));
#endif /* CONFIG_SCHED_EDGE */
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->last_chosen = -1;
	pset->cpu_bitmask = 0;
	pset->recommended_bitmask = 0;
	pset->primary_map = 0;
	pset->realtime_map = 0;
	pset->cpu_running_foreign = 0;

	for (uint i = 0; i < PROCESSOR_STATE_LEN; i++) {
		pset->cpu_state_map[i] = 0;
	}
	pset->pending_AST_URGENT_cpu_mask = 0;
	pset->pending_AST_PREEMPT_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset->pending_spill_cpu_mask = 0;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;

	/*
	 * The pset_cluster_type & pset_cluster_id for all psets
	 * on the platform are initialized as part of the SCHED(init).
	 * That works well for small cluster platforms; for large cluster
	 * count systems, it might be cleaner to do all the setup
	 * dynamically in SCHED(pset_init).
	 *
	 * <Edge Multi-cluster Support Needed>
	 */
	pset->is_SMT = false;

	simple_lock(&pset_node_lock, LCK_GRP_NULL);
	pset->pset_id = pset_count++;
	bit_set(node->pset_map, pset->pset_id);
	simple_unlock(&pset_node_lock);

	pset_array[pset->pset_id] = pset;
}
kern_return_t
processor_info_count(
	processor_flavor_t              flavor,
	mach_msg_type_number_t  *count)
{
	switch (flavor) {
	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return cpu_info_count(flavor, count);
	}

	return KERN_SUCCESS;
}
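/*
 * Flavors not understood here are deferred to the machine-dependent
 * layer: cpu_info_count() above, and cpu_info() in processor_info()
 * below.
 */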
kern_return_t
processor_info(
	processor_t     processor,
	processor_flavor_t              flavor,
	host_t                  *host,
	processor_info_t                info,
	mach_msg_type_number_t  *count)
{
	int     cpu_id, state;
	kern_return_t   result;

	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	cpu_id = processor->cpu_id;

	switch (flavor) {
	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t          basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE
#if defined(__x86_64__)
		    || !processor->is_recommended
#endif
		    ) {
			basic_info->running = FALSE;
		} else {
			basic_info->running = TRUE;
		}
		basic_info->slot_num = cpu_id;
		if (processor == master_processor) {
			basic_info->is_master = TRUE;
		} else {
			basic_info->is_master = FALSE;
		}

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t       cpu_load_info;
		timer_t         idle_state;
		uint64_t        idle_time_snapshot1, idle_time_snapshot2;
		uint64_t        idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			    (uint32_t)(timer_grab(&processor->user_state) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			    (uint32_t)(timer_grab(&processor->system_state) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&processor->user_state) +
			    timer_grab(&processor->system_state);

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &processor->idle_state;
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (processor->current_state != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
		    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return KERN_SUCCESS;
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS) {
			*host = &realhost;
		}

		return result;
	}
}
kern_return_t
processor_start(
	processor_t                     processor)
{
	processor_set_t         pset;
	thread_t                thread;
	kern_return_t           result;
	spl_t                   s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (processor == master_processor) {
		processor_t             prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return result;
	}

	bool scheduler_disable = false;

	if ((processor->processor_primary != processor) && (sched_enable_smt == 0)) {
		if (cpu_can_exit(processor->cpu_id)) {
			return KERN_SUCCESS;
		}
		/*
		 * This secondary SMT processor must start in order to service interrupts,
		 * so instead it will be disabled at the scheduler level.
		 */
		scheduler_disable = true;
	}

	ml_cpu_begin_state_transition(processor->cpu_id);
	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);
		ml_cpu_end_state_transition(processor->cpu_id);

		return KERN_FAILURE;
	}

	pset_update_processor_state(pset, processor, PROCESSOR_START);
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
			pset_unlock(pset);
			splx(s);
			ml_cpu_end_state_transition(processor->cpu_id);

			return result;
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (processor->active_thread == THREAD_NULL &&
	    processor->startup_thread == THREAD_NULL) {
		result = kernel_thread_create(processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
			pset_unlock(pset);
			splx(s);
			ml_cpu_end_state_transition(processor->cpu_id);

			return result;
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->startup_thread = thread;
		thread->state = TH_RUN;
		thread->last_made_runnable_time = mach_absolute_time();
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL) {
		ipc_processor_init(processor);
	}

	ml_broadcast_cpu_event(CPU_BOOT_REQUESTED, processor->cpu_id);
	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
		pset_unlock(pset);
		splx(s);
		ml_cpu_end_state_transition(processor->cpu_id);

		return result;
	}
	if (scheduler_disable) {
		assert(processor->processor_primary != processor);
		sched_processor_enable(processor, FALSE);
	}

	ipc_processor_enable(processor);
	ml_cpu_end_state_transition(processor->cpu_id);
	ml_broadcast_cpu_event(CPU_ACTIVE, processor->cpu_id);

	return KERN_SUCCESS;
}
kern_return_t
processor_exit(
	processor_t     processor)
{
	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return processor_shutdown(processor);
}
kern_return_t
processor_start_from_user(
	processor_t                     processor)
{
	kern_return_t ret;

	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!cpu_can_exit(processor->cpu_id)) {
		ret = sched_processor_enable(processor, TRUE);
	} else {
		ret = processor_start(processor);
	}

	return ret;
}
kern_return_t
processor_exit_from_user(
	processor_t     processor)
{
	kern_return_t ret;

	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!cpu_can_exit(processor->cpu_id)) {
		ret = sched_processor_enable(processor, FALSE);
	} else {
		ret = processor_shutdown(processor);
	}

	return ret;
}
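/*
 * Enable or disable every secondary SMT processor, then cross-check via
 * host_info(HOST_BASIC_INFO) that the visible logical CPU count matches
 * the requested state.
 */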
kern_return_t
enable_smt_processors(bool enable)
{
	if (machine_info.logical_cpu_max == machine_info.physical_cpu_max) {
		/* Not an SMT system */
		return KERN_INVALID_ARGUMENT;
	}

	int ncpus = machine_info.logical_cpu_max;

	for (int i = 1; i < ncpus; i++) {
		processor_t processor = processor_array[i];

		if (processor->processor_primary != processor) {
			if (enable) {
				processor_start_from_user(processor);
			} else { /* Disable */
				processor_exit_from_user(processor);
			}
		}
	}

	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	kern_return_t kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
	if (kret != KERN_SUCCESS) {
		return kret;
	}

	if (enable && (hinfo.logical_cpu != hinfo.logical_cpu_max)) {
		return KERN_FAILURE;
	}

	if (!enable && (hinfo.logical_cpu != hinfo.physical_cpu)) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
kern_return_t
processor_control(
	processor_t             processor,
	processor_info_t        info,
	mach_msg_type_number_t  count)
{
	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_control(processor->cpu_id, info, count);
}
kern_return_t
processor_set_create(
	__unused host_t         host,
	__unused processor_set_t        *new_set,
	__unused processor_set_t        *new_name)
{
	return KERN_FAILURE;
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t        pset)
{
	return KERN_FAILURE;
}
kern_return_t
processor_get_assignment(
	processor_t     processor,
	processor_set_t *pset)
{
	int state;

	if (processor == PROCESSOR_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE) {
		return KERN_FAILURE;
	}

	*pset = &pset0;

	return KERN_SUCCESS;
}
kern_return_t
processor_set_info(
	processor_set_t         pset,
	int                     flavor,
	host_t                  *host,
	processor_set_info_t    info,
	mach_msg_type_number_t  *count)
{
	if (pset == PROCESSOR_SET_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		processor_set_basic_info_t      basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (processor_set_basic_info_t) info;
#if defined(__x86_64__)
		basic_info->processor_count = processor_avail_count_user;
#else
		basic_info->processor_count = processor_avail_count;
#endif
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		policy_timeshare_base_t ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT) {
			return KERN_FAILURE;
		}

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		policy_fifo_base_t              fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT) {
			return KERN_FAILURE;
		}

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		policy_rr_base_t                rr_base;

		if (*count < POLICY_RR_BASE_COUNT) {
			return KERN_FAILURE;
		}

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		policy_timeshare_limit_t        ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT) {
			return KERN_FAILURE;
		}

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		policy_fifo_limit_t             fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT) {
			return KERN_FAILURE;
		}

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		policy_rr_limit_t               rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT) {
			return KERN_FAILURE;
		}

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return KERN_SUCCESS;
	} else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		int                             *enabled;

		if (*count < (sizeof(*enabled) / sizeof(int))) {
			return KERN_FAILURE;
		}

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled) / sizeof(int);
		*host = &realhost;
		return KERN_SUCCESS;
	}

	return KERN_INVALID_ARGUMENT;
}
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t         pset,
	int                     flavor,
	processor_set_info_t    info,
	mach_msg_type_number_t  *count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_PROCESSOR_SET;
	}

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		processor_set_load_info_t     load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	return KERN_INVALID_ARGUMENT;
}
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t        pset,
	__unused int                    max_priority,
	__unused boolean_t              change_threads)
{
	return KERN_INVALID_ARGUMENT;
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t        pset,
	__unused int                    policy)
{
	return KERN_INVALID_ARGUMENT;
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t        pset,
	__unused int                    policy,
	__unused boolean_t              change_threads)
{
	return KERN_INVALID_ARGUMENT;
}
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
static kern_return_t
processor_set_things(
	processor_set_t pset,
	void **thing_list,
	mach_msg_type_number_t *count,
	int type,
	mach_task_flavor_t flavor)
{
	unsigned int i;
	task_t task;
	thread_t thread;

	task_t *task_list;
	unsigned int actual_tasks;
	vm_size_t task_size, task_size_needed;

	thread_t *thread_list;
	unsigned int actual_threads;
	vm_size_t thread_size, thread_size_needed;

	void *addr, *newaddr;
	vm_size_t size, size_needed;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0) {
		return KERN_INVALID_ARGUMENT;
	}

	task_size = 0;
	task_size_needed = 0;
	task_list = NULL;
	actual_tasks = 0;

	thread_size = 0;
	thread_size_needed = 0;
	thread_list = NULL;
	actual_threads = 0;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		/* do we have the memory we need? */
		if (type == PSET_THING_THREAD) {
			thread_size_needed = threads_count * sizeof(void *);
		}
		task_size_needed = tasks_count * sizeof(void *);

		if (task_size_needed <= task_size &&
		    thread_size_needed <= thread_size) {
			break;
		}

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		/* grow task array */
		if (task_size_needed > task_size) {
			if (task_size != 0) {
				kfree(task_list, task_size);
			}

			assert(task_size_needed > 0);
			task_size = task_size_needed;

			task_list = (task_t *)kalloc(task_size);
			if (task_list == NULL) {
				if (thread_size != 0) {
					kfree(thread_list, thread_size);
				}
				return KERN_RESOURCE_SHORTAGE;
			}
		}

		/* grow thread array */
		if (thread_size_needed > thread_size) {
			if (thread_size != 0) {
				kfree(thread_list, thread_size);
			}

			assert(thread_size_needed > 0);
			thread_size = thread_size_needed;

			thread_list = (thread_t *)kalloc(thread_size);
			if (thread_list == 0) {
				if (task_size != 0) {
					kfree(task_list, task_size);
				}
				return KERN_RESOURCE_SHORTAGE;
			}
		}
	}

	/* OK, have memory and the list locked */

	/* If we need it, get the thread list */
	if (type == PSET_THING_THREAD) {
		for (thread = (thread_t)queue_first(&threads);
		    !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
#if defined(SECURE_KERNEL)
			if (thread->task != kernel_task) {
#endif
			thread_reference_internal(thread);
			thread_list[actual_threads++] = thread;
#if defined(SECURE_KERNEL)
		}
#endif
		}
	}

	/* get a list of the tasks */
	for (task = (task_t)queue_first(&tasks);
	    !queue_end(&tasks, (queue_entry_t)task);
	    task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
		if (task != kernel_task) {
#endif
		task_reference_internal(task);
		task_list[actual_tasks++] = task;
#if defined(SECURE_KERNEL)
	}
#endif
	}

	lck_mtx_unlock(&tasks_threads_lock);

#if CONFIG_MACF
	unsigned int j, used;

	/* for each task, make sure we are allowed to examine it */
	for (i = used = 0; i < actual_tasks; i++) {
		if (mac_task_check_expose_task(task_list[i], flavor)) {
			task_deallocate(task_list[i]);
			continue;
		}
		task_list[used++] = task_list[i];
	}
	actual_tasks = used;
	task_size_needed = actual_tasks * sizeof(void *);

	if (type == PSET_THING_THREAD) {
		/* for each thread (if any), make sure its task is in the allowed list */
		for (i = used = 0; i < actual_threads; i++) {
			boolean_t found_task = FALSE;

			task = thread_list[i]->task;
			for (j = 0; j < actual_tasks; j++) {
				if (task_list[j] == task) {
					found_task = TRUE;
					break;
				}
			}
			if (found_task) {
				thread_list[used++] = thread_list[i];
			} else {
				thread_deallocate(thread_list[i]);
			}
		}
		actual_threads = used;
		thread_size_needed = actual_threads * sizeof(void *);

		/* done with the task list */
		for (i = 0; i < actual_tasks; i++) {
			task_deallocate(task_list[i]);
		}
		kfree(task_list, task_size);
		task_size = 0;
		actual_tasks = 0;
		task_list = NULL;
	}
#endif /* CONFIG_MACF */

	if (type == PSET_THING_THREAD) {
		if (actual_threads == 0) {
			/* no threads available to return */
			assert(task_size == 0);
			if (thread_size != 0) {
				kfree(thread_list, thread_size);
			}
			*thing_list = NULL;
			*count = 0;
			return KERN_SUCCESS;
		}
		size_needed = actual_threads * sizeof(void *);
		size = thread_size;
		addr = thread_list;
	} else {
		if (actual_tasks == 0) {
			/* no tasks available to return */
			assert(thread_size == 0);
			if (task_size != 0) {
				kfree(task_list, task_size);
			}
			*thing_list = NULL;
			*count = 0;
			return KERN_SUCCESS;
		}
		size_needed = actual_tasks * sizeof(void *);
		size = task_size;
		addr = task_list;
	}

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual_tasks; i++) {
				if (type == PSET_THING_THREAD) {
					thread_deallocate(thread_list[i]);
				} else {
					task_deallocate(task_list[i]);
				}
			}
			kfree(addr, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		bcopy((void *) addr, (void *) newaddr, size_needed);
		kfree(addr, size);

		addr = newaddr;
		size = size_needed;
	}

	*thing_list = (void **)addr;
	*count = (unsigned int)size / sizeof(void *);

	return KERN_SUCCESS;
}
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
static kern_return_t
processor_set_tasks_internal(
	processor_set_t         pset,
	task_array_t            *task_list,
	mach_msg_type_number_t  *count,
	mach_task_flavor_t      flavor)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK, flavor);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	/* do the conversion that Mig should handle */
	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
		for (i = 0; i < *count; i++) {
			if ((*task_list)[i] == current_task()) {
				/* if current_task(), return pinned port */
				(*task_list)[i] = (task_t)convert_task_to_port_pinned((*task_list)[i]);
			} else {
				(*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);
			}
		}
		break;
	case TASK_FLAVOR_READ:
		for (i = 0; i < *count; i++) {
			(*task_list)[i] = (task_t)convert_task_read_to_port((*task_list)[i]);
		}
		break;
	case TASK_FLAVOR_INSPECT:
		for (i = 0; i < *count; i++) {
			(*task_list)[i] = (task_t)convert_task_inspect_to_port((*task_list)[i]);
		}
		break;
	case TASK_FLAVOR_NAME:
		for (i = 0; i < *count; i++) {
			(*task_list)[i] = (task_t)convert_task_name_to_port((*task_list)[i]);
		}
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
kern_return_t
processor_set_tasks(
	processor_set_t         pset,
	task_array_t            *task_list,
	mach_msg_type_number_t  *count)
{
	return processor_set_tasks_internal(pset, task_list, count, TASK_FLAVOR_CONTROL);
}
/*
 *	processor_set_tasks_with_flavor:
 *
 *	Based on flavor, return task/inspect/read port to all tasks in the processor set.
 */
kern_return_t
processor_set_tasks_with_flavor(
	processor_set_t         pset,
	mach_task_flavor_t      flavor,
	task_array_t            *task_list,
	mach_msg_type_number_t  *count)
{
	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
	case TASK_FLAVOR_NAME:
		return processor_set_tasks_internal(pset, task_list, count, flavor);
	default:
		return KERN_INVALID_ARGUMENT;
	}
}
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t                pset,
	__unused thread_array_t         *thread_list,
	__unused mach_msg_type_number_t *count)
{
	return KERN_FAILURE;
}
#elif !defined(XNU_TARGET_OS_OSX)
kern_return_t
processor_set_threads(
	__unused processor_set_t                pset,
	__unused thread_array_t         *thread_list,
	__unused mach_msg_type_number_t *count)
{
	return KERN_NOT_SUPPORTED;
}
#else
kern_return_t
processor_set_threads(
	processor_set_t         pset,
	thread_array_t          *thread_list,
	mach_msg_type_number_t  *count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD, TASK_FLAVOR_CONTROL);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++) {
		(*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);
	}
	return KERN_SUCCESS;
}
#endif
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t                pset,
	__unused int                            flavor,
	__unused processor_set_info_t   policy_info,
	__unused mach_msg_type_number_t count,
	__unused boolean_t                      change)
{
	return KERN_INVALID_ARGUMENT;
}

#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
	__unused processor_set_t        pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
	__unused processor_set_t        pset)
{
	return;
}
#if CONFIG_THREAD_GROUPS

pset_cluster_type_t
thread_group_pset_recommendation(__unused struct thread_group *tg, __unused cluster_type_t recommendation)
{
#if __AMP__
	switch (recommendation) {
	case CLUSTER_TYPE_SMP:
	default:
		/*
		 * In case of SMP recommendations, check if the thread
		 * group has special flags which restrict it to the E
		 * cluster.
		 */
		if (thread_group_smp_restricted(tg)) {
			return PSET_AMP_E;
		}
		return PSET_AMP_P;
	case CLUSTER_TYPE_E:
		return PSET_AMP_E;
	case CLUSTER_TYPE_P:
		return PSET_AMP_P;
	}
#else /* __AMP__ */
	return PSET_SMP;
#endif /* __AMP__ */
}

#endif /* CONFIG_THREAD_GROUPS */
pset_cluster_type_t
recommended_pset_type(thread_t thread)
{
#if CONFIG_THREAD_GROUPS && __AMP__
	if (thread == THREAD_NULL) {
		return PSET_AMP_E;
	}

	if (thread->sched_flags & TH_SFLAG_ECORE_ONLY) {
		return PSET_AMP_E;
	} else if (thread->sched_flags & TH_SFLAG_PCORE_ONLY) {
		return PSET_AMP_P;
	}

	if (thread->base_pri <= MAXPRI_THROTTLE) {
		if (os_atomic_load(&sched_perfctl_policy_bg, relaxed) != SCHED_PERFCTL_POLICY_FOLLOW_GROUP) {
			return PSET_AMP_E;
		}
	} else if (thread->base_pri <= BASEPRI_UTILITY) {
		if (os_atomic_load(&sched_perfctl_policy_util, relaxed) != SCHED_PERFCTL_POLICY_FOLLOW_GROUP) {
			return PSET_AMP_E;
		}
	}

#if DEVELOPMENT || DEBUG
	extern bool system_ecore_only;
	extern processor_set_t pcore_set;
	if (system_ecore_only) {
		if (thread->task->pset_hint == pcore_set) {
			return PSET_AMP_P;
		}
		return PSET_AMP_E;
	}
#endif

	struct thread_group *tg = thread_group_get(thread);
	cluster_type_t recommendation = thread_group_recommendation(tg);
	switch (recommendation) {
	case CLUSTER_TYPE_SMP:
	default:
		if (thread->task == kernel_task) {
			return PSET_AMP_E;
		}
		return PSET_AMP_P;
	case CLUSTER_TYPE_E:
		return PSET_AMP_E;
	case CLUSTER_TYPE_P:
		return PSET_AMP_P;
	}
#else
	(void)thread;
	return PSET_SMP;
#endif /* CONFIG_THREAD_GROUPS && __AMP__ */
}
#if CONFIG_THREAD_GROUPS && __AMP__

void
sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit)
{
	sched_perfctl_class_policy_t sched_policy = inherit ? SCHED_PERFCTL_POLICY_FOLLOW_GROUP : SCHED_PERFCTL_POLICY_RESTRICT_E;

	KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_PERFCTL_POLICY_CHANGE) | DBG_FUNC_NONE, perfctl_class, sched_policy, 0, 0);

	switch (perfctl_class) {
	case PERFCONTROL_CLASS_UTILITY:
		os_atomic_store(&sched_perfctl_policy_util, sched_policy, relaxed);
		break;
	case PERFCONTROL_CLASS_BACKGROUND:
		os_atomic_store(&sched_perfctl_policy_bg, sched_policy, relaxed);
		break;
	default:
		panic("perfctl_class invalid");
		break;
	}
}

#elif defined(__arm64__)

/* Define a stub routine since this symbol is exported on all arm64 platforms */
void
sched_perfcontrol_inherit_recommendation_from_tg(__unused perfcontrol_class_t perfctl_class, __unused boolean_t inherit)
{
}

#endif /* defined(__arm64__) */