/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <security/mac_mach_internal.h>

#if defined(CONFIG_XNUPOST)
#include <tests/xnupost.h>
#endif /* CONFIG_XNUPOST */

/*
 * Exported interface
 */
#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

struct processor_set	pset0;
struct pset_node	pset_node0;
decl_simple_lock_data(static, pset_node_lock)

queue_head_t		tasks;
queue_head_t		terminated_tasks;	/* To be used ONLY for stackshot. */
queue_head_t		corpse_tasks;
int			tasks_count;
int			terminated_tasks_count;
queue_head_t		threads;
int			threads_count;

decl_lck_mtx_data(, tasks_threads_lock)
decl_lck_mtx_data(, tasks_corpse_lock)

processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(, processor_list_lock)

uint32_t		processor_avail_count;

processor_t		master_processor;
int			master_cpu = 0;
boolean_t		sched_stats_active = FALSE;

processor_t		processor_array[MAX_SCHED_CPUS] = { 0 };

#if defined(CONFIG_XNUPOST)
kern_return_t ipi_test(void);
extern void arm64_ipi_test(void);

kern_return_t
ipi_test()
{
#if __arm64__
	processor_t p;

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running IPI test on cpu %d\n", p->cpu_id);
		arm64_ipi_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running IPI tests");
#else /* __arm64__ */
	T_PASS("Unsupported platform. Not running IPI tests");
#endif /* __arm64__ */

	return KERN_SUCCESS;
}
#endif /* defined(CONFIG_XNUPOST) */

void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);
	queue_init(&corpse_tasks);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}

/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t		processor,
	int			cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state for master_processor initialized in sched_init() */
		SCHED(processor_init)(processor);
	}

	assert(cpu_id < MAX_SCHED_CPUS);

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor_state_update_idle(processor);
	processor->starting_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_primary = processor;	/* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = FALSE;
	processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;
	processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_NONE;
	processor->cpu_quiesce_last_checkin = 0;

	s = splsched();
	pset_lock(pset);
	bit_set(pset->cpu_bitmask, cpu_id);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	processor_array[cpu_id] = processor;
	simple_unlock(&processor_list_lock);
}

void
processor_set_primary(
	processor_t		processor,
	processor_t		primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust primary point for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link primary to secondary, assumes a 2-way SMT model
		 * We'll need to move to a queue if any future architecture
		 * requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = TRUE;
		processor->is_SMT = TRUE;

		processor_set_t pset = processor->processor_set;
		atomic_bit_clear(&pset->primary_map, processor->cpu_id, memory_order_relaxed);
	}
}
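
/*
 * Illustrative sketch (hypothetical bring-up code, not taken from this
 * file): on a 2-way SMT part, the platform layer could pair each logical
 * CPU with its core's primary roughly as follows; the even/odd cpu_id
 * pairing is an assumption for illustration only, as real sibling
 * topology comes from the platform layer.
 *
 *	processor_t primary = cpu_to_processor(cpu_id & ~1);
 *	processor_set_primary(cpu_to_processor(cpu_id), primary);
 */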

processor_set_t
processor_pset(
	processor_t	processor)
{
	return (processor->processor_set);
}

void
processor_state_update_idle(processor_t processor)
{
	processor->current_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->current_recommended_pset_type = PSET_SMP;
	processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE;
}

void
processor_state_update_from_thread(processor_t processor, thread_t thread)
{
	processor->current_pri = thread->sched_pri;
	processor->current_sfi_class = thread->sfi_class;
	processor->current_recommended_pset_type = recommended_pset_type(thread);
	processor->current_perfctl_class = thread_get_perfcontrol_class(thread);
}

void
processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class,
    pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class)
{
	processor->current_pri = pri;
	processor->current_sfi_class = sfi_class;
	processor->current_recommended_pset_type = pset_type;
	processor->current_perfctl_class = perfctl_class;
}

processor_set_t
pset_create(
	pset_node_t		node)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE)
		return processor_pset(master_processor);

	processor_set_t	*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}

/*
 *	Find processor set in specified node with specified cluster_id.
 *	Returns default_pset if not found.
 */
processor_set_t
pset_find(
	uint32_t		cluster_id,
	processor_set_t		default_pset)
{
	simple_lock(&pset_node_lock);
	pset_node_t node = &pset_node0;
	processor_set_t pset = NULL;

	do {
		pset = node->psets;
		while (pset != NULL) {
			if (pset->pset_cluster_id == cluster_id)
				break;
			pset = pset->pset_list;
		}
	} while ((node = node->node_list) != NULL);
	simple_unlock(&pset_node_lock);
	if (pset == NULL)
		pset = default_pset;
	return (pset);
}
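
/*
 * Illustrative sketch: callers typically supply a fallback set so the
 * lookup cannot come back NULL, e.g.
 *
 *	processor_set_t pset = pset_find(cluster_id, processor_pset(master_processor));
 */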

/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t		node)
{
	if (pset != &pset0) {
		/* Scheduler state for pset0 initialized in sched_init() */
		SCHED(pset_init)(pset);
		SCHED(rt_init)(pset);
	}

	pset->online_processor_count = 0;
	pset->load_average = 0;
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->last_chosen = -1;
	pset->cpu_bitmask = 0;
	pset->recommended_bitmask = ~0ULL;
	pset->primary_map = ~0ULL;
	pset->cpu_state_map[PROCESSOR_OFF_LINE] = ~0ULL;
	for (uint i = PROCESSOR_SHUTDOWN; i < PROCESSOR_STATE_LEN; i++) {
		pset->cpu_state_map[i] = 0;
	}
	pset->pending_AST_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset->pending_spill_cpu_mask = 0;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
	pset->pset_cluster_type = PSET_SMP;
	pset->pset_cluster_id = 0;
}

kern_return_t
processor_info_count(
	processor_flavor_t	flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}

kern_return_t
processor_info(
	processor_t		processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	int		cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t				idle_state;
		uint64_t			idle_time_snapshot1, idle_time_snapshot2;
		uint64_t			idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			    (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			    (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
			    timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
		    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			    (uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
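
/*
 * Illustrative user-space sketch (not part of this file): the
 * PROCESSOR_CPU_LOAD_INFO flavor handled above is what feeds per-CPU tick
 * counts to tools such as top(1). A minimal caller, built as an ordinary
 * user program:
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		natural_t ncpu;
 *		processor_info_array_t info;
 *		mach_msg_type_number_t info_count;
 *
 *		if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *		    &ncpu, &info, &info_count) != KERN_SUCCESS)
 *			return 1;
 *
 *		processor_cpu_load_info_t load = (processor_cpu_load_info_t)info;
 *		for (natural_t i = 0; i < ncpu; i++)
 *			printf("cpu %u: user %u system %u idle %u\n", i,
 *			    load[i].cpu_ticks[CPU_STATE_USER],
 *			    load[i].cpu_ticks[CPU_STATE_SYSTEM],
 *			    load[i].cpu_ticks[CPU_STATE_IDLE]);
 *
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		    info_count * sizeof(integer_t));
 *		return 0;
 *	}
 */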

kern_return_t
processor_start(
	processor_t		processor)
{
	processor_set_t		pset;
	thread_t		thread;
	kern_return_t		result;
	spl_t			s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	pset_update_processor_state(pset, processor, PROCESSOR_START);
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (processor->active_thread == THREAD_NULL &&
	    processor->next_thread == THREAD_NULL) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread->last_made_runnable_time = mach_absolute_time();
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
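
/*
 * The master_processor branch above uses the bind-then-block idiom to
 * migrate the calling thread onto the target processor before poking
 * per-cpu state; ipi_test() earlier in this file does the same. A minimal
 * sketch of the idiom ("target" is a placeholder name):
 *
 *	processor_t prev = thread_bind(target);
 *	thread_block(THREAD_CONTINUE_NULL);	resumes on "target"
 *	... per-cpu work ...
 *	thread_bind(prev);			restore previous binding
 *	thread_block(THREAD_CONTINUE_NULL);
 */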

kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}

kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->cpu_id, info, count));
}

kern_return_t
processor_set_create(
	__unused host_t			host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_get_assignment(
	processor_t	processor,
	processor_set_t	*pset)
{
	int state;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = &pset0;

	return(KERN_SUCCESS);
}

kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		policy_fifo_base_t	fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		policy_rr_base_t	rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		policy_fifo_limit_t	fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		policy_rr_limit_t	rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		int	*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}
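
/*
 * Illustrative user-space sketch: the PROCESSOR_SET_LOAD_INFO flavor
 * handled above is reachable without special privilege through the
 * default set's name port:
 *
 *	processor_set_name_t name;
 *	struct processor_set_load_info load;
 *	mach_msg_type_number_t count = PROCESSOR_SET_LOAD_INFO_COUNT;
 *
 *	processor_set_default(mach_host_self(), &name);
 *	processor_set_statistics(name, PROCESSOR_SET_LOAD_INFO,
 *	    (processor_set_info_t)&load, &count);
 *
 * load.load_average and load.mach_factor are fixed-point values scaled
 * by LOAD_SCALE.
 */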

/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	void			**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int i;
	task_t task;
	thread_t thread;

	task_t *task_list;
	unsigned int actual_tasks;
	vm_size_t task_size, task_size_needed;

	thread_t *thread_list;
	unsigned int actual_threads;
	vm_size_t thread_size, thread_size_needed;

	void *addr, *newaddr;
	vm_size_t size, size_needed;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	task_size = 0;
	task_size_needed = 0;
	task_list = NULL;
	actual_tasks = 0;

	thread_size = 0;
	thread_size_needed = 0;
	thread_list = NULL;
	actual_threads = 0;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		/* do we have the memory we need? */
		if (type == PSET_THING_THREAD)
			thread_size_needed = threads_count * sizeof(void *);
#if !CONFIG_MACF
		else
#endif
			task_size_needed = tasks_count * sizeof(void *);

		if (task_size_needed <= task_size &&
		    thread_size_needed <= thread_size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		/* grow task array */
		if (task_size_needed > task_size) {
			if (task_size != 0)
				kfree(task_list, task_size);

			assert(task_size_needed > 0);
			task_size = task_size_needed;

			task_list = (task_t *)kalloc(task_size);
			if (task_list == NULL) {
				if (thread_size != 0)
					kfree(thread_list, thread_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}

		/* grow thread array */
		if (thread_size_needed > thread_size) {
			if (thread_size != 0)
				kfree(thread_list, thread_size);

			assert(thread_size_needed > 0);
			thread_size = thread_size_needed;

			thread_list = (thread_t *)kalloc(thread_size);
			if (thread_list == 0) {
				if (task_size != 0)
					kfree(task_list, task_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}
	}

	/* OK, have memory and the list locked */

	/* If we need it, get the thread list */
	if (type == PSET_THING_THREAD) {
		for (thread = (thread_t)queue_first(&threads);
		    !queue_end(&threads, (queue_entry_t)thread);
		    thread = (thread_t)queue_next(&thread->threads)) {
#if defined(SECURE_KERNEL)
			if (thread->task != kernel_task) {
#endif
				thread_reference_internal(thread);
				thread_list[actual_threads++] = thread;
#if defined(SECURE_KERNEL)
			}
#endif
		}
	}
#if !CONFIG_MACF
	else {
#endif
		/* get a list of the tasks */
		for (task = (task_t)queue_first(&tasks);
		    !queue_end(&tasks, (queue_entry_t)task);
		    task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual_tasks++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}
#if !CONFIG_MACF
	}
#endif

	lck_mtx_unlock(&tasks_threads_lock);

#if CONFIG_MACF
	unsigned int j, used;

	/* for each task, make sure we are allowed to examine it */
	for (i = used = 0; i < actual_tasks; i++) {
		if (mac_task_check_expose_task(task_list[i])) {
			task_deallocate(task_list[i]);
			continue;
		}
		task_list[used++] = task_list[i];
	}
	actual_tasks = used;
	task_size_needed = actual_tasks * sizeof(void *);

	if (type == PSET_THING_THREAD) {

		/* for each thread (if any), make sure its task is in the allowed list */
		for (i = used = 0; i < actual_threads; i++) {
			boolean_t found_task = FALSE;

			task = thread_list[i]->task;
			for (j = 0; j < actual_tasks; j++) {
				if (task_list[j] == task) {
					found_task = TRUE;
					break;
				}
			}
			if (found_task)
				thread_list[used++] = thread_list[i];
			else
				thread_deallocate(thread_list[i]);
		}
		actual_threads = used;
		thread_size_needed = actual_threads * sizeof(void *);

		/* done with the task list */
		for (i = 0; i < actual_tasks; i++)
			task_deallocate(task_list[i]);
		kfree(task_list, task_size);
		task_size = 0;
		actual_tasks = 0;
		task_list = NULL;
	}
#endif

	if (type == PSET_THING_THREAD) {
		if (actual_threads == 0) {
			/* no threads available to return */
			assert(task_size == 0);
			if (thread_size != 0)
				kfree(thread_list, thread_size);
			*thing_list = NULL;
			*count = 0;

			return KERN_SUCCESS;
		}
		size_needed = actual_threads * sizeof(void *);
		size = thread_size;
		addr = thread_list;
	} else {
		if (actual_tasks == 0) {
			/* no tasks available to return */
			assert(thread_size == 0);
			if (task_size != 0)
				kfree(task_list, task_size);
			*thing_list = NULL;
			*count = 0;

			return KERN_SUCCESS;
		}
		size_needed = actual_tasks * sizeof(void *);
		size = task_size;
		addr = task_list;
	}

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual_tasks; i++) {
				if (type == PSET_THING_THREAD)
					thread_deallocate(thread_list[i]);
				else
					task_deallocate(task_list[i]);
			}
			if (size)
				kfree(addr, size);
			return (KERN_RESOURCE_SHORTAGE);
		}

		bcopy((void *) addr, (void *) newaddr, size_needed);
		kfree(addr, size);

		addr = newaddr;
		size = size_needed;
	}

	*thing_list = (void **)addr;
	*count = (unsigned int)size / sizeof(void *);

	return (KERN_SUCCESS);
}
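
/*
 * The loop at the top of processor_set_things() is a snapshot-allocate-retry
 * pattern: size the arrays under the lock, and if they are too small, drop
 * the lock, grow them, and re-check, since tasks_count/threads_count may
 * have changed while unlocked. A minimal sketch of the same pattern, with
 * hypothetical lock()/unlock()/grow() helpers standing in for the lck_mtx
 * and kalloc calls used above:
 *
 *	for (;;) {
 *		lock(&list_lock);
 *		needed = item_count * sizeof(void *);
 *		if (needed <= have)
 *			break;			big enough; keep the lock held
 *		unlock(&list_lock);
 *		grow(&buf, &have, needed);	allocate outside the lock
 *	}
 *	... fill buf under the lock, then unlock ...
 */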

/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK);
	if (ret != KERN_SUCCESS)
		return ret;

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++)
		(*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);
	return KERN_SUCCESS;
}
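
/*
 * Illustrative user-space sketch: enumerating tasks through this interface
 * requires the privileged host port (host_processor_set_priv() is expected
 * to fail for unprivileged callers):
 *
 *	processor_set_name_t name;
 *	processor_set_t pset;
 *	task_array_t task_list;
 *	mach_msg_type_number_t task_count;
 *
 *	processor_set_default(mach_host_self(), &name);
 *	host_processor_set_priv(mach_host_self(), name, &pset);
 *	processor_set_tasks(pset, &task_list, &task_count);
 *
 * Each returned entry is a task port the caller must release with
 * mach_port_deallocate(), and the array itself is out-of-line memory to
 * be released with vm_deallocate().
 */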

/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t	pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_FAILURE;
}
#elif defined(CONFIG_EMBEDDED)
kern_return_t
processor_set_threads(
	__unused processor_set_t	pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_NOT_SUPPORTED;
}
#else
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD);
	if (ret != KERN_SUCCESS)
		return ret;

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++)
		(*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);
	return KERN_SUCCESS;
}
#endif

/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t	pset,
	__unused int			flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		change)
{
	return (KERN_INVALID_ARGUMENT);
}

#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
	__unused processor_set_t	pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
	__unused processor_set_t	pset)
{
	return;
}

pset_cluster_type_t
recommended_pset_type(thread_t thread)
{
	(void)thread;
	return PSET_SMP;
}