2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
87 struct processor_set pset0
;
88 struct pset_node pset_node0
;
89 decl_simple_lock_data(static,pset_node_lock
)
92 queue_head_t terminated_tasks
; /* To be used ONLY for stackshot. */
96 decl_lck_mtx_data(,tasks_threads_lock
)
98 processor_t processor_list
;
99 unsigned int processor_count
;
100 static processor_t processor_list_tail
;
101 decl_simple_lock_data(,processor_list_lock
)
103 uint32_t processor_avail_count
;
105 processor_t master_processor
;
107 boolean_t sched_stats_active
= FALSE
;
110 kern_return_t
processor_set_things(
111 processor_set_t pset
,
112 mach_port_t
**thing_list
,
113 mach_msg_type_number_t
*count
,
117 processor_bootstrap(void)
119 pset_init(&pset0
, &pset_node0
);
120 pset_node0
.psets
= &pset0
;
122 simple_lock_init(&pset_node_lock
, 0);
125 queue_init(&terminated_tasks
);
126 queue_init(&threads
);
128 simple_lock_init(&processor_list_lock
, 0);
130 master_processor
= cpu_to_processor(master_cpu
);
132 processor_init(master_processor
, master_cpu
, &pset0
);
136 * Initialize the given processor for the cpu
137 * indicated by cpu_id, and assign to the
138 * specified processor set.
142 processor_t processor
,
144 processor_set_t pset
)
148 if (processor
!= master_processor
) {
149 /* Scheduler state deferred until sched_init() */
150 SCHED(processor_init
)(processor
);
153 processor
->state
= PROCESSOR_OFF_LINE
;
154 processor
->active_thread
= processor
->next_thread
= processor
->idle_thread
= THREAD_NULL
;
155 processor
->processor_set
= pset
;
156 processor
->current_pri
= MINPRI
;
157 processor
->current_thmode
= TH_MODE_NONE
;
158 processor
->cpu_id
= cpu_id
;
159 timer_call_setup(&processor
->quantum_timer
, thread_quantum_expire
, processor
);
160 processor
->deadline
= UINT64_MAX
;
161 processor
->timeslice
= 0;
162 processor
->processor_meta
= PROCESSOR_META_NULL
;
163 processor
->processor_self
= IP_NULL
;
164 processor_data_init(processor
);
165 processor
->processor_list
= NULL
;
169 if (pset
->cpu_set_count
++ == 0)
170 pset
->cpu_set_low
= pset
->cpu_set_hi
= cpu_id
;
172 pset
->cpu_set_low
= (cpu_id
< pset
->cpu_set_low
)? cpu_id
: pset
->cpu_set_low
;
173 pset
->cpu_set_hi
= (cpu_id
> pset
->cpu_set_hi
)? cpu_id
: pset
->cpu_set_hi
;
178 simple_lock(&processor_list_lock
);
179 if (processor_list
== NULL
)
180 processor_list
= processor
;
182 processor_list_tail
->processor_list
= processor
;
183 processor_list_tail
= processor
;
185 simple_unlock(&processor_list_lock
);
190 processor_t processor
,
193 processor_meta_t pmeta
= primary
->processor_meta
;
195 if (pmeta
== PROCESSOR_META_NULL
) {
196 pmeta
= kalloc(sizeof (*pmeta
));
198 queue_init(&pmeta
->idle_queue
);
200 pmeta
->primary
= primary
;
203 processor
->processor_meta
= pmeta
;
208 processor_t processor
)
210 return (processor
->processor_set
);
223 processor_set_t
*prev
, pset
= kalloc(sizeof (*pset
));
225 if (pset
!= PROCESSOR_SET_NULL
) {
226 pset_init(pset
, node
);
228 simple_lock(&pset_node_lock
);
231 while (*prev
!= PROCESSOR_SET_NULL
)
232 prev
= &(*prev
)->pset_list
;
236 simple_unlock(&pset_node_lock
);
243 * Initialize the given processor_set structure.
247 processor_set_t pset
,
250 if (pset
!= &pset0
) {
251 /* Scheduler state deferred until sched_init() */
252 SCHED(pset_init
)(pset
);
255 queue_init(&pset
->active_queue
);
256 queue_init(&pset
->idle_queue
);
257 pset
->online_processor_count
= 0;
258 pset_pri_init_hint(pset
, PROCESSOR_NULL
);
259 pset_count_init_hint(pset
, PROCESSOR_NULL
);
260 pset
->cpu_set_low
= pset
->cpu_set_hi
= 0;
261 pset
->cpu_set_count
= 0;
262 pset_lock_init(pset
);
263 pset
->pset_self
= IP_NULL
;
264 pset
->pset_name_self
= IP_NULL
;
265 pset
->pset_list
= PROCESSOR_SET_NULL
;
270 processor_info_count(
271 processor_flavor_t flavor
,
272 mach_msg_type_number_t
*count
)
276 case PROCESSOR_BASIC_INFO
:
277 *count
= PROCESSOR_BASIC_INFO_COUNT
;
280 case PROCESSOR_CPU_LOAD_INFO
:
281 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
285 return (cpu_info_count(flavor
, count
));
288 return (KERN_SUCCESS
);
294 register processor_t processor
,
295 processor_flavor_t flavor
,
297 processor_info_t info
,
298 mach_msg_type_number_t
*count
)
300 register int cpu_id
, state
;
301 kern_return_t result
;
303 if (processor
== PROCESSOR_NULL
)
304 return (KERN_INVALID_ARGUMENT
);
306 cpu_id
= processor
->cpu_id
;
310 case PROCESSOR_BASIC_INFO
:
312 register processor_basic_info_t basic_info
;
314 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
315 return (KERN_FAILURE
);
317 basic_info
= (processor_basic_info_t
) info
;
318 basic_info
->cpu_type
= slot_type(cpu_id
);
319 basic_info
->cpu_subtype
= slot_subtype(cpu_id
);
320 state
= processor
->state
;
321 if (state
== PROCESSOR_OFF_LINE
)
322 basic_info
->running
= FALSE
;
324 basic_info
->running
= TRUE
;
325 basic_info
->slot_num
= cpu_id
;
326 if (processor
== master_processor
)
327 basic_info
->is_master
= TRUE
;
329 basic_info
->is_master
= FALSE
;
331 *count
= PROCESSOR_BASIC_INFO_COUNT
;
334 return (KERN_SUCCESS
);
337 case PROCESSOR_CPU_LOAD_INFO
:
339 processor_cpu_load_info_t cpu_load_info
;
340 timer_data_t idle_temp
;
343 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
344 return (KERN_FAILURE
);
346 cpu_load_info
= (processor_cpu_load_info_t
) info
;
347 if (precise_user_kernel_time
) {
348 cpu_load_info
->cpu_ticks
[CPU_STATE_USER
] =
349 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, user_state
)) / hz_tick_interval
);
350 cpu_load_info
->cpu_ticks
[CPU_STATE_SYSTEM
] =
351 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, system_state
)) / hz_tick_interval
);
353 uint64_t tval
= timer_grab(&PROCESSOR_DATA(processor
, user_state
)) +
354 timer_grab(&PROCESSOR_DATA(processor
, system_state
));
356 cpu_load_info
->cpu_ticks
[CPU_STATE_USER
] = (uint32_t)(tval
/ hz_tick_interval
);
357 cpu_load_info
->cpu_ticks
[CPU_STATE_SYSTEM
] = 0;
360 idle_state
= &PROCESSOR_DATA(processor
, idle_state
);
361 idle_temp
= *idle_state
;
363 if (PROCESSOR_DATA(processor
, current_state
) != idle_state
||
364 timer_grab(&idle_temp
) != timer_grab(idle_state
)) {
365 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
366 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, idle_state
)) / hz_tick_interval
);
368 timer_advance(&idle_temp
, mach_absolute_time() - idle_temp
.tstamp
);
370 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
371 (uint32_t)(timer_grab(&idle_temp
) / hz_tick_interval
);
374 cpu_load_info
->cpu_ticks
[CPU_STATE_NICE
] = 0;
376 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
379 return (KERN_SUCCESS
);
383 result
= cpu_info(flavor
, cpu_id
, info
, count
);
384 if (result
== KERN_SUCCESS
)
393 processor_t processor
)
395 processor_set_t pset
;
397 kern_return_t result
;
400 if (processor
== PROCESSOR_NULL
|| processor
->processor_set
== PROCESSOR_SET_NULL
)
401 return (KERN_INVALID_ARGUMENT
);
403 if (processor
== master_processor
) {
406 prev
= thread_bind(processor
);
407 thread_block(THREAD_CONTINUE_NULL
);
409 result
= cpu_start(processor
->cpu_id
);
417 pset
= processor
->processor_set
;
419 if (processor
->state
!= PROCESSOR_OFF_LINE
) {
423 return (KERN_FAILURE
);
426 processor
->state
= PROCESSOR_START
;
431 * Create the idle processor thread.
433 if (processor
->idle_thread
== THREAD_NULL
) {
434 result
= idle_thread_create(processor
);
435 if (result
!= KERN_SUCCESS
) {
438 processor
->state
= PROCESSOR_OFF_LINE
;
447 * If there is no active thread, the processor
448 * has never been started. Create a dedicated
451 if ( processor
->active_thread
== THREAD_NULL
&&
452 processor
->next_thread
== THREAD_NULL
) {
453 result
= kernel_thread_create((thread_continue_t
)processor_start_thread
, NULL
, MAXPRI_KERNEL
, &thread
);
454 if (result
!= KERN_SUCCESS
) {
457 processor
->state
= PROCESSOR_OFF_LINE
;
466 thread
->bound_processor
= processor
;
467 processor
->next_thread
= thread
;
468 thread
->state
= TH_RUN
;
469 thread_unlock(thread
);
472 thread_deallocate(thread
);
475 if (processor
->processor_self
== IP_NULL
)
476 ipc_processor_init(processor
);
478 result
= cpu_start(processor
->cpu_id
);
479 if (result
!= KERN_SUCCESS
) {
482 processor
->state
= PROCESSOR_OFF_LINE
;
489 ipc_processor_enable(processor
);
491 return (KERN_SUCCESS
);
496 processor_t processor
)
498 if (processor
== PROCESSOR_NULL
)
499 return(KERN_INVALID_ARGUMENT
);
501 return(processor_shutdown(processor
));
506 processor_t processor
,
507 processor_info_t info
,
508 mach_msg_type_number_t count
)
510 if (processor
== PROCESSOR_NULL
)
511 return(KERN_INVALID_ARGUMENT
);
513 return(cpu_control(processor
->cpu_id
, info
, count
));
517 processor_set_create(
518 __unused host_t host
,
519 __unused processor_set_t
*new_set
,
520 __unused processor_set_t
*new_name
)
522 return(KERN_FAILURE
);
526 processor_set_destroy(
527 __unused processor_set_t pset
)
529 return(KERN_FAILURE
);
533 processor_get_assignment(
534 processor_t processor
,
535 processor_set_t
*pset
)
539 if (processor
== PROCESSOR_NULL
)
540 return(KERN_INVALID_ARGUMENT
);
542 state
= processor
->state
;
543 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
544 return(KERN_FAILURE
);
548 return(KERN_SUCCESS
);
553 processor_set_t pset
,
556 processor_set_info_t info
,
557 mach_msg_type_number_t
*count
)
559 if (pset
== PROCESSOR_SET_NULL
)
560 return(KERN_INVALID_ARGUMENT
);
562 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
563 register processor_set_basic_info_t basic_info
;
565 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
566 return(KERN_FAILURE
);
568 basic_info
= (processor_set_basic_info_t
) info
;
569 basic_info
->processor_count
= processor_avail_count
;
570 basic_info
->default_policy
= POLICY_TIMESHARE
;
572 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
574 return(KERN_SUCCESS
);
576 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
577 register policy_timeshare_base_t ts_base
;
579 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
580 return(KERN_FAILURE
);
582 ts_base
= (policy_timeshare_base_t
) info
;
583 ts_base
->base_priority
= BASEPRI_DEFAULT
;
585 *count
= POLICY_TIMESHARE_BASE_COUNT
;
587 return(KERN_SUCCESS
);
589 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
590 register policy_fifo_base_t fifo_base
;
592 if (*count
< POLICY_FIFO_BASE_COUNT
)
593 return(KERN_FAILURE
);
595 fifo_base
= (policy_fifo_base_t
) info
;
596 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
598 *count
= POLICY_FIFO_BASE_COUNT
;
600 return(KERN_SUCCESS
);
602 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
603 register policy_rr_base_t rr_base
;
605 if (*count
< POLICY_RR_BASE_COUNT
)
606 return(KERN_FAILURE
);
608 rr_base
= (policy_rr_base_t
) info
;
609 rr_base
->base_priority
= BASEPRI_DEFAULT
;
610 rr_base
->quantum
= 1;
612 *count
= POLICY_RR_BASE_COUNT
;
614 return(KERN_SUCCESS
);
616 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
617 register policy_timeshare_limit_t ts_limit
;
619 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
620 return(KERN_FAILURE
);
622 ts_limit
= (policy_timeshare_limit_t
) info
;
623 ts_limit
->max_priority
= MAXPRI_KERNEL
;
625 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
627 return(KERN_SUCCESS
);
629 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
630 register policy_fifo_limit_t fifo_limit
;
632 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
633 return(KERN_FAILURE
);
635 fifo_limit
= (policy_fifo_limit_t
) info
;
636 fifo_limit
->max_priority
= MAXPRI_KERNEL
;
638 *count
= POLICY_FIFO_LIMIT_COUNT
;
640 return(KERN_SUCCESS
);
642 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
643 register policy_rr_limit_t rr_limit
;
645 if (*count
< POLICY_RR_LIMIT_COUNT
)
646 return(KERN_FAILURE
);
648 rr_limit
= (policy_rr_limit_t
) info
;
649 rr_limit
->max_priority
= MAXPRI_KERNEL
;
651 *count
= POLICY_RR_LIMIT_COUNT
;
653 return(KERN_SUCCESS
);
655 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
656 register int *enabled
;
658 if (*count
< (sizeof(*enabled
)/sizeof(int)))
659 return(KERN_FAILURE
);
661 enabled
= (int *) info
;
662 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
664 *count
= sizeof(*enabled
)/sizeof(int);
666 return(KERN_SUCCESS
);
671 return(KERN_INVALID_ARGUMENT
);
675 * processor_set_statistics
677 * Returns scheduling statistics for a processor set.
680 processor_set_statistics(
681 processor_set_t pset
,
683 processor_set_info_t info
,
684 mach_msg_type_number_t
*count
)
686 if (pset
== PROCESSOR_SET_NULL
|| pset
!= &pset0
)
687 return (KERN_INVALID_PROCESSOR_SET
);
689 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
690 register processor_set_load_info_t load_info
;
692 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
693 return(KERN_FAILURE
);
695 load_info
= (processor_set_load_info_t
) info
;
697 load_info
->mach_factor
= sched_mach_factor
;
698 load_info
->load_average
= sched_load_average
;
700 load_info
->task_count
= tasks_count
;
701 load_info
->thread_count
= threads_count
;
703 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
704 return(KERN_SUCCESS
);
707 return(KERN_INVALID_ARGUMENT
);
711 * processor_set_max_priority:
713 * Specify max priority permitted on processor set. This affects
714 * newly created and assigned threads. Optionally change existing
718 processor_set_max_priority(
719 __unused processor_set_t pset
,
720 __unused
int max_priority
,
721 __unused boolean_t change_threads
)
723 return (KERN_INVALID_ARGUMENT
);
727 * processor_set_policy_enable:
729 * Allow indicated policy on processor set.
733 processor_set_policy_enable(
734 __unused processor_set_t pset
,
737 return (KERN_INVALID_ARGUMENT
);
741 * processor_set_policy_disable:
743 * Forbid indicated policy on processor set. Time sharing cannot
747 processor_set_policy_disable(
748 __unused processor_set_t pset
,
750 __unused boolean_t change_threads
)
752 return (KERN_INVALID_ARGUMENT
);
756 #define THING_THREAD 1
759 * processor_set_things:
761 * Common internals for processor_set_{threads,tasks}
764 processor_set_things(
765 processor_set_t pset
,
766 mach_port_t
**thing_list
,
767 mach_msg_type_number_t
*count
,
770 unsigned int actual
; /* this many things */
771 unsigned int maxthings
;
774 vm_size_t size
, size_needed
;
777 if (pset
== PROCESSOR_SET_NULL
|| pset
!= &pset0
)
778 return (KERN_INVALID_ARGUMENT
);
784 lck_mtx_lock(&tasks_threads_lock
);
786 if (type
== THING_TASK
)
787 maxthings
= tasks_count
;
789 maxthings
= threads_count
;
791 /* do we have the memory we need? */
793 size_needed
= maxthings
* sizeof (mach_port_t
);
794 if (size_needed
<= size
)
797 /* unlock and allocate more memory */
798 lck_mtx_unlock(&tasks_threads_lock
);
803 assert(size_needed
> 0);
808 return (KERN_RESOURCE_SHORTAGE
);
811 /* OK, have memory and the list locked */
817 task_t task
, *task_list
= (task_t
*)addr
;
819 for (task
= (task_t
)queue_first(&tasks
);
820 !queue_end(&tasks
, (queue_entry_t
)task
);
821 task
= (task_t
)queue_next(&task
->tasks
)) {
822 #if defined(SECURE_KERNEL)
823 if (task
!= kernel_task
) {
825 task_reference_internal(task
);
826 task_list
[actual
++] = task
;
827 #if defined(SECURE_KERNEL)
836 thread_t thread
, *thread_list
= (thread_t
*)addr
;
838 for (thread
= (thread_t
)queue_first(&threads
);
839 !queue_end(&threads
, (queue_entry_t
)thread
);
840 thread
= (thread_t
)queue_next(&thread
->threads
)) {
841 thread_reference_internal(thread
);
842 thread_list
[actual
++] = thread
;
850 lck_mtx_unlock(&tasks_threads_lock
);
852 if (actual
< maxthings
)
853 size_needed
= actual
* sizeof (mach_port_t
);
856 /* no things, so return null pointer and deallocate memory */
864 /* if we allocated too much, must copy */
866 if (size_needed
< size
) {
869 newaddr
= kalloc(size_needed
);
874 task_t
*task_list
= (task_t
*)addr
;
876 for (i
= 0; i
< actual
; i
++)
877 task_deallocate(task_list
[i
]);
882 thread_t
*thread_list
= (thread_t
*)addr
;
884 for (i
= 0; i
< actual
; i
++)
885 thread_deallocate(thread_list
[i
]);
892 return (KERN_RESOURCE_SHORTAGE
);
895 bcopy((void *) addr
, (void *) newaddr
, size_needed
);
900 *thing_list
= (mach_port_t
*)addr
;
903 /* do the conversion that Mig should handle */
908 task_t
*task_list
= (task_t
*)addr
;
910 for (i
= 0; i
< actual
; i
++)
911 (*thing_list
)[i
] = convert_task_to_port(task_list
[i
]);
916 thread_t
*thread_list
= (thread_t
*)addr
;
918 for (i
= 0; i
< actual
; i
++)
919 (*thing_list
)[i
] = convert_thread_to_port(thread_list
[i
]);
926 return (KERN_SUCCESS
);
931 * processor_set_tasks:
933 * List all tasks in the processor set.
937 processor_set_t pset
,
938 task_array_t
*task_list
,
939 mach_msg_type_number_t
*count
)
941 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
945 * processor_set_threads:
947 * List all threads in the processor set.
949 #if defined(SECURE_KERNEL)
951 processor_set_threads(
952 __unused processor_set_t pset
,
953 __unused thread_array_t
*thread_list
,
954 __unused mach_msg_type_number_t
*count
)
958 #elif defined(CONFIG_EMBEDDED)
960 processor_set_threads(
961 __unused processor_set_t pset
,
962 __unused thread_array_t
*thread_list
,
963 __unused mach_msg_type_number_t
*count
)
965 return KERN_NOT_SUPPORTED
;
969 processor_set_threads(
970 processor_set_t pset
,
971 thread_array_t
*thread_list
,
972 mach_msg_type_number_t
*count
)
974 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
979 * processor_set_policy_control
981 * Controls the scheduling attributes governing the processor set.
982 * Allows control of enabled policies, and per-policy base and limit
986 processor_set_policy_control(
987 __unused processor_set_t pset
,
989 __unused processor_set_info_t policy_info
,
990 __unused mach_msg_type_number_t count
,
991 __unused boolean_t change
)
993 return (KERN_INVALID_ARGUMENT
);
996 #undef pset_deallocate
997 void pset_deallocate(processor_set_t pset
);
1000 __unused processor_set_t pset
)
1005 #undef pset_reference
1006 void pset_reference(processor_set_t pset
);
1009 __unused processor_set_t pset
)