2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
60 * processor.c: processor and processor_set manipulation routines.
63 #include <mach/boolean.h>
64 #include <mach/policy.h>
65 #include <mach/processor.h>
66 #include <mach/processor_info.h>
67 #include <mach/vm_param.h>
68 #include <kern/cpu_number.h>
69 #include <kern/host.h>
70 #include <kern/machine.h>
71 #include <kern/misc_protos.h>
72 #include <kern/processor.h>
73 #include <kern/sched.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/ipc_host.h>
77 #include <kern/ipc_tt.h>
78 #include <ipc/ipc_port.h>
79 #include <kern/kalloc.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/processor_set_server.h>
/* The boot (default) processor set and the node that owns it. */
struct processor_set	pset0;
struct pset_node	pset_node0;
/* Protects the list of psets hanging off pset_node0 (see pset_create). */
decl_simple_lock_data(static,pset_node_lock)

queue_head_t		terminated_tasks;	/* To be used ONLY for stackshot. */

/* Serializes walks of the global tasks/threads queues (see processor_set_things). */
decl_lck_mtx_data(,tasks_threads_lock)

/* Singly linked list of all processors; guarded by processor_list_lock. */
processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

/* NOTE(review): presumably the count of processors available for
 * scheduling -- no writer is visible in this chunk; confirm. */
uint32_t		processor_avail_count;

/* The bootstrap processor; set in processor_bootstrap(). */
processor_t		master_processor;

/* Scheduler statistics collection is off by default. */
boolean_t		sched_stats_active = FALSE;
/*
 * Forward declaration of the common task/thread enumeration helper
 * defined near the bottom of this file.
 * NOTE(review): the trailing parameter (`int type`) and closing `);`
 * are not visible in this chunk -- the declaration appears truncated;
 * confirm against the definition below.
 */
kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t		**thing_list,
		mach_msg_type_number_t	*count,
/*
 * processor_bootstrap:
 *
 * Set up the boot processor set (pset0) on pset_node0, initialize the
 * global queues and locks, and initialize the master processor.
 * Runs before sched_init() (per the deferred-init comments in
 * processor_init / pset_init).
 * NOTE(review): the return type line and braces are not visible here.
 */
processor_bootstrap(void)
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&terminated_tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* The per-cpu structure of master_cpu becomes the master processor. */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
/*
 * processor_init:
 *
 * Initialize the given processor for the cpu
 * indicated by cpu_id, and assign to the
 * specified processor set.
 *
 * NOTE(review): the function header, `int cpu_id` parameter line,
 * `else` keywords, and several closing braces are not visible in this
 * chunk -- structure below is as extracted.
 */
	processor_t		processor,
	processor_set_t		pset)
	if (processor != master_processor) {
		/* Scheduler state deferred until sched_init() */
		SCHED(processor_init)(processor);

	/* Every processor starts offline with no threads attached. */
	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->cpu_id = cpu_id;
	/* Quantum expiration on this processor fires thread_quantum_expire. */
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_meta = PROCESSOR_META_NULL;
	/* IPC port is created later (see ipc_processor_init in processor_start). */
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	/* Maintain the pset's [cpu_set_low, cpu_set_hi] cpu_id range. */
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
	pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;

	/* Append to the global processor list under processor_list_lock. */
	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	simple_unlock(&processor_list_lock);
/*
 * Lazily allocate the processor_meta structure shared with `primary`,
 * then point this processor at it.
 * NOTE(review): the function name line (processor_meta_init?), the
 * `primary` parameter line, and closing braces are not visible here.
 */
	processor_t		processor,
	processor_meta_t	pmeta = primary->processor_meta;

	if (pmeta == PROCESSOR_META_NULL) {
		/* First secondary for this primary: allocate and set up meta. */
		pmeta = kalloc(sizeof (*pmeta));

		queue_init(&pmeta->idle_queue);

		pmeta->primary = primary;

	processor->processor_meta = pmeta;
/*
 * Accessor: return the processor set the processor is assigned to.
 * NOTE(review): the function header line is not visible in this chunk.
 */
	processor_t	processor)
	return (processor->processor_set);
/*
 * Allocate and initialize a new processor set on `node`, then append
 * it to the node's pset list under pset_node_lock.
 * NOTE(review): the function header, the initialization of `prev`,
 * and the list-append statement are not visible in this chunk.
 */
	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		/* Walk to the tail of the node's pset list. */
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		simple_unlock(&pset_node_lock);
/*
 * pset_init:
 *
 * Initialize the given processor_set structure.
 *
 * NOTE(review): the function header, `node` parameter, and closing
 * braces are not visible in this chunk.
 */
	processor_set_t		pset,
	if (pset != &pset0) {
		/* Scheduler state deferred until sched_init() */
		SCHED(pset_init)(pset);

	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	pset->online_processor_count = 0;
	/* No hint processors yet. */
	pset_pri_init_hint(pset, PROCESSOR_NULL);
	pset_count_init_hint(pset, PROCESSOR_NULL);
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset_lock_init(pset);
	/* IPC ports created later by ipc_pset_init / host IPC setup. */
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
/*
 * processor_info_count:
 *
 * Report the info-structure size (in natural_t units) for the given
 * flavor; unknown flavors are delegated to the machine layer via
 * cpu_info_count().
 * NOTE(review): the return type, switch statement, break/default
 * lines, and braces are not visible in this chunk.
 */
processor_info_count(
	processor_flavor_t	flavor,
	mach_msg_type_number_t	*count)
	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;

		/* Machine-dependent flavors. */
		return (cpu_info_count(flavor, count));

	return (KERN_SUCCESS);
/*
 * processor_info:
 *
 * Return information about a processor for the requested flavor.
 * PROCESSOR_BASIC_INFO fills type/subtype/running/slot/is_master;
 * PROCESSOR_CPU_LOAD_INFO converts the per-processor user/system/idle
 * timers into clock ticks; other flavors are delegated to cpu_info().
 * Returns KERN_INVALID_ARGUMENT for a null processor, KERN_FAILURE
 * when *count is too small for the flavor.
 * NOTE(review): the function header, switch statement, else keywords,
 * and several braces are not visible in this chunk.
 */
	register processor_t	processor,
	processor_flavor_t	flavor,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	case PROCESSOR_BASIC_INFO:
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		/* Any state other than OFF_LINE counts as running. */
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);

	case PROCESSOR_CPU_LOAD_INFO:
		processor_cpu_load_info_t	cpu_load_info;
		timer_data_t			idle_temp;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			/* Separate user/system timers are maintained; report each. */
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
			/* Imprecise timekeeping: lump user+system into USER, zero SYSTEM. */
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_temp = *idle_state;		/* snapshot the idle timer */

		/*
		 * If the processor is not currently idling, or the idle timer
		 * advanced while we copied it, the snapshot is consistent --
		 * report the recorded idle time.  Otherwise extend the
		 * snapshot to "now" before reporting.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state ||
			timer_grab(&idle_temp) != timer_grab(idle_state)) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
			timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);

		/* NICE time is not tracked. */
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);

		/* Machine-dependent flavors. */
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
/*
 * processor_start:
 *
 * Bring a processor online.  The master processor is (re)started by
 * binding the calling thread to it and calling cpu_start() directly.
 * For other processors: transition OFF_LINE -> START, lazily create
 * the idle thread, create a dedicated start-up thread if the
 * processor has never run one, create its IPC port if needed, then
 * cpu_start() it.  On any failure the state is rolled back to
 * PROCESSOR_OFF_LINE.
 * NOTE(review): the function header, pset lock/unlock calls, splsched
 * bracketing, else branches, and several braces are not visible in
 * this chunk -- structure below is as extracted.
 */
	processor_t			processor)
	processor_set_t		pset;
	kern_return_t		result;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		/* Run on the master processor itself while starting it. */
		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

	pset = processor->processor_set;
	if (processor->state != PROCESSOR_OFF_LINE) {
		/* Already started (or starting); nothing to do. */
		return (KERN_FAILURE);

	processor->state = PROCESSOR_START;

	/*
	 * Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			/* Roll back to offline on failure. */
			processor->state = PROCESSOR_OFF_LINE;

	/*
	 * If there is no active thread, the processor
	 * has never been started.  Create a dedicated
	 * start-up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			processor->state = PROCESSOR_OFF_LINE;

		/* Bind the start-up thread so it runs first on this processor. */
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);

		/* Drop the creation reference; the processor now holds it. */
		thread_deallocate(thread);

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		processor->state = PROCESSOR_OFF_LINE;

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
/*
 * Take a processor offline by delegating to processor_shutdown().
 * NOTE(review): the function header line (processor_exit?) is not
 * visible in this chunk.
 */
	processor_t	processor)
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
/*
 * Pass a machine-dependent control command through to the cpu layer.
 * NOTE(review): the function header line (processor_control?) is not
 * visible in this chunk.
 */
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->cpu_id, info, count));
/*
 * processor_set_create:
 *
 * User-visible pset creation is not supported; always fails.
 */
processor_set_create(
	__unused host_t		host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
	return(KERN_FAILURE);
/*
 * processor_set_destroy:
 *
 * User-visible pset destruction is not supported; always fails.
 */
processor_set_destroy(
	__unused processor_set_t	pset)
	return(KERN_FAILURE);
/*
 * processor_get_assignment:
 *
 * Return the processor set a running processor belongs to; fails for
 * processors that are shut down or offline.
 * NOTE(review): the declaration of `state` and the assignment of
 * *pset before the success return are not visible in this chunk.
 */
processor_get_assignment(
	processor_t	processor,
	processor_set_t	*pset)
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	state = processor->state;
	/* Shut-down or offline processors have no meaningful assignment. */
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
/*
 * processor_set_info:
 *
 * Return scheduling-policy information about a processor set.  Each
 * flavor checks that the caller's buffer (*count) is large enough,
 * fills the corresponding structure, and updates *count.  Unknown
 * flavors return KERN_INVALID_ARGUMENT.
 * NOTE(review): the function header, `flavor` parameter line, and
 * closing braces of each branch are not visible in this chunk.
 */
	processor_set_t		pset,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;

		return(KERN_SUCCESS);
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int			*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		/* All three policies are reported as enabled. */
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);

		return(KERN_SUCCESS);

	return(KERN_INVALID_ARGUMENT);
/*
 * processor_set_statistics
 *
 * Returns scheduling statistics for a processor set.
 * Only pset0 is valid (single processor set system); only the
 * PROCESSOR_SET_LOAD_INFO flavor is supported.
 * NOTE(review): the return type, `flavor` parameter line, and some
 * closing braces are not visible in this chunk.
 */
processor_set_statistics(
	processor_set_t		pset,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);

	return(KERN_INVALID_ARGUMENT);
/*
 * processor_set_max_priority:
 *
 * Specify max priority permitted on processor set.  This affects
 * newly created and assigned threads.  Optionally change existing
 * ones.  Not supported: always returns KERN_INVALID_ARGUMENT.
 */
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
	return (KERN_INVALID_ARGUMENT);
/*
 * processor_set_policy_enable:
 *
 * Allow indicated policy on processor set.
 * Not supported: always returns KERN_INVALID_ARGUMENT.
 * NOTE(review): the `policy` parameter line is not visible here.
 */
processor_set_policy_enable(
	__unused processor_set_t	pset,
	return (KERN_INVALID_ARGUMENT);
/*
 * processor_set_policy_disable:
 *
 * Forbid indicated policy on processor set.  Time sharing cannot
 * be forbidden.  Not supported: always returns KERN_INVALID_ARGUMENT.
 * NOTE(review): the `policy` parameter line is not visible here.
 */
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused boolean_t		change_threads)
	return (KERN_INVALID_ARGUMENT);
/* Selector values for processor_set_things().
 * NOTE(review): the companion `#define THING_TASK 0` is not visible
 * in this chunk but is implied by the THING_TASK uses below. */
#define THING_THREAD	1

/*
 * processor_set_things:
 *
 * Common internals for processor_set_{threads,tasks}.  Snapshots the
 * global tasks or threads queue (under tasks_threads_lock) into a
 * freshly sized allocation, takes a reference on each element, then
 * converts the typed pointers in place into Mach ports for the caller.
 * Returns KERN_INVALID_ARGUMENT for any pset other than pset0, and
 * KERN_RESOURCE_SHORTAGE if an allocation fails.
 * NOTE(review): the return type, `int type` parameter, allocation
 * loop, several else branches, kfree calls, and braces are not
 * visible in this chunk -- structure below is as extracted.
 */
processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	unsigned int actual;	/* this many things */
	unsigned int maxthings;

	vm_size_t size, size_needed;

	/* Only the single boot pset is valid. */
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	lck_mtx_lock(&tasks_threads_lock);

	if (type == THING_TASK)
		maxthings = tasks_count;
		maxthings = threads_count;

	/* do we have the memory we need? */
	size_needed = maxthings * sizeof (mach_port_t);
	if (size_needed <= size)

	/* unlock and allocate more memory */
	lck_mtx_unlock(&tasks_threads_lock);

	assert(size_needed > 0);

		return (KERN_RESOURCE_SHORTAGE);

	/* OK, have memory and the list locked */

		/* THING_TASK: copy and reference each task. */
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			/* Secure kernels exclude the kernel task from the listing. */
			if (task != kernel_task) {
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)

		/* THING_THREAD: copy and reference each thread. */
		thread_t thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

		/* no things, so return null pointer and deallocate memory */

		/* if we allocated too much, must copy */

		if (size_needed < size) {
			newaddr = kalloc(size_needed);
				/* Allocation of right-sized buffer failed:
				 * drop all references taken above and bail. */
				task_t *task_list = (task_t *)addr;

				for (i = 0; i < actual; i++)
					task_deallocate(task_list[i]);

				thread_t *thread_list = (thread_t *)addr;

				for (i = 0; i < actual; i++)
					thread_deallocate(thread_list[i]);

				return (KERN_RESOURCE_SHORTAGE);

			bcopy((void *) addr, (void *) newaddr, size_needed);

		*thing_list = (mach_port_t *)addr;

		/* do the conversion that Mig should handle */

			task_t *task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);

			thread_t *thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);

	return (KERN_SUCCESS);
/*
 * processor_set_tasks:
 *
 * List all tasks in the processor set.  Thin wrapper over
 * processor_set_things() with type THING_TASK; returned entries carry
 * a task reference converted to a port right.
 */
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
/*
 * processor_set_threads:
 *
 * List all threads in the processor set.
 * Three build variants: SECURE_KERNEL (body not visible in this
 * chunk -- presumably a failure return; confirm), CONFIG_EMBEDDED
 * (KERN_NOT_SUPPORTED), and the full variant that delegates to
 * processor_set_things() with THING_THREAD.
 */
#if defined(SECURE_KERNEL)
processor_set_threads(
	__unused processor_set_t	pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
#elif defined(CONFIG_EMBEDDED)
processor_set_threads(
	__unused processor_set_t	pset,
	__unused thread_array_t		*thread_list,
	__unused mach_msg_type_number_t	*count)
	return KERN_NOT_SUPPORTED;
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
/*
 * processor_set_policy_control
 *
 * Controls the scheduling attributes governing the processor set.
 * Allows control of enabled policies, and per-policy base and limit
 * priorities.  Not supported: always returns KERN_INVALID_ARGUMENT.
 * NOTE(review): the `flavor` parameter line is not visible here.
 */
processor_set_policy_control(
	__unused processor_set_t	pset,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		change)
	return (KERN_INVALID_ARGUMENT);
/* Out-of-line definition of pset_deallocate: undo the macro so a real
 * symbol exists for external callers.  Body is a no-op (psets are
 * never destroyed on this configuration).
 * NOTE(review): the body braces are not visible in this chunk. */
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
	__unused processor_set_t	pset)
/* Out-of-line definition of pset_reference: undo the macro so a real
 * symbol exists for external callers.  Body is a no-op (psets are
 * never reference-counted on this configuration).
 * NOTE(review): the body braces are not visible in this chunk. */
#undef pset_reference
void pset_reference(processor_set_t pset);
	__unused processor_set_t	pset)