/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
87 struct processor_set pset0
;
88 struct pset_node pset_node0
;
89 decl_simple_lock_data(static,pset_node_lock
)
92 queue_head_t terminated_tasks
; /* To be used ONLY for stackshot. */
96 decl_lck_mtx_data(,tasks_threads_lock
)
98 processor_t processor_list
;
99 unsigned int processor_count
;
100 static processor_t processor_list_tail
;
101 decl_simple_lock_data(,processor_list_lock
)
103 uint32_t processor_avail_count
;
105 processor_t master_processor
;
107 boolean_t sched_stats_active
= FALSE
;
110 kern_return_t
processor_set_things(
111 processor_set_t pset
,
112 mach_port_t
**thing_list
,
113 mach_msg_type_number_t
*count
,
117 processor_bootstrap(void)
119 pset_init(&pset0
, &pset_node0
);
120 pset_node0
.psets
= &pset0
;
122 simple_lock_init(&pset_node_lock
, 0);
125 queue_init(&terminated_tasks
);
126 queue_init(&threads
);
128 simple_lock_init(&processor_list_lock
, 0);
130 master_processor
= cpu_to_processor(master_cpu
);
132 processor_init(master_processor
, master_cpu
, &pset0
);
136 * Initialize the given processor for the cpu
137 * indicated by cpu_id, and assign to the
138 * specified processor set.
142 processor_t processor
,
144 processor_set_t pset
)
146 if (processor
!= master_processor
) {
147 /* Scheduler state deferred until sched_init() */
148 SCHED(processor_init
)(processor
);
151 processor
->state
= PROCESSOR_OFF_LINE
;
152 processor
->active_thread
= processor
->next_thread
= processor
->idle_thread
= THREAD_NULL
;
153 processor
->processor_set
= pset
;
154 processor
->current_pri
= MINPRI
;
155 processor
->current_thmode
= TH_MODE_NONE
;
156 processor
->cpu_id
= cpu_id
;
157 timer_call_setup(&processor
->quantum_timer
, thread_quantum_expire
, processor
);
158 processor
->deadline
= UINT64_MAX
;
159 processor
->timeslice
= 0;
160 processor
->processor_meta
= PROCESSOR_META_NULL
;
161 processor
->processor_self
= IP_NULL
;
162 processor_data_init(processor
);
163 processor
->processor_list
= NULL
;
166 if (pset
->cpu_set_count
++ == 0)
167 pset
->cpu_set_low
= pset
->cpu_set_hi
= cpu_id
;
169 pset
->cpu_set_low
= (cpu_id
< pset
->cpu_set_low
)? cpu_id
: pset
->cpu_set_low
;
170 pset
->cpu_set_hi
= (cpu_id
> pset
->cpu_set_hi
)? cpu_id
: pset
->cpu_set_hi
;
174 simple_lock(&processor_list_lock
);
175 if (processor_list
== NULL
)
176 processor_list
= processor
;
178 processor_list_tail
->processor_list
= processor
;
179 processor_list_tail
= processor
;
181 simple_unlock(&processor_list_lock
);
186 processor_t processor
,
189 processor_meta_t pmeta
= primary
->processor_meta
;
191 if (pmeta
== PROCESSOR_META_NULL
) {
192 pmeta
= kalloc(sizeof (*pmeta
));
194 queue_init(&pmeta
->idle_queue
);
196 pmeta
->primary
= primary
;
199 processor
->processor_meta
= pmeta
;
204 processor_t processor
)
206 return (processor
->processor_set
);
219 processor_set_t
*prev
, pset
= kalloc(sizeof (*pset
));
221 if (pset
!= PROCESSOR_SET_NULL
) {
222 pset_init(pset
, node
);
224 simple_lock(&pset_node_lock
);
227 while (*prev
!= PROCESSOR_SET_NULL
)
228 prev
= &(*prev
)->pset_list
;
232 simple_unlock(&pset_node_lock
);
239 * Initialize the given processor_set structure.
243 processor_set_t pset
,
246 if (pset
!= &pset0
) {
247 /* Scheduler state deferred until sched_init() */
248 SCHED(pset_init
)(pset
);
251 queue_init(&pset
->active_queue
);
252 queue_init(&pset
->idle_queue
);
253 pset
->online_processor_count
= 0;
254 pset_pri_init_hint(pset
, PROCESSOR_NULL
);
255 pset_count_init_hint(pset
, PROCESSOR_NULL
);
256 pset
->cpu_set_low
= pset
->cpu_set_hi
= 0;
257 pset
->cpu_set_count
= 0;
258 pset_lock_init(pset
);
259 pset
->pset_self
= IP_NULL
;
260 pset
->pset_name_self
= IP_NULL
;
261 pset
->pset_list
= PROCESSOR_SET_NULL
;
266 processor_info_count(
267 processor_flavor_t flavor
,
268 mach_msg_type_number_t
*count
)
272 case PROCESSOR_BASIC_INFO
:
273 *count
= PROCESSOR_BASIC_INFO_COUNT
;
276 case PROCESSOR_CPU_LOAD_INFO
:
277 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
281 return (cpu_info_count(flavor
, count
));
284 return (KERN_SUCCESS
);
290 register processor_t processor
,
291 processor_flavor_t flavor
,
293 processor_info_t info
,
294 mach_msg_type_number_t
*count
)
296 register int cpu_id
, state
;
297 kern_return_t result
;
299 if (processor
== PROCESSOR_NULL
)
300 return (KERN_INVALID_ARGUMENT
);
302 cpu_id
= processor
->cpu_id
;
306 case PROCESSOR_BASIC_INFO
:
308 register processor_basic_info_t basic_info
;
310 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
311 return (KERN_FAILURE
);
313 basic_info
= (processor_basic_info_t
) info
;
314 basic_info
->cpu_type
= slot_type(cpu_id
);
315 basic_info
->cpu_subtype
= slot_subtype(cpu_id
);
316 state
= processor
->state
;
317 if (state
== PROCESSOR_OFF_LINE
)
318 basic_info
->running
= FALSE
;
320 basic_info
->running
= TRUE
;
321 basic_info
->slot_num
= cpu_id
;
322 if (processor
== master_processor
)
323 basic_info
->is_master
= TRUE
;
325 basic_info
->is_master
= FALSE
;
327 *count
= PROCESSOR_BASIC_INFO_COUNT
;
330 return (KERN_SUCCESS
);
333 case PROCESSOR_CPU_LOAD_INFO
:
335 register processor_cpu_load_info_t cpu_load_info
;
337 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
338 return (KERN_FAILURE
);
340 cpu_load_info
= (processor_cpu_load_info_t
) info
;
341 cpu_load_info
->cpu_ticks
[CPU_STATE_USER
] =
342 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, user_state
)) / hz_tick_interval
);
343 cpu_load_info
->cpu_ticks
[CPU_STATE_SYSTEM
] =
344 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, system_state
)) / hz_tick_interval
);
346 timer_data_t idle_temp
;
349 idle_state
= &PROCESSOR_DATA(processor
, idle_state
);
350 idle_temp
= *idle_state
;
352 if (PROCESSOR_DATA(processor
, current_state
) != idle_state
||
353 timer_grab(&idle_temp
) != timer_grab(idle_state
))
354 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
355 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, idle_state
)) / hz_tick_interval
);
357 timer_advance(&idle_temp
, mach_absolute_time() - idle_temp
.tstamp
);
359 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
360 (uint32_t)(timer_grab(&idle_temp
) / hz_tick_interval
);
363 cpu_load_info
->cpu_ticks
[CPU_STATE_NICE
] = 0;
365 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
368 return (KERN_SUCCESS
);
372 result
= cpu_info(flavor
, cpu_id
, info
, count
);
373 if (result
== KERN_SUCCESS
)
382 processor_t processor
)
384 processor_set_t pset
;
386 kern_return_t result
;
389 if (processor
== PROCESSOR_NULL
|| processor
->processor_set
== PROCESSOR_SET_NULL
)
390 return (KERN_INVALID_ARGUMENT
);
392 if (processor
== master_processor
) {
395 prev
= thread_bind(processor
);
396 thread_block(THREAD_CONTINUE_NULL
);
398 result
= cpu_start(processor
->cpu_id
);
406 pset
= processor
->processor_set
;
408 if (processor
->state
!= PROCESSOR_OFF_LINE
) {
412 return (KERN_FAILURE
);
415 processor
->state
= PROCESSOR_START
;
420 * Create the idle processor thread.
422 if (processor
->idle_thread
== THREAD_NULL
) {
423 result
= idle_thread_create(processor
);
424 if (result
!= KERN_SUCCESS
) {
427 processor
->state
= PROCESSOR_OFF_LINE
;
436 * If there is no active thread, the processor
437 * has never been started. Create a dedicated
440 if ( processor
->active_thread
== THREAD_NULL
&&
441 processor
->next_thread
== THREAD_NULL
) {
442 result
= kernel_thread_create((thread_continue_t
)processor_start_thread
, NULL
, MAXPRI_KERNEL
, &thread
);
443 if (result
!= KERN_SUCCESS
) {
446 processor
->state
= PROCESSOR_OFF_LINE
;
455 thread
->bound_processor
= processor
;
456 processor
->next_thread
= thread
;
457 thread
->state
= TH_RUN
;
458 thread_unlock(thread
);
461 thread_deallocate(thread
);
464 if (processor
->processor_self
== IP_NULL
)
465 ipc_processor_init(processor
);
467 result
= cpu_start(processor
->cpu_id
);
468 if (result
!= KERN_SUCCESS
) {
471 processor
->state
= PROCESSOR_OFF_LINE
;
478 ipc_processor_enable(processor
);
480 return (KERN_SUCCESS
);
485 processor_t processor
)
487 if (processor
== PROCESSOR_NULL
)
488 return(KERN_INVALID_ARGUMENT
);
490 return(processor_shutdown(processor
));
495 processor_t processor
,
496 processor_info_t info
,
497 mach_msg_type_number_t count
)
499 if (processor
== PROCESSOR_NULL
)
500 return(KERN_INVALID_ARGUMENT
);
502 return(cpu_control(processor
->cpu_id
, info
, count
));
506 processor_set_create(
507 __unused host_t host
,
508 __unused processor_set_t
*new_set
,
509 __unused processor_set_t
*new_name
)
511 return(KERN_FAILURE
);
515 processor_set_destroy(
516 __unused processor_set_t pset
)
518 return(KERN_FAILURE
);
522 processor_get_assignment(
523 processor_t processor
,
524 processor_set_t
*pset
)
528 state
= processor
->state
;
529 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
530 return(KERN_FAILURE
);
534 return(KERN_SUCCESS
);
539 processor_set_t pset
,
542 processor_set_info_t info
,
543 mach_msg_type_number_t
*count
)
545 if (pset
== PROCESSOR_SET_NULL
)
546 return(KERN_INVALID_ARGUMENT
);
548 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
549 register processor_set_basic_info_t basic_info
;
551 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
552 return(KERN_FAILURE
);
554 basic_info
= (processor_set_basic_info_t
) info
;
555 basic_info
->processor_count
= processor_avail_count
;
556 basic_info
->default_policy
= POLICY_TIMESHARE
;
558 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
560 return(KERN_SUCCESS
);
562 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
563 register policy_timeshare_base_t ts_base
;
565 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
566 return(KERN_FAILURE
);
568 ts_base
= (policy_timeshare_base_t
) info
;
569 ts_base
->base_priority
= BASEPRI_DEFAULT
;
571 *count
= POLICY_TIMESHARE_BASE_COUNT
;
573 return(KERN_SUCCESS
);
575 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
576 register policy_fifo_base_t fifo_base
;
578 if (*count
< POLICY_FIFO_BASE_COUNT
)
579 return(KERN_FAILURE
);
581 fifo_base
= (policy_fifo_base_t
) info
;
582 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
584 *count
= POLICY_FIFO_BASE_COUNT
;
586 return(KERN_SUCCESS
);
588 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
589 register policy_rr_base_t rr_base
;
591 if (*count
< POLICY_RR_BASE_COUNT
)
592 return(KERN_FAILURE
);
594 rr_base
= (policy_rr_base_t
) info
;
595 rr_base
->base_priority
= BASEPRI_DEFAULT
;
596 rr_base
->quantum
= 1;
598 *count
= POLICY_RR_BASE_COUNT
;
600 return(KERN_SUCCESS
);
602 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
603 register policy_timeshare_limit_t ts_limit
;
605 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
606 return(KERN_FAILURE
);
608 ts_limit
= (policy_timeshare_limit_t
) info
;
609 ts_limit
->max_priority
= MAXPRI_KERNEL
;
611 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
613 return(KERN_SUCCESS
);
615 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
616 register policy_fifo_limit_t fifo_limit
;
618 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
619 return(KERN_FAILURE
);
621 fifo_limit
= (policy_fifo_limit_t
) info
;
622 fifo_limit
->max_priority
= MAXPRI_KERNEL
;
624 *count
= POLICY_FIFO_LIMIT_COUNT
;
626 return(KERN_SUCCESS
);
628 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
629 register policy_rr_limit_t rr_limit
;
631 if (*count
< POLICY_RR_LIMIT_COUNT
)
632 return(KERN_FAILURE
);
634 rr_limit
= (policy_rr_limit_t
) info
;
635 rr_limit
->max_priority
= MAXPRI_KERNEL
;
637 *count
= POLICY_RR_LIMIT_COUNT
;
639 return(KERN_SUCCESS
);
641 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
642 register int *enabled
;
644 if (*count
< (sizeof(*enabled
)/sizeof(int)))
645 return(KERN_FAILURE
);
647 enabled
= (int *) info
;
648 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
650 *count
= sizeof(*enabled
)/sizeof(int);
652 return(KERN_SUCCESS
);
657 return(KERN_INVALID_ARGUMENT
);
661 * processor_set_statistics
663 * Returns scheduling statistics for a processor set.
666 processor_set_statistics(
667 processor_set_t pset
,
669 processor_set_info_t info
,
670 mach_msg_type_number_t
*count
)
672 if (pset
== PROCESSOR_SET_NULL
|| pset
!= &pset0
)
673 return (KERN_INVALID_PROCESSOR_SET
);
675 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
676 register processor_set_load_info_t load_info
;
678 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
679 return(KERN_FAILURE
);
681 load_info
= (processor_set_load_info_t
) info
;
683 load_info
->mach_factor
= sched_mach_factor
;
684 load_info
->load_average
= sched_load_average
;
686 load_info
->task_count
= tasks_count
;
687 load_info
->thread_count
= threads_count
;
689 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
690 return(KERN_SUCCESS
);
693 return(KERN_INVALID_ARGUMENT
);
697 * processor_set_max_priority:
699 * Specify max priority permitted on processor set. This affects
700 * newly created and assigned threads. Optionally change existing
704 processor_set_max_priority(
705 __unused processor_set_t pset
,
706 __unused
int max_priority
,
707 __unused boolean_t change_threads
)
709 return (KERN_INVALID_ARGUMENT
);
713 * processor_set_policy_enable:
715 * Allow indicated policy on processor set.
719 processor_set_policy_enable(
720 __unused processor_set_t pset
,
723 return (KERN_INVALID_ARGUMENT
);
727 * processor_set_policy_disable:
729 * Forbid indicated policy on processor set. Time sharing cannot
733 processor_set_policy_disable(
734 __unused processor_set_t pset
,
736 __unused boolean_t change_threads
)
738 return (KERN_INVALID_ARGUMENT
);
742 #define THING_THREAD 1
745 * processor_set_things:
747 * Common internals for processor_set_{threads,tasks}
750 processor_set_things(
751 processor_set_t pset
,
752 mach_port_t
**thing_list
,
753 mach_msg_type_number_t
*count
,
756 unsigned int actual
; /* this many things */
757 unsigned int maxthings
;
760 vm_size_t size
, size_needed
;
763 if (pset
== PROCESSOR_SET_NULL
|| pset
!= &pset0
)
764 return (KERN_INVALID_ARGUMENT
);
770 lck_mtx_lock(&tasks_threads_lock
);
772 if (type
== THING_TASK
)
773 maxthings
= tasks_count
;
775 maxthings
= threads_count
;
777 /* do we have the memory we need? */
779 size_needed
= maxthings
* sizeof (mach_port_t
);
780 if (size_needed
<= size
)
783 /* unlock and allocate more memory */
784 lck_mtx_unlock(&tasks_threads_lock
);
789 assert(size_needed
> 0);
794 return (KERN_RESOURCE_SHORTAGE
);
797 /* OK, have memory and the list locked */
803 task_t task
, *task_list
= (task_t
*)addr
;
805 for (task
= (task_t
)queue_first(&tasks
);
806 !queue_end(&tasks
, (queue_entry_t
)task
);
807 task
= (task_t
)queue_next(&task
->tasks
)) {
808 #if defined(SECURE_KERNEL)
809 if (task
!= kernel_task
) {
811 task_reference_internal(task
);
812 task_list
[actual
++] = task
;
813 #if defined(SECURE_KERNEL)
822 thread_t thread
, *thread_list
= (thread_t
*)addr
;
824 for (thread
= (thread_t
)queue_first(&threads
);
825 !queue_end(&threads
, (queue_entry_t
)thread
);
826 thread
= (thread_t
)queue_next(&thread
->threads
)) {
827 thread_reference_internal(thread
);
828 thread_list
[actual
++] = thread
;
836 lck_mtx_unlock(&tasks_threads_lock
);
838 if (actual
< maxthings
)
839 size_needed
= actual
* sizeof (mach_port_t
);
842 /* no things, so return null pointer and deallocate memory */
850 /* if we allocated too much, must copy */
852 if (size_needed
< size
) {
855 newaddr
= kalloc(size_needed
);
860 task_t
*task_list
= (task_t
*)addr
;
862 for (i
= 0; i
< actual
; i
++)
863 task_deallocate(task_list
[i
]);
868 thread_t
*thread_list
= (thread_t
*)addr
;
870 for (i
= 0; i
< actual
; i
++)
871 thread_deallocate(thread_list
[i
]);
878 return (KERN_RESOURCE_SHORTAGE
);
881 bcopy((void *) addr
, (void *) newaddr
, size_needed
);
886 *thing_list
= (mach_port_t
*)addr
;
889 /* do the conversion that Mig should handle */
894 task_t
*task_list
= (task_t
*)addr
;
896 for (i
= 0; i
< actual
; i
++)
897 (*thing_list
)[i
] = convert_task_to_port(task_list
[i
]);
902 thread_t
*thread_list
= (thread_t
*)addr
;
904 for (i
= 0; i
< actual
; i
++)
905 (*thing_list
)[i
] = convert_thread_to_port(thread_list
[i
]);
912 return (KERN_SUCCESS
);
917 * processor_set_tasks:
919 * List all tasks in the processor set.
923 processor_set_t pset
,
924 task_array_t
*task_list
,
925 mach_msg_type_number_t
*count
)
927 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
931 * processor_set_threads:
933 * List all threads in the processor set.
935 #if defined(SECURE_KERNEL)
937 processor_set_threads(
938 __unused processor_set_t pset
,
939 __unused thread_array_t
*thread_list
,
940 __unused mach_msg_type_number_t
*count
)
944 #elif defined(CONFIG_EMBEDDED)
946 processor_set_threads(
947 __unused processor_set_t pset
,
948 __unused thread_array_t
*thread_list
,
949 __unused mach_msg_type_number_t
*count
)
951 return KERN_NOT_SUPPORTED
;
955 processor_set_threads(
956 processor_set_t pset
,
957 thread_array_t
*thread_list
,
958 mach_msg_type_number_t
*count
)
960 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
965 * processor_set_policy_control
967 * Controls the scheduling attributes governing the processor set.
968 * Allows control of enabled policies, and per-policy base and limit
972 processor_set_policy_control(
973 __unused processor_set_t pset
,
975 __unused processor_set_info_t policy_info
,
976 __unused mach_msg_type_number_t count
,
977 __unused boolean_t change
)
979 return (KERN_INVALID_ARGUMENT
);
982 #undef pset_deallocate
983 void pset_deallocate(processor_set_t pset
);
986 __unused processor_set_t pset
)
991 #undef pset_reference
992 void pset_reference(processor_set_t pset
);
995 __unused processor_set_t pset
)