/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
87 struct processor_set pset0
;
88 struct pset_node pset_node0
;
89 decl_simple_lock_data(static,pset_node_lock
)
92 queue_head_t terminated_tasks
; /* To be used ONLY for stackshot. */
94 int terminated_tasks_count
;
97 decl_lck_mtx_data(,tasks_threads_lock
)
99 processor_t processor_list
;
100 unsigned int processor_count
;
101 static processor_t processor_list_tail
;
102 decl_simple_lock_data(,processor_list_lock
)
104 uint32_t processor_avail_count
;
106 processor_t master_processor
;
108 boolean_t sched_stats_active
= FALSE
;
111 kern_return_t
processor_set_things(
112 processor_set_t pset
,
113 mach_port_t
**thing_list
,
114 mach_msg_type_number_t
*count
,
118 processor_bootstrap(void)
120 pset_init(&pset0
, &pset_node0
);
121 pset_node0
.psets
= &pset0
;
123 simple_lock_init(&pset_node_lock
, 0);
126 queue_init(&terminated_tasks
);
127 queue_init(&threads
);
129 simple_lock_init(&processor_list_lock
, 0);
131 master_processor
= cpu_to_processor(master_cpu
);
133 processor_init(master_processor
, master_cpu
, &pset0
);
137 * Initialize the given processor for the cpu
138 * indicated by cpu_id, and assign to the
139 * specified processor set.
143 processor_t processor
,
145 processor_set_t pset
)
149 if (processor
!= master_processor
) {
150 /* Scheduler state deferred until sched_init() */
151 SCHED(processor_init
)(processor
);
154 processor
->state
= PROCESSOR_OFF_LINE
;
155 processor
->active_thread
= processor
->next_thread
= processor
->idle_thread
= THREAD_NULL
;
156 processor
->processor_set
= pset
;
157 processor
->current_pri
= MINPRI
;
158 processor
->current_thmode
= TH_MODE_NONE
;
159 processor
->cpu_id
= cpu_id
;
160 timer_call_setup(&processor
->quantum_timer
, thread_quantum_expire
, processor
);
161 processor
->quantum_end
= UINT64_MAX
;
162 processor
->deadline
= UINT64_MAX
;
163 processor
->timeslice
= 0;
164 processor
->processor_primary
= processor
; /* no SMT relationship known at this point */
165 processor
->processor_secondary
= NULL
;
166 processor
->is_SMT
= FALSE
;
167 processor
->processor_self
= IP_NULL
;
168 processor_data_init(processor
);
169 processor
->processor_list
= NULL
;
173 if (pset
->cpu_set_count
++ == 0)
174 pset
->cpu_set_low
= pset
->cpu_set_hi
= cpu_id
;
176 pset
->cpu_set_low
= (cpu_id
< pset
->cpu_set_low
)? cpu_id
: pset
->cpu_set_low
;
177 pset
->cpu_set_hi
= (cpu_id
> pset
->cpu_set_hi
)? cpu_id
: pset
->cpu_set_hi
;
182 simple_lock(&processor_list_lock
);
183 if (processor_list
== NULL
)
184 processor_list
= processor
;
186 processor_list_tail
->processor_list
= processor
;
187 processor_list_tail
= processor
;
189 simple_unlock(&processor_list_lock
);
193 processor_set_primary(
194 processor_t processor
,
197 assert(processor
->processor_primary
== primary
|| processor
->processor_primary
== processor
);
198 /* Re-adjust primary point for this (possibly) secondary processor */
199 processor
->processor_primary
= primary
;
201 assert(primary
->processor_secondary
== NULL
|| primary
->processor_secondary
== processor
);
202 if (primary
!= processor
) {
203 /* Link primary to secondary, assumes a 2-way SMT model
204 * We'll need to move to a queue if any future architecture
205 * requires otherwise.
207 assert(processor
->processor_secondary
== NULL
);
208 primary
->processor_secondary
= processor
;
209 /* Mark both processors as SMT siblings */
210 primary
->is_SMT
= TRUE
;
211 processor
->is_SMT
= TRUE
;
217 processor_t processor
)
219 return (processor
->processor_set
);
232 #if defined(CONFIG_SCHED_MULTIQ)
233 /* multiq scheduler is not currently compatible with multiple psets */
234 if (sched_groups_enabled
)
235 return processor_pset(master_processor
);
236 #endif /* defined(CONFIG_SCHED_MULTIQ) */
238 processor_set_t
*prev
, pset
= kalloc(sizeof (*pset
));
240 if (pset
!= PROCESSOR_SET_NULL
) {
241 pset_init(pset
, node
);
243 simple_lock(&pset_node_lock
);
246 while (*prev
!= PROCESSOR_SET_NULL
)
247 prev
= &(*prev
)->pset_list
;
251 simple_unlock(&pset_node_lock
);
258 * Initialize the given processor_set structure.
262 processor_set_t pset
,
265 if (pset
!= &pset0
) {
266 /* Scheduler state deferred until sched_init() */
267 SCHED(pset_init
)(pset
);
270 queue_init(&pset
->active_queue
);
271 queue_init(&pset
->idle_queue
);
272 queue_init(&pset
->idle_secondary_queue
);
273 pset
->online_processor_count
= 0;
274 pset
->cpu_set_low
= pset
->cpu_set_hi
= 0;
275 pset
->cpu_set_count
= 0;
276 pset
->pending_AST_cpu_mask
= 0;
277 pset_lock_init(pset
);
278 pset
->pset_self
= IP_NULL
;
279 pset
->pset_name_self
= IP_NULL
;
280 pset
->pset_list
= PROCESSOR_SET_NULL
;
285 processor_info_count(
286 processor_flavor_t flavor
,
287 mach_msg_type_number_t
*count
)
291 case PROCESSOR_BASIC_INFO
:
292 *count
= PROCESSOR_BASIC_INFO_COUNT
;
295 case PROCESSOR_CPU_LOAD_INFO
:
296 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
300 return (cpu_info_count(flavor
, count
));
303 return (KERN_SUCCESS
);
309 register processor_t processor
,
310 processor_flavor_t flavor
,
312 processor_info_t info
,
313 mach_msg_type_number_t
*count
)
315 register int cpu_id
, state
;
316 kern_return_t result
;
318 if (processor
== PROCESSOR_NULL
)
319 return (KERN_INVALID_ARGUMENT
);
321 cpu_id
= processor
->cpu_id
;
325 case PROCESSOR_BASIC_INFO
:
327 register processor_basic_info_t basic_info
;
329 if (*count
< PROCESSOR_BASIC_INFO_COUNT
)
330 return (KERN_FAILURE
);
332 basic_info
= (processor_basic_info_t
) info
;
333 basic_info
->cpu_type
= slot_type(cpu_id
);
334 basic_info
->cpu_subtype
= slot_subtype(cpu_id
);
335 state
= processor
->state
;
336 if (state
== PROCESSOR_OFF_LINE
)
337 basic_info
->running
= FALSE
;
339 basic_info
->running
= TRUE
;
340 basic_info
->slot_num
= cpu_id
;
341 if (processor
== master_processor
)
342 basic_info
->is_master
= TRUE
;
344 basic_info
->is_master
= FALSE
;
346 *count
= PROCESSOR_BASIC_INFO_COUNT
;
349 return (KERN_SUCCESS
);
352 case PROCESSOR_CPU_LOAD_INFO
:
354 processor_cpu_load_info_t cpu_load_info
;
356 uint64_t idle_time_snapshot1
, idle_time_snapshot2
;
357 uint64_t idle_time_tstamp1
, idle_time_tstamp2
;
360 * We capture the accumulated idle time twice over
361 * the course of this function, as well as the timestamps
362 * when each were last updated. Since these are
363 * all done using non-atomic racy mechanisms, the
364 * most we can infer is whether values are stable.
365 * timer_grab() is the only function that can be
366 * used reliably on another processor's per-processor
370 if (*count
< PROCESSOR_CPU_LOAD_INFO_COUNT
)
371 return (KERN_FAILURE
);
373 cpu_load_info
= (processor_cpu_load_info_t
) info
;
374 if (precise_user_kernel_time
) {
375 cpu_load_info
->cpu_ticks
[CPU_STATE_USER
] =
376 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, user_state
)) / hz_tick_interval
);
377 cpu_load_info
->cpu_ticks
[CPU_STATE_SYSTEM
] =
378 (uint32_t)(timer_grab(&PROCESSOR_DATA(processor
, system_state
)) / hz_tick_interval
);
380 uint64_t tval
= timer_grab(&PROCESSOR_DATA(processor
, user_state
)) +
381 timer_grab(&PROCESSOR_DATA(processor
, system_state
));
383 cpu_load_info
->cpu_ticks
[CPU_STATE_USER
] = (uint32_t)(tval
/ hz_tick_interval
);
384 cpu_load_info
->cpu_ticks
[CPU_STATE_SYSTEM
] = 0;
387 idle_state
= &PROCESSOR_DATA(processor
, idle_state
);
388 idle_time_snapshot1
= timer_grab(idle_state
);
389 idle_time_tstamp1
= idle_state
->tstamp
;
392 * Idle processors are not continually updating their
393 * per-processor idle timer, so it may be extremely
394 * out of date, resulting in an over-representation
395 * of non-idle time between two measurement
396 * intervals by e.g. top(1). If we are non-idle, or
397 * have evidence that the timer is being updated
398 * concurrently, we consider its value up-to-date.
400 if (PROCESSOR_DATA(processor
, current_state
) != idle_state
) {
401 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
402 (uint32_t)(idle_time_snapshot1
/ hz_tick_interval
);
403 } else if ((idle_time_snapshot1
!= (idle_time_snapshot2
= timer_grab(idle_state
))) ||
404 (idle_time_tstamp1
!= (idle_time_tstamp2
= idle_state
->tstamp
))){
405 /* Idle timer is being updated concurrently, second stamp is good enough */
406 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
407 (uint32_t)(idle_time_snapshot2
/ hz_tick_interval
);
410 * Idle timer may be very stale. Fortunately we have established
411 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
413 idle_time_snapshot1
+= mach_absolute_time() - idle_time_tstamp1
;
415 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] =
416 (uint32_t)(idle_time_snapshot1
/ hz_tick_interval
);
419 cpu_load_info
->cpu_ticks
[CPU_STATE_NICE
] = 0;
421 *count
= PROCESSOR_CPU_LOAD_INFO_COUNT
;
424 return (KERN_SUCCESS
);
428 result
= cpu_info(flavor
, cpu_id
, info
, count
);
429 if (result
== KERN_SUCCESS
)
438 processor_t processor
)
440 processor_set_t pset
;
442 kern_return_t result
;
445 if (processor
== PROCESSOR_NULL
|| processor
->processor_set
== PROCESSOR_SET_NULL
)
446 return (KERN_INVALID_ARGUMENT
);
448 if (processor
== master_processor
) {
451 prev
= thread_bind(processor
);
452 thread_block(THREAD_CONTINUE_NULL
);
454 result
= cpu_start(processor
->cpu_id
);
462 pset
= processor
->processor_set
;
464 if (processor
->state
!= PROCESSOR_OFF_LINE
) {
468 return (KERN_FAILURE
);
471 processor
->state
= PROCESSOR_START
;
476 * Create the idle processor thread.
478 if (processor
->idle_thread
== THREAD_NULL
) {
479 result
= idle_thread_create(processor
);
480 if (result
!= KERN_SUCCESS
) {
483 processor
->state
= PROCESSOR_OFF_LINE
;
492 * If there is no active thread, the processor
493 * has never been started. Create a dedicated
496 if ( processor
->active_thread
== THREAD_NULL
&&
497 processor
->next_thread
== THREAD_NULL
) {
498 result
= kernel_thread_create((thread_continue_t
)processor_start_thread
, NULL
, MAXPRI_KERNEL
, &thread
);
499 if (result
!= KERN_SUCCESS
) {
502 processor
->state
= PROCESSOR_OFF_LINE
;
511 thread
->bound_processor
= processor
;
512 processor
->next_thread
= thread
;
513 thread
->state
= TH_RUN
;
514 thread_unlock(thread
);
517 thread_deallocate(thread
);
520 if (processor
->processor_self
== IP_NULL
)
521 ipc_processor_init(processor
);
523 result
= cpu_start(processor
->cpu_id
);
524 if (result
!= KERN_SUCCESS
) {
527 processor
->state
= PROCESSOR_OFF_LINE
;
534 ipc_processor_enable(processor
);
536 return (KERN_SUCCESS
);
541 processor_t processor
)
543 if (processor
== PROCESSOR_NULL
)
544 return(KERN_INVALID_ARGUMENT
);
546 return(processor_shutdown(processor
));
551 processor_t processor
,
552 processor_info_t info
,
553 mach_msg_type_number_t count
)
555 if (processor
== PROCESSOR_NULL
)
556 return(KERN_INVALID_ARGUMENT
);
558 return(cpu_control(processor
->cpu_id
, info
, count
));
562 processor_set_create(
563 __unused host_t host
,
564 __unused processor_set_t
*new_set
,
565 __unused processor_set_t
*new_name
)
567 return(KERN_FAILURE
);
571 processor_set_destroy(
572 __unused processor_set_t pset
)
574 return(KERN_FAILURE
);
578 processor_get_assignment(
579 processor_t processor
,
580 processor_set_t
*pset
)
584 if (processor
== PROCESSOR_NULL
)
585 return(KERN_INVALID_ARGUMENT
);
587 state
= processor
->state
;
588 if (state
== PROCESSOR_SHUTDOWN
|| state
== PROCESSOR_OFF_LINE
)
589 return(KERN_FAILURE
);
593 return(KERN_SUCCESS
);
598 processor_set_t pset
,
601 processor_set_info_t info
,
602 mach_msg_type_number_t
*count
)
604 if (pset
== PROCESSOR_SET_NULL
)
605 return(KERN_INVALID_ARGUMENT
);
607 if (flavor
== PROCESSOR_SET_BASIC_INFO
) {
608 register processor_set_basic_info_t basic_info
;
610 if (*count
< PROCESSOR_SET_BASIC_INFO_COUNT
)
611 return(KERN_FAILURE
);
613 basic_info
= (processor_set_basic_info_t
) info
;
614 basic_info
->processor_count
= processor_avail_count
;
615 basic_info
->default_policy
= POLICY_TIMESHARE
;
617 *count
= PROCESSOR_SET_BASIC_INFO_COUNT
;
619 return(KERN_SUCCESS
);
621 else if (flavor
== PROCESSOR_SET_TIMESHARE_DEFAULT
) {
622 register policy_timeshare_base_t ts_base
;
624 if (*count
< POLICY_TIMESHARE_BASE_COUNT
)
625 return(KERN_FAILURE
);
627 ts_base
= (policy_timeshare_base_t
) info
;
628 ts_base
->base_priority
= BASEPRI_DEFAULT
;
630 *count
= POLICY_TIMESHARE_BASE_COUNT
;
632 return(KERN_SUCCESS
);
634 else if (flavor
== PROCESSOR_SET_FIFO_DEFAULT
) {
635 register policy_fifo_base_t fifo_base
;
637 if (*count
< POLICY_FIFO_BASE_COUNT
)
638 return(KERN_FAILURE
);
640 fifo_base
= (policy_fifo_base_t
) info
;
641 fifo_base
->base_priority
= BASEPRI_DEFAULT
;
643 *count
= POLICY_FIFO_BASE_COUNT
;
645 return(KERN_SUCCESS
);
647 else if (flavor
== PROCESSOR_SET_RR_DEFAULT
) {
648 register policy_rr_base_t rr_base
;
650 if (*count
< POLICY_RR_BASE_COUNT
)
651 return(KERN_FAILURE
);
653 rr_base
= (policy_rr_base_t
) info
;
654 rr_base
->base_priority
= BASEPRI_DEFAULT
;
655 rr_base
->quantum
= 1;
657 *count
= POLICY_RR_BASE_COUNT
;
659 return(KERN_SUCCESS
);
661 else if (flavor
== PROCESSOR_SET_TIMESHARE_LIMITS
) {
662 register policy_timeshare_limit_t ts_limit
;
664 if (*count
< POLICY_TIMESHARE_LIMIT_COUNT
)
665 return(KERN_FAILURE
);
667 ts_limit
= (policy_timeshare_limit_t
) info
;
668 ts_limit
->max_priority
= MAXPRI_KERNEL
;
670 *count
= POLICY_TIMESHARE_LIMIT_COUNT
;
672 return(KERN_SUCCESS
);
674 else if (flavor
== PROCESSOR_SET_FIFO_LIMITS
) {
675 register policy_fifo_limit_t fifo_limit
;
677 if (*count
< POLICY_FIFO_LIMIT_COUNT
)
678 return(KERN_FAILURE
);
680 fifo_limit
= (policy_fifo_limit_t
) info
;
681 fifo_limit
->max_priority
= MAXPRI_KERNEL
;
683 *count
= POLICY_FIFO_LIMIT_COUNT
;
685 return(KERN_SUCCESS
);
687 else if (flavor
== PROCESSOR_SET_RR_LIMITS
) {
688 register policy_rr_limit_t rr_limit
;
690 if (*count
< POLICY_RR_LIMIT_COUNT
)
691 return(KERN_FAILURE
);
693 rr_limit
= (policy_rr_limit_t
) info
;
694 rr_limit
->max_priority
= MAXPRI_KERNEL
;
696 *count
= POLICY_RR_LIMIT_COUNT
;
698 return(KERN_SUCCESS
);
700 else if (flavor
== PROCESSOR_SET_ENABLED_POLICIES
) {
701 register int *enabled
;
703 if (*count
< (sizeof(*enabled
)/sizeof(int)))
704 return(KERN_FAILURE
);
706 enabled
= (int *) info
;
707 *enabled
= POLICY_TIMESHARE
| POLICY_RR
| POLICY_FIFO
;
709 *count
= sizeof(*enabled
)/sizeof(int);
711 return(KERN_SUCCESS
);
716 return(KERN_INVALID_ARGUMENT
);
720 * processor_set_statistics
722 * Returns scheduling statistics for a processor set.
725 processor_set_statistics(
726 processor_set_t pset
,
728 processor_set_info_t info
,
729 mach_msg_type_number_t
*count
)
731 if (pset
== PROCESSOR_SET_NULL
|| pset
!= &pset0
)
732 return (KERN_INVALID_PROCESSOR_SET
);
734 if (flavor
== PROCESSOR_SET_LOAD_INFO
) {
735 register processor_set_load_info_t load_info
;
737 if (*count
< PROCESSOR_SET_LOAD_INFO_COUNT
)
738 return(KERN_FAILURE
);
740 load_info
= (processor_set_load_info_t
) info
;
742 load_info
->mach_factor
= sched_mach_factor
;
743 load_info
->load_average
= sched_load_average
;
745 load_info
->task_count
= tasks_count
;
746 load_info
->thread_count
= threads_count
;
748 *count
= PROCESSOR_SET_LOAD_INFO_COUNT
;
749 return(KERN_SUCCESS
);
752 return(KERN_INVALID_ARGUMENT
);
756 * processor_set_max_priority:
758 * Specify max priority permitted on processor set. This affects
759 * newly created and assigned threads. Optionally change existing
763 processor_set_max_priority(
764 __unused processor_set_t pset
,
765 __unused
int max_priority
,
766 __unused boolean_t change_threads
)
768 return (KERN_INVALID_ARGUMENT
);
772 * processor_set_policy_enable:
774 * Allow indicated policy on processor set.
778 processor_set_policy_enable(
779 __unused processor_set_t pset
,
782 return (KERN_INVALID_ARGUMENT
);
786 * processor_set_policy_disable:
788 * Forbid indicated policy on processor set. Time sharing cannot
792 processor_set_policy_disable(
793 __unused processor_set_t pset
,
795 __unused boolean_t change_threads
)
797 return (KERN_INVALID_ARGUMENT
);
801 #define THING_THREAD 1
804 * processor_set_things:
806 * Common internals for processor_set_{threads,tasks}
809 processor_set_things(
810 processor_set_t pset
,
811 mach_port_t
**thing_list
,
812 mach_msg_type_number_t
*count
,
815 unsigned int actual
; /* this many things */
816 unsigned int maxthings
;
819 vm_size_t size
, size_needed
;
822 if (pset
== PROCESSOR_SET_NULL
|| pset
!= &pset0
)
823 return (KERN_INVALID_ARGUMENT
);
829 lck_mtx_lock(&tasks_threads_lock
);
831 if (type
== THING_TASK
)
832 maxthings
= tasks_count
;
834 maxthings
= threads_count
;
836 /* do we have the memory we need? */
838 size_needed
= maxthings
* sizeof (mach_port_t
);
839 if (size_needed
<= size
)
842 /* unlock and allocate more memory */
843 lck_mtx_unlock(&tasks_threads_lock
);
848 assert(size_needed
> 0);
853 return (KERN_RESOURCE_SHORTAGE
);
856 /* OK, have memory and the list locked */
862 task_t task
, *task_list
= (task_t
*)addr
;
864 for (task
= (task_t
)queue_first(&tasks
);
865 !queue_end(&tasks
, (queue_entry_t
)task
);
866 task
= (task_t
)queue_next(&task
->tasks
)) {
867 #if defined(SECURE_KERNEL)
868 if (task
!= kernel_task
) {
870 task_reference_internal(task
);
871 task_list
[actual
++] = task
;
872 #if defined(SECURE_KERNEL)
881 thread_t thread
, *thread_list
= (thread_t
*)addr
;
883 for (thread
= (thread_t
)queue_first(&threads
);
884 !queue_end(&threads
, (queue_entry_t
)thread
);
885 thread
= (thread_t
)queue_next(&thread
->threads
)) {
886 thread_reference_internal(thread
);
887 thread_list
[actual
++] = thread
;
895 lck_mtx_unlock(&tasks_threads_lock
);
897 if (actual
< maxthings
)
898 size_needed
= actual
* sizeof (mach_port_t
);
901 /* no things, so return null pointer and deallocate memory */
909 /* if we allocated too much, must copy */
911 if (size_needed
< size
) {
914 newaddr
= kalloc(size_needed
);
919 task_t
*task_list
= (task_t
*)addr
;
921 for (i
= 0; i
< actual
; i
++)
922 task_deallocate(task_list
[i
]);
927 thread_t
*thread_list
= (thread_t
*)addr
;
929 for (i
= 0; i
< actual
; i
++)
930 thread_deallocate(thread_list
[i
]);
937 return (KERN_RESOURCE_SHORTAGE
);
940 bcopy((void *) addr
, (void *) newaddr
, size_needed
);
945 *thing_list
= (mach_port_t
*)addr
;
948 /* do the conversion that Mig should handle */
953 task_t
*task_list
= (task_t
*)addr
;
955 for (i
= 0; i
< actual
; i
++)
956 (*thing_list
)[i
] = convert_task_to_port(task_list
[i
]);
961 thread_t
*thread_list
= (thread_t
*)addr
;
963 for (i
= 0; i
< actual
; i
++)
964 (*thing_list
)[i
] = convert_thread_to_port(thread_list
[i
]);
971 return (KERN_SUCCESS
);
976 * processor_set_tasks:
978 * List all tasks in the processor set.
982 processor_set_t pset
,
983 task_array_t
*task_list
,
984 mach_msg_type_number_t
*count
)
986 return(processor_set_things(pset
, (mach_port_t
**)task_list
, count
, THING_TASK
));
990 * processor_set_threads:
992 * List all threads in the processor set.
994 #if defined(SECURE_KERNEL)
996 processor_set_threads(
997 __unused processor_set_t pset
,
998 __unused thread_array_t
*thread_list
,
999 __unused mach_msg_type_number_t
*count
)
1001 return KERN_FAILURE
;
1005 processor_set_threads(
1006 processor_set_t pset
,
1007 thread_array_t
*thread_list
,
1008 mach_msg_type_number_t
*count
)
1010 return(processor_set_things(pset
, (mach_port_t
**)thread_list
, count
, THING_THREAD
));
1015 * processor_set_policy_control
1017 * Controls the scheduling attributes governing the processor set.
1018 * Allows control of enabled policies, and per-policy base and limit
1022 processor_set_policy_control(
1023 __unused processor_set_t pset
,
1024 __unused
int flavor
,
1025 __unused processor_set_info_t policy_info
,
1026 __unused mach_msg_type_number_t count
,
1027 __unused boolean_t change
)
1029 return (KERN_INVALID_ARGUMENT
);
1032 #undef pset_deallocate
1033 void pset_deallocate(processor_set_t pset
);
1036 __unused processor_set_t pset
)
1041 #undef pset_reference
1042 void pset_reference(processor_set_t pset
);
1045 __unused processor_set_t pset
)