/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

struct processor_set	pset0;
struct pset_node		pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t			tasks;
queue_head_t			terminated_tasks;	/* To be used ONLY for stackshot. */
int						tasks_count;
int						terminated_tasks_count;
queue_head_t			threads;
int						threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

processor_t				processor_list;
unsigned int			processor_count;
static processor_t		processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t				processor_avail_count;

processor_t		master_processor;
int				master_cpu = 0;
boolean_t		sched_stats_active = FALSE;

kern_return_t
processor_set_things(
		processor_set_t			pset,
		mach_port_t				**thing_list,
		mach_msg_type_number_t	*count,
		int						type);

void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}

/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state deferred until sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_meta = PROCESSOR_META_NULL;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	s = splsched();
	pset_lock(pset);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}

void
processor_meta_init(
	processor_t		processor,
	processor_t		primary)
{
	processor_meta_t	pmeta = primary->processor_meta;

	if (pmeta == PROCESSOR_META_NULL) {
		pmeta = kalloc(sizeof (*pmeta));

		queue_init(&pmeta->idle_queue);

		pmeta->primary = primary;
	}

	processor->processor_meta = pmeta;
}

processor_set_t
processor_pset(
	processor_t	processor)
{
	return (processor->processor_set);
}

processor_set_t
pset_create(
	pset_node_t			node)
{
	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;
		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}

/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t			node)
{
	if (pset != &pset0) {
		/* Scheduler state deferred until sched_init() */
		SCHED(pset_init)(pset);
	}

	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	pset->online_processor_count = 0;
	pset_pri_init_hint(pset, PROCESSOR_NULL);
	pset_count_init_hint(pset, PROCESSOR_NULL);
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->pending_AST_cpu_mask = 0;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}

kern_return_t
processor_info_count(
	processor_flavor_t		flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}

kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
							(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
				   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
							(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}

kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}

kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}

kern_return_t
processor_control(
	processor_t				processor,
	processor_info_t		info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->cpu_id, info, count));
}

kern_return_t
processor_set_create(
	__unused host_t				host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_get_assignment(
	processor_t		processor,
	processor_set_t	*pset)
{
	int state;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = &pset0;

	return(KERN_SUCCESS);
}

kern_return_t
processor_set_info(
	processor_set_t			pset,
	int						flavor,
	host_t					*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t	fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int					*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t			pset,
	int						flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int				max_priority,
	__unused boolean_t			change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int				policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int				policy,
	__unused boolean_t			change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

#define THING_TASK		0
#define THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
								task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
								thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}

/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t			pset,
	task_array_t			*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}

/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t			*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_FAILURE;
}
#else
kern_return_t
processor_set_threads(
	processor_set_t			pset,
	thread_array_t			*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif

/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t		pset,
	__unused int					flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t				change)
{
	return (KERN_INVALID_ARGUMENT);
}

#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}