/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	processor.c: processor and processor_set manipulation routines.
 */

#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <security/mac_mach_internal.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>

struct processor_set	pset0;
struct pset_node	pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t		tasks;
queue_head_t		terminated_tasks;	/* To be used ONLY for stackshot. */
int			tasks_count;
int			terminated_tasks_count;
queue_head_t		threads;
int			threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

processor_t		processor_list;
unsigned int		processor_count;
static processor_t	processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t		processor_avail_count;

processor_t		master_processor;
int			master_cpu = 0;
boolean_t		sched_stats_active = FALSE;
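
/*
 * processor_list is a singly linked list of every processor structure,
 * chained through processor->processor_list and guarded by
 * processor_list_lock.  The tasks, terminated_tasks and threads queues
 * above are guarded by tasks_threads_lock, as seen in
 * processor_set_things() below.
 */
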
void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&tasks);
	queue_init(&terminated_tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}

/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t		processor,
	int			cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state deferred until sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = FALSE;
	processor->is_recommended = TRUE;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	s = splsched();
	pset_lock(pset);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}

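/*
 * Illustrative sketch: per-architecture CPU bring-up code is assumed to do
 * roughly the following for each secondary CPU it discovers, before that
 * CPU is started (assuming a single processor set, pset0):
 *
 *	processor_t p = cpu_to_processor(cpu_id);
 *	processor_init(p, cpu_id, &pset0);
 *	processor_set_primary(p, primary);	// primary == p if no SMT sibling
 *
 * The exact call sites live in the platform-specific startup code, not in
 * this file.
 */
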
void
processor_set_primary(
	processor_t		processor,
	processor_t		primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust primary point for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link primary to secondary, assumes a 2-way SMT model
		 * We'll need to move to a queue if any future architecture
		 * requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = TRUE;
		processor->is_SMT = TRUE;
	}
}

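/*
 * After processor_set_primary() runs for an SMT pair, the linkage is:
 *
 *	secondary->processor_primary  == primary
 *	primary->processor_secondary  == secondary
 *	primary->is_SMT == secondary->is_SMT == TRUE
 *
 * A processor with no SMT sibling keeps processor_primary pointing at
 * itself and processor_secondary == NULL, as set up in processor_init().
 */
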
processor_set_t
processor_pset(
	processor_t	processor)
{
	return (processor->processor_set);
}

processor_set_t
pset_create(
	pset_node_t		node)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE)
		return processor_pset(master_processor);

	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}

/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t		node)
{
	if (pset != &pset0) {
		/* Scheduler state deferred until sched_init() */
		SCHED(pset_init)(pset);
	}

	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	queue_init(&pset->idle_secondary_queue);
	pset->online_processor_count = 0;
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->pending_AST_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}

kern_return_t
processor_info_count(
	processor_flavor_t		flavor,
	mach_msg_type_number_t		*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}

kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
					(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
					(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
					timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}

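/*
 * Usage sketch (illustrative): the per-CPU tick counters computed above are
 * what a user-space caller sees via host_processor_info(), e.g.
 *
 *	natural_t			cpu_count;
 *	processor_info_array_t		info_array;
 *	mach_msg_type_number_t		info_count;
 *
 *	kern_return_t kr = host_processor_info(mach_host_self(),
 *			PROCESSOR_CPU_LOAD_INFO, &cpu_count, &info_array, &info_count);
 *
 * which is how tools such as top(1) derive per-CPU user/system/idle time.
 */
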
kern_return_t
processor_start(
	processor_t		processor)
{
	processor_set_t		pset;
	thread_t		thread;
	kern_return_t		result;
	spl_t			s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
		processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread->last_made_runnable_time = mach_absolute_time();
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}

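/*
 * Summary of the start sequence above: mark the processor PROCESSOR_START,
 * create its idle thread if needed, create a dedicated start-up thread the
 * first time the processor is brought up, allocate its IPC port, and only
 * then call cpu_start().  Any failure rolls the state back to
 * PROCESSOR_OFF_LINE.
 */
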
kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}

kern_return_t
processor_control(
	processor_t		processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->cpu_id, info, count));
}

kern_return_t
processor_set_create(
	__unused host_t			host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}

kern_return_t
processor_get_assignment(
	processor_t		processor,
	processor_set_t		*pset)
{
	int state;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = &pset0;

	return(KERN_SUCCESS);
}

kern_return_t
processor_set_info(
	processor_set_t		pset,
	int			flavor,
	host_t			*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int	*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t		pset,
	int			flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int			max_priority,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int			policy)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int			policy,
	__unused boolean_t		change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t		pset,
	void			**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int i, j, used;

	task_t task;
	task_t *task_list;
	unsigned int actual_tasks;
	vm_size_t task_size, task_size_needed;

	thread_t thread;
	thread_t *thread_list;
	unsigned int actual_threads;
	vm_size_t thread_size, thread_size_needed;

	void *addr, *newaddr;
	vm_size_t size, size_needed;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	task_size = 0;
	task_size_needed = 0;
	task_list = NULL;
	actual_tasks = 0;

	thread_size = 0;
	thread_size_needed = 0;
	thread_list = NULL;
	actual_threads = 0;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		/* do we have the memory we need? */
		if (type == PSET_THING_THREAD)
			thread_size_needed = threads_count * sizeof(void *);

		task_size_needed = tasks_count * sizeof(void *);

		if (task_size_needed <= task_size &&
		    thread_size_needed <= thread_size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		/* grow task array */
		if (task_size_needed > task_size) {
			if (task_size != 0)
				kfree(task_list, task_size);

			assert(task_size_needed > 0);
			task_size = task_size_needed;

			task_list = (task_t *)kalloc(task_size);
			if (task_list == NULL) {
				if (thread_size != 0)
					kfree(thread_list, thread_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}

		/* grow thread array */
		if (thread_size_needed > thread_size) {
			if (thread_size != 0)
				kfree(thread_list, thread_size);

			assert(thread_size_needed > 0);
			thread_size = thread_size_needed;

			thread_list = (thread_t *)kalloc(thread_size);
			if (thread_list == 0) {
				if (task_size != 0)
					kfree(task_list, task_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}
	}

	/* OK, have memory and the list locked */

	/* If we need it, get the thread list */
	if (type == PSET_THING_THREAD) {
		for (thread = (thread_t)queue_first(&threads);
		     !queue_end(&threads, (queue_entry_t)thread);
		     thread = (thread_t)queue_next(&thread->threads)) {
#if defined(SECURE_KERNEL)
			if (thread->task != kernel_task) {
#endif
				thread_reference_internal(thread);
				thread_list[actual_threads++] = thread;
#if defined(SECURE_KERNEL)
			}
#endif
		}
	}

	/* get a list of the tasks */
	for (task = (task_t)queue_first(&tasks);
	     !queue_end(&tasks, (queue_entry_t)task);
	     task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
		if (task != kernel_task) {
#endif
			task_reference_internal(task);
			task_list[actual_tasks++] = task;
#if defined(SECURE_KERNEL)
		}
#endif
	}

	lck_mtx_unlock(&tasks_threads_lock);

	/* for each task, make sure we are allowed to examine it */
	for (i = used = 0; i < actual_tasks; i++) {
		if (mac_task_check_expose_task(task_list[i])) {
			task_deallocate(task_list[i]);
			continue;
		}
		task_list[used++] = task_list[i];
	}
	actual_tasks = used;
	task_size_needed = actual_tasks * sizeof(void *);

	if (type == PSET_THING_THREAD) {

		/* for each thread (if any), make sure it's task is in the allowed list */
		for (i = used = 0; i < actual_threads; i++) {
			boolean_t found_task = FALSE;

			task = thread_list[i]->task;
			for (j = 0; j < actual_tasks; j++) {
				if (task_list[j] == task) {
					found_task = TRUE;
					break;
				}
			}
			if (found_task)
				thread_list[used++] = thread_list[i];
			else
				thread_deallocate(thread_list[i]);
		}
		actual_threads = used;
		thread_size_needed = actual_threads * sizeof(void *);

		/* done with the task list */
		for (i = 0; i < actual_tasks; i++)
			task_deallocate(task_list[i]);
		kfree(task_list, task_size);
		task_size = 0;
		actual_tasks = 0;
		task_list = NULL;
	}

	if (type == PSET_THING_THREAD) {
		if (actual_threads == 0) {
			/* no threads available to return */
			assert(task_size == 0);
			if (thread_size != 0)
				kfree(thread_list, thread_size);

			*thing_list = NULL;
			*count = 0;

			return KERN_SUCCESS;
		}

		size_needed = actual_threads * sizeof(void *);
		size = thread_size;
		addr = thread_list;
	} else {
		if (actual_tasks == 0) {
			/* no tasks available to return */
			assert(thread_size == 0);
			if (task_size != 0)
				kfree(task_list, task_size);

			*thing_list = NULL;
			*count = 0;

			return KERN_SUCCESS;
		}

		size_needed = actual_tasks * sizeof(void *);
		size = task_size;
		addr = task_list;
	}

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual_tasks; i++) {
				if (type == PSET_THING_THREAD)
					thread_deallocate(thread_list[i]);
				else
					task_deallocate(task_list[i]);
			}
			if (size)
				kfree(addr, size);
			return (KERN_RESOURCE_SHORTAGE);
		}

		bcopy((void *) addr, (void *) newaddr, size_needed);
		kfree(addr, size);

		addr = newaddr;
		size = size_needed;
	}

	*thing_list = (void **)addr;
	*count = (unsigned int)size / sizeof(void *);

	return (KERN_SUCCESS);
}

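/*
 * Note on the pattern above: the task/thread arrays are sized and allocated
 * while tasks_threads_lock is dropped, then the counts are re-checked under
 * the lock and the allocation retried if the lists grew in the meantime.
 * Entries are referenced before the lock is released, filtered through
 * mac_task_check_expose_task(), and any over-allocation is trimmed with a
 * copy before returning.
 */
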
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t		pset,
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK);
	if (ret != KERN_SUCCESS)
		return ret;

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++)
		(*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);

	return KERN_SUCCESS;
}

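/*
 * Usage sketch (illustrative): from user space the task list is normally
 * obtained through the privileged pset port, e.g.
 *
 *	processor_set_name_t	name;
 *	processor_set_t		pset;
 *	task_array_t		task_array;
 *	mach_msg_type_number_t	task_count;
 *
 *	processor_set_default(mach_host_self(), &name);
 *	host_processor_set_priv(host_priv, name, &pset);
 *	processor_set_tasks(pset, &task_array, &task_count);
 *
 * host_priv is assumed to be the privileged host control port, which only
 * sufficiently privileged callers hold; without it the privileged pset
 * port cannot be obtained.
 */
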
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t			*thread_list,
	__unused mach_msg_type_number_t		*count)
{
	return KERN_FAILURE;
}
#else
kern_return_t
processor_set_threads(
	processor_set_t		pset,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD);
	if (ret != KERN_SUCCESS)
		return ret;

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++)
		(*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);

	return KERN_SUCCESS;
}
#endif

/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t		pset,
	__unused int				flavor,
	__unused processor_set_info_t		policy_info,
	__unused mach_msg_type_number_t		count,
	__unused boolean_t			change)
{
	return (KERN_INVALID_ARGUMENT);
}

#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}