/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <security/mac_mach_internal.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
struct processor_set	pset0;
struct pset_node		pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t			terminated_tasks;	/* To be used ONLY for stackshot. */
queue_head_t			corpse_tasks;

int						terminated_tasks_count;

decl_lck_mtx_data(,tasks_threads_lock)
decl_lck_mtx_data(,tasks_corpse_lock)

processor_t				processor_list;
unsigned int			processor_count;
static processor_t		processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t				processor_avail_count;

processor_t				master_processor;
boolean_t				sched_stats_active = FALSE;
void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&terminated_tasks);
	queue_init(&threads);
	queue_init(&corpse_tasks);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	if (processor != master_processor) {
		/* Scheduler state deferred until sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->starting_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_primary = processor;	/* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = FALSE;
	processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
void
processor_set_primary(
	processor_t		processor,
	processor_t		primary)
{
	assert(processor->processor_primary == primary || processor->processor_primary == processor);
	/* Re-adjust primary point for this (possibly) secondary processor */
	processor->processor_primary = primary;

	assert(primary->processor_secondary == NULL || primary->processor_secondary == processor);
	if (primary != processor) {
		/* Link primary to secondary, assumes a 2-way SMT model
		 * We'll need to move to a queue if any future architecture
		 * requires otherwise.
		 */
		assert(processor->processor_secondary == NULL);
		primary->processor_secondary = processor;
		/* Mark both processors as SMT siblings */
		primary->is_SMT = TRUE;
		processor->is_SMT = TRUE;
	}
}
processor_set_t
processor_pset(
	processor_t	processor)
{
	return (processor->processor_set);
}
processor_set_t
pset_create(
	pset_node_t			node)
{
	/* some schedulers do not support multiple psets */
	if (SCHED(multiple_psets_enabled) == FALSE)
		return processor_pset(master_processor);

	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t			node)
{
	if (pset != &pset0) {
		/* Scheduler state deferred until sched_init() */
		SCHED(pset_init)(pset);
	}

	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	queue_init(&pset->idle_secondary_queue);
	pset->online_processor_count = 0;
	pset->cpu_set_low = pset->cpu_set_hi = 0;
	pset->cpu_set_count = 0;
	pset->recommended_bitmask = ~0ULL;
	pset->pending_AST_cpu_mask = 0;
#if defined(CONFIG_SCHED_DEFERRED_AST)
	pset->pending_deferred_AST_cpu_mask = 0;
#endif
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}
kern_return_t
processor_info_count(
	processor_flavor_t		flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}
kern_return_t
processor_info(
	processor_t				processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	int				cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
					(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
					(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
					timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
				   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
					(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
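
/*
 * Illustrative sketch (not part of this implementation): the
 * PROCESSOR_CPU_LOAD_INFO flavor handled above is what user space receives
 * through the host_processor_info() MIG routine. A minimal user-space reader
 * might look like the following; freeing the out-of-line array with
 * vm_deallocate() and full error handling are omitted for brevity.
 *
 *	#include <stdio.h>
 *	#include <mach/mach.h>
 *	#include <mach/processor_info.h>
 *
 *	natural_t ncpu;
 *	processor_info_array_t info;
 *	mach_msg_type_number_t info_count;
 *
 *	if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *	    &ncpu, &info, &info_count) == KERN_SUCCESS) {
 *		processor_cpu_load_info_t load = (processor_cpu_load_info_t)info;
 *		for (natural_t i = 0; i < ncpu; i++)
 *			printf("cpu%u: user=%u system=%u idle=%u\n", i,
 *			    load[i].cpu_ticks[CPU_STATE_USER],
 *			    load[i].cpu_ticks[CPU_STATE_SYSTEM],
 *			    load[i].cpu_ticks[CPU_STATE_IDLE]);
 *	}
 */
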
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	pset = processor->processor_set;
	if (processor->state != PROCESSOR_OFF_LINE) {
		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			processor->state = PROCESSOR_OFF_LINE;

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			processor->state = PROCESSOR_OFF_LINE;

			return (result);
		}

		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread->last_made_runnable_time = mach_absolute_time();
		thread_unlock(thread);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		processor->state = PROCESSOR_OFF_LINE;

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}
kern_return_t
processor_control(
	processor_t				processor,
	processor_info_t		info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->cpu_id, info, count));
}
kern_return_t
processor_set_create(
	__unused host_t				host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_get_assignment(
	processor_t		processor,
	processor_set_t	*pset)
{
	int state;

	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = &pset0;

	return(KERN_SUCCESS);
}
kern_return_t
processor_set_info(
	processor_set_t			pset,
	int						flavor,
	host_t					*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		int		*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}
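
/*
 * Illustrative sketch (not part of this implementation): user space reaches
 * the PROCESSOR_SET_BASIC_INFO flavor above via the processor_set_info()
 * MIG routine, typically against the default processor set name port:
 *
 *	#include <stdio.h>
 *	#include <mach/mach.h>
 *
 *	processor_set_name_t default_set;
 *	processor_set_basic_info_data_t basic;
 *	mach_msg_type_number_t count = PROCESSOR_SET_BASIC_INFO_COUNT;
 *	host_t host;
 *
 *	if (processor_set_default(mach_host_self(), &default_set) == KERN_SUCCESS &&
 *	    processor_set_info(default_set, PROCESSOR_SET_BASIC_INFO, &host,
 *	        (processor_set_info_t)&basic, &count) == KERN_SUCCESS)
 *		printf("%d processors available\n", basic.processor_count);
 */
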
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t			pset,
	int						flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}
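
/*
 * Illustrative sketch (not part of this implementation): the
 * PROCESSOR_SET_LOAD_INFO statistics above are read from user space with the
 * processor_set_statistics() MIG routine; load_average and mach_factor are
 * returned as fixed-point integers.
 *
 *	#include <stdio.h>
 *	#include <mach/mach.h>
 *
 *	processor_set_name_t default_set;
 *	processor_set_load_info_data_t load;
 *	mach_msg_type_number_t count = PROCESSOR_SET_LOAD_INFO_COUNT;
 *
 *	if (processor_set_default(mach_host_self(), &default_set) == KERN_SUCCESS &&
 *	    processor_set_statistics(default_set, PROCESSOR_SET_LOAD_INFO,
 *	        (processor_set_info_t)&load, &count) == KERN_SUCCESS)
 *		printf("tasks=%d threads=%d load_average=%d\n",
 *		    load.task_count, load.thread_count, load.load_average);
 */
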
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int				max_priority,
	__unused boolean_t			change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int				policy)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int				policy,
	__unused boolean_t			change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
#define PSET_THING_TASK		0
#define PSET_THING_THREAD	1

/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t	pset,
	void **thing_list,
	mach_msg_type_number_t *count,
	int type)
{
	unsigned int i;
	task_t task;
	thread_t thread;

	task_t *task_list;
	unsigned int actual_tasks;
	vm_size_t task_size, task_size_needed;

	thread_t *thread_list;
	unsigned int actual_threads;
	vm_size_t thread_size, thread_size_needed;

	void *addr, *newaddr;
	vm_size_t size, size_needed;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	task_size = 0;
	task_size_needed = 0;
	task_list = NULL;
	actual_tasks = 0;

	thread_size = 0;
	thread_size_needed = 0;
	thread_list = NULL;
	actual_threads = 0;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		/* do we have the memory we need? */
		if (type == PSET_THING_THREAD)
			thread_size_needed = threads_count * sizeof(void *);
#if !CONFIG_MACF
		else
#endif
			task_size_needed = tasks_count * sizeof(void *);

		if (task_size_needed <= task_size &&
		    thread_size_needed <= thread_size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		/* grow task array */
		if (task_size_needed > task_size) {
			if (task_size != 0)
				kfree(task_list, task_size);

			assert(task_size_needed > 0);
			task_size = task_size_needed;

			task_list = (task_t *)kalloc(task_size);
			if (task_list == NULL) {
				if (thread_size != 0)
					kfree(thread_list, thread_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}

		/* grow thread array */
		if (thread_size_needed > thread_size) {
			if (thread_size != 0)
				kfree(thread_list, thread_size);

			assert(thread_size_needed > 0);
			thread_size = thread_size_needed;

			thread_list = (thread_t *)kalloc(thread_size);
			if (thread_list == 0) {
				if (task_size != 0)
					kfree(task_list, task_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}
	}

	/* OK, have memory and the list locked */

	/* If we need it, get the thread list */
	if (type == PSET_THING_THREAD) {
		for (thread = (thread_t)queue_first(&threads);
		     !queue_end(&threads, (queue_entry_t)thread);
		     thread = (thread_t)queue_next(&thread->threads)) {
#if defined(SECURE_KERNEL)
			if (thread->task != kernel_task) {
#endif
				thread_reference_internal(thread);
				thread_list[actual_threads++] = thread;
#if defined(SECURE_KERNEL)
			}
#endif
		}
	}
#if !CONFIG_MACF
	else {
#endif
		/* get a list of the tasks */
		for (task = (task_t)queue_first(&tasks);
		     !queue_end(&tasks, (queue_entry_t)task);
		     task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual_tasks++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}
#if !CONFIG_MACF
	}
#endif

	lck_mtx_unlock(&tasks_threads_lock);

#if CONFIG_MACF
	unsigned int j, used;

	/* for each task, make sure we are allowed to examine it */
	for (i = used = 0; i < actual_tasks; i++) {
		if (mac_task_check_expose_task(task_list[i])) {
			task_deallocate(task_list[i]);
			continue;
		}
		task_list[used++] = task_list[i];
	}
	actual_tasks = used;
	task_size_needed = actual_tasks * sizeof(void *);

	if (type == PSET_THING_THREAD) {

		/* for each thread (if any), make sure it's task is in the allowed list */
		for (i = used = 0; i < actual_threads; i++) {
			boolean_t found_task = FALSE;

			task = thread_list[i]->task;
			for (j = 0; j < actual_tasks; j++) {
				if (task_list[j] == task) {
					found_task = TRUE;
					break;
				}
			}
			if (found_task)
				thread_list[used++] = thread_list[i];
			else
				thread_deallocate(thread_list[i]);
		}
		actual_threads = used;
		thread_size_needed = actual_threads * sizeof(void *);

		/* done with the task list */
		for (i = 0; i < actual_tasks; i++)
			task_deallocate(task_list[i]);
		kfree(task_list, task_size);
		task_size = 0;
		actual_tasks = 0;
		task_list = NULL;
	}
#endif

	if (type == PSET_THING_THREAD) {
		if (actual_threads == 0) {
			/* no threads available to return */
			assert(task_size == 0);
			if (thread_size != 0)
				kfree(thread_list, thread_size);

			*thing_list = NULL;
			*count = 0;

			return KERN_SUCCESS;
		}
		size_needed = actual_threads * sizeof(void *);
		size = thread_size;
		addr = thread_list;
	} else {
		if (actual_tasks == 0) {
			/* no tasks available to return */
			assert(thread_size == 0);
			if (task_size != 0)
				kfree(task_list, task_size);

			*thing_list = NULL;
			*count = 0;

			return KERN_SUCCESS;
		}
		size_needed = actual_tasks * sizeof(void *);
		size = task_size;
		addr = task_list;
	}

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual_tasks; i++) {
				if (type == PSET_THING_THREAD)
					thread_deallocate(thread_list[i]);
				else
					task_deallocate(task_list[i]);
			}
			if (size)
				kfree(addr, size);
			return (KERN_RESOURCE_SHORTAGE);
		}

		bcopy((void *) addr, (void *) newaddr, size_needed);
		kfree(addr, size);

		addr = newaddr;
		size = size_needed;
	}

	*thing_list = (void **)addr;
	*count = (unsigned int)size / sizeof(void *);

	return (KERN_SUCCESS);
}
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t			pset,
	task_array_t			*task_list,
	mach_msg_type_number_t	*count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK);
	if (ret != KERN_SUCCESS)
		return ret;

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++)
		(*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]);

	return KERN_SUCCESS;
}
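
/*
 * Illustrative sketch (not part of this implementation): enumerating every
 * task through this interface from user space requires a send right to the
 * privileged host port (shown here as host_priv, however it was obtained);
 * the task ports and the out-of-line array returned by the call must be
 * released by the caller.
 *
 *	#include <mach/mach.h>
 *
 *	processor_set_name_t pset_name;
 *	processor_set_t pset_priv;
 *	task_array_t tasks;
 *	mach_msg_type_number_t task_count, i;
 *
 *	processor_set_default(mach_host_self(), &pset_name);
 *	if (host_processor_set_priv(host_priv, pset_name, &pset_priv) == KERN_SUCCESS &&
 *	    processor_set_tasks(pset_priv, &tasks, &task_count) == KERN_SUCCESS) {
 *		for (i = 0; i < task_count; i++)
 *			mach_port_deallocate(mach_task_self(), tasks[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)tasks,
 *		    task_count * sizeof(task_t));
 *	}
 */
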
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t			*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_FAILURE;
}
#else
kern_return_t
processor_set_threads(
	processor_set_t			pset,
	thread_array_t			*thread_list,
	mach_msg_type_number_t	*count)
{
	kern_return_t ret;
	mach_msg_type_number_t i;

	ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD);
	if (ret != KERN_SUCCESS)
		return ret;

	/* do the conversion that Mig should handle */
	for (i = 0; i < *count; i++)
		(*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]);

	return KERN_SUCCESS;
}
#endif
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t		pset,
	__unused int					flavor,
	__unused processor_set_info_t	policy_info,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t				change)
{
	return (KERN_INVALID_ARGUMENT);
}
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}