/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	processor.c: processor and processor_set manipulation routines.
 */
#include <mach/boolean.h>
#include <mach/policy.h>
#include <mach/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_host.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_port.h>
#include <kern/kalloc.h>

#include <mach/mach_host_server.h>
#include <mach/processor_set_server.h>
struct processor_set	pset0;
struct pset_node		pset_node0;
decl_simple_lock_data(static,pset_node_lock)

queue_head_t			tasks;
queue_head_t			threads;
int						tasks_count;
int						threads_count;
decl_lck_mtx_data(,tasks_threads_lock)

processor_t				processor_list;
unsigned int			processor_count;
static processor_t		processor_list_tail;
decl_simple_lock_data(,processor_list_lock)

uint32_t				processor_avail_count;

processor_t		master_processor;
int				master_cpu = 0;
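
/*
 * Note on the globals above: this file manages exactly one processor set,
 * pset0, hung off the single node pset_node0, plus a singly linked list of
 * all processor structures (processor_list / processor_list_tail) guarded by
 * processor_list_lock.  The tasks/threads queues and their counts are the
 * global registration lists consumed by processor_set_things() below,
 * protected by tasks_threads_lock.
 */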
/* Forwards */
kern_return_t	processor_set_things(
		processor_set_t		pset,
		mach_port_t			**thing_list,
		mach_msg_type_number_t	*count,
		int					type);
void
processor_bootstrap(void)
{
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
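
/*
 * processor_bootstrap() runs once, early in kernel startup, before any other
 * processor or scheduler state exists: it wires pset0 into pset_node0,
 * initializes the global task/thread queues and their locks, and registers
 * the boot cpu (master_cpu) as master_processor.  The "early" timing is an
 * assumption based on what the routine touches, not something this file
 * itself enforces.
 */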
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	run_queue_init(&processor->runq);

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_meta = PROCESSOR_META_NULL;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
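
/*
 * Illustrative only (not part of this file): machine-dependent CPU
 * registration code typically brings a secondary CPU into the scheduler's
 * view with a sequence along these lines, where cpu_to_processor() maps a
 * cpu id to its per-cpu processor structure:
 *
 *	processor_t	p = cpu_to_processor(cpu_id);
 *
 *	processor_init(p, cpu_id, &pset0);
 *	if (p != primary)
 *		processor_meta_init(p, primary);	// share the primary's idle queue
 *
 * The exact call site and the choice of "primary" are platform-specific
 * assumptions here; only processor_init()/processor_meta_init() are defined
 * in this file.
 */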
void
processor_meta_init(
	processor_t		processor,
	processor_t		primary)
{
	processor_meta_t	pmeta = primary->processor_meta;

	if (pmeta == PROCESSOR_META_NULL) {
		pmeta = kalloc(sizeof (*pmeta));

		queue_init(&pmeta->idle_queue);

		pmeta->primary = primary;
	}

	processor->processor_meta = pmeta;
}
processor_set_t
processor_pset(
	processor_t	processor)
{
	return (processor->processor_set);
}
processor_set_t
pset_create(
	pset_node_t			node)
{
	processor_set_t		*prev, pset = kalloc(sizeof (*pset));

	if (pset != PROCESSOR_SET_NULL) {
		pset_init(pset, node);

		simple_lock(&pset_node_lock);

		prev = &node->psets;
		while (*prev != PROCESSOR_SET_NULL)
			prev = &(*prev)->pset_list;

		*prev = pset;

		simple_unlock(&pset_node_lock);
	}

	return (pset);
}
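
/*
 * Note: only pset0 is created at bootstrap.  pset_create() exists so that a
 * configuration with multiple processor-set nodes could chain additional
 * sets onto a node's pset list under pset_node_lock, but the exported Mach
 * interfaces below still treat &pset0 as the only valid set.
 */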
/*
 *	Initialize the given processor_set structure.
 */
void
pset_init(
	processor_set_t		pset,
	pset_node_t			node)
{
	queue_init(&pset->active_queue);
	queue_init(&pset->idle_queue);
	pset->processor_count = 0;
	pset->low_pri = pset->low_count = PROCESSOR_NULL;
	pset_lock_init(pset);
	pset->pset_self = IP_NULL;
	pset->pset_name_self = IP_NULL;
	pset->pset_list = PROCESSOR_SET_NULL;
	pset->node = node;
}
kern_return_t
processor_info_count(
	processor_flavor_t		flavor,
	mach_msg_type_number_t	*count)
{
	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
		*count = PROCESSOR_BASIC_INFO_COUNT;
		break;

	case PROCESSOR_CPU_LOAD_INFO:
		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		break;

	default:
		return (cpu_info_count(flavor, count));
	}

	return (KERN_SUCCESS);
}
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t		flavor,
	host_t					*host,
	processor_info_t		info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t		basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
kern_return_t
processor_start(
	processor_t			processor)
{
	processor_set_t		pset;
	thread_t			thread;
	kern_return_t		result;
	spl_t				s;

	if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (processor == master_processor) {
		processor_t		prev;

		prev = thread_bind(processor);
		thread_block(THREAD_CONTINUE_NULL);

		result = cpu_start(processor->cpu_id);

		thread_bind(prev);

		return (result);
	}

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state != PROCESSOR_OFF_LINE) {
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	processor->state = PROCESSOR_START;
	pset_unlock(pset);
	splx(s);

	/*
	 *	Create the idle processor thread.
	 */
	if (processor->idle_thread == THREAD_NULL) {
		result = idle_thread_create(processor);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}
	}

	/*
	 *	If there is no active thread, the processor
	 *	has never been started.  Create a dedicated
	 *	start up thread.
	 */
	if (	processor->active_thread == THREAD_NULL		&&
			processor->next_thread == THREAD_NULL		) {
		result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
		if (result != KERN_SUCCESS) {
			s = splsched();
			pset_lock(pset);
			processor->state = PROCESSOR_OFF_LINE;
			pset_unlock(pset);
			splx(s);

			return (result);
		}

		s = splsched();
		thread_lock(thread);
		thread->bound_processor = processor;
		processor->next_thread = thread;
		thread->state = TH_RUN;
		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (processor->processor_self == IP_NULL)
		ipc_processor_init(processor);

	result = cpu_start(processor->cpu_id);
	if (result != KERN_SUCCESS) {
		s = splsched();
		pset_lock(pset);
		processor->state = PROCESSOR_OFF_LINE;
		pset_unlock(pset);
		splx(s);

		return (result);
	}

	ipc_processor_enable(processor);

	return (KERN_SUCCESS);
}
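
/*
 * Summary of the start sequence above: a processor leaves PROCESSOR_OFF_LINE
 * only through PROCESSOR_START, and before cpu_start() is attempted it must
 * own an idle thread and (on first start) a dedicated bound start-up thread
 * that will run processor_start_thread.  Any failure along the way rolls the
 * state back to PROCESSOR_OFF_LINE.
 */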
kern_return_t
processor_exit(
	processor_t	processor)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(processor_shutdown(processor));
}
kern_return_t
processor_control(
	processor_t			processor,
	processor_info_t	info,
	mach_msg_type_number_t	count)
{
	if (processor == PROCESSOR_NULL)
		return(KERN_INVALID_ARGUMENT);

	return(cpu_control(processor->cpu_id, info, count));
}
kern_return_t
processor_set_create(
	__unused host_t			host,
	__unused processor_set_t	*new_set,
	__unused processor_set_t	*new_name)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_set_destroy(
	__unused processor_set_t	pset)
{
	return(KERN_FAILURE);
}
kern_return_t
processor_get_assignment(
	processor_t		processor,
	processor_set_t	*pset)
{
	int state;

	state = processor->state;
	if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
		return(KERN_FAILURE);

	*pset = &pset0;

	return(KERN_SUCCESS);
}
kern_return_t
processor_set_info(
	processor_set_t			pset,
	int						flavor,
	host_t					*host,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (flavor == PROCESSOR_SET_BASIC_INFO) {
		register processor_set_basic_info_t	basic_info;

		if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
			return(KERN_FAILURE);

		basic_info = (processor_set_basic_info_t) info;
		basic_info->processor_count = processor_avail_count;
		basic_info->default_policy = POLICY_TIMESHARE;

		*count = PROCESSOR_SET_BASIC_INFO_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) {
		register policy_timeshare_base_t	ts_base;

		if (*count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_FAILURE);

		ts_base = (policy_timeshare_base_t) info;
		ts_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_TIMESHARE_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) {
		register policy_fifo_base_t		fifo_base;

		if (*count < POLICY_FIFO_BASE_COUNT)
			return(KERN_FAILURE);

		fifo_base = (policy_fifo_base_t) info;
		fifo_base->base_priority = BASEPRI_DEFAULT;

		*count = POLICY_FIFO_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_DEFAULT) {
		register policy_rr_base_t		rr_base;

		if (*count < POLICY_RR_BASE_COUNT)
			return(KERN_FAILURE);

		rr_base = (policy_rr_base_t) info;
		rr_base->base_priority = BASEPRI_DEFAULT;
		rr_base->quantum = 1;

		*count = POLICY_RR_BASE_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) {
		register policy_timeshare_limit_t	ts_limit;

		if (*count < POLICY_TIMESHARE_LIMIT_COUNT)
			return(KERN_FAILURE);

		ts_limit = (policy_timeshare_limit_t) info;
		ts_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_TIMESHARE_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_FIFO_LIMITS) {
		register policy_fifo_limit_t		fifo_limit;

		if (*count < POLICY_FIFO_LIMIT_COUNT)
			return(KERN_FAILURE);

		fifo_limit = (policy_fifo_limit_t) info;
		fifo_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_FIFO_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_RR_LIMITS) {
		register policy_rr_limit_t		rr_limit;

		if (*count < POLICY_RR_LIMIT_COUNT)
			return(KERN_FAILURE);

		rr_limit = (policy_rr_limit_t) info;
		rr_limit->max_priority = MAXPRI_KERNEL;

		*count = POLICY_RR_LIMIT_COUNT;
		*host = &realhost;
		return(KERN_SUCCESS);
	}
	else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) {
		register int				*enabled;

		if (*count < (sizeof(*enabled)/sizeof(int)))
			return(KERN_FAILURE);

		enabled = (int *) info;
		*enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO;

		*count = sizeof(*enabled)/sizeof(int);
		*host = &realhost;
		return(KERN_SUCCESS);
	}

	*host = HOST_NULL;
	return(KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_statistics
 *
 *	Returns scheduling statistics for a processor set.
 */
kern_return_t
processor_set_statistics(
	processor_set_t			pset,
	int						flavor,
	processor_set_info_t	info,
	mach_msg_type_number_t	*count)
{
	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_PROCESSOR_SET);

	if (flavor == PROCESSOR_SET_LOAD_INFO) {
		register processor_set_load_info_t	load_info;

		if (*count < PROCESSOR_SET_LOAD_INFO_COUNT)
			return(KERN_FAILURE);

		load_info = (processor_set_load_info_t) info;

		load_info->mach_factor = sched_mach_factor;
		load_info->load_average = sched_load_average;

		load_info->task_count = tasks_count;
		load_info->thread_count = threads_count;

		*count = PROCESSOR_SET_LOAD_INFO_COUNT;
		return(KERN_SUCCESS);
	}

	return(KERN_INVALID_ARGUMENT);
}
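
/*
 * Illustrative only: a user-space client would normally reach the load
 * statistics above through the default processor set, roughly as
 *
 *	processor_set_name_t			pset_name;
 *	processor_set_load_info_data_t	load;
 *	mach_msg_type_number_t			cnt = PROCESSOR_SET_LOAD_INFO_COUNT;
 *
 *	processor_set_default(mach_host_self(), &pset_name);
 *	processor_set_statistics(pset_name, PROCESSOR_SET_LOAD_INFO,
 *			(processor_set_info_t)&load, &cnt);
 *
 * processor_set_default() is part of the host interface, not this file; the
 * sketch assumes the standard Mach user-level bindings.
 */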
/*
 *	processor_set_max_priority:
 *
 *	Specify max priority permitted on processor set.  This affects
 *	newly created and assigned threads.  Optionally change existing
 *	ones.
 */
kern_return_t
processor_set_max_priority(
	__unused processor_set_t	pset,
	__unused int				max_priority,
	__unused boolean_t			change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_enable:
 *
 *	Allow indicated policy on processor set.
 */
kern_return_t
processor_set_policy_enable(
	__unused processor_set_t	pset,
	__unused int				policy)
{
	return (KERN_INVALID_ARGUMENT);
}
/*
 *	processor_set_policy_disable:
 *
 *	Forbid indicated policy on processor set.  Time sharing cannot
 *	be forbidden.
 */
kern_return_t
processor_set_policy_disable(
	__unused processor_set_t	pset,
	__unused int				policy,
	__unused boolean_t			change_threads)
{
	return (KERN_INVALID_ARGUMENT);
}
#define THING_TASK		0
#define THING_THREAD	1
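
/*
 * Gathering pattern used below: processor_set_things() sizes and allocates
 * its result buffer before taking tasks_threads_lock, re-checks the needed
 * size under the lock, and loops until the allocation is large enough.  It
 * then takes a reference on each task/thread while still holding the lock,
 * drops the lock, trims or copies the buffer if it over-allocated, and only
 * then converts the referenced objects into ports on behalf of MIG.
 */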
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */
		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
					task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
					thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */
		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */
		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}
		}
	}

	return (KERN_SUCCESS);
}
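
/*
 * Design note on the routine above: references are taken while
 * tasks_threads_lock is held so that none of the collected tasks/threads can
 * disappear once the lock is dropped, while the conversion to send rights
 * (convert_task_to_port / convert_thread_to_port) happens only after the
 * unlock, keeping the lock hold time short and avoiding IPC work under it.
 */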
/*
 *	processor_set_tasks:
 *
 *	List all tasks in the processor set.
 */
kern_return_t
processor_set_tasks(
	processor_set_t			pset,
	task_array_t			*task_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK));
}
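
/*
 * Illustrative only: from user space, listing tasks requires the privileged
 * control port for the processor set, typically obtained as
 *
 *	processor_set_name_t	pset_name;
 *	processor_set_t			pset_priv;
 *	task_array_t			task_list;
 *	mach_msg_type_number_t	task_count;
 *
 *	processor_set_default(mach_host_self(), &pset_name);
 *	host_processor_set_priv(host_priv, pset_name, &pset_priv);
 *	processor_set_tasks(pset_priv, &task_list, &task_count);
 *
 * where host_priv is the privileged host port; this sketch assumes the
 * standard Mach user-level bindings rather than anything defined here.
 */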
/*
 *	processor_set_threads:
 *
 *	List all threads in the processor set.
 */
#if defined(SECURE_KERNEL)
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t			*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_FAILURE;
}
#elif defined(CONFIG_EMBEDDED)
kern_return_t
processor_set_threads(
	__unused processor_set_t		pset,
	__unused thread_array_t			*thread_list,
	__unused mach_msg_type_number_t	*count)
{
	return KERN_NOT_SUPPORTED;
}
#else
kern_return_t
processor_set_threads(
	processor_set_t			pset,
	thread_array_t			*thread_list,
	mach_msg_type_number_t	*count)
{
	return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD));
}
#endif
/*
 *	processor_set_policy_control
 *
 *	Controls the scheduling attributes governing the processor set.
 *	Allows control of enabled policies, and per-policy base and limit
 *	priorities.
 */
kern_return_t
processor_set_policy_control(
	__unused processor_set_t			pset,
	__unused int						flavor,
	__unused processor_set_info_t		policy_info,
	__unused mach_msg_type_number_t		count,
	__unused boolean_t					change)
{
	return (KERN_INVALID_ARGUMENT);
}
#undef pset_deallocate
void pset_deallocate(processor_set_t pset);
void
pset_deallocate(
__unused processor_set_t	pset)
{
	return;
}

#undef pset_reference
void pset_reference(processor_set_t pset);
void
pset_reference(
__unused processor_set_t	pset)
{
	return;
}