2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
62 * Non-ipc host functions.
65 #include <mach/mach_types.h>
66 #include <mach/boolean.h>
67 #include <mach/host_info.h>
68 #include <mach/host_special_ports.h>
69 #include <mach/kern_return.h>
70 #include <mach/machine.h>
71 #include <mach/port.h>
72 #include <mach/processor_info.h>
73 #include <mach/vm_param.h>
74 #include <mach/processor.h>
75 #include <mach/mach_host_server.h>
76 #include <mach/host_priv_server.h>
77 #include <mach/vm_map.h>
78 #include <mach/task_info.h>
80 #include <kern/kern_types.h>
81 #include <kern/assert.h>
82 #include <kern/kalloc.h>
83 #include <kern/host.h>
84 #include <kern/host_statistics.h>
85 #include <kern/ipc_host.h>
86 #include <kern/misc_protos.h>
87 #include <kern/sched.h>
88 #include <kern/processor.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_purgeable_internal.h>
92 #include <vm/vm_pageout.h>
95 #include <atm/atm_internal.h>
99 #include <security/mac_mach_internal.h>
102 host_data_t realhost
;
104 vm_extmod_statistics_data_t host_extmod_statistics
;
107 host_processors(host_priv_t host_priv
, processor_array_t
* out_array
, mach_msg_type_number_t
* countp
)
109 register processor_t processor
, *tp
;
111 unsigned int count
, i
;
113 if (host_priv
== HOST_PRIV_NULL
)
114 return (KERN_INVALID_ARGUMENT
);
116 assert(host_priv
== &realhost
);
118 count
= processor_count
;
121 addr
= kalloc((vm_size_t
)(count
* sizeof(mach_port_t
)));
123 return (KERN_RESOURCE_SHORTAGE
);
125 tp
= (processor_t
*)addr
;
126 *tp
++ = processor
= processor_list
;
129 simple_lock(&processor_list_lock
);
131 for (i
= 1; i
< count
; i
++)
132 *tp
++ = processor
= processor
->processor_list
;
134 simple_unlock(&processor_list_lock
);
138 *out_array
= (processor_array_t
)addr
;
140 /* do the conversion that Mig should handle */
141 tp
= (processor_t
*)addr
;
142 for (i
= 0; i
< count
; i
++)
143 ((mach_port_t
*)tp
)[i
] = (mach_port_t
)convert_processor_to_port(tp
[i
]);
145 return (KERN_SUCCESS
);
149 host_info(host_t host
, host_flavor_t flavor
, host_info_t info
, mach_msg_type_number_t
* count
)
151 if (host
== HOST_NULL
)
152 return (KERN_INVALID_ARGUMENT
);
155 case HOST_BASIC_INFO
: {
156 register host_basic_info_t basic_info
;
157 register int master_id
;
160 * Basic information about this host.
162 if (*count
< HOST_BASIC_INFO_OLD_COUNT
)
163 return (KERN_FAILURE
);
165 basic_info
= (host_basic_info_t
)info
;
167 basic_info
->memory_size
= machine_info
.memory_size
;
168 basic_info
->max_cpus
= machine_info
.max_cpus
;
169 basic_info
->avail_cpus
= processor_avail_count
;
170 master_id
= master_processor
->cpu_id
;
171 basic_info
->cpu_type
= slot_type(master_id
);
172 basic_info
->cpu_subtype
= slot_subtype(master_id
);
174 if (*count
>= HOST_BASIC_INFO_COUNT
) {
175 basic_info
->cpu_threadtype
= slot_threadtype(master_id
);
176 basic_info
->physical_cpu
= machine_info
.physical_cpu
;
177 basic_info
->physical_cpu_max
= machine_info
.physical_cpu_max
;
178 basic_info
->logical_cpu
= machine_info
.logical_cpu
;
179 basic_info
->logical_cpu_max
= machine_info
.logical_cpu_max
;
180 basic_info
->max_mem
= machine_info
.max_mem
;
182 *count
= HOST_BASIC_INFO_COUNT
;
184 *count
= HOST_BASIC_INFO_OLD_COUNT
;
187 return (KERN_SUCCESS
);
190 case HOST_SCHED_INFO
: {
191 register host_sched_info_t sched_info
;
192 uint32_t quantum_time
;
196 * Return scheduler information.
198 if (*count
< HOST_SCHED_INFO_COUNT
)
199 return (KERN_FAILURE
);
201 sched_info
= (host_sched_info_t
)info
;
203 quantum_time
= SCHED(initial_quantum_size
)(THREAD_NULL
);
204 absolutetime_to_nanoseconds(quantum_time
, &quantum_ns
);
206 sched_info
->min_timeout
= sched_info
->min_quantum
= (uint32_t)(quantum_ns
/ 1000 / 1000);
208 *count
= HOST_SCHED_INFO_COUNT
;
210 return (KERN_SUCCESS
);
213 case HOST_RESOURCE_SIZES
: {
215 * Return sizes of kernel data structures
217 if (*count
< HOST_RESOURCE_SIZES_COUNT
)
218 return (KERN_FAILURE
);
220 /* XXX Fail until ledgers are implemented */
221 return (KERN_INVALID_ARGUMENT
);
224 case HOST_PRIORITY_INFO
: {
225 register host_priority_info_t priority_info
;
227 if (*count
< HOST_PRIORITY_INFO_COUNT
)
228 return (KERN_FAILURE
);
230 priority_info
= (host_priority_info_t
)info
;
232 priority_info
->kernel_priority
= MINPRI_KERNEL
;
233 priority_info
->system_priority
= MINPRI_KERNEL
;
234 priority_info
->server_priority
= MINPRI_RESERVED
;
235 priority_info
->user_priority
= BASEPRI_DEFAULT
;
236 priority_info
->depress_priority
= DEPRESSPRI
;
237 priority_info
->idle_priority
= IDLEPRI
;
238 priority_info
->minimum_priority
= MINPRI_USER
;
239 priority_info
->maximum_priority
= MAXPRI_RESERVED
;
241 *count
= HOST_PRIORITY_INFO_COUNT
;
243 return (KERN_SUCCESS
);
247 * Gestalt for various trap facilities.
249 case HOST_MACH_MSG_TRAP
:
250 case HOST_SEMAPHORE_TRAPS
: {
252 return (KERN_SUCCESS
);
255 case HOST_VM_PURGABLE
: {
256 if (*count
< HOST_VM_PURGABLE_COUNT
)
257 return (KERN_FAILURE
);
259 vm_purgeable_stats((vm_purgeable_info_t
)info
, NULL
);
261 *count
= HOST_VM_PURGABLE_COUNT
;
262 return (KERN_SUCCESS
);
265 case HOST_DEBUG_INFO_INTERNAL
: {
266 #if DEVELOPMENT || DEBUG
267 if (*count
< HOST_DEBUG_INFO_INTERNAL_COUNT
)
268 return (KERN_FAILURE
);
270 host_debug_info_internal_t debug_info
= (host_debug_info_internal_t
)info
;
271 bzero(debug_info
, sizeof(host_debug_info_internal_data_t
));
272 *count
= HOST_DEBUG_INFO_INTERNAL_COUNT
;
274 #if CONFIG_COALITIONS
275 debug_info
->config_coalitions
= 1;
278 debug_info
->config_bank
= 1;
281 debug_info
->config_atm
= 1;
284 debug_info
->config_csr
= 1;
286 return (KERN_SUCCESS
);
287 #else /* DEVELOPMENT || DEBUG */
288 return (KERN_NOT_SUPPORTED
);
292 default: return (KERN_INVALID_ARGUMENT
);
297 host_statistics(host_t host
, host_flavor_t flavor
, host_info_t info
, mach_msg_type_number_t
* count
)
301 if (host
== HOST_NULL
)
302 return (KERN_INVALID_HOST
);
305 case HOST_LOAD_INFO
: {
306 host_load_info_t load_info
;
308 if (*count
< HOST_LOAD_INFO_COUNT
)
309 return (KERN_FAILURE
);
311 load_info
= (host_load_info_t
)info
;
313 bcopy((char *)avenrun
, (char *)load_info
->avenrun
, sizeof avenrun
);
314 bcopy((char *)mach_factor
, (char *)load_info
->mach_factor
, sizeof mach_factor
);
316 *count
= HOST_LOAD_INFO_COUNT
;
317 return (KERN_SUCCESS
);
321 register processor_t processor
;
322 register vm_statistics64_t stat
;
323 vm_statistics64_data_t host_vm_stat
;
324 vm_statistics_t stat32
;
325 mach_msg_type_number_t original_count
;
327 if (*count
< HOST_VM_INFO_REV0_COUNT
)
328 return (KERN_FAILURE
);
330 processor
= processor_list
;
331 stat
= &PROCESSOR_DATA(processor
, vm_stat
);
332 host_vm_stat
= *stat
;
334 if (processor_count
> 1) {
335 simple_lock(&processor_list_lock
);
337 while ((processor
= processor
->processor_list
) != NULL
) {
338 stat
= &PROCESSOR_DATA(processor
, vm_stat
);
340 host_vm_stat
.zero_fill_count
+= stat
->zero_fill_count
;
341 host_vm_stat
.reactivations
+= stat
->reactivations
;
342 host_vm_stat
.pageins
+= stat
->pageins
;
343 host_vm_stat
.pageouts
+= stat
->pageouts
;
344 host_vm_stat
.faults
+= stat
->faults
;
345 host_vm_stat
.cow_faults
+= stat
->cow_faults
;
346 host_vm_stat
.lookups
+= stat
->lookups
;
347 host_vm_stat
.hits
+= stat
->hits
;
350 simple_unlock(&processor_list_lock
);
353 stat32
= (vm_statistics_t
)info
;
355 stat32
->free_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count
+ vm_page_speculative_count
);
356 stat32
->active_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count
);
358 if (vm_page_local_q
) {
359 for (i
= 0; i
< vm_page_local_q_count
; i
++) {
362 lq
= &vm_page_local_q
[i
].vpl_un
.vpl
;
364 stat32
->active_count
+= VM_STATISTICS_TRUNCATE_TO_32_BIT(lq
->vpl_count
);
367 stat32
->inactive_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count
);
368 stat32
->wire_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count
+ vm_page_throttled_count
+ vm_lopage_free_count
);
369 stat32
->zero_fill_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.zero_fill_count
);
370 stat32
->reactivations
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.reactivations
);
371 stat32
->pageins
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.pageins
);
372 stat32
->pageouts
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.pageouts
);
373 stat32
->faults
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.faults
);
374 stat32
->cow_faults
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.cow_faults
);
375 stat32
->lookups
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.lookups
);
376 stat32
->hits
= VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat
.hits
);
379 * Fill in extra info added in later revisions of the
380 * vm_statistics data structure. Fill in only what can fit
381 * in the data structure the caller gave us !
383 original_count
= *count
;
384 *count
= HOST_VM_INFO_REV0_COUNT
; /* rev0 already filled in */
385 if (original_count
>= HOST_VM_INFO_REV1_COUNT
) {
386 /* rev1 added "purgeable" info */
387 stat32
->purgeable_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count
);
388 stat32
->purges
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count
);
389 *count
= HOST_VM_INFO_REV1_COUNT
;
392 if (original_count
>= HOST_VM_INFO_REV2_COUNT
) {
393 /* rev2 added "speculative" info */
394 stat32
->speculative_count
= VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count
);
395 *count
= HOST_VM_INFO_REV2_COUNT
;
398 /* rev3 changed some of the fields to be 64-bit*/
400 return (KERN_SUCCESS
);
403 case HOST_CPU_LOAD_INFO
: {
404 register processor_t processor
;
405 host_cpu_load_info_t cpu_load_info
;
407 if (*count
< HOST_CPU_LOAD_INFO_COUNT
)
408 return (KERN_FAILURE
);
410 #define GET_TICKS_VALUE(state, ticks) \
411 MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
413 #define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
414 MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
417 cpu_load_info
= (host_cpu_load_info_t
)info
;
418 cpu_load_info
->cpu_ticks
[CPU_STATE_USER
] = 0;
419 cpu_load_info
->cpu_ticks
[CPU_STATE_SYSTEM
] = 0;
420 cpu_load_info
->cpu_ticks
[CPU_STATE_IDLE
] = 0;
421 cpu_load_info
->cpu_ticks
[CPU_STATE_NICE
] = 0;
423 simple_lock(&processor_list_lock
);
425 for (processor
= processor_list
; processor
!= NULL
; processor
= processor
->processor_list
) {
427 uint64_t idle_time_snapshot1
, idle_time_snapshot2
;
428 uint64_t idle_time_tstamp1
, idle_time_tstamp2
;
430 /* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */
432 GET_TICKS_VALUE_FROM_TIMER(processor
, CPU_STATE_USER
, user_state
);
433 if (precise_user_kernel_time
) {
434 GET_TICKS_VALUE_FROM_TIMER(processor
, CPU_STATE_SYSTEM
, system_state
);
436 /* system_state may represent either sys or user */
437 GET_TICKS_VALUE_FROM_TIMER(processor
, CPU_STATE_USER
, system_state
);
440 idle_state
= &PROCESSOR_DATA(processor
, idle_state
);
441 idle_time_snapshot1
= timer_grab(idle_state
);
442 idle_time_tstamp1
= idle_state
->tstamp
;
444 if (PROCESSOR_DATA(processor
, current_state
) != idle_state
) {
445 /* Processor is non-idle, so idle timer should be accurate */
446 GET_TICKS_VALUE_FROM_TIMER(processor
, CPU_STATE_IDLE
, idle_state
);
447 } else if ((idle_time_snapshot1
!= (idle_time_snapshot2
= timer_grab(idle_state
))) ||
448 (idle_time_tstamp1
!= (idle_time_tstamp2
= idle_state
->tstamp
))) {
449 /* Idle timer is being updated concurrently, second stamp is good enough */
450 GET_TICKS_VALUE(CPU_STATE_IDLE
, idle_time_snapshot2
);
453 * Idle timer may be very stale. Fortunately we have established
454 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
456 idle_time_snapshot1
+= mach_absolute_time() - idle_time_tstamp1
;
458 GET_TICKS_VALUE(CPU_STATE_IDLE
, idle_time_snapshot1
);
461 simple_unlock(&processor_list_lock
);
463 *count
= HOST_CPU_LOAD_INFO_COUNT
;
465 return (KERN_SUCCESS
);
468 case HOST_EXPIRED_TASK_INFO
: {
469 if (*count
< TASK_POWER_INFO_COUNT
) {
470 return (KERN_FAILURE
);
473 task_power_info_t tinfo
= (task_power_info_t
)info
;
475 tinfo
->task_interrupt_wakeups
= dead_task_statistics
.task_interrupt_wakeups
;
476 tinfo
->task_platform_idle_wakeups
= dead_task_statistics
.task_platform_idle_wakeups
;
478 tinfo
->task_timer_wakeups_bin_1
= dead_task_statistics
.task_timer_wakeups_bin_1
;
480 tinfo
->task_timer_wakeups_bin_2
= dead_task_statistics
.task_timer_wakeups_bin_2
;
482 tinfo
->total_user
= dead_task_statistics
.total_user_time
;
483 tinfo
->total_system
= dead_task_statistics
.total_system_time
;
485 return (KERN_SUCCESS
);
487 default: return (KERN_INVALID_ARGUMENT
);
491 extern uint32_t c_segment_pages_compressed
;
494 host_statistics64(host_t host
, host_flavor_t flavor
, host_info64_t info
, mach_msg_type_number_t
* count
)
498 if (host
== HOST_NULL
)
499 return (KERN_INVALID_HOST
);
502 case HOST_VM_INFO64
: /* We were asked to get vm_statistics64 */
504 register processor_t processor
;
505 register vm_statistics64_t stat
;
506 vm_statistics64_data_t host_vm_stat
;
507 mach_msg_type_number_t original_count
;
508 unsigned int local_q_internal_count
;
509 unsigned int local_q_external_count
;
511 if (*count
< HOST_VM_INFO64_REV0_COUNT
)
512 return (KERN_FAILURE
);
514 processor
= processor_list
;
515 stat
= &PROCESSOR_DATA(processor
, vm_stat
);
516 host_vm_stat
= *stat
;
518 if (processor_count
> 1) {
519 simple_lock(&processor_list_lock
);
521 while ((processor
= processor
->processor_list
) != NULL
) {
522 stat
= &PROCESSOR_DATA(processor
, vm_stat
);
524 host_vm_stat
.zero_fill_count
+= stat
->zero_fill_count
;
525 host_vm_stat
.reactivations
+= stat
->reactivations
;
526 host_vm_stat
.pageins
+= stat
->pageins
;
527 host_vm_stat
.pageouts
+= stat
->pageouts
;
528 host_vm_stat
.faults
+= stat
->faults
;
529 host_vm_stat
.cow_faults
+= stat
->cow_faults
;
530 host_vm_stat
.lookups
+= stat
->lookups
;
531 host_vm_stat
.hits
+= stat
->hits
;
532 host_vm_stat
.compressions
+= stat
->compressions
;
533 host_vm_stat
.decompressions
+= stat
->decompressions
;
534 host_vm_stat
.swapins
+= stat
->swapins
;
535 host_vm_stat
.swapouts
+= stat
->swapouts
;
538 simple_unlock(&processor_list_lock
);
541 stat
= (vm_statistics64_t
)info
;
543 stat
->free_count
= vm_page_free_count
+ vm_page_speculative_count
;
544 stat
->active_count
= vm_page_active_count
;
546 local_q_internal_count
= 0;
547 local_q_external_count
= 0;
548 if (vm_page_local_q
) {
549 for (i
= 0; i
< vm_page_local_q_count
; i
++) {
552 lq
= &vm_page_local_q
[i
].vpl_un
.vpl
;
554 stat
->active_count
+= lq
->vpl_count
;
555 local_q_internal_count
+= lq
->vpl_internal_count
;
556 local_q_external_count
+= lq
->vpl_external_count
;
559 stat
->inactive_count
= vm_page_inactive_count
;
560 stat
->wire_count
= vm_page_wire_count
+ vm_page_throttled_count
+ vm_lopage_free_count
;
561 stat
->zero_fill_count
= host_vm_stat
.zero_fill_count
;
562 stat
->reactivations
= host_vm_stat
.reactivations
;
563 stat
->pageins
= host_vm_stat
.pageins
;
564 stat
->pageouts
= host_vm_stat
.pageouts
;
565 stat
->faults
= host_vm_stat
.faults
;
566 stat
->cow_faults
= host_vm_stat
.cow_faults
;
567 stat
->lookups
= host_vm_stat
.lookups
;
568 stat
->hits
= host_vm_stat
.hits
;
570 stat
->purgeable_count
= vm_page_purgeable_count
;
571 stat
->purges
= vm_page_purged_count
;
573 stat
->speculative_count
= vm_page_speculative_count
;
576 * Fill in extra info added in later revisions of the
577 * vm_statistics data structure. Fill in only what can fit
578 * in the data structure the caller gave us !
580 original_count
= *count
;
581 *count
= HOST_VM_INFO64_REV0_COUNT
; /* rev0 already filled in */
582 if (original_count
>= HOST_VM_INFO64_REV1_COUNT
) {
583 /* rev1 added "throttled count" */
584 stat
->throttled_count
= vm_page_throttled_count
;
585 /* rev1 added "compression" info */
586 stat
->compressor_page_count
= VM_PAGE_COMPRESSOR_COUNT
;
587 stat
->compressions
= host_vm_stat
.compressions
;
588 stat
->decompressions
= host_vm_stat
.decompressions
;
589 stat
->swapins
= host_vm_stat
.swapins
;
590 stat
->swapouts
= host_vm_stat
.swapouts
;
592 * "external page count"
593 * "anonymous page count"
594 * "total # of pages (uncompressed) held in the compressor"
596 stat
->external_page_count
= (vm_page_pageable_external_count
+ local_q_external_count
);
597 stat
->internal_page_count
= (vm_page_pageable_internal_count
+ local_q_internal_count
);
598 stat
->total_uncompressed_pages_in_compressor
= c_segment_pages_compressed
;
599 *count
= HOST_VM_INFO64_REV1_COUNT
;
602 return (KERN_SUCCESS
);
605 case HOST_EXTMOD_INFO64
: /* We were asked to get vm_statistics64 */
607 vm_extmod_statistics_t out_extmod_statistics
;
609 if (*count
< HOST_EXTMOD_INFO64_COUNT
)
610 return (KERN_FAILURE
);
612 out_extmod_statistics
= (vm_extmod_statistics_t
)info
;
613 *out_extmod_statistics
= host_extmod_statistics
;
615 *count
= HOST_EXTMOD_INFO64_COUNT
;
617 return (KERN_SUCCESS
);
620 default: /* If we didn't recognize the flavor, send to host_statistics */
621 return (host_statistics(host
, flavor
, (host_info_t
)info
, count
));
626 * Get host statistics that require privilege.
627 * None for now, just call the un-privileged version.
630 host_priv_statistics(host_priv_t host_priv
, host_flavor_t flavor
, host_info_t info
, mach_msg_type_number_t
* count
)
632 return (host_statistics((host_t
)host_priv
, flavor
, info
, count
));
636 set_sched_stats_active(boolean_t active
)
638 sched_stats_active
= active
;
639 return (KERN_SUCCESS
);
643 get_sched_statistics(struct _processor_statistics_np
* out
, uint32_t * count
)
645 processor_t processor
;
647 if (!sched_stats_active
) {
648 return (KERN_FAILURE
);
651 simple_lock(&processor_list_lock
);
653 if (*count
< (processor_count
+ 1) * sizeof(struct _processor_statistics_np
)) { /* One for RT */
654 simple_unlock(&processor_list_lock
);
655 return (KERN_FAILURE
);
658 processor
= processor_list
;
660 struct processor_sched_statistics
* stats
= &processor
->processor_data
.sched_stats
;
662 out
->ps_cpuid
= processor
->cpu_id
;
663 out
->ps_csw_count
= stats
->csw_count
;
664 out
->ps_preempt_count
= stats
->preempt_count
;
665 out
->ps_preempted_rt_count
= stats
->preempted_rt_count
;
666 out
->ps_preempted_by_rt_count
= stats
->preempted_by_rt_count
;
667 out
->ps_rt_sched_count
= stats
->rt_sched_count
;
668 out
->ps_interrupt_count
= stats
->interrupt_count
;
669 out
->ps_ipi_count
= stats
->ipi_count
;
670 out
->ps_timer_pop_count
= stats
->timer_pop_count
;
671 out
->ps_runq_count_sum
= SCHED(processor_runq_stats_count_sum
)(processor
);
672 out
->ps_idle_transitions
= stats
->idle_transitions
;
673 out
->ps_quantum_timer_expirations
= stats
->quantum_timer_expirations
;
676 processor
= processor
->processor_list
;
679 *count
= (uint32_t)(processor_count
* sizeof(struct _processor_statistics_np
));
681 simple_unlock(&processor_list_lock
);
683 /* And include RT Queue information */
684 bzero(out
, sizeof(*out
));
685 out
->ps_cpuid
= (-1);
686 out
->ps_runq_count_sum
= rt_runq
.runq_stats
.count_sum
;
688 *count
+= (uint32_t)sizeof(struct _processor_statistics_np
);
690 return (KERN_SUCCESS
);
694 host_page_size(host_t host
, vm_size_t
* out_page_size
)
696 if (host
== HOST_NULL
)
697 return (KERN_INVALID_ARGUMENT
);
699 *out_page_size
= PAGE_SIZE
;
701 return (KERN_SUCCESS
);
/*
 *	Return kernel version string (more than you ever
 *	wanted to know about what version of the kernel this is).
 */
extern char	version[];
711 host_kernel_version(host_t host
, kernel_version_t out_version
)
713 if (host
== HOST_NULL
)
714 return (KERN_INVALID_ARGUMENT
);
716 (void)strncpy(out_version
, version
, sizeof(kernel_version_t
));
718 return (KERN_SUCCESS
);
722 * host_processor_sets:
724 * List all processor sets on the host.
727 host_processor_sets(host_priv_t host_priv
, processor_set_name_array_t
* pset_list
, mach_msg_type_number_t
* count
)
731 if (host_priv
== HOST_PRIV_NULL
)
732 return (KERN_INVALID_ARGUMENT
);
735 * Allocate memory. Can be pageable because it won't be
736 * touched while holding a lock.
739 addr
= kalloc((vm_size_t
)sizeof(mach_port_t
));
741 return (KERN_RESOURCE_SHORTAGE
);
743 /* do the conversion that Mig should handle */
744 *((ipc_port_t
*)addr
) = convert_pset_name_to_port(&pset0
);
746 *pset_list
= (processor_set_array_t
)addr
;
749 return (KERN_SUCCESS
);
753 * host_processor_set_priv:
755 * Return control port for given processor set.
758 host_processor_set_priv(host_priv_t host_priv
, processor_set_t pset_name
, processor_set_t
* pset
)
760 if (host_priv
== HOST_PRIV_NULL
|| pset_name
== PROCESSOR_SET_NULL
) {
761 *pset
= PROCESSOR_SET_NULL
;
763 return (KERN_INVALID_ARGUMENT
);
768 return (KERN_SUCCESS
);
772 * host_processor_info
774 * Return info about the processors on this host. It will return
775 * the number of processors, and the specific type of info requested
779 host_processor_info(host_t host
,
780 processor_flavor_t flavor
,
781 natural_t
* out_pcount
,
782 processor_info_array_t
* out_array
,
783 mach_msg_type_number_t
* out_array_count
)
785 kern_return_t result
;
786 processor_t processor
;
788 processor_info_t info
;
789 unsigned int icount
, tcount
;
790 unsigned int pcount
, i
;
792 vm_size_t size
, needed
;
795 if (host
== HOST_NULL
)
796 return (KERN_INVALID_ARGUMENT
);
798 result
= processor_info_count(flavor
, &icount
);
799 if (result
!= KERN_SUCCESS
)
802 pcount
= processor_count
;
805 needed
= pcount
* icount
* sizeof(natural_t
);
806 size
= vm_map_round_page(needed
, VM_MAP_PAGE_MASK(ipc_kernel_map
));
807 result
= kmem_alloc(ipc_kernel_map
, &addr
, size
, VM_KERN_MEMORY_IPC
);
808 if (result
!= KERN_SUCCESS
)
809 return (KERN_RESOURCE_SHORTAGE
);
811 info
= (processor_info_t
)addr
;
812 processor
= processor_list
;
815 result
= processor_info(processor
, flavor
, &thost
, info
, &tcount
);
816 if (result
!= KERN_SUCCESS
) {
817 kmem_free(ipc_kernel_map
, addr
, size
);
822 for (i
= 1; i
< pcount
; i
++) {
823 simple_lock(&processor_list_lock
);
824 processor
= processor
->processor_list
;
825 simple_unlock(&processor_list_lock
);
829 result
= processor_info(processor
, flavor
, &thost
, info
, &tcount
);
830 if (result
!= KERN_SUCCESS
) {
831 kmem_free(ipc_kernel_map
, addr
, size
);
838 bzero((char *)addr
+ needed
, size
- needed
);
840 result
= vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
, VM_MAP_PAGE_MASK(ipc_kernel_map
)),
841 vm_map_round_page(addr
+ size
, VM_MAP_PAGE_MASK(ipc_kernel_map
)), FALSE
);
842 assert(result
== KERN_SUCCESS
);
843 result
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
, (vm_map_size_t
)needed
, TRUE
, ©
);
844 assert(result
== KERN_SUCCESS
);
846 *out_pcount
= pcount
;
847 *out_array
= (processor_info_array_t
)copy
;
848 *out_array_count
= pcount
* icount
;
850 return (KERN_SUCCESS
);
854 * Kernel interface for setting a special port.
857 kernel_set_special_port(host_priv_t host_priv
, int id
, ipc_port_t port
)
861 host_lock(host_priv
);
862 old_port
= host_priv
->special
[id
];
863 host_priv
->special
[id
] = port
;
864 host_unlock(host_priv
);
865 if (IP_VALID(old_port
))
866 ipc_port_release_send(old_port
);
867 return (KERN_SUCCESS
);
871 * User interface for setting a special port.
873 * Only permits the user to set a user-owned special port
874 * ID, rejecting a kernel-owned special port ID.
876 * A special kernel port cannot be set up using this
877 * routine; use kernel_set_special_port() instead.
880 host_set_special_port(host_priv_t host_priv
, int id
, ipc_port_t port
)
882 if (host_priv
== HOST_PRIV_NULL
|| id
<= HOST_MAX_SPECIAL_KERNEL_PORT
|| id
> HOST_MAX_SPECIAL_PORT
)
883 return (KERN_INVALID_ARGUMENT
);
886 if (mac_task_check_set_host_special_port(current_task(), id
, port
) != 0)
887 return (KERN_NO_ACCESS
);
890 return (kernel_set_special_port(host_priv
, id
, port
));
894 * User interface for retrieving a special port.
896 * Note that there is nothing to prevent a user special
897 * port from disappearing after it has been discovered by
898 * the caller; thus, using a special port can always result
899 * in a "port not valid" error.
903 host_get_special_port(host_priv_t host_priv
, __unused
int node
, int id
, ipc_port_t
* portp
)
907 if (host_priv
== HOST_PRIV_NULL
|| id
== HOST_SECURITY_PORT
|| id
> HOST_MAX_SPECIAL_PORT
|| id
< 0)
908 return (KERN_INVALID_ARGUMENT
);
910 host_lock(host_priv
);
911 port
= realhost
.special
[id
];
912 *portp
= ipc_port_copy_send(port
);
913 host_unlock(host_priv
);
915 return (KERN_SUCCESS
);
921 * Return the IO master access port for this host.
924 host_get_io_master(host_t host
, io_master_t
* io_masterp
)
926 if (host
== HOST_NULL
)
927 return (KERN_INVALID_ARGUMENT
);
929 return (host_get_io_master_port(host_priv_self(), io_masterp
));
945 host_security_self(void)
951 host_set_atm_diagnostic_flag(host_priv_t host_priv
, uint32_t diagnostic_flag
)
953 if (host_priv
== HOST_PRIV_NULL
)
954 return (KERN_INVALID_ARGUMENT
);
956 assert(host_priv
== &realhost
);
959 return (atm_set_diagnostic_config(diagnostic_flag
));
961 (void)diagnostic_flag
;
962 return (KERN_NOT_SUPPORTED
);