/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Non-ipc host functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h> // mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>
host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;
kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	processor_t processor, *tp;
	void * addr;
	unsigned int count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *)addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that Mig should handle */
	tp = (processor_t *)addr;
	for (i = 0; i < count; i++)
		((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
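/*
 * Usage sketch (user space, not part of this file; assumes the caller has
 * already obtained the privileged host port, e.g. as root):
 *
 *	processor_array_t procs;
 *	mach_msg_type_number_t count;
 *	kern_return_t kr = host_processors(host_priv, &procs, &count);
 *
 * On success the caller receives one processor send right per CPU plus an
 * out-of-line array to deallocate when done.
 */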
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}
	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures.
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}
	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return (KERN_SUCCESS);
	}
	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT)
			return (KERN_FAILURE);

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return (KERN_SUCCESS);
	}
	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT)
			return (KERN_FAILURE);

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return (KERN_SUCCESS);
	}
	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT)
			return (KERN_FAILURE);

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return (KERN_SUCCESS);
#else /* DEVELOPMENT || DEBUG */
		return (KERN_NOT_SUPPORTED);
#endif
	}

	default: return (KERN_INVALID_ARGUMENT);
	}
}
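/*
 * Usage sketch (user space, not part of this file): HOST_BASIC_INFO is
 * queried through the MIG stub with an in/out count, e.g.:
 *
 *	host_basic_info_data_t basic;
 *	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *	kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *	                             (host_info_t)&basic, &count);
 *
 * A caller passing the shorter HOST_BASIC_INFO_OLD_COUNT still succeeds
 * and gets only the old fields back; this in/out count convention is how
 * every flavor above stays compatible with older clients.
 */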
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}
	case HOST_VM_INFO: {
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}
	case HOST_CPU_LOAD_INFO: {
		processor_t processor;
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (PROCESSOR_DATA(processor, current_state) != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			           (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return (KERN_SUCCESS);
	}

	default: return (KERN_INVALID_ARGUMENT);
	}
}
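/*
 * Usage sketch (user space, not part of this file): HOST_CPU_LOAD_INFO is
 * the basis for most "CPU %" meters; two snapshots are differenced per
 * state, since the tick counters are cumulative:
 *
 *	host_cpu_load_info_data_t a, b;
 *	mach_msg_type_number_t cnt = HOST_CPU_LOAD_INFO_COUNT;
 *	host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (host_info_t)&a, &cnt);
 *	sleep(1);
 *	cnt = HOST_CPU_LOAD_INFO_COUNT;
 *	host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (host_info_t)&b, &cnt);
 *	uint32_t user = b.cpu_ticks[CPU_STATE_USER] - a.cpu_ticks[CPU_STATE_USER];
 */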
extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static lck_mtx_t host_statistics_lck;
static lck_grp_t * host_statistics_lck_grp;

#define HOST_VM_INFO64_REV0		0
#define HOST_VM_INFO64_REV1		1
#define HOST_EXTMOD_INFO64_REV0		2
#define HOST_LOAD_INFO_REV0		3
#define HOST_VM_INFO_REV0		4
#define HOST_VM_INFO_REV1		5
#define HOST_VM_INFO_REV2		6
#define HOST_CPU_LOAD_INFO_REV0		7
#define HOST_EXPIRED_TASK_INFO_REV0	8
#define HOST_EXPIRED_TASK_INFO_REV1	9
#define NUM_HOST_INFO_DATA_TYPES	10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};
struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; // NOTE: count is in sizeof(integer_t)
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};
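/*
 * One cache slot exists per (flavor, revision) pair, each sized by its
 * revision's count, so a caller asking for an older, shorter revision is
 * served exactly that revision's fields and never the tail of a longer
 * variant of the same flavor.
 */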
void
host_statistics_init(void)
{
	host_statistics_lck_grp = lck_grp_alloc_init("host_statistics", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&host_statistics_lck, host_statistics_lck_grp, LCK_ATTR_NULL);
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}
static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES)
		return;

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM)
		return;

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}
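/*
 * Note: results produced for platform binaries (TF_PLATFORM) are never
 * written back to the cache, mirroring the access check in
 * rate_limit_host_statistics() below; only third party callers are rate
 * limited, so only their view needs a cached copy.
 */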
static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t * count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}
static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t * count, kern_return_t * ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}
static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count, kern_return_t * ret, int * pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1)
		goto out;

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random number of requests (between HOST_STATISTICS_MIN_REQUESTS
		 * and HOST_STATISTICS_MAX_REQUESTS) to let through to host_statistics.
		 * This way it is not possible to infer, by watching when the cached copy
		 * changes, whether host_statistics was called during the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
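/*
 * Worked example of the window logic above (numbers assumed for
 * illustration): with HOST_STATISTICS_TIME_WINDOW = 1 second and a drawn
 * max_requests of 7, a third party task gets up to 7 fresh snapshots of a
 * given flavor within the window; subsequent calls are answered from
 * g_host_stats_cache until the window expires. Because max_requests is
 * re-drawn per window from [HOST_STATISTICS_MIN_REQUESTS,
 * HOST_STATISTICS_MAX_REQUESTS], the exact cutoff cannot be used to infer
 * whether other windows saw traffic.
 */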
kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
: /* We were asked to get vm_statistics64 */
754 processor_t processor
;
755 vm_statistics64_t stat
;
756 vm_statistics64_data_t host_vm_stat
;
757 mach_msg_type_number_t original_count
;
758 unsigned int local_q_internal_count
;
759 unsigned int local_q_external_count
;
761 if (*count
< HOST_VM_INFO64_REV0_COUNT
)
762 return (KERN_FAILURE
);
764 processor
= processor_list
;
765 stat
= &PROCESSOR_DATA(processor
, vm_stat
);
766 host_vm_stat
= *stat
;
768 if (processor_count
> 1) {
769 simple_lock(&processor_list_lock
);
771 while ((processor
= processor
->processor_list
) != NULL
) {
772 stat
= &PROCESSOR_DATA(processor
, vm_stat
);
774 host_vm_stat
.zero_fill_count
+= stat
->zero_fill_count
;
775 host_vm_stat
.reactivations
+= stat
->reactivations
;
776 host_vm_stat
.pageins
+= stat
->pageins
;
777 host_vm_stat
.pageouts
+= stat
->pageouts
;
778 host_vm_stat
.faults
+= stat
->faults
;
779 host_vm_stat
.cow_faults
+= stat
->cow_faults
;
780 host_vm_stat
.lookups
+= stat
->lookups
;
781 host_vm_stat
.hits
+= stat
->hits
;
782 host_vm_stat
.compressions
+= stat
->compressions
;
783 host_vm_stat
.decompressions
+= stat
->decompressions
;
784 host_vm_stat
.swapins
+= stat
->swapins
;
785 host_vm_stat
.swapouts
+= stat
->swapouts
;
788 simple_unlock(&processor_list_lock
);
791 stat
= (vm_statistics64_t
)info
;
793 stat
->free_count
= vm_page_free_count
+ vm_page_speculative_count
;
794 stat
->active_count
= vm_page_active_count
;
796 local_q_internal_count
= 0;
797 local_q_external_count
= 0;
798 if (vm_page_local_q
) {
799 for (i
= 0; i
< vm_page_local_q_count
; i
++) {
802 lq
= &vm_page_local_q
[i
].vpl_un
.vpl
;
804 stat
->active_count
+= lq
->vpl_count
;
805 local_q_internal_count
+= lq
->vpl_internal_count
;
806 local_q_external_count
+= lq
->vpl_external_count
;
809 stat
->inactive_count
= vm_page_inactive_count
;
811 stat
->wire_count
= vm_page_wire_count
;
813 stat
->wire_count
= vm_page_wire_count
+ vm_page_throttled_count
+ vm_lopage_free_count
;
815 stat
->zero_fill_count
= host_vm_stat
.zero_fill_count
;
816 stat
->reactivations
= host_vm_stat
.reactivations
;
817 stat
->pageins
= host_vm_stat
.pageins
;
818 stat
->pageouts
= host_vm_stat
.pageouts
;
819 stat
->faults
= host_vm_stat
.faults
;
820 stat
->cow_faults
= host_vm_stat
.cow_faults
;
821 stat
->lookups
= host_vm_stat
.lookups
;
822 stat
->hits
= host_vm_stat
.hits
;
824 stat
->purgeable_count
= vm_page_purgeable_count
;
825 stat
->purges
= vm_page_purged_count
;
827 stat
->speculative_count
= vm_page_speculative_count
;
830 * Fill in extra info added in later revisions of the
831 * vm_statistics data structure. Fill in only what can fit
832 * in the data structure the caller gave us !
834 original_count
= *count
;
835 *count
= HOST_VM_INFO64_REV0_COUNT
; /* rev0 already filled in */
836 if (original_count
>= HOST_VM_INFO64_REV1_COUNT
) {
837 /* rev1 added "throttled count" */
838 stat
->throttled_count
= vm_page_throttled_count
;
839 /* rev1 added "compression" info */
840 stat
->compressor_page_count
= VM_PAGE_COMPRESSOR_COUNT
;
841 stat
->compressions
= host_vm_stat
.compressions
;
842 stat
->decompressions
= host_vm_stat
.decompressions
;
843 stat
->swapins
= host_vm_stat
.swapins
;
844 stat
->swapouts
= host_vm_stat
.swapouts
;
846 * "external page count"
847 * "anonymous page count"
848 * "total # of pages (uncompressed) held in the compressor"
850 stat
->external_page_count
= (vm_page_pageable_external_count
+ local_q_external_count
);
851 stat
->internal_page_count
= (vm_page_pageable_internal_count
+ local_q_internal_count
);
852 stat
->total_uncompressed_pages_in_compressor
= c_segment_pages_compressed
;
853 *count
= HOST_VM_INFO64_REV1_COUNT
;
856 return (KERN_SUCCESS
);
	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT)
			return (KERN_FAILURE);

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return (KERN_SUCCESS);
	}
	default: /* If we didn't recognize the flavor, send to host_statistics */
		return (host_statistics(host, flavor, (host_info_t)info, count));
	}
}
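/*
 * Usage sketch (user space, not part of this file): these 64-bit counters
 * are what vm_stat(1)-style tools report:
 *
 *	vm_statistics64_data_t vmstat;
 *	mach_msg_type_number_t cnt = HOST_VM_INFO64_COUNT;
 *	kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	                                     (host_info64_t)&vmstat, &cnt);
 *
 * Passing the rev0 count instead yields only the rev0 fields, mirroring
 * the revisioning scheme in the 32-bit path.
 */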
kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index))
		return ret;

	if (ret != KERN_SUCCESS)
		return ret;

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS)
		cache_host_statistics(index, info);

	return ret;
}
kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index))
		return ret;

	if (ret != KERN_SUCCESS)
		return ret;

	ret = host_statistics(host, flavor, (host_info_t)info, count);

	if (ret == KERN_SUCCESS)
		cache_host_statistics(index, info);

	return ret;
}
/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return (host_statistics((host_t)host_priv, flavor, info, count));
}
kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return (KERN_SUCCESS);
}
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return (KERN_FAILURE);
	}

	simple_lock(&processor_list_lock);

	if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
		simple_unlock(&processor_list_lock);
		return (KERN_FAILURE);
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats->csw_count;
		out->ps_preempt_count = stats->preempt_count;
		out->ps_preempted_rt_count = stats->preempted_rt_count;
		out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
		out->ps_rt_sched_count = stats->rt_sched_count;
		out->ps_interrupt_count = stats->interrupt_count;
		out->ps_ipi_count = stats->ipi_count;
		out->ps_timer_pop_count = stats->timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats->idle_transitions;
		out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return (KERN_SUCCESS);
}
kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	*out_page_size = PAGE_SIZE;

	return (KERN_SUCCESS);
}
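/*
 * Usage sketch (user space, not part of this file):
 *
 *	vm_size_t page_size;
 *	host_page_size(mach_host_self(), &page_size);
 *
 * This reports the host VM page size and is the portable alternative to
 * hard-coding PAGE_SIZE in clients, which may be built for a different
 * page geometry than the running kernel.
 */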
/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return (KERN_SUCCESS);
}
/*
 *	host_processor_sets:
 *
 *	List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void * addr;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 *	Allocate memory.  Can be pageable because it won't be
	 *	touched while holding a lock.
	 */
	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* do the conversion that Mig should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return (KERN_SUCCESS);
}
/*
 *	host_processor_set_priv:
 *
 *	Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return (KERN_INVALID_ARGUMENT);
	}

	*pset = pset_name;

	return (KERN_SUCCESS);
}
/*
 *	host_processor_info
 *
 *	Return info about the processors on this host.  It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
                    processor_flavor_t flavor,
                    natural_t * out_pcount,
                    processor_info_array_t * out_array,
                    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	processor_t processor;
	host_t thost;
	processor_info_t info;
	unsigned int icount, tcount;
	unsigned int pcount, i;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t)addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed)
		bzero((char *)addr + needed, size - needed);

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	                       vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
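/*
 * Usage sketch (user space, not part of this file): per-CPU tick counts,
 * as consumed by top-like tools:
 *
 *	natural_t pcount;
 *	processor_info_array_t info;
 *	mach_msg_type_number_t icount;
 *	kern_return_t kr = host_processor_info(mach_host_self(),
 *	    PROCESSOR_CPU_LOAD_INFO, &pcount, &info, &icount);
 *	...
 *	vm_deallocate(mach_task_self(), (vm_address_t)info,
 *	    icount * sizeof(natural_t));
 *
 * The vm_map_copyin() above is what makes the reply arrive out-of-line,
 * hence the vm_deallocate() on the consumer side.
 */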
/*
 *	Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT)
		return (KERN_NOT_SUPPORTED);
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT)
		mach_node_port_changed();
#endif

	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return (KERN_SUCCESS);
}
/*
 *	Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return (KERN_SUCCESS);
}
/*
 *	User interface for setting a special port.
 *
 *	Only permits the user to set a user-owned special port
 *	ID, rejecting a kernel-owned special port ID.
 *
 *	A special kernel port cannot be set up using this
 *	routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT)
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0)
		return (KERN_NO_ACCESS);
#endif

	return (kernel_set_special_port(host_priv, id, port));
}
/*
 *	User interface for retrieving a special port.
 *
 *	Note that there is nothing to prevent a user special
 *	port from disappearing after it has been discovered by
 *	the caller; thus, using a special port can always result
 *	in a "port not valid" error.
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
		return (KERN_INVALID_ARGUMENT);

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return (KERN_SUCCESS);
}
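/*
 * Usage sketch (user space, not part of this file; HOST_AUTOMOUNTD_PORT is
 * just one example of a user-owned id above HOST_MAX_SPECIAL_KERNEL_PORT):
 *
 *	// publisher, holding the privileged host port
 *	host_set_special_port(host_priv, HOST_AUTOMOUNTD_PORT, my_port);
 *
 *	// consumer
 *	mach_port_t port;
 *	host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *	                      HOST_AUTOMOUNTD_PORT, &port);
 *
 * As the comment above notes, the returned send right can go stale at any
 * time, so consumers must tolerate "port not valid" failures.
 */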
/*
 *	host_get_io_master
 *
 *	Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}
host_priv_t
host_priv_self(void)
{
	return (&realhost);
}

host_security_t
host_security_self(void)
{
	return (&realhost);
}
kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

#if CONFIG_ATM
	return (atm_set_diagnostic_config(diagnostic_flag));
#else
	(void)diagnostic_flag;
	return (KERN_NOT_SUPPORTED);
#endif
}
kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if CONFIG_EMBEDDED
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return (KERN_SUCCESS);
#else
	(void)multiuser_config;
	return (KERN_NOT_SUPPORTED);
#endif
}