/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>     // mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#include <IOKit/IOBSD.h>        // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h>    // DriverKit entitlement strings

#include <atm/atm_internal.h>

#include <security/mac_mach_internal.h>

#include <pexpert/pexpert.h>

SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count);    /* # of zero fill pages */
SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations);      /* # of pages reactivated */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageins);            /* # of pageins */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts);           /* # of pageouts */
SCALABLE_COUNTER_DEFINE(vm_statistics_faults);             /* # of faults */
SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults);         /* # of copy-on-writes */
SCALABLE_COUNTER_DEFINE(vm_statistics_lookups);            /* object cache lookups */
SCALABLE_COUNTER_DEFINE(vm_statistics_hits);               /* object cache hits */
SCALABLE_COUNTER_DEFINE(vm_statistics_purges);             /* # of pages purged */
SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions);     /* # of pages decompressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_compressions);       /* # of pages compressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapins);            /* # of pages swapped in (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts);           /* # of pages swapped out (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor */
SCALABLE_COUNTER_DEFINE(vm_page_grab_count);

host_data_t realhost;
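
/*
 * Snapshot the per-CPU scalable VM counters into the caller-provided
 * vm_statistics64 structure.
 */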
static void
get_host_vm_stats(vm_statistics64_t out)
{
	out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
	out->reactivations = counter_load(&vm_statistics_reactivations);
	out->pageins = counter_load(&vm_statistics_pageins);
	out->pageouts = counter_load(&vm_statistics_pageouts);
	out->faults = counter_load(&vm_statistics_faults);
	out->cow_faults = counter_load(&vm_statistics_cow_faults);
	out->lookups = counter_load(&vm_statistics_lookups);
	out->hits = counter_load(&vm_statistics_hits);
	out->compressions = counter_load(&vm_statistics_compressions);
	out->decompressions = counter_load(&vm_statistics_decompressions);
	out->swapins = counter_load(&vm_statistics_swapins);
	out->swapouts = counter_load(&vm_statistics_swapouts);
}
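
/*
 * Illustrative sketch (not part of this file): these counters surface to
 * user space through host_statistics64(). Assuming only the public Mach
 * headers, a typical caller looks roughly like:
 *
 *	#include <mach/mach.h>
 *
 *	vm_statistics64_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *	kern_return_t kr = host_statistics64(mach_host_self(),
 *	    HOST_VM_INFO64, (host_info64_t)&vmstat, &count);
 *	if (kr == KERN_SUCCESS) {
 *		// vmstat.faults, vmstat.pageins, etc. are now populated
 *	}
 */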

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	unsigned int count = processor_count;
	assert(count != 0);

	static_assert(sizeof(mach_port_t) == sizeof(processor_t));

	mach_port_t * ports = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (!ports) {
		return KERN_RESOURCE_SHORTAGE;
	}

	for (unsigned int i = 0; i < count; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		/* do the conversion that Mig should handle */
		ipc_port_t processor_port = convert_processor_to_port(processor);
		ports[i] = processor_port;
	}

	*countp = count;
	*out_array = (processor_array_t)ports;

	return KERN_SUCCESS;
}

extern int sched_allow_NO_SMT_threads;
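
/*
 * host_info: return basic machine, scheduler, priority, and related
 * information about this host, selected by flavor. The caller passes a
 * buffer and its size in integer_t units; *count is updated to reflect
 * what was actually filled in.
 */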
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;

			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 *	Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
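
/*
 * Illustrative sketch (not part of this file): querying HOST_BASIC_INFO
 * from user space with the public Mach API, assuming <mach/mach.h>:
 *
 *	host_basic_info_data_t basic;
 *	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *	if (host_info(mach_host_self(), HOST_BASIC_INFO,
 *	    (host_info_t)&basic, &count) == KERN_SUCCESS) {
 *		// basic.max_cpus, basic.memory_size, ... are valid here
 *	}
 */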

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		get_host_vm_stats(&host_vm_stat);

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !XNU_TARGET_OS_OSX
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else /* !XNU_TARGET_OS_OSX */
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif /* !XNU_TARGET_OS_OSX */
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;

		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);

			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &processor->idle_state;
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (processor->current_state != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */
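
/*
 * Third-party (non-TF_PLATFORM) callers of host_statistics() and
 * host_statistics64() are rate limited: only a randomized number of
 * requests, between HOST_STATISTICS_MIN_REQUESTS and
 * HOST_STATISTICS_MAX_REQUESTS, are serviced per
 * HOST_STATISTICS_TIME_WINDOW; excess requests are answered from the
 * per-flavor caches declared below.
 */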

uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

#define HOST_VM_INFO64_REV0             0
#define HOST_VM_INFO64_REV1             1
#define HOST_EXTMOD_INFO64_REV0         2
#define HOST_LOAD_INFO_REV0             3
#define HOST_VM_INFO_REV0               4
#define HOST_VM_INFO_REV1               5
#define HOST_VM_INFO_REV2               6
#define HOST_CPU_LOAD_INFO_REV0         7
#define HOST_EXPIRED_TASK_INFO_REV0     8
#define HOST_EXPIRED_TASK_INFO_REV1     9
#define NUM_HOST_INFO_DATA_TYPES        10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; /* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};
static void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		return;
	}

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM) {
		return;
	}

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t * count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t * count, kern_return_t * ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count, kern_return_t * ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random number of requests (between HOST_STATISTICS_MIN_REQUESTS
		 * and HOST_STATISTICS_MAX_REQUESTS) to let through to host_statistics.
		 * This way it is not possible to infer, by watching when the cached copy
		 * changes, whether host_statistics was called during the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
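
/*
 * Worked example: with HOST_STATISTICS_MIN_REQUESTS = 2 and
 * HOST_STATISTICS_MAX_REQUESTS = 10, the modulus is (10 - 2 + 1) = 9, so
 * max_requests = (mach_absolute_time() % 9) + 2, i.e. a value in [2, 10].
 */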

static kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}

	get_host_vm_stats(&host_vm_stat);

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + vm_page_speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if !XNU_TARGET_OS_OSX
	stat->wire_count = vm_page_wire_count;
#else /* !XNU_TARGET_OS_OSX */
	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif /* !XNU_TARGET_OS_OSX */
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = vm_page_speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure. Fill in only what can fit
	 * in the data structure the caller gave us!
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = vm_page_throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/*
		 * rev1 added:
		 * "external page count"
		 * "anonymous page count"
		 * "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}
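
/*
 * Illustrative sketch (not part of this file): user-space consumers
 * commonly convert these page counts into byte totals, e.g., assuming
 * vmstat came back from HOST_VM_INFO64 and page_size from
 * host_page_size():
 *
 *	uint64_t wired_bytes = (uint64_t)vmstat.wire_count * page_size;
 *	uint64_t compressed_bytes =
 *	    (uint64_t)vmstat.compressor_page_count * page_size;
 */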

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		return vm_stats(info, count);

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			return KERN_FAILURE;
		}

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return KERN_SUCCESS;
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return host_statistics(host, flavor, (host_info_t)info, count);
	}
}

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}
, host_flavor_t flavor
, host_info64_t info
, mach_msg_type_number_t
* count
)
941 kern_return_t ret
= KERN_SUCCESS
;
944 if (host
== HOST_NULL
) {
945 return KERN_INVALID_HOST
;
948 if (rate_limit_host_statistics(FALSE
, flavor
, info
, count
, &ret
, &index
)) {
952 if (ret
!= KERN_SUCCESS
) {
956 ret
= host_statistics(host
, flavor
, info
, count
);
958 if (ret
== KERN_SUCCESS
) {
959 cache_host_statistics(index
, info
);

/*
 *	Get host statistics that require privilege.
 *	None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}

kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();

	*count = pos;

	return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}
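
/*
 * Illustrative sketch (not part of this file): from user space the same
 * query is typically made as
 *
 *	vm_size_t page_size;
 *	host_page_size(mach_host_self(), &page_size);
 */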

/*
 *	Return kernel version string (more than you ever
 *	wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return KERN_SUCCESS;
}

/*
 *	host_processor_sets:
 *
 *	List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void *addr;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Allocate memory. Can be pageable because it won't be
	 *	touched while holding a lock.
	 */
	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* do the conversion that Mig should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return KERN_SUCCESS;
}

/*
 *	host_processor_set_priv:
 *
 *	Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return KERN_INVALID_ARGUMENT;
	}

	*pset = pset_name;

	return KERN_SUCCESS;
}

/*
 *	host_processor_info
 *
 *	Return info about the processors on this host. It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
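
/*
 * A host special port id is valid when it lies within
 * [HOST_MIN_SPECIAL_PORT, HOST_MAX_SPECIAL_PORT] and is either a defined
 * kernel-owned port or falls above the reserved kernel range.
 */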
static boolean_t
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

/*
 *	Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) {
		host_unlock(host_priv);
		return KERN_NO_ACCESS;
	}
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}
	return KERN_SUCCESS;
}

/*
 *	Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id)) {
		panic("attempted to get invalid special port %d", id);
	}

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return KERN_SUCCESS;
}

/*
 *	User interface for setting a special port.
 *
 *	Only permits the user to set a user-owned special port
 *	ID, rejecting a kernel-owned special port ID.
 *
 *	A special kernel port cannot be set up using this
 *	routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}

	return host_set_special_port(host_priv, id, port);
}

kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}

/*
 *	User interface for retrieving a special port.
 *
 *	Note that there is nothing to prevent a user special
 *	port from disappearing after it has been discovered by
 *	the caller; thus, using a special port can always result
 *	in a "port not valid" error.
 */
kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = current_task();
	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
		/* allow HID drivers to get the sysdiagnose port for keychord handling */
		if (id == HOST_SYSDIAGNOSE_PORT &&
		    IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
			goto get_special_port;
		}
		return KERN_NO_ACCESS;
	}

get_special_port:
	return host_get_special_port(host_priv, node, id, portp);
}

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
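
/*
 * Illustrative sketch (not part of this file): given a host-priv send
 * right already obtained by a privileged user-space caller, a special
 * port is typically fetched along the lines of
 *
 *	mach_port_t port = MACH_PORT_NULL;
 *	kern_return_t kr = host_get_special_port(host_priv,
 *	    HOST_LOCAL_NODE, HOST_PORT, &port);
 */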

/*
 *	host_get_io_master
 *
 *	Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return host_get_io_master_port(host_priv_self(), io_masterp);
}

host_priv_t
host_priv_self(void)
{
	return &realhost;
}

host_security_t
host_security_self(void)
{
	return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOTaskHasEntitlement(current_task(), "com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}