/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	host.c
 *
 *	Non-ipc host functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>     // mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#include <IOKit/IOBSD.h>        // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h>    // DriverKit entitlement strings

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>
vm_statistics64_data_t PERCPU_DATA(vm_stat);
uint64_t PERCPU_DATA(vm_page_grab_count);

host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;
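
/*
 * "realhost" is the single host object this kernel manages. The VM counters
 * above are maintained per CPU; the statistics routines below fold them into
 * one snapshot on demand.
 */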
kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	unsigned int count = processor_count;
	assert(count != 0);

	static_assert(sizeof(mach_port_t) == sizeof(processor_t));

	mach_port_t* ports = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (!ports) {
		return KERN_RESOURCE_SHORTAGE;
	}

	for (unsigned int i = 0; i < count; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		/* do the conversion that Mig should handle */
		ipc_port_t processor_port = convert_processor_to_port(processor);
		ports[i] = processor_port;
	}

	*countp = count;
	*out_array = (processor_array_t)ports;

	return KERN_SUCCESS;
}
extern int sched_allow_NO_SMT_threads;
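
/*
 * host_info:
 *
 * Return information about this host at the requested flavor. On entry,
 * *count is the capacity of the caller's buffer in integer_t units; on
 * return it is the number of units actually filled in.
 */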
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;

			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
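
/*
 * host_statistics:
 *
 * Legacy statistics flavors. For HOST_VM_INFO the 64-bit VM counters are
 * clamped through VM_STATISTICS_TRUNCATE_TO_32_BIT; callers that need
 * full-width counters should use host_statistics64() instead.
 */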
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		host_vm_stat = *PERCPU_GET_MASTER(vm_stat);

		percpu_foreach_secondary(stat, vm_stat) {
			vm_statistics64_data_t data = *stat;
			host_vm_stat.zero_fill_count += data.zero_fill_count;
			host_vm_stat.reactivations += data.reactivations;
			host_vm_stat.pageins += data.pageins;
			host_vm_stat.pageouts += data.pageouts;
			host_vm_stat.faults += data.faults;
			host_vm_stat.cow_faults += data.cow_faults;
			host_vm_stat.lookups += data.lookups;
			host_vm_stat.hits += data.hits;
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !defined(XNU_TARGET_OS_OSX)
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);

			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &processor->idle_state;
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (processor->current_state != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
extern uint32_t c_segment_pages_compressed;
#define HOST_STATISTICS_TIME_WINDOW     1       /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS    10      /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS    2       /* minimum number of requests per window */
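
/*
 * Illustrative user-space caller (not part of this file): repeated queries
 * from a non-platform binary inside one HOST_STATISTICS_TIME_WINDOW are
 * answered from the cache once the randomized per-window budget is spent.
 *
 *     mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *     vm_statistics64_data_t vmstat;
 *     host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *         (host_info64_t)&vmstat, &count);
 */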
uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);
#define HOST_VM_INFO64_REV0             0
#define HOST_VM_INFO64_REV1             1
#define HOST_EXTMOD_INFO64_REV0         2
#define HOST_LOAD_INFO_REV0             3
#define HOST_VM_INFO_REV0               4
#define HOST_VM_INFO_REV1               5
#define HOST_VM_INFO_REV2               6
#define HOST_CPU_LOAD_INFO_REV0         7
#define HOST_EXPIRED_TASK_INFO_REV0     8
#define HOST_EXPIRED_TASK_INFO_REV1     9
#define NUM_HOST_INFO_DATA_TYPES        10
static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};
struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; //NOTE count is in sizeof(integer_t)
};
static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};
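
/*
 * Convert the rate-limit window from seconds to absolute-time units once at
 * startup, so the window comparison in rate_limit_host_statistics() is a
 * plain integer compare.
 */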
void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}
static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		return;
	}

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM) {
		return;
	}

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}
static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t * count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}
static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t * count, kern_return_t * ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}
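
/*
 * Returns TRUE when the request was satisfied from the cache (the caller
 * simply returns *ret); FALSE when the caller must compute fresh statistics
 * and, on success, publish them via cache_host_statistics().
 */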
static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count, kern_return_t * ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;
	*pindex = -1;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * select a random number of requests (between HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS)
		 * to let query host_statistics.
		 * In this way it is not possible to infer, by watching when a cached copy changes, whether host_statistics
		 * was called on the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
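
/*
 * vm_stats:
 *
 * Build a vm_statistics64 snapshot by folding the per-CPU counters into the
 * master CPU's copy, then fill in the rev1 fields if the caller's buffer is
 * large enough for them.
 */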
static kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}

	host_vm_stat = *PERCPU_GET_MASTER(vm_stat);

	percpu_foreach_secondary(stat, vm_stat) {
		vm_statistics64_data_t data = *stat;
		host_vm_stat.zero_fill_count += data.zero_fill_count;
		host_vm_stat.reactivations += data.reactivations;
		host_vm_stat.pageins += data.pageins;
		host_vm_stat.pageouts += data.pageouts;
		host_vm_stat.faults += data.faults;
		host_vm_stat.cow_faults += data.cow_faults;
		host_vm_stat.lookups += data.lookups;
		host_vm_stat.hits += data.hits;
		host_vm_stat.compressions += data.compressions;
		host_vm_stat.decompressions += data.decompressions;
		host_vm_stat.swapins += data.swapins;
		host_vm_stat.swapouts += data.swapouts;
	}

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + vm_page_speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if !defined(XNU_TARGET_OS_OSX)
	stat->wire_count = vm_page_wire_count;
#else
	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = vm_page_speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure.  Fill in only what can fit
	 * in the data structure the caller gave us!
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = vm_page_throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/*
		 * rev1 added:
		 * "external page count"
		 * "anonymous page count"
		 * "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}
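
/*
 * host_statistics64:
 *
 * Full-width statistics flavors; anything other than HOST_VM_INFO64 and
 * HOST_EXTMOD_INFO64 falls through to host_statistics().
 */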
kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		return vm_stats(info, count);

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			return KERN_FAILURE;
		}

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return KERN_SUCCESS;
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return host_statistics(host, flavor, (host_info_t)info, count);
	}
}
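
/*
 * MIG-facing wrappers: user callers land here so their queries can be rate
 * limited and cached before reaching the implementations above.
 */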
kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}
kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}
/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return host_statistics((host_t)host_priv, flavor, info, count);
}
kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}
uint64_t
get_pages_grabbed_count(void)
{
	uint64_t pages_grabbed_count = 0;

	percpu_foreach(count, vm_page_grab_count) {
		pages_grabbed_count += *count;
	}

	return pages_grabbed_count;
}
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count = pos;

	return KERN_SUCCESS;
}
kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}
/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return KERN_SUCCESS;
}
/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void *addr;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate memory.  Can be pageable because it won't be
	 * touched while holding a lock.
	 */

	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* do the conversion that Mig should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return KERN_SUCCESS;
}
/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return KERN_INVALID_ARGUMENT;
	}

	*pset = pset_name;

	return KERN_SUCCESS;
}
/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
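
/*
 * Special port IDs are split into a kernel-owned range
 * (HOST_MIN_SPECIAL_PORT .. HOST_LAST_SPECIAL_KERNEL_PORT) and a user-owned
 * range above HOST_MAX_SPECIAL_KERNEL_PORT; IDs between the two ranges, or
 * outside [HOST_MIN_SPECIAL_PORT, HOST_MAX_SPECIAL_PORT], are invalid.
 */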
boolean_t
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}
extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) {
		host_unlock(host_priv);
		return KERN_NO_ACCESS;
	}
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}
	return KERN_SUCCESS;
}
/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id)) {
		panic("attempted to get invalid special port %d", id);
	}

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return KERN_SUCCESS;
}
/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	return host_set_special_port(host_priv, id, port);
}
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}
/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */

kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = current_task();
	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
		/* allow HID drivers to get the sysdiagnose port for keychord handling */
		if (id == HOST_SYSDIAGNOSE_PORT &&
		    IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
			goto get_special_port;
		}
		return KERN_NO_ACCESS;
	}

get_special_port:
	return host_get_special_port(host_priv, node, id, portp);
}
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return host_get_io_master_port(host_priv_self(), io_masterp);
}
host_priv_t
host_priv_self(void)
{
	return &realhost;
}

host_security_t
host_security_self(void)
{
	return &realhost;
}
kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOTaskHasEntitlement(current_task(), "com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}
kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}
;