/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * host.c
 *
 * Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>     // mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#include <IOKit/IOBSD.h>        // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h>    // DriverKit entitlement strings


#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>

vm_statistics64_data_t PERCPU_DATA(vm_stat);
uint64_t PERCPU_DATA(vm_page_grab_count);

host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	unsigned int count = processor_count;
	assert(count != 0);

	static_assert(sizeof(mach_port_t) == sizeof(processor_t));

	mach_port_t* ports = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (!ports) {
		return KERN_RESOURCE_SHORTAGE;
	}

	for (unsigned int i = 0; i < count; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		/* do the conversion that Mig should handle */
		ipc_port_t processor_port = convert_processor_to_port(processor);
		ports[i] = processor_port;
	}

	*countp = count;
	*out_array = (processor_array_t)ports;

	return KERN_SUCCESS;
}

extern int sched_allow_NO_SMT_threads;

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif


		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;

			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * Preferred user-space architecture for this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif


		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
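
/*
 * Usage sketch (illustrative, user space; not part of this file): the
 * HOST_BASIC_INFO flavor above is normally reached through the
 * MIG-generated host_info() stub. A minimal caller, assuming only
 * <mach/mach.h> and the standard Mach host APIs:
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         host_basic_info_data_t basic;
 *         mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *
 *         kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *             (host_info_t)&basic, &count);
 *         if (kr != KERN_SUCCESS) {
 *             return 1;
 *         }
 *         printf("max_cpus=%d avail_cpus=%d max_mem=%llu\n",
 *             basic.max_cpus, basic.avail_cpus, basic.max_mem);
 *         return 0;
 *     }
 *
 * A caller passing HOST_BASIC_INFO_OLD_COUNT instead would take the
 * backward-compatibility path above that skips the newer fields.
 */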

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		host_vm_stat = *PERCPU_GET_MASTER(vm_stat);

		percpu_foreach_secondary(stat, vm_stat) {
			vm_statistics64_data_t data = *stat;
			host_vm_stat.zero_fill_count += data.zero_fill_count;
			host_vm_stat.reactivations += data.reactivations;
			host_vm_stat.pageins += data.pageins;
			host_vm_stat.pageouts += data.pageouts;
			host_vm_stat.faults += data.faults;
			host_vm_stat.cow_faults += data.cow_faults;
			host_vm_stat.lookups += data.lookups;
			host_vm_stat.hits += data.hits;
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

#define GET_TICKS_VALUE(state, ticks) \
MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);

			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &processor->idle_state;
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (processor->current_state != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}
	default: return KERN_INVALID_ARGUMENT;
	}
}
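
/*
 * Usage sketch (illustrative, user space; not part of this file):
 * HOST_CPU_LOAD_INFO returns monotonically increasing tick counters,
 * so a consumer samples twice and differences the counts to get a
 * utilization figure:
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *     #include <unistd.h>
 *
 *     static kern_return_t
 *     sample(host_cpu_load_info_data_t *load)
 *     {
 *         mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
 *         return host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO,
 *             (host_info_t)load, &count);
 *     }
 *
 *     int main(void)
 *     {
 *         host_cpu_load_info_data_t a, b;
 *         if (sample(&a) != KERN_SUCCESS) return 1;
 *         sleep(1);
 *         if (sample(&b) != KERN_SUCCESS) return 1;
 *
 *         natural_t busy = 0, total = 0;
 *         for (int s = 0; s < CPU_STATE_MAX; s++) {
 *             natural_t d = b.cpu_ticks[s] - a.cpu_ticks[s];
 *             total += d;
 *             if (s != CPU_STATE_IDLE) busy += d;
 *         }
 *         if (total) printf("busy: %.1f%%\n", 100.0 * busy / total);
 *         return 0;
 *     }
 */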

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW   1  /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS  10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS  2  /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

#define HOST_VM_INFO64_REV0          0
#define HOST_VM_INFO64_REV1          1
#define HOST_EXTMOD_INFO64_REV0      2
#define HOST_LOAD_INFO_REV0          3
#define HOST_VM_INFO_REV0            4
#define HOST_VM_INFO_REV1            5
#define HOST_VM_INFO_REV2            6
#define HOST_CPU_LOAD_INFO_REV0      7
#define HOST_EXPIRED_TASK_INFO_REV0  8
#define HOST_EXPIRED_TASK_INFO_REV1  9
#define NUM_HOST_INFO_DATA_TYPES     10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; // NOTE: count is in sizeof(integer_t)
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT},
};


void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		return;
	}

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM) {
		return;
	}

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random request quota (between HOST_STATISTICS_MIN_REQUESTS
		 * and HOST_STATISTICS_MAX_REQUESTS) for this window. Randomizing the
		 * quota prevents an observer from inferring, by watching when the
		 * cached copy changes, whether host_statistics was called during the
		 * previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
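
/*
 * Observable effect (illustrative sketch, user space; behavior inferred
 * from the logic above, not a guaranteed interface): for a task without
 * TF_PLATFORM, repeated queries of one flavor inside a single
 * HOST_STATISTICS_TIME_WINDOW stop changing once the randomized quota
 * (HOST_STATISTICS_MIN_REQUESTS..HOST_STATISTICS_MAX_REQUESTS) is spent,
 * because the cached snapshot is returned instead:
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         for (int i = 0; i < 20; i++) {
 *             vm_statistics64_data_t vm;
 *             mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *             if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *                 (host_info64_t)&vm, &count) != KERN_SUCCESS) {
 *                 return 1;
 *             }
 *             // once rate-limited, this repeats the cached value
 *             printf("%2d: faults=%llu\n", i, vm.faults);
 *         }
 *         return 0;
 *     }
 */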

kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}

	host_vm_stat = *PERCPU_GET_MASTER(vm_stat);

	percpu_foreach_secondary(stat, vm_stat) {
		vm_statistics64_data_t data = *stat;
		host_vm_stat.zero_fill_count += data.zero_fill_count;
		host_vm_stat.reactivations += data.reactivations;
		host_vm_stat.pageins += data.pageins;
		host_vm_stat.pageouts += data.pageouts;
		host_vm_stat.faults += data.faults;
		host_vm_stat.cow_faults += data.cow_faults;
		host_vm_stat.lookups += data.lookups;
		host_vm_stat.hits += data.hits;
		host_vm_stat.compressions += data.compressions;
		host_vm_stat.decompressions += data.decompressions;
		host_vm_stat.swapins += data.swapins;
		host_vm_stat.swapouts += data.swapouts;
	}

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + vm_page_speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
	stat->wire_count = vm_page_wire_count;
#else
	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = vm_page_speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure. Fill in only what can fit
	 * in the data structure the caller gave us!
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = vm_page_throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/* rev1 added:
		 * "external page count"
		 * "anonymous page count"
		 * "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		return vm_stats(info, count);

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			return KERN_FAILURE;
		}

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return KERN_SUCCESS;
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return host_statistics(host, flavor, (host_info_t)info, count);
	}
}
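
/*
 * Usage sketch (illustrative, user space; not part of this file):
 * fetching the 64-bit VM statistics that vm_stats() fills in above:
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         vm_statistics64_data_t vm;
 *         mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *
 *         kern_return_t kr = host_statistics64(mach_host_self(),
 *             HOST_VM_INFO64, (host_info64_t)&vm, &count);
 *         if (kr != KERN_SUCCESS) {
 *             return 1;
 *         }
 *         printf("free=%u active=%u wired=%u compressor=%u (pages)\n",
 *             vm.free_count, vm.active_count, vm.wire_count,
 *             vm.compressor_page_count);
 *         return 0;
 *     }
 *
 * Passing HOST_VM_INFO64_REV0_COUNT instead yields only the rev0 fields.
 */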

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}


uint64_t
get_pages_grabbed_count(void)
{
	uint64_t pages_grabbed_count = 0;

	percpu_foreach(count, vm_page_grab_count) {
		pages_grabbed_count += *count;
	}

	return pages_grabbed_count;
}


kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count = pos;

	return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return KERN_SUCCESS;
}
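
/*
 * Usage sketch (illustrative, user space; not part of this file):
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         kernel_version_t ver;   // fixed-size char array, see <mach/host_info.h>
 *         if (host_kernel_version(mach_host_self(), ver) != KERN_SUCCESS) {
 *             return 1;
 *         }
 *         printf("%s\n", ver);
 *         return 0;
 *     }
 */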

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void * addr;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate memory. Can be pageable because it won't be
	 * touched while holding a lock.
	 */

	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* do the conversion that Mig should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return KERN_SUCCESS;
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return KERN_INVALID_ARGUMENT;
	}

	*pset = pset_name;

	return KERN_SUCCESS;
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
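
/*
 * Usage sketch (illustrative, user space; not part of this file): the
 * result arrives as out-of-line memory (the vm_map_copy_t built above),
 * which the MIG stub maps into the caller and which must be
 * vm_deallocate()d when done:
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         natural_t ncpu = 0;
 *         processor_info_array_t info = NULL;
 *         mach_msg_type_number_t info_count = 0;
 *
 *         kern_return_t kr = host_processor_info(mach_host_self(),
 *             PROCESSOR_CPU_LOAD_INFO, &ncpu, &info, &info_count);
 *         if (kr != KERN_SUCCESS) {
 *             return 1;
 *         }
 *
 *         processor_cpu_load_info_t loads = (processor_cpu_load_info_t)info;
 *         for (natural_t i = 0; i < ncpu; i++) {
 *             printf("cpu%u: user=%u system=%u idle=%u\n", i,
 *                 loads[i].cpu_ticks[CPU_STATE_USER],
 *                 loads[i].cpu_ticks[CPU_STATE_SYSTEM],
 *                 loads[i].cpu_ticks[CPU_STATE_IDLE]);
 *         }
 *
 *         vm_deallocate(mach_task_self(), (vm_address_t)info,
 *             info_count * sizeof(natural_t));
 *         return 0;
 *     }
 */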

static bool
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) {
		host_unlock(host_priv);
		return KERN_NO_ACCESS;
	}
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}
	return KERN_SUCCESS;
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id)) {
		panic("attempted to get invalid special port %d", id);
	}

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return KERN_SUCCESS;
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	return host_set_special_port(host_priv, id, port);
}

kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */

kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = current_task();
	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
		/* allow HID drivers to get the sysdiagnose port for keychord handling */
		if (id == HOST_SYSDIAGNOSE_PORT &&
		    IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
			goto get_special_port;
		}
		return KERN_NO_ACCESS;
	}
get_special_port:
	return host_get_special_port(host_priv, node, id, portp);
}

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}

/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return host_get_io_master_port(host_priv_self(), io_masterp);
}

host_t
host_self(void)
{
	return &realhost;
}

host_priv_t
host_priv_self(void)
{
	return &realhost;
}

host_security_t
host_security_self(void)
{
	return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOTaskHasEntitlement(current_task(), "com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}