/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	host.c
 *
 *	Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>     /* mach_node_port_changed() */

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#include <IOKit/IOBSD.h>        /* IOTaskHasEntitlement */
#include <IOKit/IOKitKeys.h>    /* DriverKit entitlement strings */

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>

vm_statistics64_data_t PERCPU_DATA(vm_stat);
uint64_t PERCPU_DATA(vm_page_grab_count);

host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	processor_t processor, *tp;
	void * addr;
	unsigned int count, i;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (addr == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	tp = (processor_t *)addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		for (i = 1; i < count; i++) {
			*tp++ = processor = processor->processor_list;
		}

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that Mig should handle */
	tp = (processor_t *)addr;
	for (i = 0; i < count; i++) {
		((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);
	}

	return KERN_SUCCESS;
}
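
/*
 * Illustrative sketch, not part of the original file: how a privileged
 * user-space caller might use host_processors().  Assumes the caller
 * already holds a send right to the host-priv port (here 'host_priv',
 * a hypothetical variable); error handling elided.
 *
 *	processor_array_t procs;
 *	mach_msg_type_number_t count;
 *	if (host_processors(host_priv, &procs, &count) == KERN_SUCCESS) {
 *		// 'procs' holds 'count' send rights to processor ports,
 *		// returned as out-of-line memory.  Release both when done.
 *		for (mach_msg_type_number_t i = 0; i < count; i++) {
 *			mach_port_deallocate(mach_task_self(), procs[i]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)procs,
 *		    count * sizeof(procs[0]));
 *	}
 */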

extern int sched_allow_NO_SMT_threads;

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id = master_processor->cpu_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;

			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * The preferred user-space cpu type and subtype for this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
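
/*
 * Illustrative sketch, not part of the original file: querying
 * HOST_BASIC_INFO from user space with the standard Mach API.  The
 * in/out 'count' tells the kernel how large the caller's buffer is and
 * is rewritten to the number of integer_t fields filled in (older
 * kernels may return only HOST_BASIC_INFO_OLD_COUNT).  Error handling
 * elided.
 *
 *	host_basic_info_data_t basic;
 *	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *	if (host_info(mach_host_self(), HOST_BASIC_INFO,
 *	    (host_info_t)&basic, &count) == KERN_SUCCESS) {
 *		printf("avail cpus: %d, max mem: %llu\n",
 *		    basic.avail_cpus, basic.max_mem);
 *	}
 */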

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		host_vm_stat = *PERCPU_GET_MASTER(vm_stat);

		percpu_foreach_secondary(stat, vm_stat) {
			vm_statistics64_data_t data = *stat;
			host_vm_stat.zero_fill_count += data.zero_fill_count;
			host_vm_stat.reactivations += data.reactivations;
			host_vm_stat.pageins += data.pageins;
			host_vm_stat.pageouts += data.pageouts;
			host_vm_stat.faults += data.faults;
			host_vm_stat.cow_faults += data.cow_faults;
			host_vm_stat.lookups += data.lookups;
			host_vm_stat.hits += data.hits;
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		processor_t processor;
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &processor->idle_state;
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (processor->current_state != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale.  Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;

		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}
	default: return KERN_INVALID_ARGUMENT;
	}
}
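
/*
 * Illustrative sketch, not part of the original file: fetching the
 * 32-bit HOST_VM_INFO statistics from user space.  These counters are
 * clamped to 32 bits by VM_STATISTICS_TRUNCATE_TO_32_BIT above, so new
 * code should prefer host_statistics64() with HOST_VM_INFO64.  Error
 * handling elided.
 *
 *	vm_statistics_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
 *	if (host_statistics(mach_host_self(), HOST_VM_INFO,
 *	    (host_info_t)&vmstat, &count) == KERN_SUCCESS) {
 *		printf("free: %u pages, pageins: %u\n",
 *		    vmstat.free_count, vmstat.pageins);
 *	}
 */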

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW     1  /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS    10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS    2  /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

#define HOST_VM_INFO64_REV0             0
#define HOST_VM_INFO64_REV1             1
#define HOST_EXTMOD_INFO64_REV0         2
#define HOST_LOAD_INFO_REV0             3
#define HOST_VM_INFO_REV0               4
#define HOST_VM_INFO_REV1               5
#define HOST_VM_INFO_REV2               6
#define HOST_CPU_LOAD_INFO_REV0         7
#define HOST_EXPIRED_TASK_INFO_REV0     8
#define HOST_EXPIRED_TASK_INFO_REV1     9
#define NUM_HOST_INFO_DATA_TYPES        10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; /* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};

void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		return;
	}

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM) {
		return;
	}

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
	switch (flavor) {
	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;
	}
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries per HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random number of requests (between HOST_STATISTICS_MIN_REQUESTS
		 * and HOST_STATISTICS_MAX_REQUESTS) to allow before serving cached data.
		 * This way it is not possible to infer, by observing when the cached copy
		 * changes, whether host_statistics was called during the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
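
/*
 * Worked example of the window logic above (illustrative): with
 * HOST_STATISTICS_MIN_REQUESTS = 2 and HOST_STATISTICS_MAX_REQUESTS = 10,
 * mach_absolute_time() % 9 yields a value in [0, 8], so max_requests is
 * drawn from [2, 10].  A non-platform task may therefore issue between
 * 2 and 10 live queries per 1-second window; any further queries within
 * the same window are answered from the cached copy.
 */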

kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}

	host_vm_stat = *PERCPU_GET_MASTER(vm_stat);

	percpu_foreach_secondary(stat, vm_stat) {
		vm_statistics64_data_t data = *stat;
		host_vm_stat.zero_fill_count += data.zero_fill_count;
		host_vm_stat.reactivations += data.reactivations;
		host_vm_stat.pageins += data.pageins;
		host_vm_stat.pageouts += data.pageouts;
		host_vm_stat.faults += data.faults;
		host_vm_stat.cow_faults += data.cow_faults;
		host_vm_stat.lookups += data.lookups;
		host_vm_stat.hits += data.hits;
		host_vm_stat.compressions += data.compressions;
		host_vm_stat.decompressions += data.decompressions;
		host_vm_stat.swapins += data.swapins;
		host_vm_stat.swapouts += data.swapouts;
	}

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + vm_page_speculative_count;
	stat->active_count = vm_page_active_count;

	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
	stat->wire_count = vm_page_wire_count;
#else
	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = vm_page_speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure.  Fill in only what can fit
	 * in the data structure the caller gave us!
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = vm_page_throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/*
		 * rev1 added:
		 *   "external page count"
		 *   "anonymous page count"
		 *   "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
		return vm_stats(info, count);

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			return KERN_FAILURE;
		}

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return KERN_SUCCESS;
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return host_statistics(host, flavor, (host_info_t)info, count);
	}
}
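
/*
 * Illustrative sketch, not part of the original file: the 64-bit
 * statistics interface, preferred over the truncating HOST_VM_INFO
 * flavor.  Error handling elided.
 *
 *	vm_statistics64_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *	if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	    (host_info64_t)&vmstat, &count) == KERN_SUCCESS) {
 *		printf("compressions: %llu, swapouts: %llu\n",
 *		    vmstat.compressions, vmstat.swapouts);
 *	}
 */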

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
		return ret;
	}

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS) {
		cache_host_statistics(index, info);
	}

	return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}

uint64_t
get_pages_grabbed_count(void)
{
	uint64_t pages_grabbed_count = 0;

	percpu_foreach(count, vm_page_grab_count) {
		pages_grabbed_count += *count;
	}

	return pages_grabbed_count;
}

kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	*count = pos;

	return KERN_SUCCESS;
}
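
/*
 * Sizing note, illustrative rather than from the original file: callers
 * of get_sched_statistics() must pass a buffer of at least
 * (processor_count + 1) * sizeof(struct _processor_statistics_np) bytes,
 * since one record is written per processor plus a trailing record
 * (ps_cpuid == -1) for the RT run queue; otherwise the routine bails
 * out with KERN_FAILURE partway through.
 */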

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return KERN_SUCCESS;
}

/*
 *	host_processor_sets:
 *
 *	List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void * addr;

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate memory.  Can be pageable because it won't be
	 * touched while holding a lock.
	 */
	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* do the conversion that Mig should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return KERN_SUCCESS;
}

/*
 *	host_processor_set_priv:
 *
 *	Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return KERN_INVALID_ARGUMENT;
	}

	*pset = pset_name;

	return KERN_SUCCESS;
}

/*
 *	host_processor_info
 *
 *	Return info about the processors on this host.  It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	processor_t processor;
	host_t thost;
	processor_info_t info;
	unsigned int icount, tcount;
	unsigned int pcount, i;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return result;
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock, LCK_GRP_NULL);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return result;
			}
		}
	}

	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
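
/*
 * Illustrative sketch, not part of the original file: reading per-CPU
 * load ticks from user space.  The result is out-of-line memory that
 * the caller must vm_deallocate().  Error handling elided.
 *
 *	processor_info_array_t info;
 *	mach_msg_type_number_t info_count;
 *	natural_t ncpu;
 *	if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *	    &ncpu, &info, &info_count) == KERN_SUCCESS) {
 *		processor_cpu_load_info_t loads = (processor_cpu_load_info_t)info;
 *		for (natural_t i = 0; i < ncpu; i++) {
 *			printf("cpu%u user ticks: %u\n", i,
 *			    loads[i].cpu_ticks[CPU_STATE_USER]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		    info_count * sizeof(integer_t));
 *	}
 */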

static bool
is_valid_host_special_port(int id)
{
	return (id <= HOST_MAX_SPECIAL_PORT) &&
	       (id >= HOST_MIN_SPECIAL_PORT) &&
	       ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}
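
/*
 * Note (editorial, derived from the check above): special-port IDs are
 * valid only within [HOST_MIN_SPECIAL_PORT, HOST_MAX_SPECIAL_PORT], and
 * IDs in the reserved gap (HOST_LAST_SPECIAL_KERNEL_PORT,
 * HOST_MAX_SPECIAL_KERNEL_PORT] are rejected, leaving room to grow the
 * kernel-owned group without colliding with user-owned IDs.
 */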

extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) {
		host_unlock(host_priv);
		return KERN_NO_ACCESS;
	}
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}
	return KERN_SUCCESS;
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	if (!is_valid_host_special_port(id)) {
		panic("attempted to get invalid special port %d", id);
	}

	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return KERN_SUCCESS;
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	return host_set_special_port(host_priv, id, port);
}

kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
		return KERN_NO_ACCESS;
	}
#endif

	return kernel_set_special_port(host_priv, id, port);
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */
kern_return_t
host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = current_task();
	if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
		/* allow HID drivers to get the sysdiagnose port for keychord handling */
		if (id == HOST_SYSDIAGNOSE_PORT &&
		    IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
			goto get_special_port;
		}
		return KERN_NO_ACCESS;
	}
get_special_port:
	return host_get_special_port(host_priv, node, id, portp);
}

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
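
/*
 * Illustrative sketch, not part of the original file: retrieving a
 * user-owned special port from user space.  Assumes the caller already
 * holds the host-priv port (here 'host_priv', a hypothetical variable);
 * HOST_LOCAL_NODE is the conventional node argument.  Error handling
 * elided.
 *
 *	mach_port_t port;
 *	if (host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *	    HOST_AUTOMOUNTD_PORT, &port) == KERN_SUCCESS &&
 *	    MACH_PORT_VALID(port)) {
 *		// 'port' is a send right; the backing service can die at
 *		// any time, so later sends may still fail.
 *	}
 */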

/*
 *	host_get_io_master
 *
 *	Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return host_get_io_master_port(host_priv_self(), io_masterp);
}

host_t
host_self(void)
{
	return &realhost;
}

host_priv_t
host_priv_self(void)
{
	return &realhost;
}

host_security_t
host_security_self(void)
{
	return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IOTaskHasEntitlement(current_task(), "com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}