/*
 * Source: apple/xnu (release xnu-7195.101.1), osfmk/kern/host.c
 * (obtained via the git.saurik.com mirror of apple/xnu.git)
 */
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * host.c
61 *
62 * Non-ipc host functions.
63 */
64
65 #include <mach/mach_types.h>
66 #include <mach/boolean.h>
67 #include <mach/host_info.h>
68 #include <mach/host_special_ports.h>
69 #include <mach/kern_return.h>
70 #include <mach/machine.h>
71 #include <mach/port.h>
72 #include <mach/processor_info.h>
73 #include <mach/vm_param.h>
74 #include <mach/processor.h>
75 #include <mach/mach_host_server.h>
76 #include <mach/host_priv_server.h>
77 #include <mach/vm_map.h>
78 #include <mach/task_info.h>
79
80 #include <machine/commpage.h>
81 #include <machine/cpu_capabilities.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/assert.h>
85 #include <kern/kalloc.h>
86 #include <kern/host.h>
87 #include <kern/host_statistics.h>
88 #include <kern/ipc_host.h>
89 #include <kern/misc_protos.h>
90 #include <kern/sched.h>
91 #include <kern/processor.h>
92 #include <kern/mach_node.h> // mach_node_port_changed()
93
94 #include <vm/vm_map.h>
95 #include <vm/vm_purgeable_internal.h>
96 #include <vm/vm_pageout.h>
97
98 #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
99 #include <IOKit/IOKitKeys.h> // DriverKit entitlement strings
100
101
102 #if CONFIG_ATM
103 #include <atm/atm_internal.h>
104 #endif
105
106 #if CONFIG_MACF
107 #include <security/mac_mach_internal.h>
108 #endif
109
110 #include <pexpert/pexpert.h>
111
/*
 * Global VM event counters.  These are per-CPU scalable counters,
 * aggregated on demand by get_host_vm_stats()/vm_stats() below.
 */
SCALABLE_COUNTER_DEFINE(vm_statistics_zero_fill_count); /* # of zero fill pages */
SCALABLE_COUNTER_DEFINE(vm_statistics_reactivations); /* # of pages reactivated */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageins); /* # of pageins */
SCALABLE_COUNTER_DEFINE(vm_statistics_pageouts); /* # of pageouts */
SCALABLE_COUNTER_DEFINE(vm_statistics_faults); /* # of faults */
SCALABLE_COUNTER_DEFINE(vm_statistics_cow_faults); /* # of copy-on-writes */
SCALABLE_COUNTER_DEFINE(vm_statistics_lookups); /* object cache lookups */
SCALABLE_COUNTER_DEFINE(vm_statistics_hits); /* object cache hits */
SCALABLE_COUNTER_DEFINE(vm_statistics_purges); /* # of pages purged */
SCALABLE_COUNTER_DEFINE(vm_statistics_decompressions); /* # of pages decompressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_compressions); /* # of pages compressed */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapins); /* # of pages swapped in (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_swapouts); /* # of pages swapped out (via compression segments) */
SCALABLE_COUNTER_DEFINE(vm_statistics_total_uncompressed_pages_in_compressor); /* # of pages (uncompressed) held within the compressor. */
SCALABLE_COUNTER_DEFINE(vm_page_grab_count);

/* The host object for this kernel instance. */
host_data_t realhost;
129
130 static void
131 get_host_vm_stats(vm_statistics64_t out)
132 {
133 out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
134 out->reactivations = counter_load(&vm_statistics_reactivations);
135 out->pageins = counter_load(&vm_statistics_pageins);
136 out->pageouts = counter_load(&vm_statistics_pageouts);
137 out->faults = counter_load(&vm_statistics_faults);
138 out->cow_faults = counter_load(&vm_statistics_cow_faults);
139 out->lookups = counter_load(&vm_statistics_lookups);
140 out->hits = counter_load(&vm_statistics_hits);
141 out->compressions = counter_load(&vm_statistics_compressions);
142 out->decompressions = counter_load(&vm_statistics_decompressions);
143 out->swapins = counter_load(&vm_statistics_swapins);
144 out->swapouts = counter_load(&vm_statistics_swapouts);
145 }
/* Host-wide external-modification statistics, reported via HOST_EXTMOD_INFO64. */
vm_extmod_statistics_data_t host_extmod_statistics;
147
/*
 *	host_processors:
 *
 *	Return an array of processor port send rights for every processor
 *	on the system.  Requires the host-privileged port.
 *
 *	out_array/countp: out-parameters receiving the (kalloc'd) port
 *	array and its element count.  NOTE(review): the array is presumably
 *	consumed/deallocated by the MIG reply path — confirm against the
 *	mach_host.defs routine declaration.
 */
kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	unsigned int count = processor_count;
	assert(count != 0);

	/* ports and processors are both pointer-sized, so one array can serve as the other */
	static_assert(sizeof(mach_port_t) == sizeof(processor_t));

	mach_port_t* ports = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (!ports) {
		return KERN_RESOURCE_SHORTAGE;
	}

	for (unsigned int i = 0; i < count; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		/* do the conversion that Mig should handle */
		ipc_port_t processor_port = convert_processor_to_port(processor);
		ports[i] = processor_port;
	}

	*countp = count;
	*out_array = (processor_array_t)ports;

	return KERN_SUCCESS;
}
179
180 extern int sched_allow_NO_SMT_threads;
181
/*
 *	host_info:
 *
 *	Return per-flavor information about a host.  `info' is the caller's
 *	buffer; `count' is in/out — on entry its capacity in integer_t
 *	units, on exit the number of integers actually written.  Returns
 *	KERN_FAILURE when the buffer cannot hold even the oldest revision
 *	of the requested flavor's structure.
 */
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		/* CPU type/subtype are reported from the boot (master) processor's slot */
		int master_id = master_processor->cpu_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT) {
			return KERN_FAILURE;
		}

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);
		basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
		/*
		 * NO_SMT tasks are shown only the primary (non-hyperthread)
		 * processors when that scheduling policy is enabled.
		 */
		if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) {
			basic_info->avail_cpus = primary_processor_avail_count_user;
		} else {
			basic_info->avail_cpus = processor_avail_count_user;
		}
#else
		basic_info->avail_cpus = processor_avail_count;
#endif


		/* Callers with a full-size buffer also get the extended (newer) fields */
		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
			/* keep logical_cpu consistent with the NO_SMT-filtered avail_cpus above */
			basic_info->logical_cpu = basic_info->avail_cpus;
#else
			basic_info->logical_cpu = machine_info.logical_cpu;
#endif
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;

			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return KERN_SUCCESS;
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT) {
			return KERN_FAILURE;
		}

		sched_info = (host_sched_info_t)info;

		/* report the scheduler's initial quantum, converted to milliseconds */
		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT) {
			return KERN_FAILURE;
		}

		/* XXX Fail until ledgers are implemented */
		return KERN_INVALID_ARGUMENT;
	}

	case HOST_PRIORITY_INFO: {
		/* Report the kernel's fixed scheduling priority bands. */
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT) {
			return KERN_FAILURE;
		}

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return KERN_SUCCESS;
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		/* presence-only gestalt: success with an empty reply */
		*count = 0;
		return KERN_SUCCESS;
	}

	case HOST_CAN_HAS_DEBUGGER: {
		/* Report whether this kernel permits low-level debugging. */
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
			return KERN_FAILURE;
		}

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		/* Purgeable-memory statistics, host-wide (no per-task filter). */
		if (*count < HOST_VM_PURGABLE_COUNT) {
			return KERN_FAILURE;
		}

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_DEBUG_INFO_INTERNAL: {
		/* Kernel configuration flags; only exposed on DEVELOPMENT/DEBUG builds. */
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
			return KERN_FAILURE;
		}

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
		return KERN_NOT_SUPPORTED;
#endif
	}

	case HOST_PREFERRED_USER_ARCH: {
		host_preferred_user_arch_t user_arch_info;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
			return KERN_FAILURE;
		}

		user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
		/* device-tree/defaults override, falling back to the build-time preference */
		cpu_type_t preferred_cpu_type;
		cpu_subtype_t preferred_cpu_subtype;
		if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
			preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
		}
		if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
			preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
		}
		user_arch_info->cpu_type = preferred_cpu_type;
		user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
		/* no configured preference: report the boot processor's architecture */
		int master_id = master_processor->cpu_id;
		user_arch_info->cpu_type = slot_type(master_id);
		user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif


		*count = HOST_PREFERRED_USER_ARCH_COUNT;

		return KERN_SUCCESS;
	}

	default: return KERN_INVALID_ARGUMENT;
	}
}
396
/*
 *	host_statistics:
 *
 *	Return 32-bit host statistics: load averages, legacy VM counters,
 *	aggregate CPU tick counts, and expired-task power info.  `count'
 *	is in/out in integer_t units; flavors fill in as many structure
 *	revisions as the caller's buffer can hold.
 */
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	switch (flavor) {
	case HOST_LOAD_INFO: {
		/* Copy out the precomputed load averages and Mach factors. */
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return KERN_SUCCESS;
	}

	case HOST_VM_INFO: {
		/* Legacy 32-bit VM statistics; 64-bit counters are truncated to fit. */
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT) {
			return KERN_FAILURE;
		}

		get_host_vm_stats(&host_vm_stat);

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		/* pages held on the per-CPU local queues also count as active */
		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if !XNU_TARGET_OS_OSX
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else /* !XNU_TARGET_OS_OSX */
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif /* !XNU_TARGET_OS_OSX */
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit*/

		return KERN_SUCCESS;
	}

	case HOST_CPU_LOAD_INFO: {
		/* Sum user/system/idle tick counts across all processors. */
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			return KERN_FAILURE;
		}

		/* accumulate a processor timer (abs time) into the tick array */
#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		/* hold the processor list stable while walking processor_array */
		simple_lock(&processor_list_lock, LCK_GRP_NULL);

		unsigned int pcount = processor_count;

		for (unsigned int i = 0; i < pcount; i++) {
			processor_t processor = processor_array[i];
			assert(processor != PROCESSOR_NULL);

			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &processor->idle_state;
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (processor->current_state != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_EXPIRED_TASK_INFO: {
		/* Aggregate power statistics accumulated from already-terminated tasks. */
		if (*count < TASK_POWER_INFO_COUNT) {
			return KERN_FAILURE;
		}

		/* v1 and v2 views of the same caller buffer */
		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return KERN_SUCCESS;
	}
	default: return KERN_INVALID_ARGUMENT;
	}
}
588
extern uint32_t c_segment_pages_compressed;

/*
 * Rate limiting for host_statistics()/host_statistics64() from
 * non-platform callers: at most a randomized number of requests
 * (between MIN and MAX) per TIME_WINDOW; excess requests are served
 * from the per-flavor caches below.
 */
#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */

/* window length in mach absolute-time units; set by host_statistics_init() */
uint64_t host_statistics_time_window;

static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics");
static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp);

/* indices into g_host_stats_cache: one entry per (flavor, revision) pair */
#define HOST_VM_INFO64_REV0 0
#define HOST_VM_INFO64_REV1 1
#define HOST_EXTMOD_INFO64_REV0 2
#define HOST_LOAD_INFO_REV0 3
#define HOST_VM_INFO_REV0 4
#define HOST_VM_INFO_REV1 5
#define HOST_VM_INFO_REV2 6
#define HOST_CPU_LOAD_INFO_REV0 7
#define HOST_EXPIRED_TASK_INFO_REV0 8
#define HOST_EXPIRED_TASK_INFO_REV1 9
#define NUM_HOST_INFO_DATA_TYPES 10

/* backing storage for each cache entry's snapshot */
static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

/* per-flavor rate-limit state + cached reply; protected by host_statistics_lck */
struct host_stats_cache {
	uint64_t last_access;                /* start of the current window (mach_continuous_time) */
	uint64_t current_requests;           /* requests seen in the current window */
	uint64_t max_requests;               /* randomized per-window request budget */
	uintptr_t data;                      /* pointer to the snapshot buffer above */
	mach_msg_type_number_t count; //NOTE count is in sizeof(integer_t)
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT},
};
643
644
/*
 * Convert the rate-limit window (HOST_STATISTICS_TIME_WINDOW seconds)
 * into mach absolute-time units.  Intended to run once during startup,
 * before the rate limiter is consulted.
 */
void
host_statistics_init(void)
{
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}
650
651 static void
652 cache_host_statistics(int index, host_info64_t info)
653 {
654 if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
655 return;
656 }
657
658 task_t task = current_task();
659 if (task->t_flags & TF_PLATFORM) {
660 return;
661 }
662
663 memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
664 return;
665 }
666
667 static void
668 get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
669 {
670 if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
671 *count = 0;
672 return;
673 }
674
675 *count = g_host_stats_cache[index].count;
676 memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
677 }
678
679 static int
680 get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
681 {
682 switch (flavor) {
683 case HOST_VM_INFO64:
684 if (!is_stat64) {
685 *ret = KERN_INVALID_ARGUMENT;
686 return -1;
687 }
688 if (*count < HOST_VM_INFO64_REV0_COUNT) {
689 *ret = KERN_FAILURE;
690 return -1;
691 }
692 if (*count >= HOST_VM_INFO64_REV1_COUNT) {
693 return HOST_VM_INFO64_REV1;
694 }
695 return HOST_VM_INFO64_REV0;
696
697 case HOST_EXTMOD_INFO64:
698 if (!is_stat64) {
699 *ret = KERN_INVALID_ARGUMENT;
700 return -1;
701 }
702 if (*count < HOST_EXTMOD_INFO64_COUNT) {
703 *ret = KERN_FAILURE;
704 return -1;
705 }
706 return HOST_EXTMOD_INFO64_REV0;
707
708 case HOST_LOAD_INFO:
709 if (*count < HOST_LOAD_INFO_COUNT) {
710 *ret = KERN_FAILURE;
711 return -1;
712 }
713 return HOST_LOAD_INFO_REV0;
714
715 case HOST_VM_INFO:
716 if (*count < HOST_VM_INFO_REV0_COUNT) {
717 *ret = KERN_FAILURE;
718 return -1;
719 }
720 if (*count >= HOST_VM_INFO_REV2_COUNT) {
721 return HOST_VM_INFO_REV2;
722 }
723 if (*count >= HOST_VM_INFO_REV1_COUNT) {
724 return HOST_VM_INFO_REV1;
725 }
726 return HOST_VM_INFO_REV0;
727
728 case HOST_CPU_LOAD_INFO:
729 if (*count < HOST_CPU_LOAD_INFO_COUNT) {
730 *ret = KERN_FAILURE;
731 return -1;
732 }
733 return HOST_CPU_LOAD_INFO_REV0;
734
735 case HOST_EXPIRED_TASK_INFO:
736 if (*count < TASK_POWER_INFO_COUNT) {
737 *ret = KERN_FAILURE;
738 return -1;
739 }
740 if (*count >= TASK_POWER_INFO_V2_COUNT) {
741 return HOST_EXPIRED_TASK_INFO_REV1;
742 }
743 return HOST_EXPIRED_TASK_INFO_REV0;
744
745 default:
746 *ret = KERN_INVALID_ARGUMENT;
747 return -1;
748 }
749 }
750
/*
 * Decide whether a host_statistics()/host_statistics64() request from
 * the current task should be served from the cache instead of computing
 * fresh values.
 *
 * Returns TRUE when the caller exceeded its per-window budget; in that
 * case the cached reply has already been copied into `info'/`count' and
 * *ret holds the status to return.  Returns FALSE when the request may
 * proceed (check *ret first: flavor/size validation may have failed).
 * On success *pindex receives the cache slot to update afterwards.
 */
static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1) {
		goto out;
	}

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		/* still inside the current window */
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		/* window expired (or first use): start a new one */
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * select a random number of requests (included between HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS)
		 * to let query host_statistics.
		 * In this way it is not possible to infer, by watching when a cached copy changes, whether host_statistics was called on
		 * the previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
799
/*
 *	vm_stats:
 *
 *	Fill in 64-bit VM statistics (HOST_VM_INFO64) for the caller.
 *	`count' is in/out in integer_t units; rev1 fields are filled in
 *	only when the caller's buffer is large enough.
 */
kern_return_t
vm_stats(void *info, unsigned int *count)
{
	vm_statistics64_data_t host_vm_stat;
	mach_msg_type_number_t original_count;
	unsigned int local_q_internal_count;
	unsigned int local_q_external_count;

	if (*count < HOST_VM_INFO64_REV0_COUNT) {
		return KERN_FAILURE;
	}
	/* snapshot the event counters first */
	get_host_vm_stats(&host_vm_stat);

	vm_statistics64_t stat = (vm_statistics64_t)info;

	stat->free_count = vm_page_free_count + vm_page_speculative_count;
	stat->active_count = vm_page_active_count;

	/* pages on the per-CPU local queues count as active; also tally their internal/external split */
	local_q_internal_count = 0;
	local_q_external_count = 0;
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			stat->active_count += lq->vpl_count;
			local_q_internal_count += lq->vpl_internal_count;
			local_q_external_count += lq->vpl_external_count;
		}
	}
	stat->inactive_count = vm_page_inactive_count;
#if !XNU_TARGET_OS_OSX
	stat->wire_count = vm_page_wire_count;
#else /* !XNU_TARGET_OS_OSX */
	stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif /* !XNU_TARGET_OS_OSX */
	stat->zero_fill_count = host_vm_stat.zero_fill_count;
	stat->reactivations = host_vm_stat.reactivations;
	stat->pageins = host_vm_stat.pageins;
	stat->pageouts = host_vm_stat.pageouts;
	stat->faults = host_vm_stat.faults;
	stat->cow_faults = host_vm_stat.cow_faults;
	stat->lookups = host_vm_stat.lookups;
	stat->hits = host_vm_stat.hits;

	stat->purgeable_count = vm_page_purgeable_count;
	stat->purges = vm_page_purged_count;

	stat->speculative_count = vm_page_speculative_count;

	/*
	 * Fill in extra info added in later revisions of the
	 * vm_statistics data structure. Fill in only what can fit
	 * in the data structure the caller gave us !
	 */
	original_count = *count;
	*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
	if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
		/* rev1 added "throttled count" */
		stat->throttled_count = vm_page_throttled_count;
		/* rev1 added "compression" info */
		stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
		stat->compressions = host_vm_stat.compressions;
		stat->decompressions = host_vm_stat.decompressions;
		stat->swapins = host_vm_stat.swapins;
		stat->swapouts = host_vm_stat.swapouts;
		/* rev1 added:
		 * "external page count"
		 * "anonymous page count"
		 * "total # of pages (uncompressed) held in the compressor"
		 */
		stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
		stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
		stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
		*count = HOST_VM_INFO64_REV1_COUNT;
	}

	return KERN_SUCCESS;
}
876
877 kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
878
879 kern_return_t
880 host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
881 {
882 if (host == HOST_NULL) {
883 return KERN_INVALID_HOST;
884 }
885
886 switch (flavor) {
887 case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
888 return vm_stats(info, count);
889
890 case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
891 {
892 vm_extmod_statistics_t out_extmod_statistics;
893
894 if (*count < HOST_EXTMOD_INFO64_COUNT) {
895 return KERN_FAILURE;
896 }
897
898 out_extmod_statistics = (vm_extmod_statistics_t)info;
899 *out_extmod_statistics = host_extmod_statistics;
900
901 *count = HOST_EXTMOD_INFO64_COUNT;
902
903 return KERN_SUCCESS;
904 }
905
906 default: /* If we didn't recognize the flavor, send to host_statistics */
907 return host_statistics(host, flavor, (host_info_t)info, count);
908 }
909 }
910
911 kern_return_t
912 host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
913 {
914 kern_return_t ret = KERN_SUCCESS;
915 int index;
916
917 if (host == HOST_NULL) {
918 return KERN_INVALID_HOST;
919 }
920
921 if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
922 return ret;
923 }
924
925 if (ret != KERN_SUCCESS) {
926 return ret;
927 }
928
929 ret = host_statistics64(host, flavor, info, count);
930
931 if (ret == KERN_SUCCESS) {
932 cache_host_statistics(index, info);
933 }
934
935 return ret;
936 }
937
938 kern_return_t
939 host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
940 {
941 kern_return_t ret = KERN_SUCCESS;
942 int index;
943
944 if (host == HOST_NULL) {
945 return KERN_INVALID_HOST;
946 }
947
948 if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
949 return ret;
950 }
951
952 if (ret != KERN_SUCCESS) {
953 return ret;
954 }
955
956 ret = host_statistics(host, flavor, info, count);
957
958 if (ret == KERN_SUCCESS) {
959 cache_host_statistics(index, info);
960 }
961
962 return ret;
963 }
964
965 /*
966 * Get host statistics that require privilege.
967 * None for now, just call the un-privileged version.
968 */
969 kern_return_t
970 host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
971 {
972 return host_statistics((host_t)host_priv, flavor, info, count);
973 }
974
/*
 * set_sched_stats_active:
 *
 * Enable or disable collection of per-processor scheduler statistics
 * (consumed by get_sched_statistics()).  Always succeeds.
 */
kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}
981
/*
 * get_sched_statistics:
 *
 * Copy one _processor_statistics_np record per CPU into the caller's
 * buffer, followed by a sentinel record (ps_cpuid == -1) that carries
 * the realtime run-queue count sum.  On entry *count is the buffer
 * size in bytes; on success it is updated to the bytes written.
 * Fails if collection is disabled or the buffer is too small.
 */
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	uint32_t pos = 0;

	/* Nothing is collected unless set_sched_stats_active(TRUE) ran. */
	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	percpu_foreach_base(pcpu_base) {
		struct sched_statistics stats;
		processor_t processor;

		/* Bounds-check before writing each record (pos is in bytes). */
		pos += sizeof(struct _processor_statistics_np);
		if (pos > *count) {
			return KERN_FAILURE;
		}

		/* Snapshot the per-cpu stats by value, not by pointer. */
		stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats);
		processor = PERCPU_GET_WITH_BASE(pcpu_base, processor);

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats.csw_count;
		out->ps_preempt_count = stats.preempt_count;
		out->ps_preempted_rt_count = stats.preempted_rt_count;
		out->ps_preempted_by_rt_count = stats.preempted_by_rt_count;
		out->ps_rt_sched_count = stats.rt_sched_count;
		out->ps_interrupt_count = stats.interrupt_count;
		out->ps_ipi_count = stats.ipi_count;
		out->ps_timer_pop_count = stats.timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats.idle_transitions;
		out->ps_quantum_timer_expirations = stats.quantum_timer_expirations;

		out++;
	}

	/* And include RT Queue information */
	pos += sizeof(struct _processor_statistics_np);
	if (pos > *count) {
		return KERN_FAILURE;
	}

	/* Sentinel record: cpuid -1, only ps_runq_count_sum is meaningful. */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;

	/* Report the number of bytes actually written. */
	*count = pos;

	return KERN_SUCCESS;
}
1034
/*
 * host_page_size:
 *
 * Report the VM page size for this host.
 */
kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*out_page_size = PAGE_SIZE;

	return KERN_SUCCESS;
}
1046
1047 /*
1048 * Return kernel version string (more than you ever
1049 * wanted to know about what version of the kernel this is).
1050 */
1051 extern char version[];
1052
1053 kern_return_t
1054 host_kernel_version(host_t host, kernel_version_t out_version)
1055 {
1056 if (host == HOST_NULL) {
1057 return KERN_INVALID_ARGUMENT;
1058 }
1059
1060 (void)strncpy(out_version, version, sizeof(kernel_version_t));
1061
1062 return KERN_SUCCESS;
1063 }
1064
1065 /*
1066 * host_processor_sets:
1067 *
1068 * List all processor sets on the host.
1069 */
1070 kern_return_t
1071 host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
1072 {
1073 void * addr;
1074
1075 if (host_priv == HOST_PRIV_NULL) {
1076 return KERN_INVALID_ARGUMENT;
1077 }
1078
1079 /*
1080 * Allocate memory. Can be pageable because it won't be
1081 * touched while holding a lock.
1082 */
1083
1084 addr = kalloc((vm_size_t)sizeof(mach_port_t));
1085 if (addr == 0) {
1086 return KERN_RESOURCE_SHORTAGE;
1087 }
1088
1089 /* do the conversion that Mig should handle */
1090 *((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);
1091
1092 *pset_list = (processor_set_array_t)addr;
1093 *count = 1;
1094
1095 return KERN_SUCCESS;
1096 }
1097
1098 /*
1099 * host_processor_set_priv:
1100 *
1101 * Return control port for given processor set.
1102 */
1103 kern_return_t
1104 host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
1105 {
1106 if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
1107 *pset = PROCESSOR_SET_NULL;
1108
1109 return KERN_INVALID_ARGUMENT;
1110 }
1111
1112 *pset = pset_name;
1113
1114 return KERN_SUCCESS;
1115 }
1116
/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	host_t thost;
	processor_info_t info;
	unsigned int icount;
	unsigned int pcount;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Per-processor record size (in natural_t units) for this flavor. */
	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS) {
		return result;
	}

	pcount = processor_count;
	assert(pcount != 0);

	/*
	 * Allocate a page-rounded wired buffer in the IPC map; the slack
	 * beyond "needed" is zeroed below before the memory leaves the
	 * kernel.
	 */
	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE;
	}

	info = (processor_info_t)addr;

	/* One record per processor; info advances by icount entries each. */
	for (unsigned int i = 0; i < pcount; i++) {
		processor_t processor = processor_array[i];
		assert(processor != PROCESSOR_NULL);

		unsigned int tcount = icount;

		result = processor_info(processor, flavor, &thost, info, &tcount);
		if (result != KERN_SUCCESS) {
			/* Free the whole buffer on any per-processor failure. */
			kmem_free(ipc_kernel_map, addr, size);
			return result;
		}
		info += icount;
	}

	/* Scrub the unused tail of the last page. */
	if (size != needed) {
		bzero((char *)addr + needed, size - needed);
	}

	/*
	 * Unwire the buffer, then move it out of the kernel map as a
	 * vm_map_copy_t so it can be handed to the caller out-of-line.
	 */
	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return KERN_SUCCESS;
}
1191
1192 static bool
1193 is_valid_host_special_port(int id)
1194 {
1195 return (id <= HOST_MAX_SPECIAL_PORT) &&
1196 (id >= HOST_MIN_SPECIAL_PORT) &&
1197 ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
1198 }
1199
1200 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
1201
/*
 * Kernel interface for setting a special port.
 *
 * Swaps in "port" for slot "id" under the host lock and releases the
 * send right on the displaced port (outside the lock).  In-kernel
 * callers are trusted, so an invalid id is a panic, not an error.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	if (!is_valid_host_special_port(id)) {
		panic("attempted to set invalid special port %d", id);
	}

#if !MACH_FLIPC
	/* The node port only exists on FLIPC-enabled kernels. */
	if (id == HOST_NODE_PORT) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	/* Only the init process (initproc) may replace the amfid port. */
	if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) {
		host_unlock(host_priv);
		return KERN_NO_ACCESS;
	}
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT) {
		mach_node_port_changed();
	}
#endif

	/* Drop the displaced send right after releasing the host lock. */
	if (IP_VALID(old_port)) {
		ipc_port_release_send(old_port);
	}
	return KERN_SUCCESS;
}
1240
1241 /*
1242 * Kernel interface for retrieving a special port.
1243 */
1244 kern_return_t
1245 kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
1246 {
1247 if (!is_valid_host_special_port(id)) {
1248 panic("attempted to get invalid special port %d", id);
1249 }
1250
1251 host_lock(host_priv);
1252 *portp = host_priv->special[id];
1253 host_unlock(host_priv);
1254 return KERN_SUCCESS;
1255 }
1256
1257 /*
1258 * User interface for setting a special port.
1259 *
1260 * Only permits the user to set a user-owned special port
1261 * ID, rejecting a kernel-owned special port ID.
1262 *
1263 * A special kernel port cannot be set up using this
1264 * routine; use kernel_set_special_port() instead.
1265 */
1266 kern_return_t
1267 host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port)
1268 {
1269 if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
1270 return KERN_INVALID_ARGUMENT;
1271 }
1272
1273 if (task_is_driver(current_task())) {
1274 return KERN_NO_ACCESS;
1275 }
1276
1277 if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
1278 return KERN_INVALID_RIGHT;
1279 }
1280
1281 return host_set_special_port(host_priv, id, port);
1282 }
1283
1284 kern_return_t
1285 host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
1286 {
1287 if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
1288 return KERN_INVALID_ARGUMENT;
1289 }
1290
1291 #if CONFIG_MACF
1292 if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
1293 return KERN_NO_ACCESS;
1294 }
1295 #endif
1296
1297 return kernel_set_special_port(host_priv, id, port);
1298 }
1299
1300 /*
1301 * User interface for retrieving a special port.
1302 *
1303 * Note that there is nothing to prevent a user special
1304 * port from disappearing after it has been discovered by
1305 * the caller; thus, using a special port can always result
1306 * in a "port not valid" error.
1307 */
1308
1309 kern_return_t
1310 host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
1311 {
1312 if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
1313 return KERN_INVALID_ARGUMENT;
1314 }
1315
1316 task_t task = current_task();
1317 if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
1318 /* allow HID drivers to get the sysdiagnose port for keychord handling */
1319 if (id == HOST_SYSDIAGNOSE_PORT &&
1320 IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) {
1321 goto get_special_port;
1322 }
1323 return KERN_NO_ACCESS;
1324 }
1325 get_special_port:
1326 return host_get_special_port(host_priv, node, id, portp);
1327 }
1328
/*
 * host_get_special_port:
 *
 * Return a send right for the requested special port.  The security
 * port and out-of-range ids are rejected.
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
		return KERN_INVALID_ARGUMENT;
	}

	host_lock(host_priv);
	/*
	 * NOTE(review): reads realhost.special[] rather than
	 * host_priv->special[]; presumably equivalent because host_priv
	 * always refers to realhost (see host_priv_self()) -- confirm
	 * before changing.
	 */
	port = realhost.special[id];
	/* Make a fresh send right for the caller under the lock. */
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
1345
/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The IO master port lives on the (singleton) privileged host. */
	return host_get_io_master_port(host_priv_self(), io_masterp);
}
1360
/* Return the singleton host object. */
host_t
host_self(void)
{
	return &realhost;
}

/* Return the privileged host object; same singleton as host_self(). */
host_priv_t
host_priv_self(void)
{
	return &realhost;
}

/* Return the host security object; also backed by realhost. */
host_security_t
host_security_self(void)
{
	return &realhost;
}
1378
/*
 * host_set_atm_diagnostic_flag:
 *
 * Update the ATM diagnostic configuration.  Requires the private
 * set-atm-diagnostic-flag entitlement; returns KERN_NOT_SUPPORTED on
 * kernels built without CONFIG_ATM.
 */
kern_return_t
host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag)
{
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must hold the private entitlement. */
	if (!IOTaskHasEntitlement(current_task(), "com.apple.private.set-atm-diagnostic-flag")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_ATM
	return atm_set_diagnostic_config(diagnostic_flag);
#else
	(void)diagnostic_flag;
	return KERN_NOT_SUPPORTED;
#endif
}
1397
/*
 * host_set_multiuser_config_flags:
 *
 * Publish the multiuser configuration word to the commpage
 * (non-macOS targets only; macOS returns KERN_NOT_SUPPORTED).
 */
kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if !defined(XNU_TARGET_OS_OSX)
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return KERN_SUCCESS;
#else
	(void)host_priv;
	(void)multiuser_config;
	return KERN_NOT_SUPPORTED;
#endif
}