/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * host.c
 *
 * Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>     // mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#include <IOKit/IOBSD.h>        // IOTaskHasEntitlement
#include <IOKit/IOKitKeys.h>    // DriverKit entitlement strings


#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>

host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
    processor_t processor, *tp;
    void * addr;
    unsigned int count, i;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

    count = processor_count;
    assert(count != 0);

    addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
    if (addr == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    tp = (processor_t *)addr;
    *tp++ = processor = processor_list;

    if (count > 1) {
        simple_lock(&processor_list_lock, LCK_GRP_NULL);

        for (i = 1; i < count; i++) {
            *tp++ = processor = processor->processor_list;
        }

        simple_unlock(&processor_list_lock);
    }

    *countp = count;
    *out_array = (processor_array_t)addr;

    /* do the conversion that Mig should handle */
    tp = (processor_t *)addr;
    for (i = 0; i < count; i++) {
        ((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);
    }

    return KERN_SUCCESS;
}
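
/*
 * Illustrative sketch (added for exposition, not Apple's code): how a
 * user-space client might call the MIG routine above. It requires a send
 * right to the privileged host port (host_priv below is assumed to have
 * been obtained already; unprivileged tasks cannot get one), and each
 * returned element is a send right the caller should eventually deallocate.
 *
 *     processor_array_t procs;
 *     mach_msg_type_number_t count;
 *     kern_return_t kr = host_processors(host_priv, &procs, &count);
 *     if (kr == KERN_SUCCESS) {
 *         // procs[0..count-1] hold processor port send rights
 *     }
 */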

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (flavor) {
    case HOST_BASIC_INFO: {
        host_basic_info_t basic_info;
        int master_id;

        /*
         * Basic information about this host.
         */
        if (*count < HOST_BASIC_INFO_OLD_COUNT) {
            return KERN_FAILURE;
        }

        basic_info = (host_basic_info_t)info;

        basic_info->memory_size = machine_info.memory_size;
        basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
        basic_info->avail_cpus = processor_avail_count_user;
#else
        basic_info->avail_cpus = processor_avail_count;
#endif
        master_id = master_processor->cpu_id;
        basic_info->cpu_type = slot_type(master_id);
        basic_info->cpu_subtype = slot_subtype(master_id);

        if (*count >= HOST_BASIC_INFO_COUNT) {
            basic_info->cpu_threadtype = slot_threadtype(master_id);
            basic_info->physical_cpu = machine_info.physical_cpu;
            basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
            basic_info->logical_cpu = basic_info->avail_cpus;
#else
            basic_info->logical_cpu = machine_info.logical_cpu;
#endif
            basic_info->logical_cpu_max = machine_info.logical_cpu_max;
            basic_info->max_mem = machine_info.max_mem;

            *count = HOST_BASIC_INFO_COUNT;
        } else {
            *count = HOST_BASIC_INFO_OLD_COUNT;
        }

        return KERN_SUCCESS;
    }

    case HOST_SCHED_INFO: {
        host_sched_info_t sched_info;
        uint32_t quantum_time;
        uint64_t quantum_ns;

        /*
         * Return scheduler information.
         */
        if (*count < HOST_SCHED_INFO_COUNT) {
            return KERN_FAILURE;
        }

        sched_info = (host_sched_info_t)info;

        quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
        absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

        sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

        *count = HOST_SCHED_INFO_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_RESOURCE_SIZES: {
        /*
         * Return sizes of kernel data structures
         */
        if (*count < HOST_RESOURCE_SIZES_COUNT) {
            return KERN_FAILURE;
        }

        /* XXX Fail until ledgers are implemented */
        return KERN_INVALID_ARGUMENT;
    }

    case HOST_PRIORITY_INFO: {
        host_priority_info_t priority_info;

        if (*count < HOST_PRIORITY_INFO_COUNT) {
            return KERN_FAILURE;
        }

        priority_info = (host_priority_info_t)info;

        priority_info->kernel_priority = MINPRI_KERNEL;
        priority_info->system_priority = MINPRI_KERNEL;
        priority_info->server_priority = MINPRI_RESERVED;
        priority_info->user_priority = BASEPRI_DEFAULT;
        priority_info->depress_priority = DEPRESSPRI;
        priority_info->idle_priority = IDLEPRI;
        priority_info->minimum_priority = MINPRI_USER;
        priority_info->maximum_priority = MAXPRI_RESERVED;

        *count = HOST_PRIORITY_INFO_COUNT;

        return KERN_SUCCESS;
    }

    /*
     * Gestalt for various trap facilities.
     */
    case HOST_MACH_MSG_TRAP:
    case HOST_SEMAPHORE_TRAPS: {
        *count = 0;
        return KERN_SUCCESS;
    }

    case HOST_CAN_HAS_DEBUGGER: {
        host_can_has_debugger_info_t can_has_debugger_info;

        if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
            return KERN_FAILURE;
        }

        can_has_debugger_info = (host_can_has_debugger_info_t)info;
        can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
        *count = HOST_CAN_HAS_DEBUGGER_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_VM_PURGABLE: {
        if (*count < HOST_VM_PURGABLE_COUNT) {
            return KERN_FAILURE;
        }

        vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

        *count = HOST_VM_PURGABLE_COUNT;
        return KERN_SUCCESS;
    }

    case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
        if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
            return KERN_FAILURE;
        }

        host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
        bzero(debug_info, sizeof(host_debug_info_internal_data_t));
        *count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
        debug_info->config_coalitions = 1;
#endif
        debug_info->config_bank = 1;
#if CONFIG_ATM
        debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
        debug_info->config_csr = 1;
#endif
        return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
        return KERN_NOT_SUPPORTED;
#endif
    }

    case HOST_PREFERRED_USER_ARCH: {
        host_preferred_user_arch_t user_arch_info;

        /*
         * The preferred user-space architecture for this host.
         */
        if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
            return KERN_FAILURE;
        }

        user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
        cpu_type_t preferred_cpu_type;
        cpu_subtype_t preferred_cpu_subtype;
        if (!PE_get_default("kern.preferred_cpu_type", &preferred_cpu_type, sizeof(cpu_type_t))) {
            preferred_cpu_type = PREFERRED_USER_CPU_TYPE;
        }
        if (!PE_get_default("kern.preferred_cpu_subtype", &preferred_cpu_subtype, sizeof(cpu_subtype_t))) {
            preferred_cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
        }
        user_arch_info->cpu_type = preferred_cpu_type;
        user_arch_info->cpu_subtype = preferred_cpu_subtype;
#else
        int master_id = master_processor->cpu_id;
        user_arch_info->cpu_type = slot_type(master_id);
        user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

        *count = HOST_PREFERRED_USER_ARCH_COUNT;

        return KERN_SUCCESS;
    }

    default:
        return KERN_INVALID_ARGUMENT;
    }
}
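
/*
 * Illustrative sketch (added for exposition, not Apple's code): querying
 * HOST_BASIC_INFO from user space through the MIG stub in <mach/mach.h>.
 * Passing HOST_BASIC_INFO_COUNT requests the full structure; the kernel
 * writes back the count it actually filled in, as seen above.
 *
 *     host_basic_info_data_t basic;
 *     mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *     kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *         (host_info_t)&basic, &count);
 *     if (kr == KERN_SUCCESS) {
 *         // basic.max_cpus, basic.max_mem, ... are now valid
 *     }
 */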

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    uint32_t i;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    switch (flavor) {
    case HOST_LOAD_INFO: {
        host_load_info_t load_info;

        if (*count < HOST_LOAD_INFO_COUNT) {
            return KERN_FAILURE;
        }

        load_info = (host_load_info_t)info;

        bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
        bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

        *count = HOST_LOAD_INFO_COUNT;
        return KERN_SUCCESS;
    }

    case HOST_VM_INFO: {
        processor_t processor;
        vm_statistics64_t stat;
        vm_statistics64_data_t host_vm_stat;
        vm_statistics_t stat32;
        mach_msg_type_number_t original_count;

        if (*count < HOST_VM_INFO_REV0_COUNT) {
            return KERN_FAILURE;
        }

        processor = processor_list;
        stat = &PROCESSOR_DATA(processor, vm_stat);
        host_vm_stat = *stat;

        if (processor_count > 1) {
            simple_lock(&processor_list_lock, LCK_GRP_NULL);

            while ((processor = processor->processor_list) != NULL) {
                stat = &PROCESSOR_DATA(processor, vm_stat);

                host_vm_stat.zero_fill_count += stat->zero_fill_count;
                host_vm_stat.reactivations += stat->reactivations;
                host_vm_stat.pageins += stat->pageins;
                host_vm_stat.pageouts += stat->pageouts;
                host_vm_stat.faults += stat->faults;
                host_vm_stat.cow_faults += stat->cow_faults;
                host_vm_stat.lookups += stat->lookups;
                host_vm_stat.hits += stat->hits;
            }

            simple_unlock(&processor_list_lock);
        }

        stat32 = (vm_statistics_t)info;

        stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
        stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl * lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
            }
        }
        stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
        stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
        stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
        stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
        stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
        stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
        stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
        stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
        stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

        /*
         * Fill in extra info added in later revisions of the
         * vm_statistics data structure. Fill in only what can fit
         * in the data structure the caller gave us!
         */
        original_count = *count;
        *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
        if (original_count >= HOST_VM_INFO_REV1_COUNT) {
            /* rev1 added "purgeable" info */
            stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
            stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
            *count = HOST_VM_INFO_REV1_COUNT;
        }

        if (original_count >= HOST_VM_INFO_REV2_COUNT) {
            /* rev2 added "speculative" info */
            stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
            *count = HOST_VM_INFO_REV2_COUNT;
        }

        /* rev3 changed some of the fields to be 64-bit */

        return KERN_SUCCESS;
    }

    case HOST_CPU_LOAD_INFO: {
        processor_t processor;
        host_cpu_load_info_t cpu_load_info;

        if (*count < HOST_CPU_LOAD_INFO_COUNT) {
            return KERN_FAILURE;
        }

#define GET_TICKS_VALUE(state, ticks) \
MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
MACRO_END

        cpu_load_info = (host_cpu_load_info_t)info;
        cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

        simple_lock(&processor_list_lock, LCK_GRP_NULL);

        for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
            timer_t idle_state;
            uint64_t idle_time_snapshot1, idle_time_snapshot2;
            uint64_t idle_time_tstamp1, idle_time_tstamp2;

            /* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

            GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
            if (precise_user_kernel_time) {
                GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
            } else {
                /* system_state may represent either sys or user */
                GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
            }

            idle_state = &PROCESSOR_DATA(processor, idle_state);
            idle_time_snapshot1 = timer_grab(idle_state);
            idle_time_tstamp1 = idle_state->tstamp;

            if (PROCESSOR_DATA(processor, current_state) != idle_state) {
                /* Processor is non-idle, so idle timer should be accurate */
                GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
            } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
                (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
                /* Idle timer is being updated concurrently, second stamp is good enough */
                GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
            } else {
                /*
                 * Idle timer may be very stale. Fortunately we have established
                 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
                 */
                idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

                GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
            }
        }
        simple_unlock(&processor_list_lock);

        *count = HOST_CPU_LOAD_INFO_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_EXPIRED_TASK_INFO: {
        if (*count < TASK_POWER_INFO_COUNT) {
            return KERN_FAILURE;
        }

        task_power_info_t tinfo1 = (task_power_info_t)info;
        task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

        tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
        tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

        tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

        tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

        tinfo1->total_user = dead_task_statistics.total_user_time;
        tinfo1->total_system = dead_task_statistics.total_system_time;
        if (*count < TASK_POWER_INFO_V2_COUNT) {
            *count = TASK_POWER_INFO_COUNT;
        } else if (*count >= TASK_POWER_INFO_V2_COUNT) {
            tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
            tinfo2->task_energy = dead_task_statistics.task_energy;
            tinfo2->task_ptime = dead_task_statistics.total_ptime;
            tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
            *count = TASK_POWER_INFO_V2_COUNT;
        }

        return KERN_SUCCESS;
    }
    default:
        return KERN_INVALID_ARGUMENT;
    }
}
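
/*
 * Illustrative sketch (added for exposition, not Apple's code): fetching the
 * 32-bit VM statistics from user space. HOST_VM_INFO_COUNT asks for the
 * newest revision; per the revision logic above, older callers passing a
 * smaller count receive only the rev0/rev1 prefix.
 *
 *     vm_statistics_data_t vmstat;
 *     mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
 *     kern_return_t kr = host_statistics(mach_host_self(), HOST_VM_INFO,
 *         (host_info_t)&vmstat, &count);
 */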

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW 1 /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2 /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static lck_mtx_t host_statistics_lck;
static lck_grp_t *host_statistics_lck_grp;

#define HOST_VM_INFO64_REV0 0
#define HOST_VM_INFO64_REV1 1
#define HOST_EXTMOD_INFO64_REV0 2
#define HOST_LOAD_INFO_REV0 3
#define HOST_VM_INFO_REV0 4
#define HOST_VM_INFO_REV1 5
#define HOST_VM_INFO_REV2 6
#define HOST_CPU_LOAD_INFO_REV0 7
#define HOST_EXPIRED_TASK_INFO_REV0 8
#define HOST_EXPIRED_TASK_INFO_REV1 9
#define NUM_HOST_INFO_DATA_TYPES 10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
    uint64_t last_access;
    uint64_t current_requests;
    uint64_t max_requests;
    uintptr_t data;
    mach_msg_type_number_t count; /* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
    [HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
    [HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
    [HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
    [HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
    [HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
    [HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
    [HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
    [HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
    [HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
    [HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};


void
host_statistics_init(void)
{
    host_statistics_lck_grp = lck_grp_alloc_init("host_statistics", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&host_statistics_lck, host_statistics_lck_grp, LCK_ATTR_NULL);
    nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
    if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
        return;
    }

    /* Only results produced for unentitled callers are cached */
    task_t task = current_task();
    if (task->t_flags & TF_PLATFORM) {
        return;
    }

    memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
    return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
    if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
        *count = 0;
        return;
    }

    *count = g_host_stats_cache[index].count;
    memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
    switch (flavor) {
    case HOST_VM_INFO64:
        if (!is_stat64) {
            *ret = KERN_INVALID_ARGUMENT;
            return -1;
        }
        if (*count < HOST_VM_INFO64_REV0_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= HOST_VM_INFO64_REV1_COUNT) {
            return HOST_VM_INFO64_REV1;
        }
        return HOST_VM_INFO64_REV0;

    case HOST_EXTMOD_INFO64:
        if (!is_stat64) {
            *ret = KERN_INVALID_ARGUMENT;
            return -1;
        }
        if (*count < HOST_EXTMOD_INFO64_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_EXTMOD_INFO64_REV0;

    case HOST_LOAD_INFO:
        if (*count < HOST_LOAD_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_LOAD_INFO_REV0;

    case HOST_VM_INFO:
        if (*count < HOST_VM_INFO_REV0_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= HOST_VM_INFO_REV2_COUNT) {
            return HOST_VM_INFO_REV2;
        }
        if (*count >= HOST_VM_INFO_REV1_COUNT) {
            return HOST_VM_INFO_REV1;
        }
        return HOST_VM_INFO_REV0;

    case HOST_CPU_LOAD_INFO:
        if (*count < HOST_CPU_LOAD_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_CPU_LOAD_INFO_REV0;

    case HOST_EXPIRED_TASK_INFO:
        if (*count < TASK_POWER_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= TASK_POWER_INFO_V2_COUNT) {
            return HOST_EXPIRED_TASK_INFO_REV1;
        }
        return HOST_EXPIRED_TASK_INFO_REV0;

    default:
        *ret = KERN_INVALID_ARGUMENT;
        return -1;
    }
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
    task_t task = current_task();

    assert(task != kernel_task);

    *ret = KERN_SUCCESS;

    /* Access control only for third party applications */
    if (task->t_flags & TF_PLATFORM) {
        return FALSE;
    }

    /* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
    bool rate_limited = FALSE;
    bool set_last_access = TRUE;

    /* there is a cache for every flavor */
    int index = get_host_info_data_index(is_stat64, flavor, count, ret);
    if (index == -1) {
        goto out;
    }

    *pindex = index;
    lck_mtx_lock(&host_statistics_lck);
    if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
        set_last_access = FALSE;
        if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
            rate_limited = TRUE;
            get_cached_info(index, info, count);
        }
    }
    if (set_last_access) {
        g_host_stats_cache[index].current_requests = 1;
        /*
         * Select a random number of requests (between HOST_STATISTICS_MIN_REQUESTS
         * and HOST_STATISTICS_MAX_REQUESTS) to allow in this window. This way it is
         * not possible to infer, by watching when the cached copy changes, whether
         * host_statistics was called in the previous window.
         */
        g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
        g_host_stats_cache[index].last_access = mach_continuous_time();
    }
    lck_mtx_unlock(&host_statistics_lck);
out:
    return rate_limited;
}
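
/*
 * Worked example of the window logic above (added for exposition): with
 * HOST_STATISTICS_MIN_REQUESTS = 2 and HOST_STATISTICS_MAX_REQUESTS = 10,
 *
 *     max_requests = (mach_absolute_time() % (10 - 2 + 1)) + 2;   // in [2, 10]
 *
 * so each one-second window admits between 2 and 10 live queries from an
 * unentitled task; past that randomized threshold, callers get the cached
 * copy until last_access ages out of the window. Randomizing the threshold
 * keeps an observer from inferring, by watching when the cache refreshes,
 * how often host_statistics was queried in the previous window.
 */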

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    uint32_t i;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    switch (flavor) {
    case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
    {
        processor_t processor;
        vm_statistics64_t stat;
        vm_statistics64_data_t host_vm_stat;
        mach_msg_type_number_t original_count;
        unsigned int local_q_internal_count;
        unsigned int local_q_external_count;

        if (*count < HOST_VM_INFO64_REV0_COUNT) {
            return KERN_FAILURE;
        }

        processor = processor_list;
        stat = &PROCESSOR_DATA(processor, vm_stat);
        host_vm_stat = *stat;

        if (processor_count > 1) {
            simple_lock(&processor_list_lock, LCK_GRP_NULL);

            while ((processor = processor->processor_list) != NULL) {
                stat = &PROCESSOR_DATA(processor, vm_stat);

                host_vm_stat.zero_fill_count += stat->zero_fill_count;
                host_vm_stat.reactivations += stat->reactivations;
                host_vm_stat.pageins += stat->pageins;
                host_vm_stat.pageouts += stat->pageouts;
                host_vm_stat.faults += stat->faults;
                host_vm_stat.cow_faults += stat->cow_faults;
                host_vm_stat.lookups += stat->lookups;
                host_vm_stat.hits += stat->hits;
                host_vm_stat.compressions += stat->compressions;
                host_vm_stat.decompressions += stat->decompressions;
                host_vm_stat.swapins += stat->swapins;
                host_vm_stat.swapouts += stat->swapouts;
            }

            simple_unlock(&processor_list_lock);
        }

        stat = (vm_statistics64_t)info;

        stat->free_count = vm_page_free_count + vm_page_speculative_count;
        stat->active_count = vm_page_active_count;

        local_q_internal_count = 0;
        local_q_external_count = 0;
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl * lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                stat->active_count += lq->vpl_count;
                local_q_internal_count += lq->vpl_internal_count;
                local_q_external_count += lq->vpl_external_count;
            }
        }
        stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
        stat->wire_count = vm_page_wire_count;
#else
        stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
        stat->zero_fill_count = host_vm_stat.zero_fill_count;
        stat->reactivations = host_vm_stat.reactivations;
        stat->pageins = host_vm_stat.pageins;
        stat->pageouts = host_vm_stat.pageouts;
        stat->faults = host_vm_stat.faults;
        stat->cow_faults = host_vm_stat.cow_faults;
        stat->lookups = host_vm_stat.lookups;
        stat->hits = host_vm_stat.hits;

        stat->purgeable_count = vm_page_purgeable_count;
        stat->purges = vm_page_purged_count;

        stat->speculative_count = vm_page_speculative_count;

        /*
         * Fill in extra info added in later revisions of the
         * vm_statistics data structure. Fill in only what can fit
         * in the data structure the caller gave us!
         */
        original_count = *count;
        *count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
        if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
            /* rev1 added "throttled count" */
            stat->throttled_count = vm_page_throttled_count;
            /* rev1 added "compression" info */
            stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
            stat->compressions = host_vm_stat.compressions;
            stat->decompressions = host_vm_stat.decompressions;
            stat->swapins = host_vm_stat.swapins;
            stat->swapouts = host_vm_stat.swapouts;
            /* rev1 added:
             *   "external page count"
             *   "anonymous page count"
             *   "total # of pages (uncompressed) held in the compressor"
             */
            stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
            stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
            stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
            *count = HOST_VM_INFO64_REV1_COUNT;
        }

        return KERN_SUCCESS;
    }

    case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
    {
        vm_extmod_statistics_t out_extmod_statistics;

        if (*count < HOST_EXTMOD_INFO64_COUNT) {
            return KERN_FAILURE;
        }

        out_extmod_statistics = (vm_extmod_statistics_t)info;
        *out_extmod_statistics = host_extmod_statistics;

        *count = HOST_EXTMOD_INFO64_COUNT;

        return KERN_SUCCESS;
    }

    default: /* If we didn't recognize the flavor, send to host_statistics */
        return host_statistics(host, flavor, (host_info_t)info, count);
    }
}
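
/*
 * Illustrative sketch (added for exposition, not Apple's code): the 64-bit
 * counterpart of the HOST_VM_INFO example, using the wider structure.
 *
 *     vm_statistics64_data_t vmstat;
 *     mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *     kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *         (host_info64_t)&vmstat, &count);
 */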

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    kern_return_t ret = KERN_SUCCESS;
    int index;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
        return ret;
    }

    if (ret != KERN_SUCCESS) {
        return ret;
    }

    ret = host_statistics64(host, flavor, info, count);

    if (ret == KERN_SUCCESS) {
        cache_host_statistics(index, info);
    }

    return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    kern_return_t ret = KERN_SUCCESS;
    int index;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
        return ret;
    }

    if (ret != KERN_SUCCESS) {
        return ret;
    }

    ret = host_statistics(host, flavor, info, count);

    if (ret == KERN_SUCCESS) {
        cache_host_statistics(index, info);
    }

    return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
    sched_stats_active = active;
    return KERN_SUCCESS;
}


uint64_t
get_pages_grabbed_count(void)
{
    processor_t processor;
    uint64_t pages_grabbed_count = 0;

    simple_lock(&processor_list_lock, LCK_GRP_NULL);

    processor = processor_list;

    while (processor) {
        pages_grabbed_count += PROCESSOR_DATA(processor, page_grab_count);
        processor = processor->processor_list;
    }
    simple_unlock(&processor_list_lock);

    return pages_grabbed_count;
}


kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
    processor_t processor;

    if (!sched_stats_active) {
        return KERN_FAILURE;
    }

    simple_lock(&processor_list_lock, LCK_GRP_NULL);

    if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
        simple_unlock(&processor_list_lock);
        return KERN_FAILURE;
    }

    processor = processor_list;
    while (processor) {
        struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

        out->ps_cpuid = processor->cpu_id;
        out->ps_csw_count = stats->csw_count;
        out->ps_preempt_count = stats->preempt_count;
        out->ps_preempted_rt_count = stats->preempted_rt_count;
        out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
        out->ps_rt_sched_count = stats->rt_sched_count;
        out->ps_interrupt_count = stats->interrupt_count;
        out->ps_ipi_count = stats->ipi_count;
        out->ps_timer_pop_count = stats->timer_pop_count;
        out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
        out->ps_idle_transitions = stats->idle_transitions;
        out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

        out++;
        processor = processor->processor_list;
    }

    *count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

    simple_unlock(&processor_list_lock);

    /* And include RT Queue information */
    bzero(out, sizeof(*out));
    out->ps_cpuid = (-1);
    out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
    out++;
    *count += (uint32_t)sizeof(struct _processor_statistics_np);

    return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    *out_page_size = PAGE_SIZE;

    return KERN_SUCCESS;
}
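
/*
 * Illustrative sketch (added for exposition, not Apple's code):
 *
 *     vm_size_t page_size;
 *     host_page_size(mach_host_self(), &page_size);
 *
 * User code more commonly reads the vm_page_size global exported by
 * Libsystem rather than issuing this call directly.
 */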

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)strncpy(out_version, version, sizeof(kernel_version_t));

    return KERN_SUCCESS;
}
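
/*
 * Illustrative sketch (added for exposition, not Apple's code):
 * kernel_version_t is a fixed-size char array from <mach/host_info.h>, so
 * no count argument is needed.
 *
 *     kernel_version_t kv;
 *     if (host_kernel_version(mach_host_self(), kv) == KERN_SUCCESS) {
 *         // kv holds the same string that `uname -v` reports
 *     }
 */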

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
    void * addr;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Allocate memory. Can be pageable because it won't be
     * touched while holding a lock.
     */

    addr = kalloc((vm_size_t)sizeof(mach_port_t));
    if (addr == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    /* do the conversion that Mig should handle */
    *((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

    *pset_list = (processor_set_array_t)addr;
    *count = 1;

    return KERN_SUCCESS;
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
    if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
        *pset = PROCESSOR_SET_NULL;

        return KERN_INVALID_ARGUMENT;
    }

    *pset = pset_name;

    return KERN_SUCCESS;
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
    kern_return_t result;
    processor_t processor;
    host_t thost;
    processor_info_t info;
    unsigned int icount, tcount;
    unsigned int pcount, i;
    vm_offset_t addr;
    vm_size_t size, needed;
    vm_map_copy_t copy;

    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    result = processor_info_count(flavor, &icount);
    if (result != KERN_SUCCESS) {
        return result;
    }

    pcount = processor_count;
    assert(pcount != 0);

    needed = pcount * icount * sizeof(natural_t);
    size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
    result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
    if (result != KERN_SUCCESS) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info = (processor_info_t)addr;
    processor = processor_list;
    tcount = icount;

    result = processor_info(processor, flavor, &thost, info, &tcount);
    if (result != KERN_SUCCESS) {
        kmem_free(ipc_kernel_map, addr, size);
        return result;
    }

    if (pcount > 1) {
        for (i = 1; i < pcount; i++) {
            simple_lock(&processor_list_lock, LCK_GRP_NULL);
            processor = processor->processor_list;
            simple_unlock(&processor_list_lock);

            info += icount;
            tcount = icount;
            result = processor_info(processor, flavor, &thost, info, &tcount);
            if (result != KERN_SUCCESS) {
                kmem_free(ipc_kernel_map, addr, size);
                return result;
            }
        }
    }

    if (size != needed) {
        bzero((char *)addr + needed, size - needed);
    }

    result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
        vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
    assert(result == KERN_SUCCESS);
    result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
    assert(result == KERN_SUCCESS);

    *out_pcount = pcount;
    *out_array = (processor_info_array_t)copy;
    *out_array_count = pcount * icount;

    return KERN_SUCCESS;
}
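
/*
 * Illustrative sketch (added for exposition, not Apple's code): per-CPU load
 * ticks. The reply is out-of-line memory, so the caller owns it and must
 * vm_deallocate() it when done.
 *
 *     natural_t pcount;
 *     processor_info_array_t info;
 *     mach_msg_type_number_t icount;
 *     kern_return_t kr = host_processor_info(mach_host_self(),
 *         PROCESSOR_CPU_LOAD_INFO, &pcount, &info, &icount);
 *     if (kr == KERN_SUCCESS) {
 *         processor_cpu_load_info_t loads = (processor_cpu_load_info_t)info;
 *         // loads[cpu].cpu_ticks[CPU_STATE_USER], ... for cpu in [0, pcount)
 *         vm_deallocate(mach_task_self(), (vm_address_t)info,
 *             icount * sizeof(natural_t));
 *     }
 */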

static bool
is_valid_host_special_port(int id)
{
    return (id <= HOST_MAX_SPECIAL_PORT) &&
           (id >= HOST_MIN_SPECIAL_PORT) &&
           ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
    ipc_port_t old_port;

    if (!is_valid_host_special_port(id)) {
        panic("attempted to set invalid special port %d", id);
    }

#if !MACH_FLIPC
    if (id == HOST_NODE_PORT) {
        return KERN_NOT_SUPPORTED;
    }
#endif

    host_lock(host_priv);
    old_port = host_priv->special[id];
    if ((id == HOST_AMFID_PORT) && (task_pid(current_task()) != 1)) {
        host_unlock(host_priv);
        return KERN_NO_ACCESS;
    }
    host_priv->special[id] = port;
    host_unlock(host_priv);

#if MACH_FLIPC
    if (id == HOST_NODE_PORT) {
        mach_node_port_changed();
    }
#endif

    if (IP_VALID(old_port)) {
        ipc_port_release_send(old_port);
    }
    return KERN_SUCCESS;
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
    if (!is_valid_host_special_port(id)) {
        panic("attempted to get invalid special port %d", id);
    }

    host_lock(host_priv);
    *portp = host_priv->special[id];
    host_unlock(host_priv);
    return KERN_SUCCESS;
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
    if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    if (task_is_driver(current_task())) {
        return KERN_NO_ACCESS;
    }

#if CONFIG_MACF
    if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
        return KERN_NO_ACCESS;
    }
#endif

    return kernel_set_special_port(host_priv, id, port);
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
    ipc_port_t port;

    if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    task_t task = current_task();
    if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) {
        /* allow HID drivers to get the sysdiagnose port for keychord handling */
        if (IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey) &&
            id == HOST_SYSDIAGNOSE_PORT) {
            goto get_special_port;
        }
        return KERN_NO_ACCESS;
    }

get_special_port:
    host_lock(host_priv);
    port = realhost.special[id];
    *portp = ipc_port_copy_send(port);
    host_unlock(host_priv);

    return KERN_SUCCESS;
}
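
/*
 * Illustrative sketch (added for exposition, not Apple's code): a privileged
 * caller fetching a user-settable special port. HOST_LOCAL_NODE is the
 * conventional value for the unused node argument; host_priv is assumed to
 * be a send right to the privileged host port.
 *
 *     mach_port_t port;
 *     kern_return_t kr = host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *         HOST_AUTOMOUNTD_PORT, &port);
 */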

/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return host_get_io_master_port(host_priv_self(), io_masterp);
}

host_t
host_self(void)
{
    return &realhost;
}

host_priv_t
host_priv_self(void)
{
    return &realhost;
}

host_security_t
host_security_self(void)
{
    return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

#if CONFIG_ATM
    return atm_set_diagnostic_config(diagnostic_flag);
#else
    (void)diagnostic_flag;
    return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if CONFIG_EMBEDDED
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

    /*
     * Always enforce that the multiuser bit is set
     * if a value is written to the commpage word.
     */
    commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
    return KERN_SUCCESS;
#else
    (void)host_priv;
    (void)multiuser_config;
    return KERN_NOT_SUPPORTED;
#endif
}