/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * host.c
 *
 * Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h> /* mach_node_port_changed() */

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>

host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
    processor_t processor, *tp;
    void * addr;
    unsigned int count, i;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

    count = processor_count;
    assert(count != 0);

    addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
    if (addr == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    tp = (processor_t *)addr;
    *tp++ = processor = processor_list;

    if (count > 1) {
        simple_lock(&processor_list_lock, LCK_GRP_NULL);

        for (i = 1; i < count; i++) {
            *tp++ = processor = processor->processor_list;
        }

        simple_unlock(&processor_list_lock);
    }

    *countp = count;
    *out_array = (processor_array_t)addr;

    /* do the conversion that Mig should handle */
    tp = (processor_t *)addr;
    for (i = 0; i < count; i++) {
        ((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);
    }

    return KERN_SUCCESS;
}
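
/*
 * Illustrative user-space usage (a sketch, not part of the kernel build):
 * calling host_processors() through its MIG routine. This assumes the caller
 * has already obtained the host-priv port (normally only possible as root).
 * The reply carries an out-of-line array of processor send rights, so both
 * the rights and the array memory should be released:
 *
 *     processor_array_t procs;
 *     mach_msg_type_number_t nprocs;
 *     if (host_processors(host_priv, &procs, &nprocs) == KERN_SUCCESS) {
 *         for (mach_msg_type_number_t i = 0; i < nprocs; i++) {
 *             mach_port_deallocate(mach_task_self(), procs[i]);
 *         }
 *         vm_deallocate(mach_task_self(), (vm_address_t)procs,
 *             nprocs * sizeof(procs[0]));
 *     }
 */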

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (flavor) {
    case HOST_BASIC_INFO: {
        host_basic_info_t basic_info;
        int master_id;

        /*
         * Basic information about this host.
         */
        if (*count < HOST_BASIC_INFO_OLD_COUNT) {
            return KERN_FAILURE;
        }

        basic_info = (host_basic_info_t)info;

        basic_info->memory_size = machine_info.memory_size;
        basic_info->max_cpus = machine_info.max_cpus;
#if defined(__x86_64__)
        basic_info->avail_cpus = processor_avail_count_user;
#else
        basic_info->avail_cpus = processor_avail_count;
#endif
        master_id = master_processor->cpu_id;
        basic_info->cpu_type = slot_type(master_id);
        basic_info->cpu_subtype = slot_subtype(master_id);

        if (*count >= HOST_BASIC_INFO_COUNT) {
            basic_info->cpu_threadtype = slot_threadtype(master_id);
            basic_info->physical_cpu = machine_info.physical_cpu;
            basic_info->physical_cpu_max = machine_info.physical_cpu_max;
#if defined(__x86_64__)
            basic_info->logical_cpu = basic_info->avail_cpus;
#else
            basic_info->logical_cpu = machine_info.logical_cpu;
#endif
            basic_info->logical_cpu_max = machine_info.logical_cpu_max;
            basic_info->max_mem = machine_info.max_mem;

            *count = HOST_BASIC_INFO_COUNT;
        } else {
            *count = HOST_BASIC_INFO_OLD_COUNT;
        }

        return KERN_SUCCESS;
    }

    case HOST_SCHED_INFO: {
        host_sched_info_t sched_info;
        uint32_t quantum_time;
        uint64_t quantum_ns;

        /*
         * Return scheduler information.
         */
        if (*count < HOST_SCHED_INFO_COUNT) {
            return KERN_FAILURE;
        }

        sched_info = (host_sched_info_t)info;

        quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
        absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

        sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

        *count = HOST_SCHED_INFO_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_RESOURCE_SIZES: {
        /*
         * Return sizes of kernel data structures.
         */
        if (*count < HOST_RESOURCE_SIZES_COUNT) {
            return KERN_FAILURE;
        }

        /* XXX Fail until ledgers are implemented */
        return KERN_INVALID_ARGUMENT;
    }

    case HOST_PRIORITY_INFO: {
        host_priority_info_t priority_info;

        if (*count < HOST_PRIORITY_INFO_COUNT) {
            return KERN_FAILURE;
        }

        priority_info = (host_priority_info_t)info;

        priority_info->kernel_priority = MINPRI_KERNEL;
        priority_info->system_priority = MINPRI_KERNEL;
        priority_info->server_priority = MINPRI_RESERVED;
        priority_info->user_priority = BASEPRI_DEFAULT;
        priority_info->depress_priority = DEPRESSPRI;
        priority_info->idle_priority = IDLEPRI;
        priority_info->minimum_priority = MINPRI_USER;
        priority_info->maximum_priority = MAXPRI_RESERVED;

        *count = HOST_PRIORITY_INFO_COUNT;

        return KERN_SUCCESS;
    }

    /*
     * Gestalt for various trap facilities.
     */
    case HOST_MACH_MSG_TRAP:
    case HOST_SEMAPHORE_TRAPS: {
        *count = 0;
        return KERN_SUCCESS;
    }

    case HOST_CAN_HAS_DEBUGGER: {
        host_can_has_debugger_info_t can_has_debugger_info;

        if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) {
            return KERN_FAILURE;
        }

        can_has_debugger_info = (host_can_has_debugger_info_t)info;
        can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
        *count = HOST_CAN_HAS_DEBUGGER_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_VM_PURGABLE: {
        if (*count < HOST_VM_PURGABLE_COUNT) {
            return KERN_FAILURE;
        }

        vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

        *count = HOST_VM_PURGABLE_COUNT;
        return KERN_SUCCESS;
    }

    case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
        if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) {
            return KERN_FAILURE;
        }

        host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
        bzero(debug_info, sizeof(host_debug_info_internal_data_t));
        *count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
        debug_info->config_coalitions = 1;
#endif
        debug_info->config_bank = 1;
#if CONFIG_ATM
        debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
        debug_info->config_csr = 1;
#endif
        return KERN_SUCCESS;
#else /* DEVELOPMENT || DEBUG */
        return KERN_NOT_SUPPORTED;
#endif
    }

    case HOST_PREFERRED_USER_ARCH: {
        host_preferred_user_arch_t user_arch_info;

        /*
         * The preferred user-space CPU type and subtype for this host.
         */
        if (*count < HOST_PREFERRED_USER_ARCH_COUNT) {
            return KERN_FAILURE;
        }

        user_arch_info = (host_preferred_user_arch_t)info;

#if defined(PREFERRED_USER_CPU_TYPE) && defined(PREFERRED_USER_CPU_SUBTYPE)
        user_arch_info->cpu_type = PREFERRED_USER_CPU_TYPE;
        user_arch_info->cpu_subtype = PREFERRED_USER_CPU_SUBTYPE;
#else
        int master_id = master_processor->cpu_id;
        user_arch_info->cpu_type = slot_type(master_id);
        user_arch_info->cpu_subtype = slot_subtype(master_id);
#endif

        *count = HOST_PREFERRED_USER_ARCH_COUNT;

        return KERN_SUCCESS;
    }

    default: return KERN_INVALID_ARGUMENT;
    }
}
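
/*
 * Illustrative user-space usage (a sketch, not part of the kernel build):
 * querying HOST_BASIC_INFO through the unprivileged host name port. The count
 * is an in/out parameter, which is why callers must seed it with the flavor's
 * _COUNT constant before the call:
 *
 *     host_basic_info_data_t basic;
 *     mach_msg_type_number_t cnt = HOST_BASIC_INFO_COUNT;
 *     kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *         (host_info_t)&basic, &cnt);
 *     if (kr == KERN_SUCCESS) {
 *         printf("max_cpus=%d max_mem=%llu\n", basic.max_cpus,
 *             (unsigned long long)basic.max_mem);
 *     }
 */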

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    uint32_t i;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    switch (flavor) {
    case HOST_LOAD_INFO: {
        host_load_info_t load_info;

        if (*count < HOST_LOAD_INFO_COUNT) {
            return KERN_FAILURE;
        }

        load_info = (host_load_info_t)info;

        bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
        bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

        *count = HOST_LOAD_INFO_COUNT;
        return KERN_SUCCESS;
    }

    case HOST_VM_INFO: {
        processor_t processor;
        vm_statistics64_t stat;
        vm_statistics64_data_t host_vm_stat;
        vm_statistics_t stat32;
        mach_msg_type_number_t original_count;

        if (*count < HOST_VM_INFO_REV0_COUNT) {
            return KERN_FAILURE;
        }

        processor = processor_list;
        stat = &PROCESSOR_DATA(processor, vm_stat);
        host_vm_stat = *stat;

        if (processor_count > 1) {
            simple_lock(&processor_list_lock, LCK_GRP_NULL);

            while ((processor = processor->processor_list) != NULL) {
                stat = &PROCESSOR_DATA(processor, vm_stat);

                host_vm_stat.zero_fill_count += stat->zero_fill_count;
                host_vm_stat.reactivations += stat->reactivations;
                host_vm_stat.pageins += stat->pageins;
                host_vm_stat.pageouts += stat->pageouts;
                host_vm_stat.faults += stat->faults;
                host_vm_stat.cow_faults += stat->cow_faults;
                host_vm_stat.lookups += stat->lookups;
                host_vm_stat.hits += stat->hits;
            }

            simple_unlock(&processor_list_lock);
        }

        stat32 = (vm_statistics_t)info;

        stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
        stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl * lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
            }
        }
        stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
        stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
        stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
        stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
        stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
        stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
        stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
        stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
        stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

        /*
         * Fill in extra info added in later revisions of the
         * vm_statistics data structure. Fill in only what can fit
         * in the data structure the caller gave us!
         */
        original_count = *count;
        *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
        if (original_count >= HOST_VM_INFO_REV1_COUNT) {
            /* rev1 added "purgeable" info */
            stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
            stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
            *count = HOST_VM_INFO_REV1_COUNT;
        }

        if (original_count >= HOST_VM_INFO_REV2_COUNT) {
            /* rev2 added "speculative" info */
            stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
            *count = HOST_VM_INFO_REV2_COUNT;
        }

        /* rev3 changed some of the fields to be 64-bit */

        return KERN_SUCCESS;
    }

    case HOST_CPU_LOAD_INFO: {
        processor_t processor;
        host_cpu_load_info_t cpu_load_info;

        if (*count < HOST_CPU_LOAD_INFO_COUNT) {
            return KERN_FAILURE;
        }

#define GET_TICKS_VALUE(state, ticks) \
        MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
        MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
        MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
        MACRO_END

        cpu_load_info = (host_cpu_load_info_t)info;
        cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

        simple_lock(&processor_list_lock, LCK_GRP_NULL);

        for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
            timer_t idle_state;
            uint64_t idle_time_snapshot1, idle_time_snapshot2;
            uint64_t idle_time_tstamp1, idle_time_tstamp2;

            /* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

            GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
            if (precise_user_kernel_time) {
                GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
            } else {
                /* system_state may represent either sys or user */
                GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
            }

            idle_state = &PROCESSOR_DATA(processor, idle_state);
            idle_time_snapshot1 = timer_grab(idle_state);
            idle_time_tstamp1 = idle_state->tstamp;

            if (PROCESSOR_DATA(processor, current_state) != idle_state) {
                /* Processor is non-idle, so idle timer should be accurate */
                GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
            } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
                       (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
                /* Idle timer is being updated concurrently, second stamp is good enough */
                GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
            } else {
                /*
                 * Idle timer may be very stale. Fortunately we have established
                 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
                 */
                idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

                GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
            }
        }
        simple_unlock(&processor_list_lock);

        *count = HOST_CPU_LOAD_INFO_COUNT;

        return KERN_SUCCESS;
    }

    case HOST_EXPIRED_TASK_INFO: {
        if (*count < TASK_POWER_INFO_COUNT) {
            return KERN_FAILURE;
        }

        task_power_info_t tinfo1 = (task_power_info_t)info;
        task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

        tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
        tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;
        tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
        tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;
        tinfo1->total_user = dead_task_statistics.total_user_time;
        tinfo1->total_system = dead_task_statistics.total_system_time;

        if (*count < TASK_POWER_INFO_V2_COUNT) {
            *count = TASK_POWER_INFO_COUNT;
        } else {
            tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
            tinfo2->task_energy = dead_task_statistics.task_energy;
            tinfo2->task_ptime = dead_task_statistics.total_ptime;
            tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
            *count = TASK_POWER_INFO_V2_COUNT;
        }

        return KERN_SUCCESS;
    }
    default: return KERN_INVALID_ARGUMENT;
    }
}
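
/*
 * Illustrative user-space usage (a sketch, not part of the kernel build):
 * fetching the 32-bit VM statistics via HOST_VM_INFO. As with host_info(),
 * the seeded count selects which structure revision the kernel fills in:
 *
 *     vm_statistics_data_t vmstat;
 *     mach_msg_type_number_t cnt = HOST_VM_INFO_COUNT;
 *     if (host_statistics(mach_host_self(), HOST_VM_INFO,
 *             (host_info_t)&vmstat, &cnt) == KERN_SUCCESS) {
 *         printf("free=%u active=%u\n", vmstat.free_count,
 *             vmstat.active_count);
 *     }
 */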

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW  1  /* seconds */
#define HOST_STATISTICS_MAX_REQUESTS 10 /* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS 2  /* minimum number of requests per window */

uint64_t host_statistics_time_window;

static lck_mtx_t host_statistics_lck;
static lck_grp_t* host_statistics_lck_grp;

#define HOST_VM_INFO64_REV0         0
#define HOST_VM_INFO64_REV1         1
#define HOST_EXTMOD_INFO64_REV0     2
#define HOST_LOAD_INFO_REV0         3
#define HOST_VM_INFO_REV0           4
#define HOST_VM_INFO_REV1           5
#define HOST_VM_INFO_REV2           6
#define HOST_CPU_LOAD_INFO_REV0     7
#define HOST_EXPIRED_TASK_INFO_REV0 8
#define HOST_EXPIRED_TASK_INFO_REV1 9
#define NUM_HOST_INFO_DATA_TYPES    10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
    uint64_t last_access;
    uint64_t current_requests;
    uint64_t max_requests;
    uintptr_t data;
    mach_msg_type_number_t count; /* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
    [HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
    [HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
    [HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
    [HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
    [HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
    [HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
    [HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
    [HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
    [HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
    [HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};

void
host_statistics_init(void)
{
    host_statistics_lck_grp = lck_grp_alloc_init("host_statistics", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&host_statistics_lck, host_statistics_lck_grp, LCK_ATTR_NULL);
    nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
    if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
        return;
    }

    task_t task = current_task();
    if (task->t_flags & TF_PLATFORM) {
        return;
    }

    memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
    return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
    if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
        *count = 0;
        return;
    }

    *count = g_host_stats_cache[index].count;
    memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
    switch (flavor) {
    case HOST_VM_INFO64:
        if (!is_stat64) {
            *ret = KERN_INVALID_ARGUMENT;
            return -1;
        }
        if (*count < HOST_VM_INFO64_REV0_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= HOST_VM_INFO64_REV1_COUNT) {
            return HOST_VM_INFO64_REV1;
        }
        return HOST_VM_INFO64_REV0;

    case HOST_EXTMOD_INFO64:
        if (!is_stat64) {
            *ret = KERN_INVALID_ARGUMENT;
            return -1;
        }
        if (*count < HOST_EXTMOD_INFO64_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_EXTMOD_INFO64_REV0;

    case HOST_LOAD_INFO:
        if (*count < HOST_LOAD_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_LOAD_INFO_REV0;

    case HOST_VM_INFO:
        if (*count < HOST_VM_INFO_REV0_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= HOST_VM_INFO_REV2_COUNT) {
            return HOST_VM_INFO_REV2;
        }
        if (*count >= HOST_VM_INFO_REV1_COUNT) {
            return HOST_VM_INFO_REV1;
        }
        return HOST_VM_INFO_REV0;

    case HOST_CPU_LOAD_INFO:
        if (*count < HOST_CPU_LOAD_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        return HOST_CPU_LOAD_INFO_REV0;

    case HOST_EXPIRED_TASK_INFO:
        if (*count < TASK_POWER_INFO_COUNT) {
            *ret = KERN_FAILURE;
            return -1;
        }
        if (*count >= TASK_POWER_INFO_V2_COUNT) {
            return HOST_EXPIRED_TASK_INFO_REV1;
        }
        return HOST_EXPIRED_TASK_INFO_REV0;

    default:
        *ret = KERN_INVALID_ARGUMENT;
        return -1;
    }
}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
    task_t task = current_task();

    assert(task != kernel_task);

    *ret = KERN_SUCCESS;

    /* Access control only for third party applications */
    if (task->t_flags & TF_PLATFORM) {
        return FALSE;
    }

    /* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
    bool rate_limited = FALSE;
    bool set_last_access = TRUE;

    /* there is a cache for every flavor */
    int index = get_host_info_data_index(is_stat64, flavor, count, ret);
    if (index == -1) {
        goto out;
    }

    *pindex = index;
    lck_mtx_lock(&host_statistics_lck);
    if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
        set_last_access = FALSE;
        if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
            rate_limited = TRUE;
            get_cached_info(index, info, count);
        }
    }
    if (set_last_access) {
        g_host_stats_cache[index].current_requests = 1;
        /*
         * Select a random number of allowed requests for the next window,
         * between HOST_STATISTICS_MIN_REQUESTS and HOST_STATISTICS_MAX_REQUESTS.
         * This way an observer cannot infer, from when the cached copy
         * changes, whether host_statistics was called during the previous
         * window.
         */
        g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
        g_host_stats_cache[index].last_access = mach_continuous_time();
    }
    lck_mtx_unlock(&host_statistics_lck);
out:
    return rate_limited;
}
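
/*
 * Worked example of the window logic above, using the constants defined
 * earlier (1 s window, 2..10 requests): the first call in a fresh window
 * resets current_requests to 1 and draws max_requests pseudo-randomly from
 * mach_absolute_time(), say 4. Calls 2 through 4 within the same second
 * still fall through to the live query; from call 5 onward the cached copy
 * is returned until the window expires. Platform binaries (TF_PLATFORM)
 * bypass the limiter entirely and never populate the cache.
 */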

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    uint32_t i;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    switch (flavor) {
    case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
    {
        processor_t processor;
        vm_statistics64_t stat;
        vm_statistics64_data_t host_vm_stat;
        mach_msg_type_number_t original_count;
        unsigned int local_q_internal_count;
        unsigned int local_q_external_count;

        if (*count < HOST_VM_INFO64_REV0_COUNT) {
            return KERN_FAILURE;
        }

        processor = processor_list;
        stat = &PROCESSOR_DATA(processor, vm_stat);
        host_vm_stat = *stat;

        if (processor_count > 1) {
            simple_lock(&processor_list_lock, LCK_GRP_NULL);

            while ((processor = processor->processor_list) != NULL) {
                stat = &PROCESSOR_DATA(processor, vm_stat);

                host_vm_stat.zero_fill_count += stat->zero_fill_count;
                host_vm_stat.reactivations += stat->reactivations;
                host_vm_stat.pageins += stat->pageins;
                host_vm_stat.pageouts += stat->pageouts;
                host_vm_stat.faults += stat->faults;
                host_vm_stat.cow_faults += stat->cow_faults;
                host_vm_stat.lookups += stat->lookups;
                host_vm_stat.hits += stat->hits;
                host_vm_stat.compressions += stat->compressions;
                host_vm_stat.decompressions += stat->decompressions;
                host_vm_stat.swapins += stat->swapins;
                host_vm_stat.swapouts += stat->swapouts;
            }

            simple_unlock(&processor_list_lock);
        }

        stat = (vm_statistics64_t)info;

        stat->free_count = vm_page_free_count + vm_page_speculative_count;
        stat->active_count = vm_page_active_count;

        local_q_internal_count = 0;
        local_q_external_count = 0;
        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl * lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                stat->active_count += lq->vpl_count;
                local_q_internal_count += lq->vpl_internal_count;
                local_q_external_count += lq->vpl_external_count;
            }
        }
        stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
        stat->wire_count = vm_page_wire_count;
#else
        stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
        stat->zero_fill_count = host_vm_stat.zero_fill_count;
        stat->reactivations = host_vm_stat.reactivations;
        stat->pageins = host_vm_stat.pageins;
        stat->pageouts = host_vm_stat.pageouts;
        stat->faults = host_vm_stat.faults;
        stat->cow_faults = host_vm_stat.cow_faults;
        stat->lookups = host_vm_stat.lookups;
        stat->hits = host_vm_stat.hits;

        stat->purgeable_count = vm_page_purgeable_count;
        stat->purges = vm_page_purged_count;

        stat->speculative_count = vm_page_speculative_count;

        /*
         * Fill in extra info added in later revisions of the
         * vm_statistics data structure. Fill in only what can fit
         * in the data structure the caller gave us!
         */
        original_count = *count;
        *count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
        if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
            /* rev1 added "throttled count" */
            stat->throttled_count = vm_page_throttled_count;
            /* rev1 added "compression" info */
            stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
            stat->compressions = host_vm_stat.compressions;
            stat->decompressions = host_vm_stat.decompressions;
            stat->swapins = host_vm_stat.swapins;
            stat->swapouts = host_vm_stat.swapouts;
            /* rev1 added:
             *   "external page count"
             *   "anonymous page count"
             *   "total # of pages (uncompressed) held in the compressor"
             */
            stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
            stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
            stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
            *count = HOST_VM_INFO64_REV1_COUNT;
        }

        return KERN_SUCCESS;
    }

    case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
    {
        vm_extmod_statistics_t out_extmod_statistics;

        if (*count < HOST_EXTMOD_INFO64_COUNT) {
            return KERN_FAILURE;
        }

        out_extmod_statistics = (vm_extmod_statistics_t)info;
        *out_extmod_statistics = host_extmod_statistics;

        *count = HOST_EXTMOD_INFO64_COUNT;

        return KERN_SUCCESS;
    }

    default: /* If we didn't recognize the flavor, send to host_statistics */
        return host_statistics(host, flavor, (host_info_t)info, count);
    }
}
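
/*
 * Illustrative user-space usage (a sketch, not part of the kernel build):
 * the 64-bit statistics flavor that tools such as vm_stat(1) rely on.
 *
 *     vm_statistics64_data_t vmstat64;
 *     mach_msg_type_number_t cnt = HOST_VM_INFO64_COUNT;
 *     if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *             (host_info64_t)&vmstat64, &cnt) == KERN_SUCCESS) {
 *         printf("compressor pages: %u\n", vmstat64.compressor_page_count);
 *     }
 */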

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    kern_return_t ret = KERN_SUCCESS;
    int index;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) {
        return ret;
    }

    if (ret != KERN_SUCCESS) {
        return ret;
    }

    ret = host_statistics64(host, flavor, info, count);

    if (ret == KERN_SUCCESS) {
        cache_host_statistics(index, info);
    }

    return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
    kern_return_t ret = KERN_SUCCESS;
    int index;

    if (host == HOST_NULL) {
        return KERN_INVALID_HOST;
    }

    if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) {
        return ret;
    }

    if (ret != KERN_SUCCESS) {
        return ret;
    }

    ret = host_statistics(host, flavor, info, count);

    if (ret == KERN_SUCCESS) {
        cache_host_statistics(index, info);
    }

    return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
    return host_statistics((host_t)host_priv, flavor, info, count);
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
    sched_stats_active = active;
    return KERN_SUCCESS;
}

uint64_t
get_pages_grabbed_count(void)
{
    processor_t processor;
    uint64_t pages_grabbed_count = 0;

    simple_lock(&processor_list_lock, LCK_GRP_NULL);

    processor = processor_list;

    while (processor) {
        pages_grabbed_count += PROCESSOR_DATA(processor, page_grab_count);
        processor = processor->processor_list;
    }
    simple_unlock(&processor_list_lock);

    return pages_grabbed_count;
}

kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
    processor_t processor;

    if (!sched_stats_active) {
        return KERN_FAILURE;
    }

    simple_lock(&processor_list_lock, LCK_GRP_NULL);

    if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
        simple_unlock(&processor_list_lock);
        return KERN_FAILURE;
    }

    processor = processor_list;
    while (processor) {
        struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

        out->ps_cpuid = processor->cpu_id;
        out->ps_csw_count = stats->csw_count;
        out->ps_preempt_count = stats->preempt_count;
        out->ps_preempted_rt_count = stats->preempted_rt_count;
        out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
        out->ps_rt_sched_count = stats->rt_sched_count;
        out->ps_interrupt_count = stats->interrupt_count;
        out->ps_ipi_count = stats->ipi_count;
        out->ps_timer_pop_count = stats->timer_pop_count;
        out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
        out->ps_idle_transitions = stats->idle_transitions;
        out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

        out++;
        processor = processor->processor_list;
    }

    *count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

    simple_unlock(&processor_list_lock);

    /* And include RT Queue information */
    bzero(out, sizeof(*out));
    out->ps_cpuid = (-1);
    out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
    out++;
    *count += (uint32_t)sizeof(struct _processor_statistics_np);

    return KERN_SUCCESS;
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    *out_page_size = PAGE_SIZE;

    return KERN_SUCCESS;
}
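
/*
 * Illustrative user-space usage (a sketch, not part of the kernel build):
 *
 *     vm_size_t page_size;
 *     if (host_page_size(mach_host_self(), &page_size) == KERN_SUCCESS) {
 *         printf("page size: %lu bytes\n", (unsigned long)page_size);
 *     }
 */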

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)strncpy(out_version, version, sizeof(kernel_version_t));

    return KERN_SUCCESS;
}
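
/*
 * Illustrative user-space usage (a sketch): kernel_version_t is a fixed-size
 * character array, so no count parameter is needed.
 *
 *     kernel_version_t kversion;
 *     if (host_kernel_version(mach_host_self(), kversion) == KERN_SUCCESS) {
 *         printf("%s\n", kversion);
 *     }
 */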

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
    void * addr;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * Allocate memory. Can be pageable because it won't be
     * touched while holding a lock.
     */

    addr = kalloc((vm_size_t)sizeof(mach_port_t));
    if (addr == 0) {
        return KERN_RESOURCE_SHORTAGE;
    }

    /* do the conversion that Mig should handle */
    *((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

    *pset_list = (processor_set_array_t)addr;
    *count = 1;

    return KERN_SUCCESS;
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
    if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
        *pset = PROCESSOR_SET_NULL;

        return KERN_INVALID_ARGUMENT;
    }

    *pset = pset_name;

    return KERN_SUCCESS;
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
                    processor_flavor_t flavor,
                    natural_t * out_pcount,
                    processor_info_array_t * out_array,
                    mach_msg_type_number_t * out_array_count)
{
    kern_return_t result;
    processor_t processor;
    host_t thost;
    processor_info_t info;
    unsigned int icount, tcount;
    unsigned int pcount, i;
    vm_offset_t addr;
    vm_size_t size, needed;
    vm_map_copy_t copy;

    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    result = processor_info_count(flavor, &icount);
    if (result != KERN_SUCCESS) {
        return result;
    }

    pcount = processor_count;
    assert(pcount != 0);

    needed = pcount * icount * sizeof(natural_t);
    size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
    result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
    if (result != KERN_SUCCESS) {
        return KERN_RESOURCE_SHORTAGE;
    }

    info = (processor_info_t)addr;
    processor = processor_list;
    tcount = icount;

    result = processor_info(processor, flavor, &thost, info, &tcount);
    if (result != KERN_SUCCESS) {
        kmem_free(ipc_kernel_map, addr, size);
        return result;
    }

    if (pcount > 1) {
        for (i = 1; i < pcount; i++) {
            simple_lock(&processor_list_lock, LCK_GRP_NULL);
            processor = processor->processor_list;
            simple_unlock(&processor_list_lock);

            info += icount;
            tcount = icount;
            result = processor_info(processor, flavor, &thost, info, &tcount);
            if (result != KERN_SUCCESS) {
                kmem_free(ipc_kernel_map, addr, size);
                return result;
            }
        }
    }

    if (size != needed) {
        bzero((char *)addr + needed, size - needed);
    }

    result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
                           vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
    assert(result == KERN_SUCCESS);
    result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
    assert(result == KERN_SUCCESS);

    *out_pcount = pcount;
    *out_array = (processor_info_array_t)copy;
    *out_array_count = pcount * icount;

    return KERN_SUCCESS;
}
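
/*
 * Illustrative user-space usage (a sketch, not part of the kernel build):
 * per-CPU tick counters via PROCESSOR_CPU_LOAD_INFO. The result arrives as
 * an out-of-line array that the caller must vm_deallocate():
 *
 *     natural_t ncpu;
 *     processor_info_array_t pinfo;
 *     mach_msg_type_number_t pinfo_cnt;
 *     if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *             &ncpu, &pinfo, &pinfo_cnt) == KERN_SUCCESS) {
 *         processor_cpu_load_info_t loads = (processor_cpu_load_info_t)pinfo;
 *         for (natural_t i = 0; i < ncpu; i++) {
 *             printf("cpu%u user ticks: %u\n", i,
 *                 loads[i].cpu_ticks[CPU_STATE_USER]);
 *         }
 *         vm_deallocate(mach_task_self(), (vm_address_t)pinfo,
 *             pinfo_cnt * sizeof(natural_t));
 *     }
 */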

static bool
is_valid_host_special_port(int id)
{
    return (id <= HOST_MAX_SPECIAL_PORT) &&
           (id >= HOST_MIN_SPECIAL_PORT) &&
           ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT));
}

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
    ipc_port_t old_port;

    if (!is_valid_host_special_port(id)) {
        panic("attempted to set invalid special port %d", id);
    }

#if !MACH_FLIPC
    if (id == HOST_NODE_PORT) {
        return KERN_NOT_SUPPORTED;
    }
#endif

    host_lock(host_priv);
    old_port = host_priv->special[id];
    host_priv->special[id] = port;
    host_unlock(host_priv);

#if MACH_FLIPC
    if (id == HOST_NODE_PORT) {
        mach_node_port_changed();
    }
#endif

    if (IP_VALID(old_port)) {
        ipc_port_release_send(old_port);
    }
    return KERN_SUCCESS;
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
    if (!is_valid_host_special_port(id)) {
        panic("attempted to get invalid special port %d", id);
    }

    host_lock(host_priv);
    *portp = host_priv->special[id];
    host_unlock(host_priv);
    return KERN_SUCCESS;
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
    if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

#if CONFIG_MACF
    if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) {
        return KERN_NO_ACCESS;
    }
#endif

    return kernel_set_special_port(host_priv, id, port);
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
    ipc_port_t port;

    if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) {
        return KERN_INVALID_ARGUMENT;
    }

    host_lock(host_priv);
    port = realhost.special[id];
    *portp = ipc_port_copy_send(port);
    host_unlock(host_priv);

    return KERN_SUCCESS;
}
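
/*
 * Illustrative usage (a sketch): how a privileged user-space daemon might
 * publish one of the user-settable special ports, assuming it already holds
 * the host-priv port and a send right `port` to advertise. The MACF hook
 * above can still veto the registration.
 *
 *     kern_return_t kr = host_set_special_port(host_priv,
 *         HOST_AUTOMOUNTD_PORT, port);
 *
 *     mach_port_t stored;
 *     kr = host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *         HOST_AUTOMOUNTD_PORT, &stored);
 */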

/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
    if (host == HOST_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return host_get_io_master_port(host_priv_self(), io_masterp);
}

host_t
host_self(void)
{
    return &realhost;
}

host_priv_t
host_priv_self(void)
{
    return &realhost;
}

host_security_t
host_security_self(void)
{
    return &realhost;
}

kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

#if CONFIG_ATM
    return atm_set_diagnostic_config(diagnostic_flag);
#else
    (void)diagnostic_flag;
    return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if CONFIG_EMBEDDED
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    assert(host_priv == &realhost);

    /*
     * Always enforce that the multiuser bit is set
     * if a value is written to the commpage word.
     */
    commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
    return KERN_SUCCESS;
#else
    (void)host_priv;
    (void)multiuser_config;
    return KERN_NOT_SUPPORTED;
#endif
}