/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * host.c
 *
 * Non-IPC host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>
#include <kern/mach_node.h>	// mach_node_port_changed()

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>


#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <pexpert/pexpert.h>

host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	processor_t processor, *tp;
	void * addr;
	unsigned int count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *)addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that MIG should handle */
	tp = (processor_t *)addr;
	for (i = 0; i < count; i++)
		((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
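
/*
 * Usage sketch (user space), illustrative only and not part of this file;
 * host_processors() is exported through the host_priv MIG subsystem, so the
 * caller must already hold a send right to the privileged host port:
 *
 *	processor_array_t procs;
 *	mach_msg_type_number_t count;
 *	kern_return_t kr = host_processors(host_priv, &procs, &count);
 *
 * How host_priv is obtained (only sufficiently privileged tasks can get it)
 * is outside the scope of this sketch.
 */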

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return (KERN_SUCCESS);
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT)
			return (KERN_FAILURE);

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT)
			return (KERN_FAILURE);

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT)
			return (KERN_FAILURE);

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
		debug_info->config_bank = 1;
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return (KERN_SUCCESS);
#else /* DEVELOPMENT || DEBUG */
		return (KERN_NOT_SUPPORTED);
#endif
	}

	default: return (KERN_INVALID_ARGUMENT);
	}
}
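
/*
 * Usage sketch (user space), illustrative only and not part of this file.
 * HOST_BASIC_INFO is queried against the host port returned by
 * mach_host_self():
 *
 *	host_basic_info_data_t basic;
 *	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 *	kern_return_t kr = host_info(mach_host_self(), HOST_BASIC_INFO,
 *	    (host_info_t)&basic, &count);
 *	if (kr == KERN_SUCCESS)
 *		use(basic.max_cpus, basic.memory_size);  // use() is a placeholder
 *
 * Passing a smaller count (e.g. HOST_BASIC_INFO_OLD_COUNT) yields only the
 * older, shorter layout, per the *count checks above.
 */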

kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO: {
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);
		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO: {
		processor_t processor;
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (PROCESSOR_DATA(processor, current_state) != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			    (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo1 = (task_power_info_t)info;
		task_power_info_v2_t tinfo2 = (task_power_info_v2_t)info;

		tinfo1->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo1->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo1->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo1->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo1->total_user = dead_task_statistics.total_user_time;
		tinfo1->total_system = dead_task_statistics.total_system_time;
		if (*count < TASK_POWER_INFO_V2_COUNT) {
			*count = TASK_POWER_INFO_COUNT;
		} else if (*count >= TASK_POWER_INFO_V2_COUNT) {
			tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns;
#if defined(__arm__) || defined(__arm64__)
			tinfo2->task_energy = dead_task_statistics.task_energy;
			tinfo2->task_ptime = dead_task_statistics.total_ptime;
			tinfo2->task_pset_switches = dead_task_statistics.total_pset_switches;
#endif
			*count = TASK_POWER_INFO_V2_COUNT;
		}

		return (KERN_SUCCESS);
	}
	default: return (KERN_INVALID_ARGUMENT);
	}
}
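
/*
 * Usage sketch (user space), illustrative only and not part of this file.
 * The classic 32-bit VM statistics are fetched as follows:
 *
 *	vm_statistics_data_t vmstat;
 *	mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
 *	kern_return_t kr = host_statistics(mach_host_self(), HOST_VM_INFO,
 *	    (host_info_t)&vmstat, &count);
 *	if (kr == KERN_SUCCESS)
 *		use(vmstat.free_count, vmstat.pageins);  // use() is a placeholder
 *
 * New code should generally prefer host_statistics64()/HOST_VM_INFO64 below,
 * which avoids the 32-bit truncation applied here by
 * VM_STATISTICS_TRUNCATE_TO_32_BIT().
 */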

extern uint32_t c_segment_pages_compressed;

#define HOST_STATISTICS_TIME_WINDOW	1	/* seconds */
#define HOST_STATISTICS_MAX_REQUESTS	10	/* maximum number of requests per window */
#define HOST_STATISTICS_MIN_REQUESTS	2	/* minimum number of requests per window */

uint64_t host_statistics_time_window;

static lck_mtx_t host_statistics_lck;
static lck_grp_t* host_statistics_lck_grp;

#define HOST_VM_INFO64_REV0		0
#define HOST_VM_INFO64_REV1		1
#define HOST_EXTMOD_INFO64_REV0		2
#define HOST_LOAD_INFO_REV0		3
#define HOST_VM_INFO_REV0		4
#define HOST_VM_INFO_REV1		5
#define HOST_VM_INFO_REV2		6
#define HOST_CPU_LOAD_INFO_REV0		7
#define HOST_EXPIRED_TASK_INFO_REV0	8
#define HOST_EXPIRED_TASK_INFO_REV1	9
#define NUM_HOST_INFO_DATA_TYPES	10

static vm_statistics64_data_t host_vm_info64_rev0 = {};
static vm_statistics64_data_t host_vm_info64_rev1 = {};
static vm_extmod_statistics_data_t host_extmod_info64 = {};
static host_load_info_data_t host_load_info = {};
static vm_statistics_data_t host_vm_info_rev0 = {};
static vm_statistics_data_t host_vm_info_rev1 = {};
static vm_statistics_data_t host_vm_info_rev2 = {};
static host_cpu_load_info_data_t host_cpu_load_info = {};
static task_power_info_data_t host_expired_task_info = {};
static task_power_info_v2_data_t host_expired_task_info2 = {};

struct host_stats_cache {
	uint64_t last_access;
	uint64_t current_requests;
	uint64_t max_requests;
	uintptr_t data;
	mach_msg_type_number_t count; /* NOTE: count is in units of sizeof(integer_t) */
};

static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = {
	[HOST_VM_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev0, .count = HOST_VM_INFO64_REV0_COUNT },
	[HOST_VM_INFO64_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info64_rev1, .count = HOST_VM_INFO64_REV1_COUNT },
	[HOST_EXTMOD_INFO64_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_extmod_info64, .count = HOST_EXTMOD_INFO64_COUNT },
	[HOST_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_load_info, .count = HOST_LOAD_INFO_COUNT },
	[HOST_VM_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev0, .count = HOST_VM_INFO_REV0_COUNT },
	[HOST_VM_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev1, .count = HOST_VM_INFO_REV1_COUNT },
	[HOST_VM_INFO_REV2] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_vm_info_rev2, .count = HOST_VM_INFO_REV2_COUNT },
	[HOST_CPU_LOAD_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_cpu_load_info, .count = HOST_CPU_LOAD_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV0] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info, .count = TASK_POWER_INFO_COUNT },
	[HOST_EXPIRED_TASK_INFO_REV1] = { .last_access = 0, .current_requests = 0, .max_requests = 0, .data = (uintptr_t)&host_expired_task_info2, .count = TASK_POWER_INFO_V2_COUNT },
};
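
/*
 * One cache slot exists per (flavor, revision) pair above. Each slot records
 * when the current window opened (last_access), how many queries have been
 * served in that window (current_requests), the randomized per-window budget
 * (max_requests), and a copy of the last data returned, sized in integer_t
 * units. Slots are filled by cache_host_statistics() and replayed by
 * get_cached_info() below.
 */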


void
host_statistics_init(void)
{
	host_statistics_lck_grp = lck_grp_alloc_init("host_statistics", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&host_statistics_lck, host_statistics_lck_grp, LCK_ATTR_NULL);
	nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window);
}

static void
cache_host_statistics(int index, host_info64_t info)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES)
		return;

	task_t task = current_task();
	if (task->t_flags & TF_PLATFORM)
		return;

	memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t));
	return;
}

static void
get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count)
{
	if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) {
		*count = 0;
		return;
	}

	*count = g_host_stats_cache[index].count;
	memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t));
}

static int
get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret)
{
	switch (flavor) {

	case HOST_VM_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_VM_INFO64_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO64_REV1_COUNT) {
			return HOST_VM_INFO64_REV1;
		}
		return HOST_VM_INFO64_REV0;

	case HOST_EXTMOD_INFO64:
		if (!is_stat64) {
			*ret = KERN_INVALID_ARGUMENT;
			return -1;
		}
		if (*count < HOST_EXTMOD_INFO64_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_EXTMOD_INFO64_REV0;

	case HOST_LOAD_INFO:
		if (*count < HOST_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_LOAD_INFO_REV0;

	case HOST_VM_INFO:
		if (*count < HOST_VM_INFO_REV0_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= HOST_VM_INFO_REV2_COUNT) {
			return HOST_VM_INFO_REV2;
		}
		if (*count >= HOST_VM_INFO_REV1_COUNT) {
			return HOST_VM_INFO_REV1;
		}
		return HOST_VM_INFO_REV0;

	case HOST_CPU_LOAD_INFO:
		if (*count < HOST_CPU_LOAD_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		return HOST_CPU_LOAD_INFO_REV0;

	case HOST_EXPIRED_TASK_INFO:
		if (*count < TASK_POWER_INFO_COUNT) {
			*ret = KERN_FAILURE;
			return -1;
		}
		if (*count >= TASK_POWER_INFO_V2_COUNT) {
			return HOST_EXPIRED_TASK_INFO_REV1;
		}
		return HOST_EXPIRED_TASK_INFO_REV0;

	default:
		*ret = KERN_INVALID_ARGUMENT;
		return -1;

	}

}

static bool
rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t* count, kern_return_t* ret, int *pindex)
{
	task_t task = current_task();

	assert(task != kernel_task);

	*ret = KERN_SUCCESS;

	/* Access control only for third party applications */
	if (task->t_flags & TF_PLATFORM) {
		return FALSE;
	}

	/* Rate limit to HOST_STATISTICS_MAX_REQUESTS queries for each HOST_STATISTICS_TIME_WINDOW window of time */
	bool rate_limited = FALSE;
	bool set_last_access = TRUE;

	/* there is a cache for every flavor */
	int index = get_host_info_data_index(is_stat64, flavor, count, ret);
	if (index == -1)
		goto out;

	*pindex = index;
	lck_mtx_lock(&host_statistics_lck);
	if (g_host_stats_cache[index].last_access > mach_continuous_time() - host_statistics_time_window) {
		set_last_access = FALSE;
		if (g_host_stats_cache[index].current_requests++ >= g_host_stats_cache[index].max_requests) {
			rate_limited = TRUE;
			get_cached_info(index, info, count);
		}
	}
	if (set_last_access) {
		g_host_stats_cache[index].current_requests = 1;
		/*
		 * Select a random request budget (between HOST_STATISTICS_MIN_REQUESTS
		 * and HOST_STATISTICS_MAX_REQUESTS inclusive) for the new window.
		 * This way it is not possible to infer, by observing when the cached
		 * copy changes, whether host_statistics was called during the
		 * previous window.
		 */
		g_host_stats_cache[index].max_requests = (mach_absolute_time() % (HOST_STATISTICS_MAX_REQUESTS - HOST_STATISTICS_MIN_REQUESTS + 1)) + HOST_STATISTICS_MIN_REQUESTS;
		g_host_stats_cache[index].last_access = mach_continuous_time();
	}
	lck_mtx_unlock(&host_statistics_lck);
out:
	return rate_limited;
}
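
/*
 * Worked example of the scheme above (the numbers follow directly from the
 * constants): with a HOST_STATISTICS_TIME_WINDOW of 1 second, a non-platform
 * task's first query in a window is served live and draws a fresh budget of
 * 2..10 requests. Queries beyond that budget within the same second are
 * answered from the per-flavor cache instead of being recomputed, so a task
 * polling host_statistics() in a tight loop sees at most budget-many fresh
 * snapshots per second. Platform binaries (TF_PLATFORM) bypass the limiter
 * entirely.
 */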

kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count);

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
	{
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		mach_msg_type_number_t original_count;
		unsigned int local_q_internal_count;
		unsigned int local_q_external_count;

		if (*count < HOST_VM_INFO64_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
				host_vm_stat.compressions += stat->compressions;
				host_vm_stat.decompressions += stat->decompressions;
				host_vm_stat.swapins += stat->swapins;
				host_vm_stat.swapouts += stat->swapouts;
			}

			simple_unlock(&processor_list_lock);
		}

		stat = (vm_statistics64_t)info;

		stat->free_count = vm_page_free_count + vm_page_speculative_count;
		stat->active_count = vm_page_active_count;

		local_q_internal_count = 0;
		local_q_external_count = 0;
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat->active_count += lq->vpl_count;
				local_q_internal_count += lq->vpl_internal_count;
				local_q_external_count += lq->vpl_external_count;
			}
		}
		stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
		stat->wire_count = vm_page_wire_count;
#else
		stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
		stat->zero_fill_count = host_vm_stat.zero_fill_count;
		stat->reactivations = host_vm_stat.reactivations;
		stat->pageins = host_vm_stat.pageins;
		stat->pageouts = host_vm_stat.pageouts;
		stat->faults = host_vm_stat.faults;
		stat->cow_faults = host_vm_stat.cow_faults;
		stat->lookups = host_vm_stat.lookups;
		stat->hits = host_vm_stat.hits;

		stat->purgeable_count = vm_page_purgeable_count;
		stat->purges = vm_page_purged_count;

		stat->speculative_count = vm_page_speculative_count;

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
			/* rev1 added "throttled count" */
			stat->throttled_count = vm_page_throttled_count;
			/* rev1 added "compression" info */
			stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
			stat->compressions = host_vm_stat.compressions;
			stat->decompressions = host_vm_stat.decompressions;
			stat->swapins = host_vm_stat.swapins;
			stat->swapouts = host_vm_stat.swapouts;
			/* rev1 added:
			 * "external page count"
			 * "anonymous page count"
			 * "total # of pages (uncompressed) held in the compressor"
			 */
			stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
			stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
			stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
			*count = HOST_VM_INFO64_REV1_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT)
			return (KERN_FAILURE);

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return (KERN_SUCCESS);
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return (host_statistics(host, flavor, (host_info_t)info, count));
	}
}
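
/*
 * Usage sketch (user space), illustrative only and not part of this file:
 *
 *	vm_statistics64_data_t vmstat64;
 *	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *	kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	    (host_info64_t)&vmstat64, &count);
 *	if (kr == KERN_SUCCESS)
 *		use(vmstat64.compressions, vmstat64.internal_page_count);
 *
 * use() is a placeholder. Passing HOST_VM_INFO64_REV0_COUNT instead yields
 * only the rev0 subset, per the original_count logic above.
 */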

kern_return_t
host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index))
		return ret;

	if (ret != KERN_SUCCESS)
		return ret;

	ret = host_statistics64(host, flavor, info, count);

	if (ret == KERN_SUCCESS)
		cache_host_statistics(index, info);

	return ret;
}

kern_return_t
host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	kern_return_t ret = KERN_SUCCESS;
	int index;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index))
		return ret;

	if (ret != KERN_SUCCESS)
		return ret;

	ret = host_statistics(host, flavor, info, count);

	if (ret == KERN_SUCCESS)
		cache_host_statistics(index, info);

	return ret;
}

/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return (host_statistics((host_t)host_priv, flavor, info, count));
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return (KERN_SUCCESS);
}

kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return (KERN_FAILURE);
	}

	simple_lock(&processor_list_lock);

	if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
		simple_unlock(&processor_list_lock);
		return (KERN_FAILURE);
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats->csw_count;
		out->ps_preempt_count = stats->preempt_count;
		out->ps_preempted_rt_count = stats->preempted_rt_count;
		out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
		out->ps_rt_sched_count = stats->rt_sched_count;
		out->ps_interrupt_count = stats->interrupt_count;
		out->ps_ipi_count = stats->ipi_count;
		out->ps_timer_pop_count = stats->timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats->idle_transitions;
		out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = SCHED(rt_runq_count_sum)();
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return (KERN_SUCCESS);
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	*out_page_size = PAGE_SIZE;

	return (KERN_SUCCESS);
}
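
/*
 * Usage sketch (user space), illustrative only and not part of this file:
 *
 *	vm_size_t page_size;
 *	host_page_size(mach_host_self(), &page_size);
 *
 * This reports the VM page size exported to user space; it is the Mach
 * analogue of the libc getpagesize() routine.
 */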

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return (KERN_SUCCESS);
}
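
/*
 * Usage sketch (user space), illustrative only and not part of this file:
 *
 *	kernel_version_t kversion;
 *	if (host_kernel_version(mach_host_self(), kversion) == KERN_SUCCESS)
 *		printf("%s\n", kversion);
 *
 * kernel_version_t is a fixed-size character array, so it is passed directly
 * rather than by address.
 */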

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void * addr;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 * Allocate memory. Can be pageable because it won't be
	 * touched while holding a lock.
	 */

	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* do the conversion that MIG should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return (KERN_SUCCESS);
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return (KERN_INVALID_ARGUMENT);
	}

	*pset = pset_name;

	return (KERN_SUCCESS);
}

/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
    processor_flavor_t flavor,
    natural_t * out_pcount,
    processor_info_array_t * out_array,
    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	processor_t processor;
	host_t thost;
	processor_info_t info;
	unsigned int icount, tcount;
	unsigned int pcount, i;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t)addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed)
		bzero((char *)addr + needed, size - needed);

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
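
/*
 * Usage sketch (user space), illustrative only and not part of this file.
 * The out-of-line array must be deallocated by the caller:
 *
 *	natural_t pcount;
 *	processor_info_array_t info;
 *	mach_msg_type_number_t info_count;
 *	kern_return_t kr = host_processor_info(mach_host_self(),
 *	    PROCESSOR_CPU_LOAD_INFO, &pcount, &info, &info_count);
 *	if (kr == KERN_SUCCESS) {
 *		processor_cpu_load_info_t loads = (processor_cpu_load_info_t)info;
 *		use(loads[0].cpu_ticks[CPU_STATE_USER]);  // use() is a placeholder
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		    info_count * sizeof(natural_t));
 *	}
 */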

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

#if !MACH_FLIPC
	if (id == HOST_NODE_PORT)
		return (KERN_NOT_SUPPORTED);
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT)
		mach_node_port_changed();
#endif

	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return (KERN_SUCCESS);
}

/*
 * Kernel interface for retrieving a special port.
 */
kern_return_t
kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
{
	host_lock(host_priv);
	*portp = host_priv->special[id];
	host_unlock(host_priv);
	return (KERN_SUCCESS);
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT)
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0)
		return (KERN_NO_ACCESS);
#endif

	return (kernel_set_special_port(host_priv, id, port));
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */

kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
		return (KERN_INVALID_ARGUMENT);

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return (KERN_SUCCESS);
}
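
/*
 * Illustration of the special-port id space, derived from the checks above:
 * ids up to HOST_MAX_SPECIAL_KERNEL_PORT are kernel-owned and may only be
 * installed via kernel_set_special_port(); ids between
 * HOST_MAX_SPECIAL_KERNEL_PORT + 1 and HOST_MAX_SPECIAL_PORT are user-owned
 * and settable through host_set_special_port(), subject to the MACF check.
 * A user-space caller would typically go through the convenience macros in
 * <mach/host_special_ports.h> (e.g. host_get_automountd_port()), which
 * expand to host_get_special_port() with HOST_LOCAL_NODE.
 */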

/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}

host_t
host_self(void)
{
	return (&realhost);
}

host_priv_t
host_priv_self(void)
{
	return (&realhost);
}

host_security_t
host_security_self(void)
{
	return (&realhost);
}

kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

#if CONFIG_ATM
	return (atm_set_diagnostic_config(diagnostic_flag));
#else
	(void)diagnostic_flag;
	return (KERN_NOT_SUPPORTED);
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
#if CONFIG_EMBEDDED
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	/*
	 * Always enforce that the multiuser bit is set
	 * if a value is written to the commpage word.
	 */
	commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice);
	return (KERN_SUCCESS);
#else
	(void)host_priv;
	(void)multiuser_config;
	return (KERN_NOT_SUPPORTED);
#endif
}