/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	host.c
 *
 *	Non-ipc host functions.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
#include <mach/task_info.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>

#include <vm/vm_map.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_pageout.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

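/*
 * Global host state: realhost is the single host instance for this
 * system (every host and host_priv port resolves to it; see the
 * assert in host_processors() below), and host_extmod_statistics
 * accumulates the host-wide external-modification statistics that
 * are reported via the HOST_EXTMOD_INFO64 flavor.
 */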
host_data_t realhost;

vm_extmod_statistics_data_t host_extmod_statistics;

kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	register processor_t processor, *tp;
	void * addr;
	unsigned int count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *)addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that MIG should handle */
	tp = (processor_t *)addr;
	for (i = 0; i < count; i++)
		((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
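
/*
 * Illustrative usage (sketch, not part of the MIG-generated stubs):
 * a privileged caller already holding a host_priv send right might
 * enumerate the per-CPU ports like this:
 *
 *	processor_array_t procs;
 *	mach_msg_type_number_t nprocs;
 *
 *	if (host_processors(host_priv, &procs, &nprocs) == KERN_SUCCESS) {
 *		... nprocs send rights, one per processor ...
 *	}
 */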

kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case HOST_BASIC_INFO: {
		register host_basic_info_t basic_info;
		register int master_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO: {
		register host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t)info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures.
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO: {
		register host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t)info;

		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		*count = 0;
		return (KERN_SUCCESS);
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT)
			return (KERN_FAILURE);

		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT)
			return (KERN_FAILURE);

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
#if CONFIG_BANK
		debug_info->config_bank = 1;
#endif
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return (KERN_SUCCESS);
#else /* DEVELOPMENT || DEBUG */
		return (KERN_NOT_SUPPORTED);
#endif
	}

	default: return (KERN_INVALID_ARGUMENT);
	}
}
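
/*
 * Illustrative usage (userspace sketch): callers size the buffer with
 * the *_COUNT constants; passing HOST_BASIC_INFO_COUNT requests the
 * full structure, HOST_BASIC_INFO_OLD_COUNT only the legacy prefix.
 *
 *	host_basic_info_data_t hinfo;
 *	mach_msg_type_number_t hcount = HOST_BASIC_INFO_COUNT;
 *
 *	if (host_info(mach_host_self(), HOST_BASIC_INFO,
 *	        (host_info_t)&hinfo, &hcount) == KERN_SUCCESS) {
 *		... hinfo.max_cpus, hinfo.max_mem, etc. are now valid ...
 *	}
 */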

kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t)info;

		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO: {
		register processor_t processor;
		register vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO: {
		register processor_t processor;
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (PROCESSOR_DATA(processor, current_state) != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
			           (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale.  Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging.
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo = (task_power_info_t)info;

		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;
		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;
		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}
	default: return (KERN_INVALID_ARGUMENT);
	}
}
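
/*
 * Illustrative usage (userspace sketch): HOST_VM_INFO fills a 32-bit
 * vm_statistics structure, truncating the kernel's 64-bit counters.
 *
 *	vm_statistics_data_t vmstat;
 *	mach_msg_type_number_t vmcount = HOST_VM_INFO_COUNT;
 *
 *	if (host_statistics(mach_host_self(), HOST_VM_INFO,
 *	        (host_info_t)&vmstat, &vmcount) == KERN_SUCCESS) {
 *		... vmstat.free_count, vmstat.pageins, etc. ...
 *	}
 */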

extern uint32_t c_segment_pages_compressed;

kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
	{
		register processor_t processor;
		register vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		mach_msg_type_number_t original_count;
		unsigned int local_q_internal_count;
		unsigned int local_q_external_count;

		if (*count < HOST_VM_INFO64_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
				host_vm_stat.compressions += stat->compressions;
				host_vm_stat.decompressions += stat->decompressions;
				host_vm_stat.swapins += stat->swapins;
				host_vm_stat.swapouts += stat->swapouts;
			}

			simple_unlock(&processor_list_lock);
		}

		stat = (vm_statistics64_t)info;

		stat->free_count = vm_page_free_count + vm_page_speculative_count;
		stat->active_count = vm_page_active_count;

		local_q_internal_count = 0;
		local_q_external_count = 0;
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat->active_count += lq->vpl_count;
				local_q_internal_count += lq->vpl_internal_count;
				local_q_external_count += lq->vpl_external_count;
			}
		}
		stat->inactive_count = vm_page_inactive_count;
		stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
		stat->zero_fill_count = host_vm_stat.zero_fill_count;
		stat->reactivations = host_vm_stat.reactivations;
		stat->pageins = host_vm_stat.pageins;
		stat->pageouts = host_vm_stat.pageouts;
		stat->faults = host_vm_stat.faults;
		stat->cow_faults = host_vm_stat.cow_faults;
		stat->lookups = host_vm_stat.lookups;
		stat->hits = host_vm_stat.hits;

		stat->purgeable_count = vm_page_purgeable_count;
		stat->purges = vm_page_purged_count;

		stat->speculative_count = vm_page_speculative_count;

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us!
		 */
		original_count = *count;
		*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
			/* rev1 added "throttled count" */
			stat->throttled_count = vm_page_throttled_count;
			/* rev1 added "compression" info */
			stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
			stat->compressions = host_vm_stat.compressions;
			stat->decompressions = host_vm_stat.decompressions;
			stat->swapins = host_vm_stat.swapins;
			stat->swapouts = host_vm_stat.swapouts;
			/*
			 * rev1 added:
			 *   "external page count"
			 *   "anonymous page count"
			 *   "total # of pages (uncompressed) held in the compressor"
			 */
			stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
			stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
			stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
			*count = HOST_VM_INFO64_REV1_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT)
			return (KERN_FAILURE);

		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return (KERN_SUCCESS);
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return (host_statistics(host, flavor, (host_info_t)info, count));
	}
}
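
/*
 * Illustrative usage (userspace sketch): HOST_VM_INFO64 supersedes
 * HOST_VM_INFO, since its counters are not truncated to 32 bits.
 *
 *	vm_statistics64_data_t vmstat64;
 *	mach_msg_type_number_t vmcount64 = HOST_VM_INFO64_COUNT;
 *
 *	host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *	    (host_info64_t)&vmstat64, &vmcount64);
 */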

/*
 * Get host statistics that require privilege.
 * None for now; just call the unprivileged version.
 */
kern_return_t
host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	return (host_statistics((host_t)host_priv, flavor, info, count));
}

kern_return_t
set_sched_stats_active(boolean_t active)
{
	sched_stats_active = active;
	return (KERN_SUCCESS);
}

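/*
 * get_sched_statistics:
 *
 * Copy per-processor scheduler statistics into 'out', one
 * _processor_statistics_np entry per CPU plus a trailing entry
 * (ps_cpuid == -1) for the realtime run queue.  On input, '*count'
 * is the buffer size in bytes; on success it is updated to the
 * number of bytes written.  Fails unless statistics collection has
 * been enabled via set_sched_stats_active().
 */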
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return (KERN_FAILURE);
	}

	simple_lock(&processor_list_lock);

	if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
		simple_unlock(&processor_list_lock);
		return (KERN_FAILURE);
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats->csw_count;
		out->ps_preempt_count = stats->preempt_count;
		out->ps_preempted_rt_count = stats->preempted_rt_count;
		out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
		out->ps_rt_sched_count = stats->rt_sched_count;
		out->ps_interrupt_count = stats->interrupt_count;
		out->ps_ipi_count = stats->ipi_count;
		out->ps_timer_pop_count = stats->timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats->idle_transitions;
		out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return (KERN_SUCCESS);
}

kern_return_t
host_page_size(host_t host, vm_size_t * out_page_size)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	*out_page_size = PAGE_SIZE;

	return (KERN_SUCCESS);
}
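
/*
 * Illustrative usage (sketch): reports the kernel's PAGE_SIZE
 * for this host.
 *
 *	vm_size_t psize;
 *
 *	host_page_size(mach_host_self(), &psize);
 */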

/*
 * Return kernel version string (more than you ever
 * wanted to know about what version of the kernel this is).
 */
extern char version[];

kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void)strncpy(out_version, version, sizeof(kernel_version_t));

	return (KERN_SUCCESS);
}
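
/*
 * Illustrative usage (sketch): kernel_version_t is a fixed-size
 * character array, so the caller just provides storage.
 *
 *	kernel_version_t kversion;
 *
 *	if (host_kernel_version(mach_host_self(), kversion) == KERN_SUCCESS)
 *		printf("%s\n", kversion);
 */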

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
{
	void * addr;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 * Allocate memory.  Can be pageable because it won't be
	 * touched while holding a lock.
	 */

	addr = kalloc((vm_size_t)sizeof(mach_port_t));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* do the conversion that MIG should handle */
	*((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return (KERN_SUCCESS);
}

/*
 * host_processor_set_priv:
 *
 * Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return (KERN_INVALID_ARGUMENT);
	}

	*pset = pset_name;

	return (KERN_SUCCESS);
}
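
/*
 * Illustrative usage (sketch): the name ports returned by
 * host_processor_sets() can be upgraded to control ports via
 * host_processor_set_priv() by a caller holding host_priv.
 * There is a single processor set (pset0), so one entry comes back.
 *
 *	processor_set_name_array_t psets;
 *	mach_msg_type_number_t pset_count;
 *	processor_set_t pset;
 *
 *	if (host_processor_sets(host_priv, &psets, &pset_count) == KERN_SUCCESS)
 *		host_processor_set_priv(host_priv, psets[0], &pset);
 */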

/*
 * host_processor_info
 *
 * Return info about the processors on this host.  It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 */
kern_return_t
host_processor_info(host_t host,
                    processor_flavor_t flavor,
                    natural_t * out_pcount,
                    processor_info_array_t * out_array,
                    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	processor_t processor;
	host_t thost;
	processor_info_t info;
	unsigned int icount, tcount;
	unsigned int pcount, i;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t)addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed)
		bzero((char *)addr + needed, size - needed);

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	                       vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
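
/*
 * Illustrative usage (userspace sketch): the result is returned
 * out-of-line, so the caller deallocates it when done.
 *
 *	natural_t pcount;
 *	processor_info_array_t pinfo;
 *	mach_msg_type_number_t pinfo_count;
 *
 *	if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
 *	        &pcount, &pinfo, &pinfo_count) == KERN_SUCCESS) {
 *		... pinfo holds pcount * CPU_STATE_MAX tick counters ...
 *		vm_deallocate(mach_task_self(), (vm_address_t)pinfo,
 *		    pinfo_count * sizeof(natural_t));
 *	}
 */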

/*
 * Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);
	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return (KERN_SUCCESS);
}

/*
 * User interface for setting a special port.
 *
 * Only permits the user to set a user-owned special port
 * ID, rejecting a kernel-owned special port ID.
 *
 * A special kernel port cannot be set up using this
 * routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT)
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	if (mac_task_check_set_host_special_port(current_task(), id, port) != 0)
		return (KERN_NO_ACCESS);
#endif

	return (kernel_set_special_port(host_priv, id, port));
}

/*
 * User interface for retrieving a special port.
 *
 * Note that there is nothing to prevent a user special
 * port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "port not valid" error.
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
		return (KERN_INVALID_ARGUMENT);

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return (KERN_SUCCESS);
}
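
/*
 * Illustrative usage (sketch): fetch a user special port by ID
 * given a host_priv right, e.g. the automountd port:
 *
 *	mach_port_t amd_port;
 *
 *	host_get_special_port(host_priv, HOST_LOCAL_NODE,
 *	    HOST_AUTOMOUNTD_PORT, &amd_port);
 */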

/*
 * host_get_io_master
 *
 * Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(host_t host, io_master_t * io_masterp)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}

host_t
host_self(void)
{
	return (&realhost);
}

host_priv_t
host_priv_self(void)
{
	return (&realhost);
}

host_security_t
host_security_self(void)
{
	return (&realhost);
}

kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

#if CONFIG_ATM
	return (atm_set_diagnostic_config(diagnostic_flag));
#else
	(void)diagnostic_flag;
	return (KERN_NOT_SUPPORTED);
#endif
}

kern_return_t
host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
{
	(void)host_priv;
	(void)multiuser_config;
	return (KERN_NOT_SUPPORTED);
}