]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/host.c
xnu-3789.51.2.tar.gz
[apple/xnu.git] / osfmk / kern / host.c
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 /*
60 * host.c
61 *
62 * Non-ipc host functions.
63 */
64
65 #include <mach/mach_types.h>
66 #include <mach/boolean.h>
67 #include <mach/host_info.h>
68 #include <mach/host_special_ports.h>
69 #include <mach/kern_return.h>
70 #include <mach/machine.h>
71 #include <mach/port.h>
72 #include <mach/processor_info.h>
73 #include <mach/vm_param.h>
74 #include <mach/processor.h>
75 #include <mach/mach_host_server.h>
76 #include <mach/host_priv_server.h>
77 #include <mach/vm_map.h>
78 #include <mach/task_info.h>
79
80 #include <machine/commpage.h>
81 #include <machine/cpu_capabilities.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/assert.h>
85 #include <kern/kalloc.h>
86 #include <kern/host.h>
87 #include <kern/host_statistics.h>
88 #include <kern/ipc_host.h>
89 #include <kern/misc_protos.h>
90 #include <kern/sched.h>
91 #include <kern/processor.h>
92 #include <kern/mach_node.h> // mach_node_port_changed()
93
94 #include <vm/vm_map.h>
95 #include <vm/vm_purgeable_internal.h>
96 #include <vm/vm_pageout.h>
97
98
99 #if CONFIG_ATM
100 #include <atm/atm_internal.h>
101 #endif
102
103 #if CONFIG_MACF
104 #include <security/mac_mach_internal.h>
105 #endif
106
107 #include <pexpert/pexpert.h>
108
109 host_data_t realhost;
110
111 vm_extmod_statistics_data_t host_extmod_statistics;
112
/*
 * host_processors:
 *
 * Return an array of send rights, one per processor on the system.
 * Requires the privileged host port.
 */
kern_return_t
host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
{
	processor_t processor, *tp;
	void * addr;
	unsigned int count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	/*
	 * Snapshot the processor count.  Processors are never removed,
	 * so reading this without the list lock is safe.
	 */
	count = processor_count;
	assert(count != 0);

	/*
	 * One slot per processor.  In the kernel, processor_t and
	 * mach_port_t are both pointer-sized, so the same array is
	 * converted in place below.
	 */
	addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *)addr;
	/* Head of the list (master processor) is stable; read unlocked. */
	*tp++ = processor = processor_list;

	if (count > 1) {
		/* Walk the remainder of the list under the lock. */
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that Mig should handle */
	tp = (processor_t *)addr;
	for (i = 0; i < count; i++)
		((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
154
/*
 * host_info:
 *
 * Return information about this host, selected by flavor.  On entry
 * *count holds the size of the caller's buffer (in natural_t units);
 * on success it is updated to the number of units actually filled in.
 */
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	case HOST_BASIC_INFO: {
		host_basic_info_t basic_info;
		int master_id;

		/*
		 * Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t)info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		/* CPU type/subtype are reported for the master (boot) processor. */
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		/* Newer fields are filled in only if the caller's buffer has room. */
		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO: {
		host_sched_info_t sched_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		/*
		 * Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t)info;

		/* Initial quantum in absolute time, converted to milliseconds. */
		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout = sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES: {
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO: {
		host_priority_info_t priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t)info;

		/* Export the scheduler's priority band boundaries. */
		priority_info->kernel_priority = MINPRI_KERNEL;
		priority_info->system_priority = MINPRI_KERNEL;
		priority_info->server_priority = MINPRI_RESERVED;
		priority_info->user_priority = BASEPRI_DEFAULT;
		priority_info->depress_priority = DEPRESSPRI;
		priority_info->idle_priority = IDLEPRI;
		priority_info->minimum_priority = MINPRI_USER;
		priority_info->maximum_priority = MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS: {
		/* Success with zero count signals the facility exists. */
		*count = 0;
		return (KERN_SUCCESS);
	}

	case HOST_CAN_HAS_DEBUGGER: {
		host_can_has_debugger_info_t can_has_debugger_info;

		if (*count < HOST_CAN_HAS_DEBUGGER_COUNT)
			return (KERN_FAILURE);

		can_has_debugger_info = (host_can_has_debugger_info_t)info;
		can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL);
		*count = HOST_CAN_HAS_DEBUGGER_COUNT;

		return KERN_SUCCESS;
	}

	case HOST_VM_PURGABLE: {
		if (*count < HOST_VM_PURGABLE_COUNT)
			return (KERN_FAILURE);

		/* System-wide purgeable stats (NULL task => all tasks). */
		vm_purgeable_stats((vm_purgeable_info_t)info, NULL);

		*count = HOST_VM_PURGABLE_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_DEBUG_INFO_INTERNAL: {
#if DEVELOPMENT || DEBUG
		if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT)
			return (KERN_FAILURE);

		host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info;
		/* Zero everything first; only configured features are set below. */
		bzero(debug_info, sizeof(host_debug_info_internal_data_t));
		*count = HOST_DEBUG_INFO_INTERNAL_COUNT;

#if CONFIG_COALITIONS
		debug_info->config_coalitions = 1;
#endif
#if CONFIG_BANK
		debug_info->config_bank = 1;
#endif
#if CONFIG_ATM
		debug_info->config_atm = 1;
#endif
#if CONFIG_CSR
		debug_info->config_csr = 1;
#endif
		return (KERN_SUCCESS);
#else /* DEVELOPMENT || DEBUG */
		return (KERN_NOT_SUPPORTED);
#endif
	}

	default: return (KERN_INVALID_ARGUMENT);
	}
}
315
/*
 * host_statistics:
 *
 * Return 32-bit statistics about this host, selected by flavor.  On
 * entry *count is the size of the caller's buffer in natural_t units;
 * on success it is set to the number of units filled in.
 */
kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_LOAD_INFO: {
		host_load_info_t load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t)info;

		/* Load averages and mach factors are maintained by the scheduler. */
		bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
		bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO: {
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		vm_statistics_t stat32;
		mach_msg_type_number_t original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		/*
		 * Sum per-processor VM statistics into host_vm_stat.  The
		 * first processor is read unlocked (processors are never
		 * removed); the rest of the list is walked under the lock.
		 */
		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		/* Legacy 32-bit structure: saturate 64-bit counters on overflow. */
		stat32 = (vm_statistics_t)info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		/* Pages on per-CPU local queues also count as active. */
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit*/

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO: {
		processor_t processor;
		host_cpu_load_info_t cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

/* Accumulate a raw absolute-time value into a tick bucket. */
#define GET_TICKS_VALUE(state, ticks) \
	MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
	MACRO_END
/* Same, sourcing the time from a per-processor timer. */
#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
	MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
	MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_t idle_state;
			uint64_t idle_time_snapshot1, idle_time_snapshot2;
			uint64_t idle_time_tstamp1, idle_time_tstamp2;

			/* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */

			GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_time_snapshot1 = timer_grab(idle_state);
			idle_time_tstamp1 = idle_state->tstamp;

			if (PROCESSOR_DATA(processor, current_state) != idle_state) {
				/* Processor is non-idle, so idle timer should be accurate */
				GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
			} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
				   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
				/* Idle timer is being updated concurrently, second stamp is good enough */
				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
			} else {
				/*
				 * Idle timer may be very stale. Fortunately we have established
				 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
				 */
				idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

				GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO: {
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		/* Aggregate power statistics accumulated from terminated tasks. */
		task_power_info_t tinfo = (task_power_info_t)info;

		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;

		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}
	default: return (KERN_INVALID_ARGUMENT);
	}
}
510
511 extern uint32_t c_segment_pages_compressed;
512
/*
 * host_statistics64:
 *
 * Return 64-bit statistics about this host, selected by flavor.
 * Unknown flavors fall through to host_statistics() so older 32-bit
 * flavors keep working through this entry point.
 */
kern_return_t
host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count)
{
	uint32_t i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {
	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
	{
		processor_t processor;
		vm_statistics64_t stat;
		vm_statistics64_data_t host_vm_stat;
		mach_msg_type_number_t original_count;
		unsigned int local_q_internal_count;
		unsigned int local_q_external_count;

		if (*count < HOST_VM_INFO64_REV0_COUNT)
			return (KERN_FAILURE);

		/*
		 * Sum per-processor VM statistics.  The first processor is
		 * read unlocked (processors are never removed); the rest of
		 * the list is walked under the lock.
		 */
		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
				host_vm_stat.compressions += stat->compressions;
				host_vm_stat.decompressions += stat->decompressions;
				host_vm_stat.swapins += stat->swapins;
				host_vm_stat.swapouts += stat->swapouts;
			}

			simple_unlock(&processor_list_lock);
		}

		stat = (vm_statistics64_t)info;

		stat->free_count = vm_page_free_count + vm_page_speculative_count;
		stat->active_count = vm_page_active_count;

		/* Pages on per-CPU local queues count as active, and their
		 * internal/external split feeds the rev1 fields below. */
		local_q_internal_count = 0;
		local_q_external_count = 0;
		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl * lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat->active_count += lq->vpl_count;
				local_q_internal_count += lq->vpl_internal_count;
				local_q_external_count += lq->vpl_external_count;
			}
		}
		stat->inactive_count = vm_page_inactive_count;
		stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
		stat->zero_fill_count = host_vm_stat.zero_fill_count;
		stat->reactivations = host_vm_stat.reactivations;
		stat->pageins = host_vm_stat.pageins;
		stat->pageouts = host_vm_stat.pageouts;
		stat->faults = host_vm_stat.faults;
		stat->cow_faults = host_vm_stat.cow_faults;
		stat->lookups = host_vm_stat.lookups;
		stat->hits = host_vm_stat.hits;

		stat->purgeable_count = vm_page_purgeable_count;
		stat->purges = vm_page_purged_count;

		stat->speculative_count = vm_page_speculative_count;

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure. Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
			/* rev1 added "throttled count" */
			stat->throttled_count = vm_page_throttled_count;
			/* rev1 added "compression" info */
			stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
			stat->compressions = host_vm_stat.compressions;
			stat->decompressions = host_vm_stat.decompressions;
			stat->swapins = host_vm_stat.swapins;
			stat->swapouts = host_vm_stat.swapouts;
			/* rev1 added:
			 * "external page count"
			 * "anonymous page count"
			 * "total # of pages (uncompressed) held in the compressor"
			 */
			stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count);
			stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count);
			stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
			*count = HOST_VM_INFO64_REV1_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
	{
		vm_extmod_statistics_t out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT)
			return (KERN_FAILURE);

		/* Copy the system-wide external-modification statistics. */
		out_extmod_statistics = (vm_extmod_statistics_t)info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return (KERN_SUCCESS);
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return (host_statistics(host, flavor, (host_info_t)info, count));
	}
}
644
645 /*
646 * Get host statistics that require privilege.
647 * None for now, just call the un-privileged version.
648 */
649 kern_return_t
650 host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
651 {
652 return (host_statistics((host_t)host_priv, flavor, info, count));
653 }
654
655 kern_return_t
656 set_sched_stats_active(boolean_t active)
657 {
658 sched_stats_active = active;
659 return (KERN_SUCCESS);
660 }
661
/*
 * get_sched_statistics:
 *
 * Copy per-processor scheduler statistics into 'out' — one record per
 * processor plus a trailing record for the realtime run queue.  On
 * entry *count is the caller's buffer size in bytes; on success it is
 * set to the number of bytes written.  Fails unless collection was
 * enabled via set_sched_stats_active().
 */
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return (KERN_FAILURE);
	}

	simple_lock(&processor_list_lock);

	/* Size check happens under the lock so processor_count is stable. */
	if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */
		simple_unlock(&processor_list_lock);
		return (KERN_FAILURE);
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics * stats = &processor->processor_data.sched_stats;

		out->ps_cpuid = processor->cpu_id;
		out->ps_csw_count = stats->csw_count;
		out->ps_preempt_count = stats->preempt_count;
		out->ps_preempted_rt_count = stats->preempted_rt_count;
		out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
		out->ps_rt_sched_count = stats->rt_sched_count;
		out->ps_interrupt_count = stats->interrupt_count;
		out->ps_ipi_count = stats->ipi_count;
		out->ps_timer_pop_count = stats->timer_pop_count;
		out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions = stats->idle_transitions;
		out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	/* Written after the unlock; the buffer was already validated to
	 * hold processor_count + 1 records above. */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return (KERN_SUCCESS);
}
712
713 kern_return_t
714 host_page_size(host_t host, vm_size_t * out_page_size)
715 {
716 if (host == HOST_NULL)
717 return (KERN_INVALID_ARGUMENT);
718
719 *out_page_size = PAGE_SIZE;
720
721 return (KERN_SUCCESS);
722 }
723
724 /*
725 * Return kernel version string (more than you ever
726 * wanted to know about what version of the kernel this is).
727 */
728 extern char version[];
729
730 kern_return_t
731 host_kernel_version(host_t host, kernel_version_t out_version)
732 {
733 if (host == HOST_NULL)
734 return (KERN_INVALID_ARGUMENT);
735
736 (void)strncpy(out_version, version, sizeof(kernel_version_t));
737
738 return (KERN_SUCCESS);
739 }
740
741 /*
742 * host_processor_sets:
743 *
744 * List all processor sets on the host.
745 */
746 kern_return_t
747 host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_list, mach_msg_type_number_t * count)
748 {
749 void * addr;
750
751 if (host_priv == HOST_PRIV_NULL)
752 return (KERN_INVALID_ARGUMENT);
753
754 /*
755 * Allocate memory. Can be pageable because it won't be
756 * touched while holding a lock.
757 */
758
759 addr = kalloc((vm_size_t)sizeof(mach_port_t));
760 if (addr == 0)
761 return (KERN_RESOURCE_SHORTAGE);
762
763 /* do the conversion that Mig should handle */
764 *((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0);
765
766 *pset_list = (processor_set_array_t)addr;
767 *count = 1;
768
769 return (KERN_SUCCESS);
770 }
771
772 /*
773 * host_processor_set_priv:
774 *
775 * Return control port for given processor set.
776 */
777 kern_return_t
778 host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, processor_set_t * pset)
779 {
780 if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
781 *pset = PROCESSOR_SET_NULL;
782
783 return (KERN_INVALID_ARGUMENT);
784 }
785
786 *pset = pset_name;
787
788 return (KERN_SUCCESS);
789 }
790
791 /*
792 * host_processor_info
793 *
794 * Return info about the processors on this host. It will return
795 * the number of processors, and the specific type of info requested
796 * in an OOL array.
797 */
/*
 * host_processor_info
 *
 * Return info about the processors on this host. It will return
 * the number of processors, and the specific type of info requested
 * in an OOL array.
 *
 * NOTE(review): 'needed = pcount * icount * sizeof(natural_t)' is
 * computed without an explicit overflow check; icount comes from
 * processor_info_count() and pcount from the hardware, so overflow
 * is presumably unreachable — confirm.
 */
kern_return_t
host_processor_info(host_t host,
		    processor_flavor_t flavor,
		    natural_t * out_pcount,
		    processor_info_array_t * out_array,
		    mach_msg_type_number_t * out_array_count)
{
	kern_return_t result;
	processor_t processor;
	host_t thost;
	processor_info_t info;
	unsigned int icount, tcount;
	unsigned int pcount, i;
	vm_offset_t addr;
	vm_size_t size, needed;
	vm_map_copy_t copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* icount: size of one processor's record, in natural_t units. */
	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	/* Processors are never removed; unlocked snapshot is safe. */
	pcount = processor_count;
	assert(pcount != 0);

	/* Allocate a page-rounded buffer in the IPC map for the OOL reply. */
	needed = pcount * icount * sizeof(natural_t);
	size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map));
	result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t)addr;
	processor = processor_list;
	tcount = icount;

	/* First processor (head of list) is queried outside the lock. */
	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			/* Only the list-pointer read needs the lock; the
			 * processor_info() call itself is made unlocked. */
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	/* Zero the page-rounding slop so no kernel data leaks to userspace. */
	if (size != needed)
		bzero((char *)addr + needed, size - needed);

	/* Unwire, then move the buffer into a vm_map_copy for the OOL reply. */
	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
			       vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t)copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
872
873 /*
874 * Kernel interface for setting a special port.
875 */
/*
 * Kernel interface for setting a special port.
 *
 * Swaps the port under the host lock; the displaced send right (if
 * any) is released only after the lock is dropped.
 */
kern_return_t
kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
{
	ipc_port_t old_port;

#if !MACH_FLIPC
	/* The node port is only meaningful with FLIPC messaging built in. */
	if (id == HOST_NODE_PORT)
		return (KERN_NOT_SUPPORTED);
#endif

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);

#if MACH_FLIPC
	if (id == HOST_NODE_PORT)
		mach_node_port_changed();
#endif

	/* Release the displaced right outside the host lock. */
	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return (KERN_SUCCESS);
}
900
901 /*
902 * Kernel interface for retrieving a special port.
903 */
904 kern_return_t
905 kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp)
906 {
907 host_lock(host_priv);
908 *portp = host_priv->special[id];
909 host_unlock(host_priv);
910 return (KERN_SUCCESS);
911 }
912
913 /*
914 * User interface for setting a special port.
915 *
916 * Only permits the user to set a user-owned special port
917 * ID, rejecting a kernel-owned special port ID.
918 *
919 * A special kernel port cannot be set up using this
920 * routine; use kernel_set_special_port() instead.
921 */
922 kern_return_t
923 host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port)
924 {
925 if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT)
926 return (KERN_INVALID_ARGUMENT);
927
928 #if CONFIG_MACF
929 if (mac_task_check_set_host_special_port(current_task(), id, port) != 0)
930 return (KERN_NO_ACCESS);
931 #endif
932
933 return (kernel_set_special_port(host_priv, id, port));
934 }
935
936 /*
937 * User interface for retrieving a special port.
938 *
939 * Note that there is nothing to prevent a user special
940 * port from disappearing after it has been discovered by
941 * the caller; thus, using a special port can always result
942 * in a "port not valid" error.
943 */
944
/*
 * host_get_special_port:
 *
 * Return a send right for the requested special port.  The security
 * port may not be fetched through this interface, and the 'node'
 * argument is ignored (single-node configuration).
 */
kern_return_t
host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp)
{
	ipc_port_t port;

	if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
		return (KERN_INVALID_ARGUMENT);

	host_lock(host_priv);
	/* NOTE(review): reads realhost.special[] rather than
	 * host_priv->special[]; equivalent as long as host_priv is
	 * always &realhost — confirm. */
	port = realhost.special[id];
	/* Hand the caller its own send right; the stored port may die later. */
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return (KERN_SUCCESS);
}
960
961 /*
962 * host_get_io_master
963 *
964 * Return the IO master access port for this host.
965 */
966 kern_return_t
967 host_get_io_master(host_t host, io_master_t * io_masterp)
968 {
969 if (host == HOST_NULL)
970 return (KERN_INVALID_ARGUMENT);
971
972 return (host_get_io_master_port(host_priv_self(), io_masterp));
973 }
974
975 host_t
976 host_self(void)
977 {
978 return (&realhost);
979 }
980
981 host_priv_t
982 host_priv_self(void)
983 {
984 return (&realhost);
985 }
986
987 host_security_t
988 host_security_self(void)
989 {
990 return (&realhost);
991 }
992
/*
 * host_set_atm_diagnostic_flag:
 *
 * Update the ATM diagnostic configuration.  Requires the privileged
 * host port; returns KERN_NOT_SUPPORTED when ATM is not configured.
 */
kern_return_t
host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t diagnostic_flag)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

#if CONFIG_ATM
	return (atm_set_diagnostic_config(diagnostic_flag));
#else
	(void)diagnostic_flag;
	return (KERN_NOT_SUPPORTED);
#endif
}
1008
1009 kern_return_t
1010 host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config)
1011 {
1012 (void)host_priv;
1013 (void)multiuser_config;
1014 return (KERN_NOT_SUPPORTED);
1015 }