/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Non-ipc host functions.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
#include <mach/processor.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <kern/host.h>
#include <kern/host_statistics.h>
#include <kern/ipc_host.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/processor.h>

#include <vm/vm_map.h>
host_data_t	realhost;

vm_extmod_statistics_data_t host_extmod_statistics;
kern_return_t
host_processors(
	host_priv_t		host_priv,
	processor_array_t	*out_array,
	mach_msg_type_number_t	*countp)
{
	register processor_t	processor, *tp;
	void			*addr;
	unsigned int		count, i;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	assert(host_priv == &realhost);

	count = processor_count;
	assert(count != 0);

	addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	tp = (processor_t *) addr;
	*tp++ = processor = processor_list;

	if (count > 1) {
		simple_lock(&processor_list_lock);

		for (i = 1; i < count; i++)
			*tp++ = processor = processor->processor_list;

		simple_unlock(&processor_list_lock);
	}

	*countp = count;
	*out_array = (processor_array_t)addr;

	/* do the conversion that Mig should handle */
	tp = (processor_t *) addr;
	for (i = 0; i < count; i++)
		((mach_port_t *) tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);

	return (KERN_SUCCESS);
}
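
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): enumerating the processor ports returned above.  `priv'
 * is assumed to be a send right to the privileged host port, which only a
 * privileged caller can obtain; the out-of-line port array must be
 * deallocated by the receiver.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
list_processors(host_priv_t priv)
{
	processor_array_t	procs;
	mach_msg_type_number_t	count, i;

	if (host_processors(priv, &procs, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		printf("processor[%u] port 0x%x\n", i, procs[i]);

	/* the array arrives out-of-line; release it when done */
	(void) vm_deallocate(mach_task_self(), (vm_address_t)procs,
	    count * sizeof(*procs));
}
#endif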
kern_return_t
host_info(
	host_t			host,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case HOST_BASIC_INFO:
	{
		register host_basic_info_t	basic_info;
		register int			master_id;

		/*
		 *	Basic information about this host.
		 */
		if (*count < HOST_BASIC_INFO_OLD_COUNT)
			return (KERN_FAILURE);

		basic_info = (host_basic_info_t) info;

		basic_info->memory_size = machine_info.memory_size;
		basic_info->max_cpus = machine_info.max_cpus;
		basic_info->avail_cpus = processor_avail_count;
		master_id = master_processor->cpu_id;
		basic_info->cpu_type = slot_type(master_id);
		basic_info->cpu_subtype = slot_subtype(master_id);

		if (*count >= HOST_BASIC_INFO_COUNT) {
			basic_info->cpu_threadtype = slot_threadtype(master_id);
			basic_info->physical_cpu = machine_info.physical_cpu;
			basic_info->physical_cpu_max = machine_info.physical_cpu_max;
			basic_info->logical_cpu = machine_info.logical_cpu;
			basic_info->logical_cpu_max = machine_info.logical_cpu_max;
			basic_info->max_mem = machine_info.max_mem;

			*count = HOST_BASIC_INFO_COUNT;
		} else {
			*count = HOST_BASIC_INFO_OLD_COUNT;
		}

		return (KERN_SUCCESS);
	}

	case HOST_SCHED_INFO:
	{
		register host_sched_info_t	sched_info;
		uint32_t			quantum_time;
		uint64_t			quantum_ns;

		/*
		 *	Return scheduler information.
		 */
		if (*count < HOST_SCHED_INFO_COUNT)
			return (KERN_FAILURE);

		sched_info = (host_sched_info_t) info;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		sched_info->min_timeout =
			sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*count = HOST_SCHED_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_RESOURCE_SIZES:
	{
		/*
		 * Return sizes of kernel data structures
		 */
		if (*count < HOST_RESOURCE_SIZES_COUNT)
			return (KERN_FAILURE);

		/* XXX Fail until ledgers are implemented */
		return (KERN_INVALID_ARGUMENT);
	}

	case HOST_PRIORITY_INFO:
	{
		register host_priority_info_t	priority_info;

		if (*count < HOST_PRIORITY_INFO_COUNT)
			return (KERN_FAILURE);

		priority_info = (host_priority_info_t) info;

		priority_info->kernel_priority	= MINPRI_KERNEL;
		priority_info->system_priority	= MINPRI_KERNEL;
		priority_info->server_priority	= MINPRI_RESERVED;
		priority_info->user_priority	= BASEPRI_DEFAULT;
		priority_info->depress_priority	= DEPRESSPRI;
		priority_info->idle_priority	= IDLEPRI;
		priority_info->minimum_priority	= MINPRI_USER;
		priority_info->maximum_priority	= MAXPRI_RESERVED;

		*count = HOST_PRIORITY_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	/*
	 * Gestalt for various trap facilities.
	 */
	case HOST_MACH_MSG_TRAP:
	case HOST_SEMAPHORE_TRAPS:
	{
		*count = 0;
		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
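
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): the caller passes the capacity of its buffer in *count
 * and HOST_BASIC_INFO hands back either the old or the extended layout,
 * exactly as implemented above.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
print_basic_info(void)
{
	host_basic_info_data_t	info;
	mach_msg_type_number_t	count = HOST_BASIC_INFO_COUNT;

	if (host_info(mach_host_self(), HOST_BASIC_INFO,
	    (host_info_t)&info, &count) == KERN_SUCCESS)
		printf("max_cpus=%d avail_cpus=%d max_mem=%llu\n",
		    info.max_cpus, info.avail_cpus,
		    (unsigned long long)info.max_mem);
}
#endif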
kern_return_t
host_statistics(
	host_t			host,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {

	case HOST_LOAD_INFO:
	{
		host_load_info_t	load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t) info;

		bcopy((char *) avenrun,
		      (char *) load_info->avenrun, sizeof avenrun);
		bcopy((char *) mach_factor,
		      (char *) load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO:
	{
		register processor_t		processor;
		register vm_statistics64_t	stat;
		vm_statistics64_data_t		host_vm_stat;
		vm_statistics_t			stat32;
		mach_msg_type_number_t		original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t) info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl	*lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO:
	{
		register processor_t	processor;
		host_cpu_load_info_t	cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(processor, state, timer)			 \
MACRO_BEGIN								 \
	cpu_load_info->cpu_ticks[(state)] +=				 \
		(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer)) \
				/ hz_tick_interval);			 \
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_data_t	idle_temp;
			timer_t		idle_state;

			GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_temp = *idle_state;

			if (PROCESSOR_DATA(processor, current_state) != idle_state ||
			    timer_grab(&idle_temp) != timer_grab(idle_state))
				GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
			else {
				timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

				cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
					(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO:
	{
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo = (task_power_info_t)info;

		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
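
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): HOST_VM_INFO as consumed by a caller.  The *count
 * handshake above decides how many of the rev0/rev1/rev2 fields get
 * copied back.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
print_vm_info(void)
{
	vm_statistics_data_t	vmstat;
	mach_msg_type_number_t	count = HOST_VM_INFO_COUNT;

	if (host_statistics(mach_host_self(), HOST_VM_INFO,
	    (host_info_t)&vmstat, &count) == KERN_SUCCESS)
		printf("free=%u active=%u inactive=%u wired=%u\n",
		    vmstat.free_count, vmstat.active_count,
		    vmstat.inactive_count, vmstat.wire_count);
}
#endif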
kern_return_t
host_statistics64(
	host_t			host,
	host_flavor_t		flavor,
	host_info64_t		info,
	mach_msg_type_number_t	*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch (flavor) {

	case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
	{
		register processor_t		processor;
		register vm_statistics64_t	stat;
		vm_statistics64_data_t		host_vm_stat;

		if (*count < HOST_VM_INFO64_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat = (vm_statistics64_t) info;

		stat->free_count = vm_page_free_count + vm_page_speculative_count;
		stat->active_count = vm_page_active_count;

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl	*lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat->active_count += lq->vpl_count;
			}
		}
		stat->inactive_count = vm_page_inactive_count;
#if CONFIG_EMBEDDED
		stat->wire_count = vm_page_wire_count;
#else
		stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
#endif
		stat->zero_fill_count = host_vm_stat.zero_fill_count;
		stat->reactivations = host_vm_stat.reactivations;
		stat->pageins = host_vm_stat.pageins;
		stat->pageouts = host_vm_stat.pageouts;
		stat->faults = host_vm_stat.faults;
		stat->cow_faults = host_vm_stat.cow_faults;
		stat->lookups = host_vm_stat.lookups;
		stat->hits = host_vm_stat.hits;

		/* rev1 added "purgeable" info */
		stat->purgeable_count = vm_page_purgeable_count;
		stat->purges = vm_page_purged_count;

		/* rev2 added "speculative" info */
		stat->speculative_count = vm_page_speculative_count;

		*count = HOST_VM_INFO64_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
	{
		vm_extmod_statistics_t		out_extmod_statistics;

		if (*count < HOST_EXTMOD_INFO64_COUNT)
			return (KERN_FAILURE);

		out_extmod_statistics = (vm_extmod_statistics_t) info;
		*out_extmod_statistics = host_extmod_statistics;

		*count = HOST_EXTMOD_INFO64_COUNT;

		return (KERN_SUCCESS);
	}

	default: /* If we didn't recognize the flavor, send to host_statistics */
		return (host_statistics(host, flavor, (host_info_t) info, count));
	}
}
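
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): the 64-bit flavor avoids the 32-bit truncation applied
 * by HOST_VM_INFO above.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
print_vm_info64(void)
{
	vm_statistics64_data_t	vmstat;
	mach_msg_type_number_t	count = HOST_VM_INFO64_COUNT;

	if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
	    (host_info64_t)&vmstat, &count) == KERN_SUCCESS)
		printf("free=%u faults=%llu pageins=%llu\n",
		    vmstat.free_count,
		    (unsigned long long)vmstat.faults,
		    (unsigned long long)vmstat.pageins);
}
#endif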
/*
 * Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(
	host_priv_t		host_priv,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
	return (host_statistics((host_t)host_priv, flavor, info, count));
}
kern_return_t
set_sched_stats_active(
	boolean_t	active)
{
	sched_stats_active = active;
	return KERN_SUCCESS;
}
kern_return_t
get_sched_statistics(
	struct _processor_statistics_np	*out,
	uint32_t			*count)
{
	processor_t processor;

	if (!sched_stats_active) {
		return KERN_FAILURE;
	}

	simple_lock(&processor_list_lock);

	if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
		simple_unlock(&processor_list_lock);
		return KERN_FAILURE;
	}

	processor = processor_list;
	while (processor) {
		struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;

		out->ps_cpuid			= processor->cpu_id;
		out->ps_csw_count		= stats->csw_count;
		out->ps_preempt_count		= stats->preempt_count;
		out->ps_preempted_rt_count	= stats->preempted_rt_count;
		out->ps_preempted_by_rt_count	= stats->preempted_by_rt_count;
		out->ps_rt_sched_count		= stats->rt_sched_count;
		out->ps_interrupt_count		= stats->interrupt_count;
		out->ps_ipi_count		= stats->ipi_count;
		out->ps_timer_pop_count		= stats->timer_pop_count;
		out->ps_runq_count_sum		= SCHED(processor_runq_stats_count_sum)(processor);
		out->ps_idle_transitions	= stats->idle_transitions;
		out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;

		out++;
		processor = processor->processor_list;
	}

	*count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));

	simple_unlock(&processor_list_lock);

	/* And include RT Queue information */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-1);
	out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
	out++;
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	/* And include Fair Share Queue information at the end */
	bzero(out, sizeof(*out));
	out->ps_cpuid = (-2);
	out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
	*count += (uint32_t)sizeof(struct _processor_statistics_np);

	return KERN_SUCCESS;
}
kern_return_t
host_page_size(
	host_t		host,
	vm_size_t	*out_page_size)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	*out_page_size = PAGE_SIZE;

	return (KERN_SUCCESS);
}
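
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): PAGE_SIZE as reported to a caller.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
print_page_size(void)
{
	vm_size_t page_size;

	if (host_page_size(mach_host_self(), &page_size) == KERN_SUCCESS)
		printf("page size: %lu bytes\n", (unsigned long)page_size);
}
#endif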
/*
 *	Return kernel version string (more than you ever
 *	wanted to know about what version of the kernel this is).
 */
extern char	version[];

kern_return_t
host_kernel_version(
	host_t			host,
	kernel_version_t	out_version)
{
	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void) strncpy(out_version, version, sizeof(kernel_version_t));

	return (KERN_SUCCESS);
}
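
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): kernel_version_t is a fixed-size character array, so the
 * caller just supplies the buffer.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
print_kernel_version(void)
{
	kernel_version_t kversion;

	if (host_kernel_version(mach_host_self(), kversion) == KERN_SUCCESS)
		printf("%s\n", kversion);
}
#endif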
/*
 *	host_processor_sets:
 *
 *	List all processor sets on the host.
 */
kern_return_t
host_processor_sets(
	host_priv_t			host_priv,
	processor_set_name_array_t	*pset_list,
	mach_msg_type_number_t		*count)
{
	void *addr;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_ARGUMENT);

	/*
	 *	Allocate memory.  Can be pageable because it won't be
	 *	touched while holding a lock.
	 */
	addr = kalloc((vm_size_t) sizeof(mach_port_t));
	if (addr == 0)
		return (KERN_RESOURCE_SHORTAGE);

	/* do the conversion that Mig should handle */
	*((ipc_port_t *) addr) = convert_pset_name_to_port(&pset0);

	*pset_list = (processor_set_array_t)addr;
	*count = 1;

	return (KERN_SUCCESS);
}
/*
 *	host_processor_set_priv:
 *
 *	Return control port for given processor set.
 */
kern_return_t
host_processor_set_priv(
	host_priv_t	host_priv,
	processor_set_t	pset_name,
	processor_set_t	*pset)
{
	if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
		*pset = PROCESSOR_SET_NULL;

		return (KERN_INVALID_ARGUMENT);
	}

	*pset = pset_name;

	return (KERN_SUCCESS);
}
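
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): obtaining the control port for the default processor
 * set.  `priv' is assumed to be a send right to the privileged host port.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
default_pset_control(host_priv_t priv, processor_set_t *pset)
{
	processor_set_name_t	pset_name;
	kern_return_t		kr;

	kr = processor_set_default(mach_host_self(), &pset_name);
	if (kr != KERN_SUCCESS)
		return kr;

	return host_processor_set_priv(priv, pset_name, pset);
}
#endif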
/*
 *	host_processor_info
 *
 *	Return info about the processors on this host.  It will return
 *	the number of processors, and the specific type of info requested
 *	in an OOL array.
 */
kern_return_t
host_processor_info(
	host_t			host,
	processor_flavor_t	flavor,
	natural_t		*out_pcount,
	processor_info_array_t	*out_array,
	mach_msg_type_number_t	*out_array_count)
{
	kern_return_t		result;
	processor_t		processor;
	host_t			thost;
	processor_info_t	info;
	unsigned int		icount, tcount;
	unsigned int		pcount, i;
	vm_offset_t		addr;
	vm_size_t		size, needed;
	vm_map_copy_t		copy;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	result = processor_info_count(flavor, &icount);
	if (result != KERN_SUCCESS)
		return (result);

	pcount = processor_count;
	assert(pcount != 0);

	needed = pcount * icount * sizeof(natural_t);
	size = round_page(needed);
	result = kmem_alloc(ipc_kernel_map, &addr, size);
	if (result != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);

	info = (processor_info_t) addr;
	processor = processor_list;
	tcount = icount;

	result = processor_info(processor, flavor, &thost, info, &tcount);
	if (result != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map, addr, size);
		return (result);
	}

	if (pcount > 1) {
		for (i = 1; i < pcount; i++) {
			simple_lock(&processor_list_lock);
			processor = processor->processor_list;
			simple_unlock(&processor_list_lock);

			info += icount;
			tcount = icount;
			result = processor_info(processor, flavor, &thost, info, &tcount);
			if (result != KERN_SUCCESS) {
				kmem_free(ipc_kernel_map, addr, size);
				return (result);
			}
		}
	}

	if (size != needed)
		bzero((char *) addr + needed, size - needed);

	result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
			       vm_map_round_page(addr + size), FALSE);
	assert(result == KERN_SUCCESS);
	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
			       (vm_map_size_t)size, TRUE, &copy);
	assert(result == KERN_SUCCESS);

	*out_pcount = pcount;
	*out_array = (processor_info_array_t) copy;
	*out_array_count = pcount * icount;

	return (KERN_SUCCESS);
}
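
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): per-CPU tick counts via PROCESSOR_CPU_LOAD_INFO.  The
 * result arrives as out-of-line memory, so the caller deallocates it.
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <stdio.h>

static void
print_cpu_ticks(void)
{
	natural_t			cpu_count, i;
	processor_info_array_t		info_array;
	mach_msg_type_number_t		info_count;
	processor_cpu_load_info_t	cpu_load;

	if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
	    &cpu_count, &info_array, &info_count) != KERN_SUCCESS)
		return;

	cpu_load = (processor_cpu_load_info_t)info_array;
	for (i = 0; i < cpu_count; i++)
		printf("cpu%u: user=%u system=%u idle=%u\n", i,
		    cpu_load[i].cpu_ticks[CPU_STATE_USER],
		    cpu_load[i].cpu_ticks[CPU_STATE_SYSTEM],
		    cpu_load[i].cpu_ticks[CPU_STATE_IDLE]);

	(void) vm_deallocate(mach_task_self(), (vm_address_t)info_array,
	    info_count * sizeof(natural_t));
}
#endif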
/*
 *      Kernel interface for setting a special port.
 */
kern_return_t
kernel_set_special_port(
	host_priv_t	host_priv,
	int		id,
	ipc_port_t	port)
{
	ipc_port_t old_port;

	host_lock(host_priv);
	old_port = host_priv->special[id];
	host_priv->special[id] = port;
	host_unlock(host_priv);
	if (IP_VALID(old_port))
		ipc_port_release_send(old_port);
	return KERN_SUCCESS;
}
/*
 *      User interface for setting a special port.
 *
 *      Only permits the user to set a user-owned special port
 *      ID, rejecting a kernel-owned special port ID.
 *
 *      A special kernel port cannot be set up using this
 *      routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(
	host_priv_t	host_priv,
	int		id,
	ipc_port_t	port)
{
	if (host_priv == HOST_PRIV_NULL ||
	    id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) {
		if (IP_VALID(port))
			ipc_port_release_send(port);
		return KERN_INVALID_ARGUMENT;
	}

	return kernel_set_special_port(host_priv, id, port);
}
/*
 *      User interface for retrieving a special port.
 *
 *      Note that there is nothing to prevent a user special
 *      port from disappearing after it has been discovered by
 *      the caller; thus, using a special port can always result
 *      in a "port not valid" error.
 */
kern_return_t
host_get_special_port(
	host_priv_t	host_priv,
	__unused int	node,
	int		id,
	ipc_port_t	*portp)
{
	ipc_port_t	port;

	if (host_priv == HOST_PRIV_NULL ||
	    id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
		return KERN_INVALID_ARGUMENT;

	host_lock(host_priv);
	port = realhost.special[id];
	*portp = ipc_port_copy_send(port);
	host_unlock(host_priv);

	return KERN_SUCCESS;
}
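
/*
 * Illustrative user-space sketch (not compiled here, and not part of the
 * original file): stashing and retrieving a user-owned special port.
 * `priv' is assumed to be a send right to the privileged host port, and
 * HOST_CHUD_PORT is assumed to be one of the user-settable IDs above
 * HOST_MAX_SPECIAL_KERNEL_PORT; kernel-owned IDs are rejected by
 * host_set_special_port() as implemented above.  The node argument is
 * ignored by the kernel, so 0 is passed here.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
stash_port(host_priv_t priv, mach_port_t port)
{
	return host_set_special_port(priv, HOST_CHUD_PORT, port);
}

static kern_return_t
fetch_port(host_priv_t priv, mach_port_t *portp)
{
	return host_get_special_port(priv, 0, HOST_CHUD_PORT, portp);
}
#endif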
/*
 *	host_get_io_master
 *
 *	Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(
	host_t		host,
	io_master_t	*io_masterp)
{
	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}
host_t
host_self(void)
{
	return (&realhost);
}

host_priv_t
host_priv_self(void)
{
	return (&realhost);
}

host_security_t
host_security_self(void)
{
	return (&realhost);
}