/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* Non-ipc host functions.
*/
-#include <cpus.h>
-#include <mach_host.h>
-
+#include <mach/mach_types.h>
#include <mach/boolean.h>
-#include <kern/assert.h>
-#include <kern/kalloc.h>
-#include <kern/host.h>
-#include <kern/host_statistics.h>
-#include <kern/ipc_host.h>
-#include <kern/misc_protos.h>
#include <mach/host_info.h>
+#include <mach/host_special_ports.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/port.h>
-#include <kern/processor.h>
#include <mach/processor_info.h>
#include <mach/vm_param.h>
+#include <mach/processor.h>
#include <mach/mach_host_server.h>
-#if DIPC
-#include <dipc/dipc_funcs.h>
-#include <dipc/special_ports.h>
-#endif
+#include <mach/host_priv_server.h>
+#include <mach/vm_map.h>
-vm_statistics_data_t vm_stat[NCPUS];
+#include <kern/kern_types.h>
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+#include <kern/host.h>
+#include <kern/host_statistics.h>
+#include <kern/ipc_host.h>
+#include <kern/misc_protos.h>
+#include <kern/sched.h>
+#include <kern/processor.h>
+
+#include <vm/vm_map.h>
host_data_t realhost;
+vm_extmod_statistics_data_t host_extmod_statistics;
+
kern_return_t
host_processors(
- host_priv_t host_priv,
- processor_array_t *processor_list,
+ host_priv_t host_priv,
+ processor_array_t *out_array,
mach_msg_type_number_t *countp)
{
- register int i;
- register processor_t *tp;
- vm_offset_t addr;
- unsigned int count;
+ register processor_t processor, *tp;
+ void *addr;
+ unsigned int count, i;
if (host_priv == HOST_PRIV_NULL)
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
assert(host_priv == &realhost);
- /*
- * Determine how many processors we have.
- * (This number shouldn't change.)
- */
-
- count = 0;
- for (i = 0; i < NCPUS; i++)
- if (machine_slot[i].is_cpu)
- count++;
-
- if (count == 0)
- panic("host_processors");
+ count = processor_count;
+ assert(count != 0);
addr = kalloc((vm_size_t) (count * sizeof(mach_port_t)));
if (addr == 0)
- return KERN_RESOURCE_SHORTAGE;
+ return (KERN_RESOURCE_SHORTAGE);
tp = (processor_t *) addr;
- for (i = 0; i < NCPUS; i++)
- if (machine_slot[i].is_cpu)
- *tp++ = cpu_to_processor(i);
+ *tp++ = processor = processor_list;
+
+ if (count > 1) {
+ simple_lock(&processor_list_lock);
+
+ for (i = 1; i < count; i++)
+ *tp++ = processor = processor->processor_list;
+
+ simple_unlock(&processor_list_lock);
+ }
*countp = count;
- *processor_list = (processor_array_t)addr;
+ *out_array = (processor_array_t)addr;
	/* do the conversion that Mig should handle */
	tp = (processor_t *) addr;
	for (i = 0; i < count; i++)
		((mach_port_t *) tp)[i] =
			(mach_port_t)convert_processor_to_port(tp[i]);
- return KERN_SUCCESS;
+ return (KERN_SUCCESS);
}
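+
+/*
+ * Note: host_processors() now walks the dynamic processor_list under
+ * processor_list_lock instead of indexing the old fixed
+ * machine_slot[NCPUS] array. Illustrative caller sketch (not part of
+ * this file; assumes a valid host-privilege port):
+ *
+ *	processor_array_t procs;
+ *	mach_msg_type_number_t n;
+ *	kern_return_t kr = host_processors(host_priv, &procs, &n);
+ *
+ * On success the array holds one send right per processor, created by
+ * convert_processor_to_port() above.
+ */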
kern_return_t
host_info(
	host_t			host,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
if (host == HOST_NULL)
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
- switch(flavor) {
+ switch (flavor) {
case HOST_BASIC_INFO:
{
register host_basic_info_t basic_info;
+ register int master_id;
/*
* Basic information about this host.
*/
- if (*count < HOST_BASIC_INFO_COUNT)
- return(KERN_FAILURE);
+ if (*count < HOST_BASIC_INFO_OLD_COUNT)
+ return (KERN_FAILURE);
basic_info = (host_basic_info_t) info;
- basic_info->max_cpus = machine_info.max_cpus;
- basic_info->avail_cpus = machine_info.avail_cpus;
basic_info->memory_size = machine_info.memory_size;
- basic_info->cpu_type =
- machine_slot[master_processor->slot_num].cpu_type;
- basic_info->cpu_subtype =
- machine_slot[master_processor->slot_num].cpu_subtype;
-
- *count = HOST_BASIC_INFO_COUNT;
+ basic_info->max_cpus = machine_info.max_cpus;
+ basic_info->avail_cpus = processor_avail_count;
+ master_id = master_processor->cpu_id;
+ basic_info->cpu_type = slot_type(master_id);
+ basic_info->cpu_subtype = slot_subtype(master_id);
+
+ if (*count >= HOST_BASIC_INFO_COUNT) {
+ basic_info->cpu_threadtype = slot_threadtype(master_id);
+ basic_info->physical_cpu = machine_info.physical_cpu;
+ basic_info->physical_cpu_max = machine_info.physical_cpu_max;
+ basic_info->logical_cpu = machine_info.logical_cpu;
+ basic_info->logical_cpu_max = machine_info.logical_cpu_max;
+ basic_info->max_mem = machine_info.max_mem;
+
+ *count = HOST_BASIC_INFO_COUNT;
+ } else {
+ *count = HOST_BASIC_INFO_OLD_COUNT;
+ }
- return(KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
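+
+	/*
+	 * Two sizes are accepted so that old callers passing
+	 * HOST_BASIC_INFO_OLD_COUNT keep working, while callers that
+	 * provide HOST_BASIC_INFO_COUNT also get the threadtype,
+	 * physical/logical CPU counts and max_mem fields; *count
+	 * reports which layout was actually filled in.
+	 */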
case HOST_SCHED_INFO:
{
register host_sched_info_t sched_info;
- extern int tick; /* XXX */
+ uint32_t quantum_time;
+ uint64_t quantum_ns;
/*
* Return scheduler information.
*/
if (*count < HOST_SCHED_INFO_COUNT)
- return(KERN_FAILURE);
+ return (KERN_FAILURE);
sched_info = (host_sched_info_t) info;
- sched_info->min_timeout = tick / 1000; /* XXX */
- sched_info->min_quantum = tick / 1000; /* XXX */
+ quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
+ absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
+
+ sched_info->min_timeout =
+ sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);
*count = HOST_SCHED_INFO_COUNT;
- return(KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
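+
+	/*
+	 * quantum_time is in machine-dependent absolute-time units; it
+	 * is converted to nanoseconds and reported in milliseconds,
+	 * replacing the old clock-tick derived min_timeout/min_quantum.
+	 */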
	case HOST_RESOURCE_SIZES:
	{
		/*
		 * Return sizes of kernel data structures
		 */
if (*count < HOST_RESOURCE_SIZES_COUNT)
- return(KERN_FAILURE);
+ return (KERN_FAILURE);
/* XXX Fail until ledgers are implemented */
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
}
	case HOST_PRIORITY_INFO:
	{
		register host_priority_info_t	priority_info;
if (*count < HOST_PRIORITY_INFO_COUNT)
- return(KERN_FAILURE);
+ return (KERN_FAILURE);
priority_info = (host_priority_info_t) info;
priority_info->kernel_priority = MINPRI_KERNEL;
priority_info->system_priority = MINPRI_KERNEL;
- priority_info->server_priority = MINPRI_SYSTEM;
+ priority_info->server_priority = MINPRI_RESERVED;
priority_info->user_priority = BASEPRI_DEFAULT;
priority_info->depress_priority = DEPRESSPRI;
priority_info->idle_priority = IDLEPRI;
- priority_info->minimum_priority = MINPRI_STANDARD;
- priority_info->maximum_priority = MAXPRI_SYSTEM;
+ priority_info->minimum_priority = MINPRI_USER;
+ priority_info->maximum_priority = MAXPRI_RESERVED;
*count = HOST_PRIORITY_INFO_COUNT;
- return(KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
/*
- * JMM - Temporary check to see if semaphore traps are
- * supported on this machine. Sadly, just trying to call
- * the traps gets your process terminated instead of
- * returning an error, so we have to query during mach_init
- * to see if the machine supports them.
- *
- * KERN_INVALID_ARGUMENT - kernel has no semaphore traps
- * KERN_SUCCESS - kernel has sema traps (up to semaphore_signal_wait)
- * KERN_SEMAPHORE_DESTROYED - kernel has the latest semaphore traps
+ * Gestalt for various trap facilities.
*/
+ case HOST_MACH_MSG_TRAP:
case HOST_SEMAPHORE_TRAPS:
{
*count = 0;
- return KERN_SUCCESS;
+ return (KERN_SUCCESS);
}
default:
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
}
}
kern_return_t
host_statistics(
- host_t host,
- host_flavor_t flavor,
- host_info_t info,
+ host_t host,
+ host_flavor_t flavor,
+ host_info_t info,
mach_msg_type_number_t *count)
{
+ uint32_t i;
if (host == HOST_NULL)
- return(KERN_INVALID_HOST);
+ return (KERN_INVALID_HOST);
switch(flavor) {
- case HOST_LOAD_INFO: {
- register host_load_info_t load_info;
- extern integer_t avenrun[3], mach_factor[3];
+ case HOST_LOAD_INFO:
+ {
+ host_load_info_t load_info;
if (*count < HOST_LOAD_INFO_COUNT)
- return(KERN_FAILURE);
+ return (KERN_FAILURE);
load_info = (host_load_info_t) info;
bcopy((char *) avenrun,
- (char *) load_info->avenrun,
- sizeof avenrun);
+ (char *) load_info->avenrun, sizeof avenrun);
bcopy((char *) mach_factor,
- (char *) load_info->mach_factor,
- sizeof mach_factor);
+ (char *) load_info->mach_factor, sizeof mach_factor);
*count = HOST_LOAD_INFO_COUNT;
- return(KERN_SUCCESS);
- }
-
- case HOST_VM_INFO: {
- register vm_statistics_t stat;
- vm_statistics_data_t host_vm_stat;
- extern int vm_page_free_count, vm_page_active_count,
- vm_page_inactive_count, vm_page_wire_count;
+ return (KERN_SUCCESS);
+ }
+
+ case HOST_VM_INFO:
+ {
+ register processor_t processor;
+ register vm_statistics64_t stat;
+ vm_statistics64_data_t host_vm_stat;
+ vm_statistics_t stat32;
+ mach_msg_type_number_t original_count;
- if (*count < HOST_VM_INFO_COUNT)
- return(KERN_FAILURE);
+ if (*count < HOST_VM_INFO_REV0_COUNT)
+ return (KERN_FAILURE);
- stat = &vm_stat[0];
+ processor = processor_list;
+ stat = &PROCESSOR_DATA(processor, vm_stat);
host_vm_stat = *stat;
-#if NCPUS > 1
- {
- register int i;
-
- for (i = 1; i < NCPUS; i++) {
- stat++;
- host_vm_stat.zero_fill_count +=
- stat->zero_fill_count;
- host_vm_stat.reactivations +=
- stat->reactivations;
+
+ if (processor_count > 1) {
+ simple_lock(&processor_list_lock);
+
+ while ((processor = processor->processor_list) != NULL) {
+ stat = &PROCESSOR_DATA(processor, vm_stat);
+
+ host_vm_stat.zero_fill_count += stat->zero_fill_count;
+ host_vm_stat.reactivations += stat->reactivations;
host_vm_stat.pageins += stat->pageins;
host_vm_stat.pageouts += stat->pageouts;
host_vm_stat.faults += stat->faults;
host_vm_stat.cow_faults += stat->cow_faults;
host_vm_stat.lookups += stat->lookups;
host_vm_stat.hits += stat->hits;
}
+
+ simple_unlock(&processor_list_lock);
}
+
+ stat32 = (vm_statistics_t) info;
+
+ stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
+ stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);
+
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+
+ stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
+ }
+ }
+ stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
+#if CONFIG_EMBEDDED
+ stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
+#else
+ stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
+ stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
+ stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
+ stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
+ stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
+ stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
+ stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
+ stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
+ stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);
- stat = (vm_statistics_t) info;
-
- stat->free_count = vm_page_free_count;
- stat->active_count = vm_page_active_count;
- stat->inactive_count = vm_page_inactive_count;
- stat->wire_count = vm_page_wire_count;
- stat->zero_fill_count = host_vm_stat.zero_fill_count;
- stat->reactivations = host_vm_stat.reactivations;
- stat->pageins = host_vm_stat.pageins;
- stat->pageouts = host_vm_stat.pageouts;
- stat->faults = host_vm_stat.faults;
- stat->cow_faults = host_vm_stat.cow_faults;
- stat->lookups = host_vm_stat.lookups;
- stat->hits = host_vm_stat.hits;
-
- *count = HOST_VM_INFO_COUNT;
- return(KERN_SUCCESS);
- }
+ /*
+ * Fill in extra info added in later revisions of the
+ * vm_statistics data structure. Fill in only what can fit
+	 * in the data structure the caller gave us!
+ */
+ original_count = *count;
+ *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
+ if (original_count >= HOST_VM_INFO_REV1_COUNT) {
+ /* rev1 added "purgeable" info */
+ stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
+ stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
+ *count = HOST_VM_INFO_REV1_COUNT;
+ }
+
+ if (original_count >= HOST_VM_INFO_REV2_COUNT) {
+ /* rev2 added "speculative" info */
+ stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
+ *count = HOST_VM_INFO_REV2_COUNT;
+ }
+
+	/* rev3 changed some of the fields to be 64-bit */
+
+ return (KERN_SUCCESS);
+ }
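+
+	/*
+	 * Illustrative user-space caller for the revision negotiation
+	 * above (not part of this file):
+	 *
+	 *	vm_statistics_data_t vmstat;
+	 *	mach_msg_type_number_t cnt = HOST_VM_INFO_COUNT;
+	 *	kern_return_t kr = host_statistics(mach_host_self(),
+	 *	    HOST_VM_INFO, (host_info_t)&vmstat, &cnt);
+	 *
+	 * On return, cnt tells the caller which structure revision
+	 * (rev0, rev1 or rev2) the kernel actually filled in.
+	 */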
- case HOST_CPU_LOAD_INFO: {
+ case HOST_CPU_LOAD_INFO:
+ {
+ register processor_t processor;
host_cpu_load_info_t cpu_load_info;
- unsigned long ticks_value1, ticks_value2;
- int i;
-
-#define GET_TICKS_VALUE(__cpu,__state) \
-MACRO_BEGIN \
- do { \
- ticks_value1 = *(volatile integer_t *) \
- (&machine_slot[(__cpu)].cpu_ticks[(__state)]); \
- ticks_value2 = *(volatile integer_t *) \
- (&machine_slot[(__cpu)].cpu_ticks[(__state)]); \
- } while (ticks_value1 != ticks_value2); \
- cpu_load_info->cpu_ticks[(__state)] += ticks_value1; \
-MACRO_END
if (*count < HOST_CPU_LOAD_INFO_COUNT)
- return KERN_FAILURE;
+ return (KERN_FAILURE);
- cpu_load_info = (host_cpu_load_info_t) info;
+#define GET_TICKS_VALUE(processor, state, timer) \
+MACRO_BEGIN \
+ cpu_load_info->cpu_ticks[(state)] += \
+ (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer)) \
+ / hz_tick_interval); \
+MACRO_END
+ cpu_load_info = (host_cpu_load_info_t)info;
cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
- cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
- for (i = 0; i < NCPUS; i++) {
- if (!machine_slot[i].is_cpu ||
- !machine_slot[i].running)
- continue;
- GET_TICKS_VALUE(i, CPU_STATE_USER);
- GET_TICKS_VALUE(i, CPU_STATE_NICE);
- GET_TICKS_VALUE(i, CPU_STATE_SYSTEM);
- GET_TICKS_VALUE(i, CPU_STATE_IDLE);
- }
+ cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
+ simple_lock(&processor_list_lock);
+
+ for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
+ timer_data_t idle_temp;
+ timer_t idle_state;
+
+ GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
+ GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
+
+ idle_state = &PROCESSOR_DATA(processor, idle_state);
+ idle_temp = *idle_state;
+
+ if (PROCESSOR_DATA(processor, current_state) != idle_state ||
+ timer_grab(&idle_temp) != timer_grab(idle_state))
+ GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
+ else {
+ timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);
+
+ cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
+ (uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
+ }
+ }
+ simple_unlock(&processor_list_lock);
*count = HOST_CPU_LOAD_INFO_COUNT;
- return KERN_SUCCESS;
- }
+
+ return (KERN_SUCCESS);
+ }
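+
+	/*
+	 * A processor that is idle right now has not yet stopped its
+	 * idle timer, so a snapshot (idle_temp) is advanced by hand to
+	 * mach_absolute_time() to credit the in-progress idle period;
+	 * otherwise the recorded idle_state timer is used directly.
+	 */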
default:
- return(KERN_INVALID_ARGUMENT);
+ return (KERN_INVALID_ARGUMENT);
+ }
+}
+
+
+kern_return_t
+host_statistics64(
+ host_t host,
+ host_flavor_t flavor,
+ host_info64_t info,
+ mach_msg_type_number_t *count)
+{
+ uint32_t i;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ switch(flavor) {
+
+ case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
+ {
+ register processor_t processor;
+ register vm_statistics64_t stat;
+ vm_statistics64_data_t host_vm_stat;
+
+ if (*count < HOST_VM_INFO64_COUNT)
+ return (KERN_FAILURE);
+
+ processor = processor_list;
+ stat = &PROCESSOR_DATA(processor, vm_stat);
+ host_vm_stat = *stat;
+
+ if (processor_count > 1) {
+ simple_lock(&processor_list_lock);
+
+ while ((processor = processor->processor_list) != NULL) {
+ stat = &PROCESSOR_DATA(processor, vm_stat);
+
+ host_vm_stat.zero_fill_count += stat->zero_fill_count;
+ host_vm_stat.reactivations += stat->reactivations;
+ host_vm_stat.pageins += stat->pageins;
+ host_vm_stat.pageouts += stat->pageouts;
+ host_vm_stat.faults += stat->faults;
+ host_vm_stat.cow_faults += stat->cow_faults;
+ host_vm_stat.lookups += stat->lookups;
+ host_vm_stat.hits += stat->hits;
+ }
+
+ simple_unlock(&processor_list_lock);
+ }
+
+ stat = (vm_statistics64_t) info;
+
+ stat->free_count = vm_page_free_count + vm_page_speculative_count;
+ stat->active_count = vm_page_active_count;
+
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+
+ stat->active_count += lq->vpl_count;
+ }
+ }
+ stat->inactive_count = vm_page_inactive_count;
+#if CONFIG_EMBEDDED
+ stat->wire_count = vm_page_wire_count;
+#else
+ stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
+#endif
+ stat->zero_fill_count = host_vm_stat.zero_fill_count;
+ stat->reactivations = host_vm_stat.reactivations;
+ stat->pageins = host_vm_stat.pageins;
+ stat->pageouts = host_vm_stat.pageouts;
+ stat->faults = host_vm_stat.faults;
+ stat->cow_faults = host_vm_stat.cow_faults;
+ stat->lookups = host_vm_stat.lookups;
+ stat->hits = host_vm_stat.hits;
+
+		/* rev1 added "purgeable" info */
+ stat->purgeable_count = vm_page_purgeable_count;
+ stat->purges = vm_page_purged_count;
+
+ /* rev2 added "speculative" info */
+ stat->speculative_count = vm_page_speculative_count;
+
+ *count = HOST_VM_INFO64_COUNT;
+
+ return(KERN_SUCCESS);
+ }
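+
+	/*
+	 * Unlike HOST_VM_INFO above, the 64-bit flavor does no revision
+	 * negotiation: the full vm_statistics64 layout, including the
+	 * purgeable and speculative fields, is always filled in, with
+	 * no 32-bit truncation.
+	 */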
+
+	case HOST_EXTMOD_INFO64: /* We were asked to get extmod statistics */
+ {
+ vm_extmod_statistics_t out_extmod_statistics;
+
+ if (*count < HOST_EXTMOD_INFO64_COUNT)
+ return (KERN_FAILURE);
+
+ out_extmod_statistics = (vm_extmod_statistics_t) info;
+ *out_extmod_statistics = host_extmod_statistics;
+
+ *count = HOST_EXTMOD_INFO64_COUNT;
+
+ return(KERN_SUCCESS);
+ }
+
+ default: /* If we didn't recognize the flavor, send to host_statistics */
+ return(host_statistics(host, flavor, (host_info_t) info, count));
}
}
+
/*
* Get host statistics that require privilege.
 * None for now, just call the un-privileged version.
 */
kern_return_t
host_priv_statistics(
	host_priv_t		host_priv,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
	return(host_statistics((host_t)host_priv, flavor, info, count));
}
+kern_return_t
+set_sched_stats_active(
+ boolean_t active)
+{
+ sched_stats_active = active;
+ return KERN_SUCCESS;
+}
+
+
+kern_return_t
+get_sched_statistics(
+ struct _processor_statistics_np *out,
+ uint32_t *count)
+{
+ processor_t processor;
+
+ if (!sched_stats_active) {
+ return KERN_FAILURE;
+ }
+
+ simple_lock(&processor_list_lock);
+
+ if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
+ simple_unlock(&processor_list_lock);
+ return KERN_FAILURE;
+ }
+
+ processor = processor_list;
+ while (processor) {
+ struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;
+
+ out->ps_cpuid = processor->cpu_id;
+ out->ps_csw_count = stats->csw_count;
+ out->ps_preempt_count = stats->preempt_count;
+ out->ps_preempted_rt_count = stats->preempted_rt_count;
+ out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
+ out->ps_rt_sched_count = stats->rt_sched_count;
+ out->ps_interrupt_count = stats->interrupt_count;
+ out->ps_ipi_count = stats->ipi_count;
+ out->ps_timer_pop_count = stats->timer_pop_count;
+ out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
+ out->ps_idle_transitions = stats->idle_transitions;
+ out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;
+
+ out++;
+ processor = processor->processor_list;
+ }
+
+ *count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));
+
+ simple_unlock(&processor_list_lock);
+
+ /* And include RT Queue information */
+ bzero(out, sizeof(*out));
+ out->ps_cpuid = (-1);
+ out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
+ out++;
+ *count += (uint32_t)sizeof(struct _processor_statistics_np);
+
+ /* And include Fair Share Queue information at the end */
+ bzero(out, sizeof(*out));
+ out->ps_cpuid = (-2);
+ out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
+ *count += (uint32_t)sizeof(struct _processor_statistics_np);
+
+ return KERN_SUCCESS;
+}
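+
+/*
+ * Illustrative buffer sizing for get_sched_statistics() (hypothetical
+ * caller, not part of this file):
+ *
+ *	uint32_t bytes = (processor_count + 2) *
+ *	    sizeof(struct _processor_statistics_np);
+ *
+ * The two extra slots are the pseudo-CPU records appended above for
+ * the real-time (-1) and fair-share (-2) run queues.
+ */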
kern_return_t
host_page_size(
	host_t		host,
	vm_size_t	*out_page_size)
{
	if (host == HOST_NULL)
		return(KERN_INVALID_ARGUMENT);

	*out_page_size = PAGE_SIZE;

	return(KERN_SUCCESS);
}

/*
 * Return kernel version string (more than you ever
* wanted to know about what version of the kernel this is).
*/
+extern char version[];
kern_return_t
host_kernel_version(
host_t host,
kernel_version_t out_version)
{
- extern char version[];
if (host == HOST_NULL)
		return(KERN_INVALID_ARGUMENT);

	(void) strncpy(out_version, version, sizeof(kernel_version_t));

	return(KERN_SUCCESS);
}

/*
 * host_processor_sets:
 *
 * List all processor sets on the host.
 */
kern_return_t
host_processor_sets(
	host_priv_t			host_priv,
	processor_set_name_array_t	*pset_list,
mach_msg_type_number_t *count)
{
- vm_offset_t addr;
+ void *addr;
if (host_priv == HOST_PRIV_NULL)
- return KERN_INVALID_ARGUMENT;
+ return (KERN_INVALID_ARGUMENT);
/*
 * Allocate memory. Can be pageable because it won't be
 * touched while holding a lock.
 */
addr = kalloc((vm_size_t) sizeof(mach_port_t));
if (addr == 0)
- return KERN_RESOURCE_SHORTAGE;
+ return (KERN_RESOURCE_SHORTAGE);
- /* take ref for convert_pset_name_to_port */
- pset_reference(&default_pset);
/* do the conversion that Mig should handle */
- *((ipc_port_t *) addr) = convert_pset_name_to_port(&default_pset);
+ *((ipc_port_t *) addr) = convert_pset_name_to_port(&pset0);
*pset_list = (processor_set_array_t)addr;
*count = 1;
- return KERN_SUCCESS;
+ return (KERN_SUCCESS);
}
/*
 * host_processor_set_priv:
 *
 * Return control port for the given processor set.
 */
kern_return_t
host_processor_set_priv(
	host_priv_t		host_priv,
processor_set_t pset_name,
processor_set_t *pset)
{
- if ((host_priv == HOST_PRIV_NULL) || (pset_name == PROCESSOR_SET_NULL)) {
- *pset = PROCESSOR_SET_NULL;
- return(KERN_INVALID_ARGUMENT);
+ if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
+ *pset = PROCESSOR_SET_NULL;
+
+ return (KERN_INVALID_ARGUMENT);
}
*pset = pset_name;
- pset_reference(*pset);
- return(KERN_SUCCESS);
+
+ return (KERN_SUCCESS);
}
/*
 * host_processor_info
 *
 * Return the specified flavor of information about each
 * processor on this host, as an out-of-line array.
 */
kern_return_t
host_processor_info(
- host_t host,
- processor_flavor_t flavor,
- natural_t *proc_count,
- processor_info_array_t *proc_info,
- mach_msg_type_number_t *proc_info_count)
+ host_t host,
+ processor_flavor_t flavor,
+ natural_t *out_pcount,
+ processor_info_array_t *out_array,
+ mach_msg_type_number_t *out_array_count)
{
- int i;
- int num;
- int count;
- vm_size_t size;
- vm_offset_t addr;
- kern_return_t kr;
- vm_map_copy_t copy;
- processor_info_t proc_data;
+ kern_return_t result;
+ processor_t processor;
+ host_t thost;
+ processor_info_t info;
+ unsigned int icount, tcount;
+ unsigned int pcount, i;
+ vm_offset_t addr;
+ vm_size_t size, needed;
+ vm_map_copy_t copy;
if (host == HOST_NULL)
- return KERN_INVALID_ARGUMENT;
+ return (KERN_INVALID_ARGUMENT);
- kr = processor_info_count(flavor, &count);
- if (kr != KERN_SUCCESS) {
- return kr;
- }
-
- for (num = i = 0; i < NCPUS; i++)
- if (machine_slot[i].is_cpu)
- num++;
+ result = processor_info_count(flavor, &icount);
+ if (result != KERN_SUCCESS)
+ return (result);
- size = (vm_size_t)round_page(num * count * sizeof(natural_t));
+ pcount = processor_count;
+ assert(pcount != 0);
- kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
- if (kr != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
+ needed = pcount * icount * sizeof(natural_t);
+ size = round_page(needed);
+ result = kmem_alloc(ipc_kernel_map, &addr, size);
+ if (result != KERN_SUCCESS)
+ return (KERN_RESOURCE_SHORTAGE);
- kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- if (kr != KERN_SUCCESS) {
+ info = (processor_info_t) addr;
+ processor = processor_list;
+ tcount = icount;
+
+ result = processor_info(processor, flavor, &thost, info, &tcount);
+ if (result != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr, size);
- return KERN_RESOURCE_SHORTAGE;
+ return (result);
}
- proc_data = (processor_info_t) addr;
- for (i = 0; i < NCPUS; i++) {
- int count2 = count;
- host_t host2;
-
- if (machine_slot[i].is_cpu) {
- kr = processor_info(cpu_to_processor(i),
- flavor,
- &host2,
- proc_data,
- &count2);
- if (kr != KERN_SUCCESS) {
+ if (pcount > 1) {
+ for (i = 1; i < pcount; i++) {
+ simple_lock(&processor_list_lock);
+ processor = processor->processor_list;
+ simple_unlock(&processor_list_lock);
+
+ info += icount;
+ tcount = icount;
+ result = processor_info(processor, flavor, &thost, info, &tcount);
+ if (result != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr, size);
- return kr;
+ return (result);
}
- assert(count == count2);
- proc_data += count;
}
}
- kr = vm_map_unwire(ipc_kernel_map, addr, addr + size, FALSE);
- assert(kr == KERN_SUCCESS);
- size = (vm_size_t)(num * count * sizeof(natural_t));
-	kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
- assert(kr == KERN_SUCCESS);
+ if (size != needed)
+ bzero((char *) addr + needed, size - needed);
- *proc_count = num;
- *proc_info = (processor_info_array_t) copy;
- *proc_info_count = num * count;
- return(KERN_SUCCESS);
-}
+ result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
+ vm_map_round_page(addr + size), FALSE);
+ assert(result == KERN_SUCCESS);
+	result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
+						(vm_map_size_t)size, TRUE, &copy);
+ assert(result == KERN_SUCCESS);
+ *out_pcount = pcount;
+ *out_array = (processor_info_array_t) copy;
+ *out_array_count = pcount * icount;
-/*
- * host_get_io_master
- *
- * Return the IO master access port for this host.
- */
-kern_return_t
-host_get_io_master(
- host_t host,
- io_master_t *io_master)
-{
- if (host == HOST_NULL)
- return KERN_INVALID_ARGUMENT;
- *io_master = ipc_port_copy_send(realhost.io_master);
- return KERN_SUCCESS;
+ return (KERN_SUCCESS);
}
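+
+/*
+ * The per-processor records are returned out-of-line: the wired
+ * kernel buffer is zero-padded to a page boundary, unwired, and then
+ * captured in a vm_map_copy_t that MIG hands to the caller, so the
+ * (potentially large) array is never copied a second time here.
+ */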
-#define io_master_deallocate(x)
-
/*
- * host_get_io_master
- *
- * Return the IO master access port for this host.
+ * Kernel interface for setting a special port.
*/
kern_return_t
-host_set_io_master(
- host_priv_t host_priv,
- io_master_t io_master)
+kernel_set_special_port(
+ host_priv_t host_priv,
+ int id,
+ ipc_port_t port)
{
- io_master_t old_master;
-
- if (host_priv == HOST_PRIV_NULL)
- return KERN_INVALID_ARGUMENT;
-
- old_master = realhost.io_master;
- realhost.io_master = io_master;
- io_master_deallocate(old_master);
- return KERN_SUCCESS;
+ ipc_port_t old_port;
+
+ host_lock(host_priv);
+ old_port = host_priv->special[id];
+ host_priv->special[id] = port;
+ host_unlock(host_priv);
+ if (IP_VALID(old_port))
+ ipc_port_release_send(old_port);
+ return KERN_SUCCESS;
}
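+
+/*
+ * kernel_set_special_port() performs no validation of id; in-kernel
+ * callers are trusted to pass a legal index. The previous send
+ * right, if any, is released once the new port is swapped in.
+ */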
/*
 * User interface for setting a special port.
 *
 * Only permits setting of user-owned special port ids;
 * kernel-owned ids must go through kernel_set_special_port().
 */
kern_return_t
host_set_special_port(
	host_priv_t	host_priv,
int id,
ipc_port_t port)
{
-#if DIPC
- return norma_set_special_port(host_priv, id, port);
-#else
- return KERN_FAILURE;
-#endif
+ if (host_priv == HOST_PRIV_NULL ||
+ id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT ) {
+ if (IP_VALID(port))
+ ipc_port_release_send(port);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return kernel_set_special_port(host_priv, id, port);
}
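+
+/*
+ * Only ids above HOST_MAX_SPECIAL_KERNEL_PORT and at most
+ * HOST_MAX_SPECIAL_PORT may be set from user space; a rejected
+ * request still releases the caller's send right rather than
+ * leaking it.
+ */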
/*
* User interface for retrieving a special port.
*
- * When all processing is local, this call does not block.
- * If processing goes remote to discover a remote UID,
- * this call blocks but not indefinitely. If the remote
- * node does not exist, has panic'ed, or is booting but
- * hasn't yet turned on DIPC, then we expect the transport
- * to return an error.
- *
- * This routine always returns SUCCESS, even if there's
- * no resulting port.
- *
* Note that there is nothing to prevent a user special
* port from disappearing after it has been discovered by
 * the caller; thus, using a special port can always result
 * in a "dead name" right.
 */
kern_return_t
host_get_special_port(
host_priv_t host_priv,
- int node,
+ __unused int node,
int id,
ipc_port_t *portp)
{
-#if DIPC
- return norma_get_special_port(host_priv, node, id, portp);
-#else
- return KERN_FAILURE;
-#endif
+ ipc_port_t port;
+
+ if (host_priv == HOST_PRIV_NULL ||
+ id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
+ return KERN_INVALID_ARGUMENT;
+
+ host_lock(host_priv);
+ port = realhost.special[id];
+ *portp = ipc_port_copy_send(port);
+ host_unlock(host_priv);
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * host_get_io_master
+ *
+ * Return the IO master access port for this host.
+ */
+kern_return_t
+host_get_io_master(
+ host_t host,
+ io_master_t *io_masterp)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ return (host_get_io_master_port(host_priv_self(), io_masterp));
}
host_t