/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* Non-ipc host functions.
*/
-#include <mach_host.h>
-
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map.h>
+#include <mach/task_info.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/processor.h>
#include <vm/vm_map.h>
-
-#if DIPC
-#include <dipc/dipc_funcs.h>
-#include <dipc/special_ports.h>
-#endif
+#include <vm/vm_purgeable_internal.h>
+#include <vm/vm_pageout.h>
host_data_t realhost;
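+/* Host-wide external-modification statistics; reported via the HOST_EXTMOD_INFO64 flavor below. */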
+vm_extmod_statistics_data_t host_extmod_statistics;
+
kern_return_t
host_processors(
host_priv_t host_priv,
case HOST_BASIC_INFO:
{
register host_basic_info_t basic_info;
- register int master_slot;
+ register int master_id;
/*
 *	Basic information about this host.
 */
basic_info = (host_basic_info_t) info;
- basic_info->max_cpus = machine_info.max_cpus;
- basic_info->avail_cpus = machine_info.avail_cpus;
basic_info->memory_size = machine_info.memory_size;
- master_slot = PROCESSOR_DATA(master_processor, slot_num);
- basic_info->cpu_type = slot_type(master_slot);
- basic_info->cpu_subtype = slot_subtype(master_slot);
+ basic_info->max_cpus = machine_info.max_cpus;
+ basic_info->avail_cpus = processor_avail_count;
+ master_id = master_processor->cpu_id;
+ basic_info->cpu_type = slot_type(master_id);
+ basic_info->cpu_subtype = slot_subtype(master_id);
if (*count >= HOST_BASIC_INFO_COUNT) {
- basic_info->cpu_threadtype = slot_threadtype(master_slot);
+ basic_info->cpu_threadtype = slot_threadtype(master_id);
basic_info->physical_cpu = machine_info.physical_cpu;
basic_info->physical_cpu_max = machine_info.physical_cpu_max;
basic_info->logical_cpu = machine_info.logical_cpu;
case HOST_SCHED_INFO:
{
register host_sched_info_t sched_info;
+ uint32_t quantum_time;
+ uint64_t quantum_ns;
/*
 *	Return scheduler information.
 */
sched_info = (host_sched_info_t) info;
+ quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
+ absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
+
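+	/*
+	 * min_timeout and min_quantum both report the initial scheduling
+	 * quantum, converted from nanoseconds to milliseconds.
+	 */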
sched_info->min_timeout =
- sched_info->min_quantum = std_quantum_us / 1000;
+ sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);
*count = HOST_SCHED_INFO_COUNT;
return (KERN_SUCCESS);
}
+ case HOST_VM_PURGABLE:
+ {
+ if (*count < HOST_VM_PURGABLE_COUNT)
+ return (KERN_FAILURE);
+
+ vm_purgeable_stats((vm_purgeable_info_t) info, NULL);
+
+ *count = HOST_VM_PURGABLE_COUNT;
+ return (KERN_SUCCESS);
+ }
+
default:
return (KERN_INVALID_ARGUMENT);
}
host_info_t info,
mach_msg_type_number_t *count)
{
+ uint32_t i;
if (host == HOST_NULL)
return (KERN_INVALID_HOST);
case HOST_VM_INFO:
{
register processor_t processor;
- register vm_statistics_t stat;
- vm_statistics_data_t host_vm_stat;
+ register vm_statistics64_t stat;
+ vm_statistics64_data_t host_vm_stat;
+ vm_statistics_t stat32;
+ mach_msg_type_number_t original_count;
if (*count < HOST_VM_INFO_REV0_COUNT)
return (KERN_FAILURE);
simple_unlock(&processor_list_lock);
}
- stat = (vm_statistics_t) info;
-
- stat->free_count = vm_page_free_count;
- stat->active_count = vm_page_active_count;
- stat->inactive_count = vm_page_inactive_count;
- stat->wire_count = vm_page_wire_count;
- stat->zero_fill_count = host_vm_stat.zero_fill_count;
- stat->reactivations = host_vm_stat.reactivations;
- stat->pageins = host_vm_stat.pageins;
- stat->pageouts = host_vm_stat.pageouts;
- stat->faults = host_vm_stat.faults;
- stat->cow_faults = host_vm_stat.cow_faults;
- stat->lookups = host_vm_stat.lookups;
- stat->hits = host_vm_stat.hits;
-
- if (*count >= HOST_VM_INFO_COUNT) {
- /* info that was not in revision 0 of that interface */
- stat->purgeable_count = vm_page_purgeable_count;
- stat->purges = vm_page_purged_count;
- *count = HOST_VM_INFO_COUNT;
- } else {
- *count = HOST_VM_INFO_REV0_COUNT;
+ stat32 = (vm_statistics_t) info;
+
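+	/*
+	 * Legacy 32-bit interface: each counter is reduced to 32 bits via
+	 * VM_STATISTICS_TRUNCATE_TO_32_BIT; callers needing full-width
+	 * counters should use host_statistics64(HOST_VM_INFO64) instead.
+	 */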
+ stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
+ stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);
+
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+
+ stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
+ }
+ }
+ stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
+ stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
+ stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
+ stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
+ stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
+ stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
+ stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
+ stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
+ stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
+ stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);
+
+ /*
+ * Fill in extra info added in later revisions of the
+ * vm_statistics data structure. Fill in only what can fit
+ * in the data structure the caller gave us !
+ */
+ original_count = *count;
+ *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
+ if (original_count >= HOST_VM_INFO_REV1_COUNT) {
+ /* rev1 added "purgeable" info */
+ stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
+ stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
+ *count = HOST_VM_INFO_REV1_COUNT;
}
+ if (original_count >= HOST_VM_INFO_REV2_COUNT) {
+ /* rev2 added "speculative" info */
+ stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
+ *count = HOST_VM_INFO_REV2_COUNT;
+ }
+
+	/* rev3 changed some of the fields to be 64-bit */
+
return (KERN_SUCCESS);
}
{
register processor_t processor;
host_cpu_load_info_t cpu_load_info;
- unsigned long ticks_value1, ticks_value2;
if (*count < HOST_CPU_LOAD_INFO_COUNT)
return (KERN_FAILURE);
-#define GET_TICKS_VALUE(processor, state) \
-MACRO_BEGIN \
- do { \
- ticks_value1 = *(volatile integer_t *) \
- &PROCESSOR_DATA((processor), cpu_ticks[(state)]); \
- ticks_value2 = *(volatile integer_t *) \
- &PROCESSOR_DATA((processor), cpu_ticks[(state)]); \
- } while (ticks_value1 != ticks_value2); \
- \
- cpu_load_info->cpu_ticks[(state)] += ticks_value1; \
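+/*
+ * Accumulate a CPU time value into cpu_load_info->cpu_ticks[state].
+ * GET_TICKS_VALUE converts an absolute-time duration into clock ticks by
+ * dividing by hz_tick_interval; GET_TICKS_VALUE_FROM_TIMER does the same
+ * for a snapshot of the named per-processor timer.
+ */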
+#define GET_TICKS_VALUE(state, ticks) \
+MACRO_BEGIN \
+ cpu_load_info->cpu_ticks[(state)] += \
+ (uint32_t)(ticks / hz_tick_interval); \
+MACRO_END
+#define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \
+MACRO_BEGIN \
+ GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \
MACRO_END
cpu_load_info = (host_cpu_load_info_t)info;
cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
- cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
+ cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
- processor = processor_list;
- GET_TICKS_VALUE(processor, CPU_STATE_USER);
- GET_TICKS_VALUE(processor, CPU_STATE_NICE);
- GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM);
- GET_TICKS_VALUE(processor, CPU_STATE_IDLE);
-
- if (processor_count > 1) {
- simple_lock(&processor_list_lock);
+ simple_lock(&processor_list_lock);
- while ((processor = processor->processor_list) != NULL) {
- GET_TICKS_VALUE(processor, CPU_STATE_USER);
- GET_TICKS_VALUE(processor, CPU_STATE_NICE);
- GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM);
- GET_TICKS_VALUE(processor, CPU_STATE_IDLE);
+ for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
+ timer_t idle_state;
+ uint64_t idle_time_snapshot1, idle_time_snapshot2;
+ uint64_t idle_time_tstamp1, idle_time_tstamp2;
+
+ /* See discussion in processor_info(PROCESSOR_CPU_LOAD_INFO) */
+
+ GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, user_state);
+ if (precise_user_kernel_time) {
+ GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_SYSTEM, system_state);
+ } else {
+ /* system_state may represent either sys or user */
+ GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state);
}
- simple_unlock(&processor_list_lock);
+ idle_state = &PROCESSOR_DATA(processor, idle_state);
+ idle_time_snapshot1 = timer_grab(idle_state);
+ idle_time_tstamp1 = idle_state->tstamp;
+
+ if (PROCESSOR_DATA(processor, current_state) != idle_state) {
+ /* Processor is non-idle, so idle timer should be accurate */
+ GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state);
+ } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
+ (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){
+ /* Idle timer is being updated concurrently, second stamp is good enough */
+ GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2);
+ } else {
+ /*
+ * Idle timer may be very stale. Fortunately we have established
+ * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
+ */
+ idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;
+
+ GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot1);
+ }
}
+ simple_unlock(&processor_list_lock);
*count = HOST_CPU_LOAD_INFO_COUNT;
return (KERN_SUCCESS);
}
+ case HOST_EXPIRED_TASK_INFO:
+ {
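+		/*
+		 * Report power-related statistics (wakeups and CPU time)
+		 * accumulated from terminated tasks, in task_power_info format.
+		 */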
+ if (*count < TASK_POWER_INFO_COUNT) {
+ return (KERN_FAILURE);
+ }
+
+ task_power_info_t tinfo = (task_power_info_t)info;
+
+ tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
+ tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;
+
+ tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
+
+ tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;
+
+ tinfo->total_user = dead_task_statistics.total_user_time;
+ tinfo->total_system = dead_task_statistics.total_system_time;
+
+ return (KERN_SUCCESS);
+
+ }
default:
return (KERN_INVALID_ARGUMENT);
}
}
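+/*
+ * Total number of (uncompressed) pages currently held by the VM compressor;
+ * reported below as total_uncompressed_pages_in_compressor.
+ */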
+extern uint32_t c_segment_pages_compressed;
+
+kern_return_t
+host_statistics64(
+ host_t host,
+ host_flavor_t flavor,
+ host_info64_t info,
+ mach_msg_type_number_t *count)
+{
+ uint32_t i;
+
+ if (host == HOST_NULL)
+ return (KERN_INVALID_HOST);
+
+ switch(flavor) {
+
+ case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
+ {
+ register processor_t processor;
+ register vm_statistics64_t stat;
+ vm_statistics64_data_t host_vm_stat;
+ mach_msg_type_number_t original_count;
+ unsigned int local_q_internal_count;
+ unsigned int local_q_external_count;
+
+ if (*count < HOST_VM_INFO64_REV0_COUNT)
+ return (KERN_FAILURE);
+
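+		/* Sum the per-processor VM event counters into a single host-wide snapshot. */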
+ processor = processor_list;
+ stat = &PROCESSOR_DATA(processor, vm_stat);
+ host_vm_stat = *stat;
+
+ if (processor_count > 1) {
+ simple_lock(&processor_list_lock);
+
+ while ((processor = processor->processor_list) != NULL) {
+ stat = &PROCESSOR_DATA(processor, vm_stat);
+
+ host_vm_stat.zero_fill_count += stat->zero_fill_count;
+ host_vm_stat.reactivations += stat->reactivations;
+ host_vm_stat.pageins += stat->pageins;
+ host_vm_stat.pageouts += stat->pageouts;
+ host_vm_stat.faults += stat->faults;
+ host_vm_stat.cow_faults += stat->cow_faults;
+ host_vm_stat.lookups += stat->lookups;
+ host_vm_stat.hits += stat->hits;
+ host_vm_stat.compressions += stat->compressions;
+ host_vm_stat.decompressions += stat->decompressions;
+ host_vm_stat.swapins += stat->swapins;
+ host_vm_stat.swapouts += stat->swapouts;
+ }
+
+ simple_unlock(&processor_list_lock);
+ }
+
+ stat = (vm_statistics64_t) info;
+
+ stat->free_count = vm_page_free_count + vm_page_speculative_count;
+ stat->active_count = vm_page_active_count;
+
+ local_q_internal_count = 0;
+ local_q_external_count = 0;
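+		/*
+		 * Pages sitting on the per-CPU local queues are counted as active;
+		 * their internal/external split feeds the rev1 page counts below.
+		 */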
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+
+ stat->active_count += lq->vpl_count;
+ local_q_internal_count +=
+ lq->vpl_internal_count;
+ local_q_external_count +=
+ lq->vpl_external_count;
+ }
+ }
+ stat->inactive_count = vm_page_inactive_count;
+ stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
+ stat->zero_fill_count = host_vm_stat.zero_fill_count;
+ stat->reactivations = host_vm_stat.reactivations;
+ stat->pageins = host_vm_stat.pageins;
+ stat->pageouts = host_vm_stat.pageouts;
+ stat->faults = host_vm_stat.faults;
+ stat->cow_faults = host_vm_stat.cow_faults;
+ stat->lookups = host_vm_stat.lookups;
+ stat->hits = host_vm_stat.hits;
+
+ stat->purgeable_count = vm_page_purgeable_count;
+ stat->purges = vm_page_purged_count;
+
+ stat->speculative_count = vm_page_speculative_count;
+
+ /*
+ * Fill in extra info added in later revisions of the
+ * vm_statistics data structure. Fill in only what can fit
+ * in the data structure the caller gave us !
+ */
+ original_count = *count;
+ *count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */
+ if (original_count >= HOST_VM_INFO64_REV1_COUNT) {
+ /* rev1 added "throttled count" */
+ stat->throttled_count = vm_page_throttled_count;
+ /* rev1 added "compression" info */
+ stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT;
+ stat->compressions = host_vm_stat.compressions;
+ stat->decompressions = host_vm_stat.decompressions;
+ stat->swapins = host_vm_stat.swapins;
+ stat->swapouts = host_vm_stat.swapouts;
+ /* rev1 added:
+ * "external page count"
+ * "anonymous page count"
+ * "total # of pages (uncompressed) held in the compressor"
+ */
+ stat->external_page_count =
+ (vm_page_pageable_external_count +
+ local_q_external_count);
+ stat->internal_page_count =
+ (vm_page_pageable_internal_count +
+ local_q_internal_count);
+ stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed;
+ *count = HOST_VM_INFO64_REV1_COUNT;
+ }
+
+ return(KERN_SUCCESS);
+ }
+
+	case HOST_EXTMOD_INFO64: /* We were asked to get vm_extmod_statistics */
+ {
+ vm_extmod_statistics_t out_extmod_statistics;
+
+ if (*count < HOST_EXTMOD_INFO64_COUNT)
+ return (KERN_FAILURE);
+
+ out_extmod_statistics = (vm_extmod_statistics_t) info;
+ *out_extmod_statistics = host_extmod_statistics;
+
+ *count = HOST_EXTMOD_INFO64_COUNT;
+
+ return(KERN_SUCCESS);
+ }
+
+ default: /* If we didn't recognize the flavor, send to host_statistics */
+ return(host_statistics(host, flavor, (host_info_t) info, count));
+ }
+}
+
+
/*
* Get host statistics that require privilege.
* None for now, just call the un-privileged version.
return(host_statistics((host_t)host_priv, flavor, info, count));
}
+kern_return_t
+set_sched_stats_active(
+ boolean_t active)
+{
+ sched_stats_active = active;
+ return KERN_SUCCESS;
+}
+
+
+kern_return_t
+get_sched_statistics(
+ struct _processor_statistics_np *out,
+ uint32_t *count)
+{
+ processor_t processor;
+
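+	/*
+	 * The output buffer receives one _processor_statistics_np entry per
+	 * processor, followed by pseudo-entries for the realtime run queue
+	 * (ps_cpuid -1) and the fair-share run queue (ps_cpuid -2).
+	 */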
+ if (!sched_stats_active) {
+ return KERN_FAILURE;
+ }
+
+ simple_lock(&processor_list_lock);
+
+ if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
+ simple_unlock(&processor_list_lock);
+ return KERN_FAILURE;
+ }
+
+ processor = processor_list;
+ while (processor) {
+ struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;
+
+ out->ps_cpuid = processor->cpu_id;
+ out->ps_csw_count = stats->csw_count;
+ out->ps_preempt_count = stats->preempt_count;
+ out->ps_preempted_rt_count = stats->preempted_rt_count;
+ out->ps_preempted_by_rt_count = stats->preempted_by_rt_count;
+ out->ps_rt_sched_count = stats->rt_sched_count;
+ out->ps_interrupt_count = stats->interrupt_count;
+ out->ps_ipi_count = stats->ipi_count;
+ out->ps_timer_pop_count = stats->timer_pop_count;
+ out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor);
+ out->ps_idle_transitions = stats->idle_transitions;
+ out->ps_quantum_timer_expirations = stats->quantum_timer_expirations;
+
+ out++;
+ processor = processor->processor_list;
+ }
+
+ *count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));
+
+ simple_unlock(&processor_list_lock);
+
+ /* And include RT Queue information */
+ bzero(out, sizeof(*out));
+ out->ps_cpuid = (-1);
+ out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
+ out++;
+ *count += (uint32_t)sizeof(struct _processor_statistics_np);
+
+ /* And include Fair Share Queue information at the end */
+ bzero(out, sizeof(*out));
+ out->ps_cpuid = (-2);
+ out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
+ *count += (uint32_t)sizeof(struct _processor_statistics_np);
+
+ return KERN_SUCCESS;
+}
kern_return_t
host_page_size(
if (host == HOST_NULL)
return(KERN_INVALID_ARGUMENT);
- *out_page_size = PAGE_SIZE;
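+	/* Report the page size of the caller's VM map rather than the kernel's global PAGE_SIZE. */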
+ vm_map_t map = get_task_map(current_task());
+ *out_page_size = vm_map_page_size(map);
return(KERN_SUCCESS);
}
void *addr;
if (host_priv == HOST_PRIV_NULL)
- return KERN_INVALID_ARGUMENT;
+ return (KERN_INVALID_ARGUMENT);
/*
 *	Allocate memory.  Can be pageable because it won't be
 *	touched while holding a lock.
 */
addr = kalloc((vm_size_t) sizeof(mach_port_t));
if (addr == 0)
- return KERN_RESOURCE_SHORTAGE;
+ return (KERN_RESOURCE_SHORTAGE);
- /* take ref for convert_pset_name_to_port */
- pset_reference(&default_pset);
/* do the conversion that Mig should handle */
- *((ipc_port_t *) addr) = convert_pset_name_to_port(&default_pset);
+ *((ipc_port_t *) addr) = convert_pset_name_to_port(&pset0);
*pset_list = (processor_set_array_t)addr;
*count = 1;
- return KERN_SUCCESS;
+ return (KERN_SUCCESS);
}
/*
processor_set_t pset_name,
processor_set_t *pset)
{
- if ((host_priv == HOST_PRIV_NULL) || (pset_name == PROCESSOR_SET_NULL)) {
- *pset = PROCESSOR_SET_NULL;
- return(KERN_INVALID_ARGUMENT);
+ if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) {
+ *pset = PROCESSOR_SET_NULL;
+
+ return (KERN_INVALID_ARGUMENT);
}
*pset = pset_name;
- pset_reference(*pset);
- return(KERN_SUCCESS);
+
+ return (KERN_SUCCESS);
}
/*
assert(pcount != 0);
needed = pcount * icount * sizeof(natural_t);
- size = round_page(needed);
+ size = vm_map_round_page(needed,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
result = kmem_alloc(ipc_kernel_map, &addr, size);
if (result != KERN_SUCCESS)
return (KERN_RESOURCE_SHORTAGE);
if (size != needed)
bzero((char *) addr + needed, size - needed);
- result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size), FALSE);
+ result = vm_map_unwire(
+ ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ FALSE);
assert(result == KERN_SUCCESS);
result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		(vm_map_size_t)size, TRUE, &copy);
ipc_port_t port;
if (host_priv == HOST_PRIV_NULL ||
- id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT )
+ id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
return KERN_INVALID_ARGUMENT;
-#if DIPC
- if (node != HOST_LOCAL_NODE)
- return norma_get_special_port(host_priv, node, id, portp);
-#endif
-
host_lock(host_priv);
port = realhost.special[id];
*portp = ipc_port_copy_send(port);