]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/kern/host.c
xnu-1699.32.7.tar.gz
[apple/xnu.git] / osfmk / kern / host.c
index 0f8de2841a9d3138f600d756e8f55a01723a6cfa..90542a946691c06abbc3f5a2433e0211167a5a9a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -90,6 +90,8 @@
 
 host_data_t    realhost;
 
+vm_extmod_statistics_data_t host_extmod_statistics;
+
 kern_return_t
 host_processors(
        host_priv_t                             host_priv,
@@ -153,7 +155,7 @@ host_info(
        case HOST_BASIC_INFO:
        {
                register host_basic_info_t      basic_info;
-               register int                            master_num;
+               register int                            master_id;
 
                /*
                 *      Basic information about this host.
@@ -166,12 +168,12 @@ host_info(
                basic_info->memory_size = machine_info.memory_size;
                basic_info->max_cpus = machine_info.max_cpus;
                basic_info->avail_cpus = processor_avail_count;
-               master_num = master_processor->cpu_num;
-               basic_info->cpu_type = slot_type(master_num);
-               basic_info->cpu_subtype = slot_subtype(master_num);
+               master_id = master_processor->cpu_id;
+               basic_info->cpu_type = slot_type(master_id);
+               basic_info->cpu_subtype = slot_subtype(master_id);
 
                if (*count >= HOST_BASIC_INFO_COUNT) {
-                       basic_info->cpu_threadtype = slot_threadtype(master_num);
+                       basic_info->cpu_threadtype = slot_threadtype(master_id);
                        basic_info->physical_cpu = machine_info.physical_cpu;
                        basic_info->physical_cpu_max = machine_info.physical_cpu_max;
                        basic_info->logical_cpu = machine_info.logical_cpu;
@@ -189,6 +191,8 @@ host_info(
        case HOST_SCHED_INFO:
        {
                register host_sched_info_t      sched_info;
+               uint32_t quantum_time;
+               uint64_t quantum_ns;
 
                /*
                 *      Return scheduler information.
@@ -198,8 +202,11 @@ host_info(
 
                sched_info = (host_sched_info_t) info;
 
+               quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
+               absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
+
                sched_info->min_timeout = 
-                       sched_info->min_quantum = std_quantum_us / 1000;
+                       sched_info->min_quantum = (uint32_t)(quantum_ns / 1000 / 1000);
 
                *count = HOST_SCHED_INFO_COUNT;
 
@@ -263,6 +270,7 @@ host_statistics(
        host_info_t                             info,
        mach_msg_type_number_t  *count)
 {
+       uint32_t        i;
 
        if (host == HOST_NULL)
                return (KERN_INVALID_HOST);
@@ -290,8 +298,9 @@ host_statistics(
        case HOST_VM_INFO:
        {
                register processor_t            processor;
-               register vm_statistics_t        stat;
-               vm_statistics_data_t            host_vm_stat;
+               register vm_statistics64_t      stat;
+               vm_statistics64_data_t          host_vm_stat;
+               vm_statistics_t                 stat32;
                mach_msg_type_number_t          original_count;
                 
                if (*count < HOST_VM_INFO_REV0_COUNT)
@@ -320,20 +329,34 @@ host_statistics(
                        simple_unlock(&processor_list_lock);
                }
 
-               stat = (vm_statistics_t) info;
-
-               stat->free_count = vm_page_free_count + vm_page_speculative_count;
-               stat->active_count = vm_page_active_count;
-               stat->inactive_count = vm_page_inactive_count;
-               stat->wire_count = vm_page_wire_count;
-               stat->zero_fill_count = host_vm_stat.zero_fill_count;
-               stat->reactivations = host_vm_stat.reactivations;
-               stat->pageins = host_vm_stat.pageins;
-               stat->pageouts = host_vm_stat.pageouts;
-               stat->faults = host_vm_stat.faults;
-               stat->cow_faults = host_vm_stat.cow_faults;
-               stat->lookups = host_vm_stat.lookups;
-               stat->hits = host_vm_stat.hits;
+               stat32 = (vm_statistics_t) info;
+
+               stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
+               stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);
+               
+               if (vm_page_local_q) {
+                       for (i = 0; i < vm_page_local_q_count; i++) {
+                               struct vpl      *lq;
+
+                               lq = &vm_page_local_q[i].vpl_un.vpl;
+
+                               stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
+                       }
+               }
+               stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
+#if CONFIG_EMBEDDED
+               stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
+#else
+               stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
+#endif
+               stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
+               stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
+               stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
+               stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
+               stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
+               stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
+               stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
+               stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);
 
                /*
                 * Fill in extra info added in later revisions of the
@@ -344,16 +367,19 @@ host_statistics(
                *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
                if (original_count >= HOST_VM_INFO_REV1_COUNT) {
                        /* rev1 added "purgeable" info */
-                       stat->purgeable_count = vm_page_purgeable_count;
-                       stat->purges = vm_page_purged_count;
+                       stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
+                       stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
                        *count = HOST_VM_INFO_REV1_COUNT;
                }
+
                if (original_count >= HOST_VM_INFO_REV2_COUNT) {
                        /* rev2 added "speculative" info */
-                       stat->speculative_count = vm_page_speculative_count;
+                       stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
                        *count = HOST_VM_INFO_REV2_COUNT;
                }
 
+               /* rev3 changed some of the fields to be 64-bit */
+
                return (KERN_SUCCESS);
        }
                 
@@ -365,10 +391,11 @@ host_statistics(
                if (*count < HOST_CPU_LOAD_INFO_COUNT)
                        return (KERN_FAILURE);
 
-#define GET_TICKS_VALUE(processor, state, timer)                                                                               \
-MACRO_BEGIN                                                                                                                                                            \
-       cpu_load_info->cpu_ticks[(state)] +=                                                                                            \
-                               timer_grab(&PROCESSOR_DATA(processor, timer)) / hz_tick_interval;               \
+#define GET_TICKS_VALUE(processor, state, timer)                        \
+MACRO_BEGIN                                                             \
+       cpu_load_info->cpu_ticks[(state)] +=                             \
+               (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer)) \
+                               / hz_tick_interval);                     \
 MACRO_END
 
                cpu_load_info = (host_cpu_load_info_t)info;
@@ -377,23 +404,29 @@ MACRO_END
                cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
                cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;
 
-               processor = processor_list;
-               GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
-               GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
-               GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
+               simple_lock(&processor_list_lock);
 
-               if (processor_count > 1) {
-                       simple_lock(&processor_list_lock);
+               for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
+                       timer_data_t    idle_temp;
+                       timer_t         idle_state;
 
-                       while ((processor = processor->processor_list) != NULL) {
-                               GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
-                               GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
+                       GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
+                       GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
+
+                       idle_state = &PROCESSOR_DATA(processor, idle_state);
+                       idle_temp = *idle_state;
+
+                       if (PROCESSOR_DATA(processor, current_state) != idle_state ||
+                           timer_grab(&idle_temp) != timer_grab(idle_state))
                                GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
-                       }
+                       else {
+                               timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);
 
-                       simple_unlock(&processor_list_lock);
+                               cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
+                                       (uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
+                       }
                }
-
+               simple_unlock(&processor_list_lock);
                *count = HOST_CPU_LOAD_INFO_COUNT;
 
                return (KERN_SUCCESS);
@@ -404,6 +437,115 @@ MACRO_END
        }
 }
 
+
+kern_return_t
+host_statistics64(
+       host_t                          host,
+       host_flavor_t                   flavor,
+       host_info64_t                   info,
+       mach_msg_type_number_t          *count)
+{
+       uint32_t        i;
+       
+       if (host == HOST_NULL)
+               return (KERN_INVALID_HOST);
+       
+       switch(flavor) {
+
+               case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
+               {
+                       register processor_t            processor;
+                       register vm_statistics64_t      stat;
+                       vm_statistics64_data_t          host_vm_stat;
+
+                       if (*count < HOST_VM_INFO64_COUNT)
+                               return (KERN_FAILURE);
+
+                       processor = processor_list;
+                       stat = &PROCESSOR_DATA(processor, vm_stat);
+                       host_vm_stat = *stat;
+
+                       if (processor_count > 1) {
+                               simple_lock(&processor_list_lock);
+
+                               while ((processor = processor->processor_list) != NULL) {
+                                       stat = &PROCESSOR_DATA(processor, vm_stat);
+
+                                       host_vm_stat.zero_fill_count += stat->zero_fill_count;
+                                       host_vm_stat.reactivations += stat->reactivations;
+                                       host_vm_stat.pageins += stat->pageins;
+                                       host_vm_stat.pageouts += stat->pageouts;
+                                       host_vm_stat.faults += stat->faults;
+                                       host_vm_stat.cow_faults += stat->cow_faults;
+                                       host_vm_stat.lookups += stat->lookups;
+                                       host_vm_stat.hits += stat->hits;
+                               }
+
+                               simple_unlock(&processor_list_lock);
+                       }
+
+                       stat = (vm_statistics64_t) info;
+
+                       stat->free_count = vm_page_free_count + vm_page_speculative_count;
+                       stat->active_count = vm_page_active_count;
+
+                       if (vm_page_local_q) {
+                               for (i = 0; i < vm_page_local_q_count; i++) {
+                                       struct vpl      *lq;
+                               
+                                       lq = &vm_page_local_q[i].vpl_un.vpl;
+
+                                       stat->active_count += lq->vpl_count;
+                               }
+                       }
+                       stat->inactive_count = vm_page_inactive_count;
+#if CONFIG_EMBEDDED
+                       stat->wire_count = vm_page_wire_count;
+#else
+                       stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count;
+#endif
+                       stat->zero_fill_count = host_vm_stat.zero_fill_count;
+                       stat->reactivations = host_vm_stat.reactivations;
+                       stat->pageins = host_vm_stat.pageins;
+                       stat->pageouts = host_vm_stat.pageouts;
+                       stat->faults = host_vm_stat.faults;
+                       stat->cow_faults = host_vm_stat.cow_faults;
+                       stat->lookups = host_vm_stat.lookups;
+                       stat->hits = host_vm_stat.hits;
+               
+                       /* rev1 added "purgable" info */
+                       stat->purgeable_count = vm_page_purgeable_count;
+                       stat->purges = vm_page_purged_count;
+               
+                       /* rev2 added "speculative" info */
+                       stat->speculative_count = vm_page_speculative_count;
+
+                       *count = HOST_VM_INFO64_COUNT;  
+
+                       return(KERN_SUCCESS);
+               }
+
+               case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */
+               {
+                       vm_extmod_statistics_t          out_extmod_statistics;
+
+                       if (*count < HOST_EXTMOD_INFO64_COUNT)
+                               return (KERN_FAILURE);
+
+                       out_extmod_statistics = (vm_extmod_statistics_t) info;
+                       *out_extmod_statistics = host_extmod_statistics;
+
+                       *count = HOST_EXTMOD_INFO64_COUNT;      
+
+                       return(KERN_SUCCESS);
+               }
+
+               default: /* If we didn't recognize the flavor, send to host_statistics */
+                       return(host_statistics(host, flavor, (host_info_t) info, count)); 
+       }
+}
+
+
 /*
  * Get host statistics that require privilege.
  * None for now, just call the un-privileged version.
@@ -418,6 +560,74 @@ host_priv_statistics(
        return(host_statistics((host_t)host_priv, flavor, info, count));
 }
 
+kern_return_t
+set_sched_stats_active(
+               boolean_t active) 
+{
+       sched_stats_active = active;
+       return KERN_SUCCESS;
+}
+
+
+kern_return_t
+get_sched_statistics( 
+               struct _processor_statistics_np *out, 
+               uint32_t *count)
+{
+       processor_t processor;
+
+       if (!sched_stats_active) {
+               return KERN_FAILURE;
+       }
+
+       simple_lock(&processor_list_lock);
+       
+       if (*count < (processor_count + 2) * sizeof(struct _processor_statistics_np)) { /* One for RT, one for FS */
+               simple_unlock(&processor_list_lock);
+               return KERN_FAILURE;
+       }
+
+       processor = processor_list;
+       while (processor) {
+               struct processor_sched_statistics *stats = &processor->processor_data.sched_stats;
+
+               out->ps_cpuid                   = processor->cpu_id;
+               out->ps_csw_count               = stats->csw_count;
+               out->ps_preempt_count           = stats->preempt_count;
+               out->ps_preempted_rt_count      = stats->preempted_rt_count;
+               out->ps_preempted_by_rt_count   = stats->preempted_by_rt_count;
+               out->ps_rt_sched_count          = stats->rt_sched_count;
+               out->ps_interrupt_count         = stats->interrupt_count;
+               out->ps_ipi_count               = stats->ipi_count;
+               out->ps_timer_pop_count         = stats->timer_pop_count;
+               out->ps_runq_count_sum          = SCHED(processor_runq_stats_count_sum)(processor);
+               out->ps_idle_transitions        = stats->idle_transitions;
+               out->ps_quantum_timer_expirations       = stats->quantum_timer_expirations;
+
+               out++;
+               processor = processor->processor_list;
+       }
+
+       *count = (uint32_t) (processor_count * sizeof(struct _processor_statistics_np));
+
+       simple_unlock(&processor_list_lock);
+
+       /* And include RT Queue information */
+       bzero(out, sizeof(*out));
+       out->ps_cpuid = (-1);
+       out->ps_runq_count_sum = rt_runq.runq_stats.count_sum;
+       out++;
+       *count += (uint32_t)sizeof(struct _processor_statistics_np);
+
+       /* And include Fair Share Queue information at the end */
+       bzero(out, sizeof(*out));
+       out->ps_cpuid = (-2);
+       out->ps_runq_count_sum = SCHED(fairshare_runq_stats_count_sum)();
+       *count += (uint32_t)sizeof(struct _processor_statistics_np);
+       
+       return KERN_SUCCESS;
+}
+
 kern_return_t
 host_page_size(
        host_t          host,
@@ -656,7 +866,7 @@ host_get_special_port(
        ipc_port_t      port;
 
        if (host_priv == HOST_PRIV_NULL ||
-           id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT )
+           id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < 0)
                return KERN_INVALID_ARGUMENT;
 
        host_lock(host_priv);