[apple/xnu.git] blobdiff: bsd/kern/kern_clock.c (tag xnu-4903.221.2)

diff --git a/bsd/kern/kern_clock.c b/bsd/kern/kern_clock.c
index 34088069521715e04fd72ef3759bc946bb800e2e..08507cdc593e6c330f3b97ea0f95959cea5dbe97 100644
--- a/bsd/kern/kern_clock.c
+++ b/bsd/kern/kern_clock.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
 /*-
  * HISTORY
  */
 
-#include <machine/spl.h>
-
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/time.h>
 #include <sys/resourcevar.h>
 #include <sys/kernel.h>
 #include <sys/resource.h>
-#include <sys/proc.h>
+#include <sys/proc_internal.h>
 #include <sys/vm.h>
+#include <sys/sysctl.h>
 
 #ifdef GPROF
 #include <sys/gmon.h>
@@ -88,6 +90,9 @@
 
 #include <kern/thread_call.h>
 
+void bsd_uprofil(struct time_value *syst, user_addr_t pc);
+int tvtohz(struct timeval *tv);
+
 /*
  * Clock handling routines.
  *
 
 /*
  * The hz hardware interval timer.
- * We update the events relating to real time.
- * If this timer is also being used to gather statistics,
- * we run through the statistics gathering routine as well.
  */
 
-int bsd_hardclockinit = 0;
-/*ARGSUSED*/
-void
-bsd_hardclock(usermode, pc, numticks)
-       boolean_t usermode;
-       caddr_t pc;
-       int numticks;
-{
-       register struct proc *p;
-       register thread_t       thread;
-       int nusecs = numticks * tick;
-       struct timeval          tv;
-
-       if (!bsd_hardclockinit)
-               return;
-
-       /*
-        * Increment the time-of-day.
-        */
-       microtime(&tv);
-       time = tv;
-
-       if (bsd_hardclockinit < 0) {
-           return;
-       }
+int             hz = 100;                /* GET RID OF THIS !!! */
+int             tick = (1000000 / 100);  /* GET RID OF THIS !!! */
 
-       thread = current_act();
-       /*
-        * Charge the time out based on the mode the cpu is in.
-        * Here again we fudge for the lack of proper interval timers
-        * assuming that the current state has been around at least
-        * one tick.
-        */
-       p = (struct proc *)current_proc();
-       if (p && ((p->p_flag & P_WEXIT) == NULL)) {
-               if (usermode) {         
-                       if (p->p_stats && p->p_stats->p_prof.pr_scale) {
-                               p->p_flag |= P_OWEUPC;
-                               astbsd_on();
-                       }
-
-                       /*
-                        * CPU was in user state.  Increment
-                        * user time counter, and process process-virtual time
-                        * interval timer. 
-                        */
-                       if (p->p_stats && 
-                               timerisset(&p->p_stats->p_timer[ITIMER_VIRTUAL].it_value) &&
-                               !itimerdecr(&p->p_stats->p_timer[ITIMER_VIRTUAL], nusecs)) {
-                               extern void psignal_vtalarm(struct proc *);
-                        
-                               /* does psignal(p, SIGVTALRM) in a thread context */
-                               thread_call_func((thread_call_func_t)psignal_vtalarm, p, FALSE);
-                       }
-               }
+/*
+ * Kernel timeout services.
+ */
 
-               /*
-                * If the cpu is currently scheduled to a process, then
-                * charge it with resource utilization for a tick, updating
-                * statistics which run in (user+system) virtual time,
-                * such as the cpu time limit and profiling timers.
-                * This assumes that the current process has been running
-                * the entire last tick.
-                */
-               if (!is_thread_idle(thread)) {          
-                       if (p->p_limit &&
-                               p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-                               time_value_t    sys_time, user_time;
-
-                               thread_read_times(thread, &user_time, &sys_time);
-                               if ((sys_time.seconds + user_time.seconds + 1) >
-                                       p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur) {
-                                       extern void psignal_xcpu(struct proc *);
-                        
-                                       /* does psignal(p, SIGXCPU) in a thread context */
-                                       thread_call_func((thread_call_func_t)psignal_xcpu, p, FALSE);
-
-                                       if (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur <
-                                               p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_max)
-                                               p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur += 5;
-                               }
-                       }
-                       if (timerisset(&p->p_stats->p_timer[ITIMER_PROF].it_value) &&
-                               !itimerdecr(&p->p_stats->p_timer[ITIMER_PROF], nusecs)) {
-                               extern void psignal_sigprof(struct proc *);
-                        
-                               /* does psignal(p, SIGPROF) in a thread context */
-                               thread_call_func((thread_call_func_t)psignal_sigprof, p, FALSE);
-                       }
-               }
-       }
+/*
+ *     Set a timeout.
+ *
+ *     fcn:            function to call
+ *     param:          parameter to pass to function
+ *     interval:       timeout interval, in hz.
+ */
+void
+timeout(
+       timeout_fcn_t                   fcn,
+       void                                    *param,
+       int                                             interval)
+{
+       uint64_t                deadline;
 
-#ifdef GPROF
-       /*
-        * Gather some statistics.
-        */
-       gatherstats(usermode, pc);
-#endif
+       clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
+       thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
 }
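
For orientation, a minimal usage sketch of the timeout()/hz convention above (mydrv_poll and its arg are hypothetical names, not part of this change; this assumes the usual xnu typedef of timeout_fcn_t as void (*)(void *)):

        /* Hypothetical periodic callback: with hz == 100 one tick is 10 ms,
         * so an interval of hz re-arms the callout roughly once per second. */
        static void
        mydrv_poll(void *arg)
        {
                /* ... periodic work on 'arg' ... */
                timeout(mydrv_poll, arg, hz);   /* fire again in ~1 second */
        }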
 
 /*
- * Gather some statistics.
+ *     Set a timeout with leeway.
+ *
+ *     fcn:            function to call
+ *     param:          parameter to pass to function
+ *     interval:       timeout interval, in hz.
+ *     leeway_interval:        leeway interval, in hz.
  */
-/*ARGSUSED*/
 void
-gatherstats(
-       boolean_t       usermode,
-       caddr_t         pc)
+timeout_with_leeway(
+       timeout_fcn_t                   fcn,
+       void                                    *param,
+       int                                             interval,
+       int                                             leeway_interval)
 {
-#ifdef GPROF
-       if (!usermode) {
-               struct gmonparam *p = &_gmonparam;
+       uint64_t                deadline;
+       uint64_t                leeway;
+
+       clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
 
-               if (p->state == GMON_PROF_ON) {
-                       register int s;
+       clock_interval_to_absolutetime_interval(leeway_interval, NSEC_PER_SEC / hz, &leeway);
 
-                       s = pc - p->lowpc;
-                       if (s < p->textsize) {
-                               s /= (HISTFRACTION * sizeof(*p->kcount));
-                               p->kcount[s]++;
-                       }
-               }
-       }
-#endif
+       thread_call_func_delayed_with_leeway((thread_call_func_t)fcn, param, deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
 }
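
A hedged illustration of the leeway semantics (hypothetical mydrv_poll/sc, values chosen for hz == 100): the interval sets the earliest firing point, and the leeway tells the lower layer how far past that point the callback may be coalesced with other wakeups.

        /* Request a callback ~500 ms out (50 ticks), allowing it to be
         * deferred by up to an additional ~100 ms (10 ticks) for coalescing. */
        timeout_with_leeway(mydrv_poll, sc, 50, 10);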
 
-
 /*
- * Kernel timeout services.
+ * Cancel a timeout.
+ * Deprecated because it's very inefficient.
+ * Switch to an allocated thread call instead.
  */
+void
+untimeout(
+       timeout_fcn_t           fcn,
+       void                    *param)
+{
+       thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE);
+}
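
Since untimeout() is flagged as deprecated above, here is a sketch of the allocated-thread-call pattern the comment recommends (mydrv_* names are hypothetical; <kern/thread_call.h> is already included by this file):

        static thread_call_t    mydrv_call;

        static void
        mydrv_call_fn(thread_call_param_t p0, __unused thread_call_param_t p1)
        {
                /* ... periodic work on 'p0' ... */
        }

        static void
        mydrv_start(void *sc)
        {
                uint64_t        deadline;

                /* Allocate once; re-arming and cancelling are then cheap,
                 * with no global search over pending (fcn, param) pairs. */
                mydrv_call = thread_call_allocate(mydrv_call_fn, (thread_call_param_t)sc);
                clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline); /* ~1 second out */
                thread_call_enter_delayed(mydrv_call, deadline);
        }

        static void
        mydrv_stop(void)
        {
                thread_call_cancel(mydrv_call);
                thread_call_free(mydrv_call);
        }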
+
 
 /*
  *     Set a timeout.
  *
  *     fcn:            function to call
  *     param:          parameter to pass to function
- *     interval:       timeout interval, in hz.
+ *     ts:             timeout interval, in timespec
  */
 void
-timeout(
+bsd_timeout(
        timeout_fcn_t                   fcn,
        void                                    *param,
-       int                                             interval)
+       struct timespec         *ts)
 {
-       uint64_t                deadline;
+       uint64_t                deadline = 0;
 
-       clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
+       if (ts && (ts->tv_sec || ts->tv_nsec)) {
+               nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec,  &deadline );
+               clock_absolutetime_interval_to_deadline( deadline, &deadline );
+       }
        thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
 }
 
 /*
  * Cancel a timeout.
+ * Deprecated because it's very inefficient.
+ * Switch to an allocated thread call instead.
  */
 void
-untimeout(
-       register timeout_fcn_t          fcn,
-       register void                           *param)
+bsd_untimeout(
+       timeout_fcn_t           fcn,
+       void                    *param)
 {
        thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE);
 }
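
A corresponding sketch for the timespec-based variant (hypothetical mydrv_poll/sc; a fragment, not a complete function):

        struct timespec ts = { .tv_sec = 0, .tv_nsec = 250 * NSEC_PER_MSEC };

        bsd_timeout(mydrv_poll, sc, &ts);       /* fire ~250 ms from now */
        /* ... */
        bsd_untimeout(mydrv_poll, sc);          /* cancel if still pending */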
 
 
-
 /*
  * Compute number of hz until specified time.
  * Used to compute third argument to timeout() from an
  * absolute time.
  */
-hzto(tv)
-       struct timeval *tv;
+int
+hzto(struct timeval *tv)
 {
        struct timeval now;
-       register long ticks;
-       register long sec;
+       long ticks;
+       long sec;
 
        microtime(&now);
        /*
@@ -311,33 +251,34 @@ hzto(tv)
 /*
  * Return information about system clocks.
  */
-int
-sysctl_clockrate(where, sizep)
-       register char *where;
-       size_t *sizep;
+static int
+sysctl_clockrate
+(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
 {
-       struct clockinfo clkinfo;
-
-       /*
-        * Construct clockinfo structure.
-        */
-       clkinfo.hz = hz;
-       clkinfo.tick = tick;
-       clkinfo.profhz = hz;
-       clkinfo.stathz = hz;
-       return sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo));
+       struct clockinfo clkinfo = {
+               .hz         = hz,
+               .tick       = tick,
+               .tickadj    = 0,
+               .stathz     = hz,
+               .profhz     = hz,
+       };
+
+       return sysctl_io_opaque(req, &clkinfo, sizeof(clkinfo), NULL);
 }
 
+SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
+               CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
+               0, 0, sysctl_clockrate, "S,clockinfo", "");
+
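
The structure registered above is read back from userspace as kern.clockrate; a minimal user-level sketch (ordinary user code, not part of this file, assuming struct clockinfo from <sys/time.h>):

        #include <sys/types.h>
        #include <sys/sysctl.h>
        #include <sys/time.h>
        #include <stdio.h>

        int
        main(void)
        {
                struct clockinfo ci;
                size_t len = sizeof(ci);

                if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == 0)
                        printf("hz=%d tick=%d profhz=%d stathz=%d\n",
                            ci.hz, ci.tick, ci.profhz, ci.stathz);
                return 0;
        }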
 
 /*
  * Compute number of ticks in the specified amount of time.
  */
 int
-tvtohz(tv)
-       struct timeval *tv;
+tvtohz(struct timeval *tv)
 {
-       register unsigned long ticks;
-       register long sec, usec;
+       unsigned long ticks;
+       long sec, usec;
 
        /*
         * If the number of usecs in the whole seconds part of the time
@@ -396,31 +337,30 @@ tvtohz(tv)
  * keeps the profile clock running constantly.
  */
 void
-startprofclock(p)
-       register struct proc *p;
+startprofclock(struct proc *p)
 {
        if ((p->p_flag & P_PROFIL) == 0)
-               p->p_flag |= P_PROFIL;
+               OSBitOrAtomic(P_PROFIL, &p->p_flag);
 }
 
 /*
  * Stop profiling on a process.
  */
 void
-stopprofclock(p)
-       register struct proc *p;
+stopprofclock(struct proc *p)
 {
        if (p->p_flag & P_PROFIL)
-               p->p_flag &= ~P_PROFIL;
+               OSBitAndAtomic(~((uint32_t)P_PROFIL), &p->p_flag);
 }
 
+/* TBD locking user profiling is not resolved yet */
 void
-bsd_uprofil(struct time_value *syst, unsigned int pc)
+bsd_uprofil(struct time_value *syst, user_addr_t pc)
 {
-struct proc *p = current_proc();
-int            ticks;
-struct timeval *tv;
-struct timeval st;
+       struct proc *p = current_proc();
+       int             ticks;
+       struct timeval  *tv;
+       struct timeval st;
 
        if (p == NULL)
                return;
@@ -439,6 +379,7 @@ struct timeval st;
                addupc_task(p, pc, ticks);
 }
 
+/* TBD locking user profiling is not resolved yet */
 void
 get_procrustime(time_value_t *tv)
 {
@@ -450,7 +391,9 @@ get_procrustime(time_value_t *tv)
        if ( !(p->p_flag & P_PROFIL))
                return;
 
+       //proc_lock(p);
        st = p->p_stats->p_ru.ru_stime;
+       //proc_unlock(p);
        
        tv->seconds = st.tv_sec;
        tv->microseconds = st.tv_usec;