[apple/xnu.git] / bsd / kern / kern_clock.c (xnu-344)

/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */
/*
 * HISTORY
 */

#include <machine/spl.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/resource.h>
#include <sys/proc.h>
#include <sys/vm.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#include <kern/thread.h>
#include <kern/ast.h>
#include <kern/assert.h>
#include <mach/boolean.h>

#include <kern/thread_call.h>
/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers which run
 * independently of each other.  The main clock, running at hz
 * times per second, is used to do scheduling and timeout calculations.
 * The second timer does resource utilization estimation statistically
 * based on the state of the machine phz times a second.  Both functions
 * can be performed by a single clock (ie hz == phz), however the
 * statistics will be much more prone to errors.  Ideally a machine
 * would have separate clocks measuring time spent in user state, system
 * state, interrupt state, and idle state.  These clocks would allow a non-
 * approximate measure of resource utilization.
 */

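/*
 * Worked example (illustrative; hz and tick are configured elsewhere in
 * the kernel): tick is 1000000 / hz microseconds per clock interrupt, so
 * with the traditional hz of 100, tick == 10000 usec and the expression
 * numticks * tick in bsd_hardclock() below yields elapsed microseconds.
 */
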
/*
 * The hz hardware interval timer.
 * We update the events relating to real time.
 * If this timer is also being used to gather statistics,
 * we run through the statistics gathering routine as well.
 */

int bsd_hardclockinit = 0;
/*ARGSUSED*/
void
bsd_hardclock(usermode, pc, numticks)
	boolean_t	usermode;
	caddr_t		pc;
	int		numticks;
{
	register struct proc	*p;
	register thread_t	thread;
	int nusecs = numticks * tick;

	if (!bsd_hardclockinit)
		return;

	/*
	 * Increment the time-of-day.
	 */
	microtime(&time);

	if (bsd_hardclockinit < 0) {
		return;
	}

	thread = current_thread();
	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers
	 * assuming that the current state has been around at least
	 * one tick.
	 */
	p = (struct proc *)current_proc();
	if (p && ((p->p_flag & P_WEXIT) == 0)) {
		if (usermode) {
			if (p->p_stats && p->p_stats->p_prof.pr_scale) {
				p->p_flag |= P_OWEUPC;
				astbsd_on();
			}

			/*
			 * CPU was in user state.  Increment
			 * user time counter, and process process-virtual time
			 * interval timer.
			 */
			if (p->p_stats &&
			    timerisset(&p->p_stats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    !itimerdecr(&p->p_stats->p_timer[ITIMER_VIRTUAL], nusecs)) {
				extern void psignal_vtalarm(struct proc *);

				/* does psignal(p, SIGVTALRM) in a thread context */
				thread_call_func(psignal_vtalarm, p, FALSE);
			}
		}

		/*
		 * If the cpu is currently scheduled to a process, then
		 * charge it with resource utilization for a tick, updating
		 * statistics which run in (user+system) virtual time,
		 * such as the cpu time limit and profiling timers.
		 * This assumes that the current process has been running
		 * the entire last tick.
		 */
		if (!is_thread_idle(thread)) {
			if (p->p_limit &&
			    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
				time_value_t	sys_time, user_time;

				thread_read_times(thread, &user_time, &sys_time);
				if ((sys_time.seconds + user_time.seconds + 1) >
				    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur) {
					extern void psignal_xcpu(struct proc *);

					/* does psignal(p, SIGXCPU) in a thread context */
					thread_call_func(psignal_xcpu, p, FALSE);

					if (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur <
					    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_max)
						p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur += 5;
				}
			}
			if (timerisset(&p->p_stats->p_timer[ITIMER_PROF].it_value) &&
			    !itimerdecr(&p->p_stats->p_timer[ITIMER_PROF], nusecs)) {
				extern void psignal_sigprof(struct proc *);

				/* does psignal(p, SIGPROF) in a thread context */
				thread_call_func(psignal_sigprof, p, FALSE);
			}
		}
	}

#ifdef GPROF
	/*
	 * Gather some statistics.
	 */
	gatherstats(usermode, pc);
#endif
}

/*
 * Gather some statistics.
 */
/*ARGSUSED*/
void
gatherstats(
	boolean_t	usermode,
	caddr_t		pc)
{
#ifdef GPROF
	if (!usermode) {
		struct gmonparam *p = &_gmonparam;

		if (p->state == GMON_PROF_ON) {
			register int	s;

			s = pc - p->lowpc;
			if (s < p->textsize) {
				s /= (HISTFRACTION * sizeof(*p->kcount));
				p->kcount[s]++;
			}
		}
	}
#endif
}
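
/*
 * Worked example for the histogram indexing above (illustrative only):
 * kcount[] holds one counter per (HISTFRACTION * sizeof(*p->kcount)) bytes
 * of kernel text.  Assuming the common HISTFRACTION of 2 and 16-bit
 * counters, each bucket covers 4 bytes, so a pc 0x1000 bytes above
 * p->lowpc increments kcount[0x400].  The actual HISTFRACTION value comes
 * from <sys/gmon.h>.
 */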


/*
 * Kernel timeout services.
 */

/*
 *	Set a timeout.
 *
 *	fcn:		function to call
 *	param:		parameter to pass to function
 *	interval:	timeout interval, in hz.
 */
void
timeout(
	timeout_fcn_t	fcn,
	void		*param,
	int		interval)
{
	uint64_t	deadline;

	clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}

/*
 *	Cancel a timeout.
 */
void
untimeout(
	register timeout_fcn_t	fcn,
	register void		*param)
{
	thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE);
}
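
/*
 * Usage sketch (illustrative; the callback name and argument are
 * hypothetical).  It shows how a kernel client might arm a one-second
 * callout and later cancel it.  With this implementation the callback is
 * invoked from a thread_call context rather than at interrupt level.
 */
#if 0
static void
example_expire(void *arg)		/* hypothetical callback */
{
	(void)arg;
	printf("example timeout fired\n");
}

static void
example_arm_and_cancel(void *arg)
{
	timeout(example_expire, arg, hz);	/* fire in roughly one second */
	/* ... */
	untimeout(example_expire, arg);		/* cancel if still pending */
}
#endif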



/*
 * Compute number of hz until specified time.
 * Used to compute third argument to timeout() from an
 * absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	struct timeval now;
	register long ticks;
	register long sec;

	microtime(&now);
	/*
	 * If number of milliseconds will fit in 32 bit arithmetic,
	 * then compute number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 */
	sec = tv->tv_sec - now.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - now.tv_sec) * 1000 +
			 (tv->tv_usec - now.tv_usec) / 1000)
			/ (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;

	return (ticks);
}
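
/*
 * Usage sketch (illustrative; the callback and the one-minute deadline are
 * hypothetical): converting an absolute wall-clock time into the tick
 * count expected as the third argument of timeout().
 */
#if 0
static void
example_deadline(void *arg)		/* hypothetical callback */
{
	(void)arg;
	/* ... */
}

static void
example_schedule_absolute(void *arg)
{
	struct timeval when;

	microtime(&when);
	when.tv_sec += 60;			/* one minute from now */
	timeout(example_deadline, arg, hzto(&when));
}
#endif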

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = hz;
	clkinfo.stathz = hz;
	return sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo));
}
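
/*
 * Consumer's view (illustrative sketch, not kernel code): the structure
 * built above is what a userland caller of the standard kern.clockrate
 * sysctl (CTL_KERN, KERN_CLOCKRATE) receives, e.g.
 *
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *
 *	if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d tick=%d\n", ci.hz, ci.tick);
 */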


/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
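
/*
 * Worked example for tvtohz() (illustrative, assuming hz == 100 and hence
 * tick == 10000 usec): for tv = { 1, 500000 }, i.e. 1.5 seconds,
 * ticks = (1 * 1000000 + 500000 + 9999) / 10000 + 1 = 150 + 1 = 151,
 * the extra tick allowing for the partially elapsed current tick.
 */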


/*
 * Start profiling on a process.
 *
 * Kernel profiling passes kernel_proc which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	if ((p->p_flag & P_PROFIL) == 0)
		p->p_flag |= P_PROFIL;
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	if (p->p_flag & P_PROFIL)
		p->p_flag &= ~P_PROFIL;
}

void
bsd_uprofil(struct time_value *syst, unsigned int pc)
{
	struct proc *p = current_proc();
	int ticks;
	struct timeval *tv;
	struct timeval st;

	if (p == NULL)
		return;
	if (!(p->p_flag & P_PROFIL))
		return;

	st.tv_sec = syst->seconds;
	st.tv_usec = syst->microseconds;

	tv = &(p->p_stats->p_ru.ru_stime);

	ticks = ((tv->tv_sec - st.tv_sec) * 1000 +
		 (tv->tv_usec - st.tv_usec) / 1000) /
		(tick / 1000);
	if (ticks)
		addupc_task(p, pc, ticks);
}
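
/*
 * Worked example for the conversion above (illustrative, assuming hz == 100
 * so tick / 1000 == 10 ms per tick): if system time has advanced by
 * 2 seconds and 500000 usec since syst was sampled, then
 * ticks = (2 * 1000 + 500) / 10 = 250, i.e. 250 profiling ticks are
 * credited to the program counter pc via addupc_task().
 */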

void
get_procrustime(time_value_t *tv)
{
	struct proc *p = current_proc();
	struct timeval st;

	if (p == NULL)
		return;
	if (!(p->p_flag & P_PROFIL))
		return;

	st = p->p_stats->p_ru.ru_stime;

	tv->seconds = st.tv_sec;
	tv->microseconds = st.tv_usec;
}