/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */
/*
 * HISTORY
 */

#include <machine/spl.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/resource.h>
#include <sys/proc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#include <kern/thread.h>
#include <kern/ast.h>
#include <kern/assert.h>
#include <mach/boolean.h>

#include <kern/thread_call.h>

void bsd_uprofil(struct time_value *syst, user_addr_t pc);
void get_procrustime(time_value_t *tv);
int sysctl_clockrate(user_addr_t where, size_t *sizep);
int tvtohz(struct timeval *tv);
extern void psignal_sigprof(struct proc *);
extern void psignal_vtalarm(struct proc *);
extern void psignal_xcpu(struct proc *);

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run
 * independently of each other.  The main clock, running at hz
 * times per second, is used to do scheduling and timeout calculations.
 * The second timer does resource utilization estimation statistically
 * based on the state of the machine phz times a second.  Both functions
 * can be performed by a single clock (i.e., hz == phz); however, the
 * statistics will be much more prone to error.  Ideally a machine
 * would have separate clocks measuring time spent in user state, system
 * state, interrupt state, and idle state.  These clocks would allow a
 * non-approximate measure of resource utilization.
 */

/*
 * The hz hardware interval timer.
 * We update the events relating to real time.
 * If this timer is also being used to gather statistics,
 * we run through the statistics gathering routine as well.
 */

int hz = 100;			/* GET RID OF THIS !!! */
int tick = (1000000 / 100);	/* GET RID OF THIS !!! */

int bsd_hardclockinit = 0;
/*ARGSUSED*/
void
bsd_hardclock(
	boolean_t	usermode,
#ifdef GPROF
	caddr_t		pc,
#else
	__unused caddr_t	pc,
#endif
	int		numticks
	)
{
	register struct proc	*p;
	register thread_t	thread;
	int			nusecs = numticks * tick;
	struct timeval		tv;

	if (!bsd_hardclockinit)
		return;

	if (bsd_hardclockinit < 0) {
		return;
	}

	thread = current_thread();
	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers
	 * assuming that the current state has been around at least
	 * one tick.
	 */
	p = (struct proc *)current_proc();
	if (p && ((p->p_flag & P_WEXIT) == 0)) {
		if (usermode) {
			if (p->p_stats && p->p_stats->p_prof.pr_scale) {
				p->p_flag |= P_OWEUPC;
				astbsd_on();
			}

			/*
			 * CPU was in user state.  Increment
			 * user time counter, and process process-virtual time
			 * interval timer.
			 */
			if (p->p_stats &&
			    timerisset(&p->p_stats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    !itimerdecr(&p->p_stats->p_timer[ITIMER_VIRTUAL], nusecs)) {

				/* does psignal(p, SIGVTALRM) in a thread context */
				thread_call_func((thread_call_func_t)psignal_vtalarm, p, FALSE);
			}
		}

		/*
		 * If the cpu is currently scheduled to a process, then
		 * charge it with resource utilization for a tick, updating
		 * statistics which run in (user+system) virtual time,
		 * such as the cpu time limit and profiling timers.
		 * This assumes that the current process has been running
		 * the entire last tick.
		 */
		if (!is_thread_idle(thread)) {
			if (p->p_limit &&
			    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
				time_value_t	sys_time, user_time;

				thread_read_times(thread, &user_time, &sys_time);
				if ((sys_time.seconds + user_time.seconds + 1) >
				    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur) {

					/* does psignal(p, SIGXCPU) in a thread context */
					thread_call_func((thread_call_func_t)psignal_xcpu, p, FALSE);

					if (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur <
					    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_max)
						p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur += 5;
				}
			}
			if (timerisset(&p->p_stats->p_timer[ITIMER_PROF].it_value) &&
			    !itimerdecr(&p->p_stats->p_timer[ITIMER_PROF], nusecs)) {

				/* does psignal(p, SIGPROF) in a thread context */
				thread_call_func((thread_call_func_t)psignal_sigprof, p, FALSE);
			}
		}
	}

#ifdef GPROF
	/*
	 * Gather some statistics.
	 */
	gatherstats(usermode, pc);
#endif
}

/*
 * Gather some statistics.
 */
/*ARGSUSED*/
void
gatherstats(
#ifdef GPROF
	boolean_t	usermode,
	caddr_t		pc
#else
	__unused boolean_t	usermode,
	__unused caddr_t	pc
#endif
	)
{
#ifdef GPROF
	if (!usermode) {
		struct gmonparam *p = &_gmonparam;

		if (p->state == GMON_PROF_ON) {
			register int	s;

			s = pc - p->lowpc;
			if (s < p->textsize) {
				s /= (HISTFRACTION * sizeof(*p->kcount));
				p->kcount[s]++;
			}
		}
	}
#endif
}


/*
 * Kernel timeout services.
 */

/*
 *	Set a timeout.
 *
 *	fcn:		function to call
 *	param:		parameter to pass to function
 *	interval:	timeout interval, in ticks (1/hz second units).
 */
void
timeout(
	timeout_fcn_t	fcn,
	void		*param,
	int		interval)
{
	uint64_t	deadline;

	clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}

/*
 *	Cancel a timeout.
 */
void
untimeout(
	register timeout_fcn_t	fcn,
	register void		*param)
{
	thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE);
}
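
/*
 * Illustrative sketch (not part of the original file): how a kernel
 * client might arm and later cancel one of the timeouts above.  The
 * callback and argument names are hypothetical; only timeout(),
 * untimeout(), and hz come from this file.
 */
#if 0	/* example only, not compiled */
static void
example_expire(void *arg)		/* hypothetical callback */
{
	/* runs from a thread-call context roughly 2 seconds after arming */
}

static void
example_arm_and_cancel(void *cookie)
{
	/* interval is expressed in ticks: 2 seconds at hz ticks/second */
	timeout(example_expire, cookie, 2 * hz);

	/* ... later, if the event is no longer wanted ... */
	untimeout(example_expire, cookie);
}
#endif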


/*
 *	Set a timeout.
 *
 *	fcn:		function to call
 *	param:		parameter to pass to function
 *	ts:		timeout interval, as a timespec
 */
void
bsd_timeout(
	timeout_fcn_t	fcn,
	void		*param,
	struct timespec	*ts)
{
	uint64_t	deadline = 0;

	if (ts && (ts->tv_sec || ts->tv_nsec)) {
		nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &deadline);
		clock_absolutetime_interval_to_deadline(deadline, &deadline);
	}
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}

/*
 *	Cancel a timeout.
 */
void
bsd_untimeout(
	register timeout_fcn_t	fcn,
	register void		*param)
{
	thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE);
}
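
/*
 * Illustrative sketch (not part of the original file): bsd_timeout()
 * takes the interval as a timespec rather than in ticks, so it can
 * express sub-tick delays.  The callback and argument names below are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_ts_expire(void *arg)		/* hypothetical callback */
{
}

static void
example_arm_250ms(void *cookie)
{
	struct timespec ts;

	ts.tv_sec = 0;
	ts.tv_nsec = 250000000;		/* 250 ms */
	bsd_timeout(example_ts_expire, cookie, &ts);
}
#endif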


/*
 * Compute the number of ticks (hz units) until the specified time.
 * Used to compute the third argument to timeout() from an
 * absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	struct timeval	now;
	register long	ticks;
	register long	sec;

	microtime(&now);
	/*
	 * If number of milliseconds will fit in 32 bit arithmetic,
	 * then compute number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 */
	sec = tv->tv_sec - now.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - now.tv_sec) * 1000 +
			(tv->tv_usec - now.tv_usec) / 1000)
			/ (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;

	return (ticks);
}
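
/*
 * Worked example (added for clarity, not in the original file): with
 * hz = 100 and tick = 10000 usec, a target time 2.5 seconds in the
 * future takes the first branch above, so
 *
 *	ticks = (2 * 1000 + 500000 / 1000) / (10000 / 1000)
 *	      = (2000 + 500) / 10
 *	      = 250
 *
 * i.e. 250 ten-millisecond ticks, as expected for 2.5 seconds.
 */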
1c79356b
A
356
357/*
358 * Return information about system clocks.
359 */
360int
91447636 361sysctl_clockrate(user_addr_t where, size_t *sizep)
1c79356b
A
362{
363 struct clockinfo clkinfo;
364
365 /*
366 * Construct clockinfo structure.
367 */
368 clkinfo.hz = hz;
369 clkinfo.tick = tick;
370 clkinfo.profhz = hz;
371 clkinfo.stathz = hz;
91447636 372 return sysctl_rdstruct(where, sizep, USER_ADDR_NULL, &clkinfo, sizeof(clkinfo));
1c79356b
A
373}
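
/*
 * Illustrative sketch (not part of the original file): the structure
 * built above is what a user process receives when it queries the
 * kern.clockrate sysctl.  A minimal userland reader, assuming the
 * standard CTL_KERN/KERN_CLOCKRATE interface:
 */
#if 0	/* userland example only, not compiled here */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	size_t len = sizeof(ci);
	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };

	if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
		return (1);
	printf("hz=%d tick=%d profhz=%d stathz=%d\n",
	       ci.hz, ci.tick, ci.profhz, ci.stathz);
	return (0);
}
#endif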


/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
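
/*
 * Worked example (added for clarity, not in the original file): with
 * tick = 10000 usec, a timeval of 2 s 500000 us takes the first
 * non-negative branch above:
 *
 *	ticks = (2 * 1000000 + 500000 + 9999) / 10000 + 1
 *	      = 2509999 / 10000 + 1
 *	      = 250 + 1 = 251
 *
 * The extra tick allows for the partially elapsed current tick.
 */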


/*
 * Start profiling on a process.
 *
 * Kernel profiling passes kernel_proc which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	if ((p->p_flag & P_PROFIL) == 0)
		p->p_flag |= P_PROFIL;
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	if (p->p_flag & P_PROFIL)
		p->p_flag &= ~P_PROFIL;
}

void
bsd_uprofil(struct time_value *syst, user_addr_t pc)
{
	struct proc *p = current_proc();
	int ticks;
	struct timeval *tv;
	struct timeval st;

	if (p == NULL)
		return;
	if (!(p->p_flag & P_PROFIL))
		return;

	st.tv_sec = syst->seconds;
	st.tv_usec = syst->microseconds;

	tv = &(p->p_stats->p_ru.ru_stime);

	ticks = ((tv->tv_sec - st.tv_sec) * 1000 +
		(tv->tv_usec - st.tv_usec) / 1000) /
		(tick / 1000);
	if (ticks)
		addupc_task(p, pc, ticks);
}

void
get_procrustime(time_value_t *tv)
{
	struct proc *p = current_proc();
	struct timeval st;

	if (p == NULL)
		return;
	if (!(p->p_flag & P_PROFIL))
		return;

	st = p->p_stats->p_ru.ru_stime;

	tv->seconds = st.tv_sec;
	tv->microseconds = st.tv_usec;
}