/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */
/*
 * HISTORY
 */

#include <machine/spl.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/resource.h>
#include <sys/proc.h>
#include <sys/vm.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#include <kern/thread.h>
#include <kern/ast.h>
#include <kern/assert.h>
#include <mach/boolean.h>

#include <kern/thread_call.h>

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run
 * independently of each other.  The main clock, running hz
 * times per second, is used for scheduling and timeout calculations.
 * The second timer gathers resource utilization estimates statistically,
 * based on the state of the machine, phz times a second.  Both functions
 * can be performed by a single clock (i.e. hz == phz), but the
 * statistics will then be much more prone to error.  Ideally a machine
 * would have separate clocks measuring time spent in user state, system
 * state, interrupt state, and idle state.  These clocks would allow a
 * non-approximate measure of resource utilization.
 */
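/*
 * For example, with the usual hz of 100 the main clock fires every
 * 10 ms, and tick (the length of one clock period, in microseconds)
 * is 1000000 / hz = 10000.
 */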

/*
 * The hz hardware interval timer.
 * We update the events relating to real time.
 * If this timer is also being used to gather statistics,
 * we run through the statistics gathering routine as well.
 */

int bsd_hardclockinit = 0;
/*ARGSUSED*/
void
bsd_hardclock(usermode, pc, numticks)
	boolean_t usermode;
	caddr_t pc;
	int numticks;
{
	register struct proc *p;
	register thread_t thread;
	int nusecs = numticks * tick;
	struct timeval tv;

	if (!bsd_hardclockinit)
		return;

	/*
	 * Increment the time-of-day.
	 */
	microtime(&tv);
	time = tv;

	if (bsd_hardclockinit < 0) {
		return;
	}

	thread = current_act();
	/*
	 * Charge the time out based on the mode the cpu is in.
	 * Here again we fudge for the lack of proper interval timers
	 * assuming that the current state has been around at least
	 * one tick.
	 */
	p = (struct proc *)current_proc();
	if (p && ((p->p_flag & P_WEXIT) == 0)) {
		if (usermode) {
			if (p->p_stats && p->p_stats->p_prof.pr_scale) {
				p->p_flag |= P_OWEUPC;
				astbsd_on();
			}

			/*
			 * The CPU was in user state.  Run down the
			 * process-virtual interval timer (ITIMER_VIRTUAL).
			 */
			if (p->p_stats &&
			    timerisset(&p->p_stats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    !itimerdecr(&p->p_stats->p_timer[ITIMER_VIRTUAL], nusecs)) {
				extern void psignal_vtalarm(struct proc *);

				/* does psignal(p, SIGVTALRM) in a thread context */
				thread_call_func((thread_call_func_t)psignal_vtalarm, p, FALSE);
			}
		}

		/*
		 * If the cpu is currently scheduled to a process, then
		 * charge it with resource utilization for a tick, updating
		 * statistics which run in (user+system) virtual time,
		 * such as the cpu time limit and profiling timers.
		 * This assumes that the current process has been running
		 * the entire last tick.
		 */
		if (!is_thread_idle(thread)) {
			if (p->p_limit &&
			    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
				time_value_t sys_time, user_time;

				thread_read_times(thread, &user_time, &sys_time);
				if ((sys_time.seconds + user_time.seconds + 1) >
				    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur) {
					extern void psignal_xcpu(struct proc *);

					/* does psignal(p, SIGXCPU) in a thread context */
					thread_call_func((thread_call_func_t)psignal_xcpu, p, FALSE);

					if (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur <
					    p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_max)
						p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur += 5;
				}
			}
			if (timerisset(&p->p_stats->p_timer[ITIMER_PROF].it_value) &&
			    !itimerdecr(&p->p_stats->p_timer[ITIMER_PROF], nusecs)) {
				extern void psignal_sigprof(struct proc *);

				/* does psignal(p, SIGPROF) in a thread context */
				thread_call_func((thread_call_func_t)psignal_sigprof, p, FALSE);
			}
		}
	}

#ifdef GPROF
	/*
	 * Gather some statistics.
	 */
	gatherstats(usermode, pc);
#endif
}

/*
 * Gather some statistics.
 */
/*ARGSUSED*/
void
gatherstats(
	boolean_t	usermode,
	caddr_t		pc)
{
#ifdef GPROF
	if (!usermode) {
		struct gmonparam *p = &_gmonparam;

		if (p->state == GMON_PROF_ON) {
			register int s;

			s = pc - p->lowpc;
			if (s < p->textsize) {
				s /= (HISTFRACTION * sizeof(*p->kcount));
				p->kcount[s]++;
			}
		}
	}
#endif
}
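/*
 * Example of the bucket arithmetic above, assuming the usual
 * HISTFRACTION of 2 and 2-byte kcount[] entries: a kernel pc that is
 * 0x100 bytes past p->lowpc falls into bucket 0x100 / (2 * 2) = 0x40.
 */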

/*
 * Kernel timeout services.
 */

/*
 * Set a timeout.
 *
 * fcn:		function to call
 * param:	parameter to pass to the function
 * interval:	timeout interval, in clock ticks (units of 1/hz second)
 */
void
timeout(
	timeout_fcn_t	fcn,
	void		*param,
	int		interval)
{
	uint64_t	deadline;

	clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}

/*
 * Cancel a timeout.
 */
void
untimeout(
	register timeout_fcn_t	fcn,
	register void		*param)
{
	thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE);
}
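/*
 * Illustrative usage (myfunc and arg are hypothetical, not part of this
 * file): arrange for myfunc(arg) to run roughly one second from now,
 * then cancel the request before it fires:
 *
 *	timeout((timeout_fcn_t)myfunc, arg, hz);
 *	...
 *	untimeout((timeout_fcn_t)myfunc, arg);
 */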


/*
 * Compute the number of clock ticks (hz) until the specified time.
 * Used to compute the third argument to timeout() from an
 * absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	struct timeval now;
	register long ticks;
	register long sec;

	microtime(&now);
	/*
	 * If the number of milliseconds will fit in 32 bit arithmetic,
	 * then compute the number of milliseconds to the target time and
	 * scale to ticks.  Otherwise just compute the number of ticks in
	 * the time, rounding times greater than representable to the
	 * maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is about 248 days.
	 */
	sec = tv->tv_sec - now.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - now.tv_sec) * 1000 +
			 (tv->tv_usec - now.tv_usec) / 1000)
			/ (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;

	return (ticks);
}
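/*
 * Worked example: with hz == 100 (tick == 10000 usec), a target time
 * 2.5 seconds in the future gives (2000 + 500) / (10000 / 1000) =
 * 2500 / 10 = 250 ticks.
 */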

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char	*where;
	size_t		*sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = hz;
	clkinfo.stathz = hz;
	return sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo));
}
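/*
 * The structure filled in above is what a user process receives when it
 * queries KERN_CLOCKRATE; an illustrative (userland, not kernel) caller:
 *
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *
 *	if (sysctl(mib, 2, &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d tick=%d\n", ci.hz, ci.tick);
 */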


/*
 * Compute the number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
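/*
 * Worked example of the first case above: with hz == 100 and
 * tick == 10000, a timeval of 1.5 seconds gives
 * (1500000 + 9999) / 10000 + 1 = 150 + 1 = 151 ticks.
 */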


/*
 * Start profiling on a process.
 *
 * Kernel profiling passes kernel_proc which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	if ((p->p_flag & P_PROFIL) == 0)
		p->p_flag |= P_PROFIL;
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	if (p->p_flag & P_PROFIL)
		p->p_flag &= ~P_PROFIL;
}

void
bsd_uprofil(struct time_value *syst, unsigned int pc)
{
	struct proc *p = current_proc();
	int ticks;
	struct timeval *tv;
	struct timeval st;

	if (p == NULL)
		return;
	if (!(p->p_flag & P_PROFIL))
		return;

	st.tv_sec = syst->seconds;
	st.tv_usec = syst->microseconds;

	tv = &(p->p_stats->p_ru.ru_stime);

	ticks = ((tv->tv_sec - st.tv_sec) * 1000 +
		 (tv->tv_usec - st.tv_usec) / 1000) /
		(tick / 1000);
	if (ticks)
		addupc_task(p, pc, ticks);
}
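/*
 * For example, with hz == 100 (tick == 10000 usec) a system-time delta
 * of 30 ms works out to (30000 / 1000) / (10000 / 1000) = 30 / 10 = 3
 * ticks charged to the profiling buffer by addupc_task().
 */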

void
get_procrustime(time_value_t *tv)
{
	struct proc *p = current_proc();
	struct timeval st;

	if (p == NULL)
		return;
	if (!(p->p_flag & P_PROFIL))
		return;

	st = p->p_stats->p_ru.ru_stime;

	tv->seconds = st.tv_sec;
	tv->microseconds = st.tv_usec;
}