/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Clock interrupt.
 */
#include <cpus.h>
#include <time_stamp.h>
#include <mach_kdb.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/kern_types.h>
#include <platforms.h>
#include <mp_v1_1.h>
#include <mach_kprof.h>
#include <mach_mp_debug.h>
#include <mach/std_types.h>

#include <mach/clock_types.h>
#include <mach/boolean.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <i386/misc_protos.h>
#include <kern/time_out.h>

#include <i386/ipl.h>

#include <i386/hardclock_entries.h>
#include <i386/rtclock_entries.h>

#if MACH_MP_DEBUG
#include <i386/mach_param.h>    /* for HZ */
#endif  /* MACH_MP_DEBUG */

extern char return_to_iret[];

#if TIME_STAMP && NCPUS > 1
extern unsigned time_stamp;
unsigned old_time_stamp, time_stamp_cum, nstamps;

/*
 * If the hardware provides a counter, record the number of ticks and
 * the cumulative time-stamp delta so the time-stamp rate can be
 * derived.  This should go away when ALARMCLOCKS are installed.
 */
#define time_stamp_stat()                                       \
        if (my_cpu == 0)                                        \
                if (!old_time_stamp) {                          \
                        old_time_stamp = time_stamp;            \
                        nstamps = 0;                            \
                } else {                                        \
                        nstamps++;                              \
                        time_stamp_cum = (time_stamp - old_time_stamp); \
                }
#else   /* TIME_STAMP && NCPUS > 1 */
#define time_stamp_stat()
#endif  /* TIME_STAMP && NCPUS > 1 */

#if MACH_KPROF
int masked_pc[NCPUS];
int missed_clock[NCPUS];
int detect_lost_tick = 0;
#endif  /* MACH_KPROF */

#if MACH_MP_DEBUG
int masked_state_cnt[NCPUS];
int masked_state_max = 10*HZ;
#endif  /* MACH_MP_DEBUG */

/*
 * In the interest of a fast clock interrupt service path,
 * this routine should be folded into assembly language with
 * a direct interrupt vector on the i386.  The "pit" interrupt
 * should always call the rtclock_intr() routine on the master
 * processor.  The return value of the rtclock_intr() routine
 * indicates whether HZ rate clock processing should be
 * performed.  (On the Sequent, all slave processors will
 * run at HZ rate.)  For now, we'll leave this routine in C
 * (with the TIME_STAMP, MACH_MP_DEBUG and MACH_KPROF code this
 * routine is way too large for assembler anyway).
 */

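/*
 * When PARANOID_KDB is configured, hardclock() drops into the kernel
 * debugger on the designated cpu (paranoid_cpu) once every
 * paranoid_count ticks, serving as a coarse watchdog for a wedged
 * system.
 */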
#ifdef  PARANOID_KDB
int paranoid_debugger = TRUE;
int paranoid_count = 1000;
int paranoid_current = 0;
int paranoid_cpu = 0;
#endif  /* PARANOID_KDB */

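/*
 * hardclock() is the common clock interrupt handler.  On the master
 * processor it forwards each tick to rtclock_intr(); on every
 * processor it performs the HZ-rate processing described above.
 */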
void
hardclock(struct i386_interrupt_state *regs)    /* saved registers */
{
        int mycpu;
        register unsigned pc;
        register boolean_t usermode;

        mp_disable_preemption();
        mycpu = cpu_number();

#ifdef  PARANOID_KDB
        if (paranoid_cpu == mycpu &&
            paranoid_current++ >= paranoid_count) {
                paranoid_current = 0;
                if (paranoid_debugger)
                        Debugger("hardclock");
        }
#endif  /* PARANOID_KDB */

#if 0
#if MACH_MP_DEBUG
        /*
         * Increment the counter of clock ticks handled while masked.
         * Debugger() is called if the masked state persists for a
         * full second.  The counter is reset by splx() when the ipl
         * mask is set back to SPL0, and by spl0().
         */
        if (SPL_CMP_GT((old_ipl & 0xFF), SPL0)) {
                if (masked_state_cnt[mycpu]++ >= masked_state_max) {
                        int max_save = masked_state_max;

                        masked_state_cnt[mycpu] = 0;
                        masked_state_max = 0x7fffffff;

                        if (ret_addr == return_to_iret) {
                                usermode = (regs->efl & EFL_VM) ||
                                                ((regs->cs & 0x03) != 0);
                                pc = (unsigned)regs->eip;
                        } else {
                                usermode = FALSE;
                                pc = (unsigned)
                                        ((struct i386_interrupt_state *)&old_ipl)->eip;
                        }
                        printf("looping at high IPL, usermode=%d pc=0x%x\n",
                               usermode, pc);
                        Debugger("");

                        masked_state_cnt[mycpu] = 0;
                        masked_state_max = max_save;
                }
        } else
                masked_state_cnt[mycpu] = 0;
#endif  /* MACH_MP_DEBUG */
#endif

#if MACH_KPROF
        /*
         * If we were masked against the clock, skip the call
         * to rtclock_intr().  When MACH_KPROF is set, the
         * clock frequency of the master cpu is confined
         * to the HZ rate.
         */
        if (SPL_CMP_LT(old_ipl & 0xFF, SPL7))
#endif  /* MACH_KPROF */
        /*
         * The master processor executes the rtclock_intr() routine
         * on every clock tick.  The rtclock_intr() routine returns
         * a zero value on a HZ tick boundary.
         */
        if (mycpu == master_cpu) {
                if (rtclock_intr() != 0) {
                        mp_enable_preemption();
                        return;
                }
        }

        /*
         * The following code is executed at HZ rate by all processors
         * in the system.  This implies that the clock rate on slave
         * processors must be HZ rate.
         */

        time_stamp_stat();

#if 0
        if (ret_addr == return_to_iret) {
        /*
         * A kernel-loaded task executing within itself will look like
         * "kernel mode", here.  This is correct with syscalls
         * implemented using migrating threads, because it means that
         * the time spent in the server by a client thread will be
         * treated as "system" time for the client thread (and nothing
         * for the server).  This conforms to the CPU reporting for an
         * integrated kernel.
         */
#endif
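        /*
         * The interrupted context ran in user mode if it was in
         * virtual-8086 mode (EFL_VM set) or if the privilege bits of
         * the saved CS selector (the low two bits) are nonzero,
         * i.e. not ring 0.
         */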
        usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0);
        pc = (unsigned)regs->eip;
#if 0
        } else {
                usermode = FALSE;
                pc = (unsigned)((struct i386_interrupt_state *)&old_ipl)->eip;
        }
#endif

#if MACH_KPROF
        /*
         * If we were masked against the clock, just record the pc
         * and the fact that a clock interrupt was delayed.
         */
        if (SPL_CMP_GE((old_ipl & 0xFF), SPL7)) {
                assert(!usermode);
                if (missed_clock[mycpu]++ && detect_lost_tick > 1)
                        Debugger("Mach_KPROF");
                masked_pc[mycpu] = pc;
        } else
#endif  /* MACH_KPROF */

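        /*
         * hertz_tick() performs the machine-independent HZ-rate tick
         * processing for this processor, charging the tick to user or
         * kernel time according to usermode/pc.
         */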
        hertz_tick(usermode, pc);

#if NCPUS > 1
        /*
         * Instead of having the master processor interrupt
         * all active processors, each processor in turn interrupts
         * the next active one.  This avoids all slave processors
         * accessing the same R/W data simultaneously.
         */
        slave_clock();
#endif  /* NCPUS > 1 */

        mp_enable_preemption();
}


#if MACH_KPROF
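/*
 * delayed_clock() accounts for clock interrupts that arrived while
 * the clock was masked: it reports how many ticks were missed, runs
 * rtclock_intr() on the master processor, and charges one tick at the
 * recorded masked pc before clearing the missed-tick count.
 */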
void
delayed_clock(void)
{
        int i;
        int my_cpu;

        mp_disable_preemption();
        my_cpu = cpu_number();

        if (missed_clock[my_cpu] > 1 && detect_lost_tick)
                printf("hardclock: missed %d clock interrupt(s) at %x\n",
                       missed_clock[my_cpu]-1, masked_pc[my_cpu]);
        if (my_cpu == master_cpu) {
                i = rtclock_intr();
                assert(i == 0);
        }
        hertz_tick(0, masked_pc[my_cpu]);
        missed_clock[my_cpu] = 0;

        mp_enable_preemption();
}
#endif  /* MACH_KPROF */