/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Clock interrupt.
 */
#include <cpus.h>
#include <time_stamp.h>
#include <mach_kdb.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/kern_types.h>
#include <platforms.h>
#include <mp_v1_1.h>
#include <mach_kprof.h>
#include <mach_mp_debug.h>
#include <mach/std_types.h>

#include <mach/clock_types.h>
#include <mach/boolean.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <i386/misc_protos.h>
#include <kern/time_out.h>

#include <i386/ipl.h>

#include <i386/hardclock_entries.h>
#include <i386/rtclock_entries.h>

#if MACH_MP_DEBUG
#include <i386/mach_param.h>	/* for HZ */
#endif	/* MACH_MP_DEBUG */

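/*
 * Interrupt-return trampoline label.  Within this file it is
 * referenced only from the disabled (#if 0) diagnostics below.
 */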
extern char return_to_iret[];

#if TIME_STAMP && NCPUS > 1
extern unsigned time_stamp;
unsigned old_time_stamp, time_stamp_cum, nstamps;

/*
 * If the hardware provides a counter, record the number of ticks and
 * the cumulative time-stamp delta so that the time-stamp rate can be
 * derived.  This should go away when ALARMCLOCKS are installed.
 */
#define time_stamp_stat()					\
	if (mycpu == 0)						\
		if (!old_time_stamp) {				\
			old_time_stamp = time_stamp;		\
			nstamps = 0;				\
		} else {					\
			nstamps++;				\
			time_stamp_cum = (time_stamp - old_time_stamp); \
		}
#else	/* TIME_STAMP && NCPUS > 1 */
#define time_stamp_stat()
#endif	/* TIME_STAMP && NCPUS > 1 */

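/*
 * Note: once nstamps is nonzero, the mean number of hardware time
 * stamps per HZ tick is approximately time_stamp_cum / nstamps.
 */
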
#if MACH_KPROF
int masked_pc[NCPUS];
int missed_clock[NCPUS];
int detect_lost_tick = 0;
#endif	/* MACH_KPROF */

#if MACH_MP_DEBUG
int masked_state_cnt[NCPUS];
int masked_state_max = 10*HZ;
#endif	/* MACH_MP_DEBUG */

/*
 * In the interest of a fast clock interrupt service path,
 * this routine should be folded into assembly language with
 * a direct interrupt vector on the i386.  The "pit" interrupt
 * should always call the rtclock_intr() routine on the master
 * processor.  The return value of the rtclock_intr() routine
 * indicates whether HZ rate clock processing should be
 * performed.  (On the Sequent, all slave processors will
 * run at HZ rate.)  For now, we'll leave this routine in C
 * (with TIME_STAMP, MACH_MP_DEBUG and MACH_KPROF code this
 * routine is way too large for assembler anyway).
 */

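/*
 * Per-tick flow of hardclock(), for reference:
 *
 *	rtclock_intr()	master processor only; a nonzero return means
 *			this is not an HZ tick boundary, so HZ-rate
 *			processing is skipped
 *	hertz_tick()	HZ-rate processing on every processor
 *	slave_clock()	pass the tick on to the next active processor
 *			(NCPUS > 1 only)
 */
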
#ifdef PARANOID_KDB
int paranoid_debugger = TRUE;
int paranoid_count = 1000;
int paranoid_current = 0;
int paranoid_cpu = 0;
#endif	/* PARANOID_KDB */

void
hardclock(struct i386_interrupt_state *regs)	/* saved registers */
{
        int                     mycpu;
        register unsigned       pc;
        register boolean_t      usermode;

        mp_disable_preemption();
        mycpu = cpu_number();

#ifdef PARANOID_KDB
        if (paranoid_cpu == mycpu &&
            paranoid_current++ >= paranoid_count) {
                paranoid_current = 0;
                if (paranoid_debugger)
                        Debugger("hardclock");
        }
#endif	/* PARANOID_KDB */

#if 0
#if MACH_MP_DEBUG
        /*
         * Increment the counter of clock ticks handled while interrupts
         * were masked.  Debugger() is called if the masked state persists
         * for masked_state_max clock ticks (10 seconds by default).
         * The counter is reset by splx() when the ipl mask is set back
         * to SPL0, and by spl0().
         */
        if (SPL_CMP_GT((old_ipl & 0xFF), SPL0)) {
                if (masked_state_cnt[mycpu]++ >= masked_state_max) {
                        int max_save = masked_state_max;

                        masked_state_cnt[mycpu] = 0;
                        masked_state_max = 0x7fffffff;

                        if (ret_addr == return_to_iret) {
                                usermode = (regs->efl & EFL_VM) ||
                                        ((regs->cs & 0x03) != 0);
                                pc = (unsigned)regs->eip;
                        } else {
                                usermode = FALSE;
                                pc = (unsigned)
                                        ((struct i386_interrupt_state *)&old_ipl)->eip;
                        }
                        printf("looping at high IPL, usermode=%d pc=0x%x\n",
                                usermode, pc);
                        Debugger("");

                        masked_state_cnt[mycpu] = 0;
                        masked_state_max = max_save;
                }
        } else
                masked_state_cnt[mycpu] = 0;
#endif	/* MACH_MP_DEBUG */
#endif

#if MACH_KPROF
        /*
         * If we were masked against the clock, skip the call
         * to rtclock_intr().  When MACH_KPROF is set, the
         * clock frequency of the master cpu is confined
         * to the HZ rate.
         */
        if (SPL_CMP_LT(old_ipl & 0xFF, SPL7))
#endif	/* MACH_KPROF */
        /*
         * The master processor executes the rtclock_intr() routine
         * on every clock tick.  The rtclock_intr() routine returns
         * a zero value on a HZ tick boundary.
         */
        if (mycpu == master_cpu) {
                if (rtclock_intr() != 0) {
                        mp_enable_preemption();
                        return;
                }
        }

        /*
         * The following code is executed at HZ rate by all processors
         * in the system.  This implies that the clock rate on slave
         * processors must be HZ rate.
         */

        time_stamp_stat();

#if 0
        if (ret_addr == return_to_iret) {
                /*
                 * A kernel-loaded task executing within itself will look like
                 * "kernel mode", here.  This is correct with syscalls
                 * implemented using migrating threads, because it means that
                 * the time spent in the server by a client thread will be
                 * treated as "system" time for the client thread (and nothing
                 * for the server).  This conforms to the CPU reporting for an
                 * integrated kernel.
                 */
#endif
        usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0);
        pc = (unsigned)regs->eip;
#if 0
        } else {
                usermode = FALSE;
                pc = (unsigned)((struct i386_interrupt_state *)&old_ipl)->eip;
        }
#endif

#if MACH_KPROF
        /*
         * If we were masked against the clock, just memorize the pc
         * and the fact that the clock interrupt was delayed.
         */
        if (SPL_CMP_GE((old_ipl & 0xFF), SPL7)) {
                assert(!usermode);
                if (missed_clock[mycpu]++ && detect_lost_tick > 1)
                        Debugger("Mach_KPROF");
                masked_pc[mycpu] = pc;
        } else
#endif	/* MACH_KPROF */

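        /*
         * HZ-rate bookkeeping for this processor.  The dangling "else"
         * above is intentional: with MACH_KPROF configured, a masked
         * tick is memorized instead of being processed here.
         */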
        hertz_tick(usermode, pc);

#if NCPUS > 1
        /*
         * Instead of having the master processor interrupt
         * all active processors, each processor in turn interrupts
         * the next active one.  This avoids all slave processors
         * accessing the same R/W data simultaneously.
         */
        slave_clock();
#endif	/* NCPUS > 1 */

        mp_enable_preemption();
}

#if MACH_KPROF
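/*
 * Process clock ticks that hardclock() above could only memorize
 * because the clock was masked: report the missed ticks, replay
 * rtclock_intr() on the master processor, and charge one
 * hertz_tick() at the recorded masked pc.
 */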
void
delayed_clock(void)
{
        int     i;
        int     my_cpu;

        mp_disable_preemption();
        my_cpu = cpu_number();

        if (missed_clock[my_cpu] > 1 && detect_lost_tick)
                printf("hardclock: missed %d clock interrupt(s) at %x\n",
                       missed_clock[my_cpu]-1, masked_pc[my_cpu]);
        if (my_cpu == master_cpu) {
                i = rtclock_intr();
                assert(i == 0);
        }
        hertz_tick(0, masked_pc[my_cpu]);
        missed_clock[my_cpu] = 0;

        mp_enable_preemption();
}
#endif	/* MACH_KPROF */