]>
git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/hardclock.c
31121460f2b58689bd830896794fe62953c8a7e5
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
57 #include <time_stamp.h>
59 #include <kern/cpu_number.h>
60 #include <kern/cpu_data.h>
61 #include <kern/kern_types.h>
62 #include <platforms.h>
64 #include <mach_kprof.h>
65 #include <mach_mp_debug.h>
66 #include <mach/std_types.h>
68 #include <mach/clock_types.h>
69 #include <mach/boolean.h>
70 #include <i386/thread.h>
71 #include <i386/eflags.h>
72 #include <kern/assert.h>
73 #include <kern/misc_protos.h>
74 #include <i386/misc_protos.h>
75 #include <kern/time_out.h>
79 #include <i386/hardclock_entries.h>
80 #include <i386/rtclock_entries.h>
83 #include <i386/mach_param.h> /* for HZ */
84 #endif /* MACH_MP_DEBUG */
/*
 * NOTE(review): this chunk is a lossy scrape of XNU's i386 hardclock
 * source -- statements are split across lines and some original lines
 * are missing (the embedded line numbers skip), so these declarations
 * are annotated as-is rather than restructured.
 */
/* Label in the interrupt-return path; compared against ret_addr below
 * to decide whether the interrupted context's registers are in *regs. */
86 extern char return_to_iret
[];
/* Optional H/W time-stamp statistics (multi-CPU builds only). */
88 #if TIME_STAMP && NCPUS > 1
89 extern unsigned time_stamp
;
90 unsigned old_time_stamp
, time_stamp_cum
, nstamps
;
93 * If H/W provides a counter, record number of ticks and cumulated
94 * time stamps to know timestamps rate.
95 * This should go away when ALARMCLOCKS installed
/* NOTE(review): the macro body below is incomplete in this chunk --
 * lines 98, 101-103 and 105 of the original are missing, including the
 * enclosing braces of the do/if structure. Do not edit blindly. */
97 #define time_stamp_stat() \
99 if (!old_time_stamp) { \
100 old_time_stamp = time_stamp; \
104 time_stamp_cum = (time_stamp - old_time_stamp); \
106 #else /* TIME_STAMP && AT386 && NCPUS > 1 */
107 #define time_stamp_stat()
108 #endif /* TIME_STAMP && AT386 && NCPUS > 1 */
/* Per-CPU kernel-profiling state (guarded by MACH_KPROF; the opening
 * #if line is missing from this chunk -- only the #endif survives). */
111 int masked_pc
[NCPUS
];
112 int missed_clock
[NCPUS
];
/* When > 1, Debugger() is entered on a missed tick (see hardclock). */
113 int detect_lost_tick
= 0;
114 #endif /* MACH_KPROF */
/* Per-CPU count of ticks taken while SPL-masked, and the threshold at
 * which the "looping at high IPL" diagnostic fires (MACH_MP_DEBUG;
 * opening #if line likewise missing from this chunk). */
117 int masked_state_cnt
[NCPUS
];
/* 10*HZ ticks == roughly 10 seconds of masked state before tripping
 * -- presumably; TODO confirm HZ semantics against i386/mach_param.h. */
118 int masked_state_max
= 10*HZ
;
119 #endif /* MACH_MP_DEBUG */
/*
 * hardclock -- per-tick clock interrupt handler for i386.
 *
 * Visible behavior in this chunk: disables preemption, determines the
 * current CPU, optionally enters the debugger (PARANOID_KDB), tracks
 * ticks taken while SPL-masked (MACH_MP_DEBUG), calls rtclock_intr()
 * on the master CPU, derives usermode/pc from the saved registers, and
 * calls hertz_tick(usermode, pc) before re-enabling preemption.
 *
 * NOTE(review): this chunk is a lossy scrape -- the function's opening
 * brace, several closing braces, a number of statements (e.g. original
 * lines 143-144, 157, 179-181, 213-217, 243-247) and the opening #if
 * guards for PARANOID_KDB/MACH_MP_DEBUG/MACH_KPROF are missing. The
 * declarations of mycpu, old_ipl and ret_addr used below are among the
 * missing lines. Annotations are hedged accordingly; do not treat this
 * text as compilable.
 */
142 hardclock(struct i386_interrupt_state
*regs
) /* saved registers */
145 register unsigned pc
;
146 register boolean_t usermode
;
/* Keep this handler on the current CPU while it reads per-CPU state. */
148 mp_disable_preemption();
149 mycpu
= cpu_number();
/* PARANOID_KDB: periodically drop into the debugger on one chosen CPU
 * (every paranoid_count ticks) as a liveness check. */
152 if (paranoid_cpu
== mycpu
&&
153 paranoid_current
++ >= paranoid_count
) {
154 paranoid_current
= 0;
155 if (paranoid_debugger
)
156 Debugger("hardclock");
158 #endif /* PARANOID_KDB */
163 * Increments counter of clock ticks handled under a masked state.
164 * Debugger() is called if masked state is kept during 1 sec.
165 * The counter is reset by splx() when ipl mask is set back to SPL0,
/* MACH_MP_DEBUG: detect a CPU stuck above SPL0 for too many ticks. */
168 if (SPL_CMP_GT((old_ipl
& 0xFF), SPL0
)) {
169 if (masked_state_cnt
[mycpu
]++ >= masked_state_max
) {
/* Save and inflate the threshold so the diagnostic below does not
 * immediately re-trigger while it is being reported. */
170 int max_save
= masked_state_max
;
172 masked_state_cnt
[mycpu
] = 0;
173 masked_state_max
= 0x7fffffff;
/* If we interrupted via the iret path, the saved frame in *regs is
 * valid: user mode iff V86 mode (EFL_VM) or a non-kernel CS RPL. */
175 if (ret_addr
== return_to_iret
) {
176 usermode
= (regs
->efl
& EFL_VM
) ||
177 ((regs
->cs
& 0x03) != 0);
178 pc
= (unsigned)regs
->eip
;
/* Otherwise recover eip from the frame overlaying old_ipl -- the
 * assignment's left-hand side is among the lines missing here. */
182 ((struct i386_interrupt_state
*)&old_ipl
)->eip
;
184 printf("looping at high IPL, usermode=%d pc=0x%x\n",
188 masked_state_cnt
[mycpu
] = 0;
189 masked_state_max
= max_save
;
192 masked_state_cnt
[mycpu
] = 0;
193 #endif /* MACH_MP_DEBUG */
198 * If we were masked against the clock skip call
199 * to rtclock_intr(). When MACH_KPROF is set, the
200 * clock frequency of the master-cpu is confined
203 if (SPL_CMP_LT(old_ipl
& 0xFF, SPL7
))
204 #endif /* MACH_KPROF */
206 * The master processor executes the rtclock_intr() routine
207 * on every clock tick. The rtclock_intr() routine returns
208 * a zero value on a HZ tick boundary.
/* Non-zero return from rtclock_intr() means "not a HZ boundary":
 * re-enable preemption and (presumably -- the return statement is
 * among the missing lines) bail out early. */
210 if (mycpu
== master_cpu
) {
211 if (rtclock_intr() != 0) {
212 mp_enable_preemption();
218 * The following code is executed at HZ rate by all processors
219 * in the system. This implies that the clock rate on slave
220 * processors must be HZ rate.
/* Same iret-path test as above, this time to classify the tick for
 * hertz_tick() accounting. */
226 if (ret_addr
== return_to_iret
) {
228 * A kernel-loaded task executing within itself will look like
229 * "kernel mode", here. This is correct with syscalls
230 * implemented using migrating threads, because it means that
231 * the time spent in the server by a client thread will be
232 * treated as "system" time for the client thread (and nothing
233 * for the server). This conforms to the CPU reporting for an
237 usermode
= (regs
->efl
& EFL_VM
) || ((regs
->cs
& 0x03) != 0);
238 pc
= (unsigned)regs
->eip
;
242 pc
= (unsigned)((struct i386_interrupt_state
*)&old_ipl
)->eip
;
248 * If we were masked against the clock, just memorize pc
249 * and the fact that the clock interrupt is delayed
/* MACH_KPROF: a tick taken at or above SPL7 is recorded as missed and
 * its pc stashed; a later pass (see the block after this function)
 * reports and accounts for it. */
251 if (SPL_CMP_GE((old_ipl
& 0xFF), SPL7
)) {
253 if (missed_clock
[mycpu
]++ && detect_lost_tick
> 1)
254 Debugger("Mach_KPROF");
255 masked_pc
[mycpu
] = pc
;
257 #endif /* MACH_KPROF */
/* Charge this tick to user or system time at the recorded pc. */
259 hertz_tick(usermode
, pc
);
263 * Instead of having the master processor interrupt
264 * all active processors, each processor in turn interrupts
265 * the next active one. This avoids all slave processors
266 * accessing the same R/W data simultaneously.
269 #endif /* NCPUS >1 && AT386 */
271 mp_enable_preemption();
/*
 * NOTE(review): body fragment of a second function whose signature line
 * is missing from this chunk (original lines 272-280 were dropped by
 * the scrape). From the MACH_KPROF state it drains -- missed_clock[],
 * masked_pc[] -- it looks like the delayed-tick catch-up routine that
 * pairs with the SPL7 bookkeeping in hardclock(); TODO confirm its name
 * and signature against the upstream file.
 *
 * Visible behavior: with preemption disabled, reports any missed ticks
 * for this CPU, and on the master CPU charges one system-mode tick at
 * the stashed pc before clearing the missed count.
 */
281 mp_disable_preemption();
282 my_cpu
= cpu_number();
/* More than one recorded miss means at least one tick was truly lost
 * (the first increment happens on the tick being delayed, not lost). */
284 if (missed_clock
[my_cpu
] > 1 && detect_lost_tick
)
285 printf("hardclock: missed %d clock interrupt(s) at %x\n",
286 missed_clock
[my_cpu
]-1, masked_pc
[my_cpu
]);
287 if (my_cpu
== master_cpu
) {
/* Account the delayed tick as system time (usermode = 0) at the pc
 * captured when the tick was masked. */
291 hertz_tick(0, masked_pc
[my_cpu
]);
292 missed_clock
[my_cpu
] = 0;
294 mp_enable_preemption();
296 #endif /* MACH_KPROF */