/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 * The Event Trace Analysis Package
 * ================================
 *
 * Function:	Traces micro-kernel events.
 *
 * Macro Notes:	Several macros are added throughout the lock code.
 *		They allow for convenient configuration and improve
 *		the readability of the code.
 *
 *		The macro prefixes determine a specific trace
 *		configuration operation:
 *
 *		CUM	- Cumulative trace specific operation.
 *		MON	- Monitored trace specific operation.
 *		ETAP	- Both a cumulative and a monitored trace
 *			  operation.
 */


#ifndef _KERN_ETAP_MACROS_H_
#define _KERN_ETAP_MACROS_H_

#include <kern/etap_options.h>
#include <kern/lock.h>
#include <mach/etap.h>
#include <mach/etap_events.h>
#include <kern/etap_pool.h>


#if ETAP

#include <mach/vm_param.h>
#include <mach/message.h>

#include <kern/macro_help.h>

extern void		etap_init_phase1(void);
extern void		etap_init_phase2(void);
extern void		etap_event_table_assign(struct event_table_chain *, etap_event_t);
extern unsigned int	etap_get_pc(void);
extern event_table_t	event_table;
extern subs_table_t	subs_table;

/*
 * Time Macros
 * (macro arguments are parenthesized so compound expressions
 * expand correctly)
 */

#define ETAP_TIMESTAMP(t)	rtc_gettime_interrupts_disabled(&(t))
#define ETAP_TIME_SUM(t,sum_me)	(t) += (sum_me)
#define ETAP_TIME_SUB(t,stop,start)		\
	MACRO_BEGIN				\
	(t) = (stop);				\
	SUB_MACH_TIMESPEC(&(t), &(start));	\
	MACRO_END
#define ETAP_TIME_SQR(t,sqr_me)	(t) += (sqr_me)*(sqr_me)
#define ETAP_TIME_DIV(r,n,d)	(r) = (u_short)((n)/(d))
#define ETAP_TIME_IS_ZERO(t)	((t).tv_sec == 0)
#define ETAP_TIME_CLEAR(t)	((t).tv_sec = 0)
#define ETAP_TIME_GREATER(t1,t2)	((t1) > (t2))
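
/*
 * Illustrative sketch (not part of the original header): measuring an
 * interval with the time macros above, assuming the mach_timespec-style
 * time type these macros manipulate.
 *
 *	mach_timespec_t	start, stop, elapsed;
 *
 *	ETAP_TIMESTAMP(start);
 *	...timed region...
 *	ETAP_TIMESTAMP(stop);
 *	ETAP_TIME_SUB(elapsed, stop, start);	(elapsed = stop - start)
 */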

#else	/* ETAP */

#define etap_init_phase1()
#define etap_init_phase2()
#define etap_event_table_assign(chain,event)
#define ETAP_TIMESTAMP(t)
#define ETAP_TIME_SUB(t,stop,start)
#define ETAP_TIME_CLEAR(t)

#endif	/* ETAP */


/*
 * ===================================================
 * ETAP: cumulative trace specific macros
 * ===================================================
 */

#if ETAP_LOCK_ACCUMULATE

extern cbuff_entry_t	etap_cbuff_reserve(event_table_t);
#if MACH_LDEBUG
extern simple_lock_t		cbuff_locks;
#else
extern simple_lock_data_t	cbuff_locks;
#endif
extern int			cbuff_width;

/*
 * If cumulative hold tracing is enabled for the event (i.e., the
 * acquired lock), the CUM_HOLD_ACCUMULATE macro updates the
 * appropriate cumulative buffer entry with the newly collected
 * hold data.
 */

#define CUM_HOLD_ACCUMULATE(cp,total_time,dynamic,trace)		\
MACRO_BEGIN								\
	u_short	_bucket;						\
	if ((cp) != CBUFF_ENTRY_NULL && ((trace) & CUM_DURATION)) {	\
		if (dynamic)						\
			simple_lock_no_trace(&cbuff_locks[dynamic-1]);	\
		(cp)->hold.triggered++;					\
		ETAP_TIME_SUM((cp)->hold.time,(total_time));		\
		ETAP_TIME_SQR((cp)->hold.time_sq,(total_time));		\
		if (ETAP_TIME_IS_ZERO((cp)->hold.min_time) ||		\
		    ETAP_TIME_GREATER((cp)->hold.min_time,(total_time))) \
			(cp)->hold.min_time = (total_time);		\
		if (ETAP_TIME_GREATER((total_time),(cp)->hold.max_time)) \
			(cp)->hold.max_time = (total_time);		\
		ETAP_TIME_DIV(_bucket,(total_time),cbuff_width);	\
		if (_bucket >= ETAP_CBUFF_IBUCKETS)			\
			(cp)->hold_interval[ETAP_CBUFF_IBUCKETS-1]++;	\
		else							\
			(cp)->hold_interval[_bucket]++;			\
		if (dynamic)						\
			simple_unlock_no_trace(&cbuff_locks[dynamic-1]); \
	}								\
MACRO_END

/*
 * If cumulative wait tracing is enabled for the event (i.e., the
 * acquired lock), the CUM_WAIT_ACCUMULATE macro updates the
 * appropriate cumulative buffer entry with the newly collected
 * wait data.
 */

#define CUM_WAIT_ACCUMULATE(cp,total_time,dynamic,trace)		\
MACRO_BEGIN								\
	u_short	_bucket;						\
	if ((cp) != CBUFF_ENTRY_NULL && ((trace) & CUM_CONTENTION)) {	\
		if (dynamic)						\
			simple_lock_no_trace(&cbuff_locks[dynamic-1]);	\
		(cp)->wait.triggered++;					\
		ETAP_TIME_SUM((cp)->wait.time,(total_time));		\
		ETAP_TIME_SQR((cp)->wait.time_sq,(total_time));		\
		if (ETAP_TIME_IS_ZERO((cp)->wait.min_time) ||		\
		    ETAP_TIME_GREATER((cp)->wait.min_time,(total_time))) \
			(cp)->wait.min_time = (total_time);		\
		if (ETAP_TIME_GREATER((total_time),(cp)->wait.max_time)) \
			(cp)->wait.max_time = (total_time);		\
		ETAP_TIME_DIV(_bucket,(total_time),cbuff_width);	\
		if (_bucket >= ETAP_CBUFF_IBUCKETS)			\
			(cp)->wait_interval[ETAP_CBUFF_IBUCKETS-1]++;	\
		else							\
			(cp)->wait_interval[_bucket]++;			\
		if (dynamic)						\
			simple_unlock_no_trace(&cbuff_locks[dynamic-1]); \
	}								\
MACRO_END
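
/*
 * Illustrative sketch (hypothetical caller, not from the original
 * sources): on the release path, the measured hold interval is folded
 * into the lock's cumulative entry `cp'; CUM_WAIT_ACCUMULATE is used
 * the same way with the measured wait (contention) interval.
 *
 *	ETAP_TOTAL_TIME(hold_time, stop_time, entry->start_hold_time);
 *	CUM_HOLD_ACCUMULATE(cp, hold_time, dynamic, trace);
 */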

/*
 * Initially a lock's cbuff_read pointer is set to CBUFF_ENTRY_NULL.
 * This saves space in the cumulative buffer in the event that a read
 * lock is never acquired. When a read lock is acquired, the
 * CUM_READ_ENTRY_RESERVE macro reserves and initializes a
 * cumulative record for it.
 */

#define CUM_READ_ENTRY_RESERVE(l,cp,trace)				\
MACRO_BEGIN								\
	if ((cp) == CBUFF_ENTRY_NULL && (trace) & ETAP_CUMULATIVE) {	\
		(cp) = etap_cbuff_reserve(lock_event_table(l));		\
		if ((cp) != CBUFF_ENTRY_NULL) {				\
			(cp)->event = lock_event_table(l)->event;	\
			(cp)->instance = (u_int) l;			\
			(cp)->kind = READ_LOCK;				\
		}							\
	}								\
MACRO_END
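
/*
 * Illustrative sketch (hypothetical caller, not from the original
 * sources): the read-lock entry is reserved lazily on the first
 * acquisition and then accumulated into on each release.
 *
 *	CUM_READ_ENTRY_RESERVE(l, cp, trace);	(cp was CBUFF_ENTRY_NULL)
 *	...
 *	CUM_HOLD_ACCUMULATE(cp, hold_time, dynamic, trace);
 */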

#else	/* ETAP_LOCK_ACCUMULATE */
#define etap_cbuff_reserve(et)
#define CUM_HOLD_ACCUMULATE(cp,t,d,tr)
#define CUM_WAIT_ACCUMULATE(cp,t,d,tr)
#define CUM_READ_ENTRY_RESERVE(l,rep,tr)
#endif	/* ETAP_LOCK_ACCUMULATE */

/*
 * ===============================================
 * ETAP: monitor trace specific macros
 * ===============================================
 */

#if ETAP_MONITOR
extern int		mbuff_entries;
extern monitor_buffer_t	mbuff[];
#endif	/* ETAP_MONITOR */


#if ETAP_LOCK_MONITOR

/*
 * If monitor tracing is enabled for the lock, the MON_DATA_COLLECT
 * macro writes the collected lock data to the next slot in a
 * CPU-specific monitor buffer. Circular buffer maintenance is also
 * performed here.
 */

#define MON_DATA_COLLECT(l,e,total_time,type,op,trace)			\
MACRO_BEGIN								\
	mbuff_entry_t	_mp;						\
	int		_cpu, _ent, _s;					\
	if ((trace) & op) {						\
		mp_disable_preemption();				\
		_cpu = cpu_number();					\
		_s = splhigh();						\
		_ent = mbuff[_cpu]->free;				\
		_mp = &mbuff[_cpu]->entry[_ent];			\
		_mp->event = lock_event_table(l)->event;		\
		_mp->flags = ((op) | (type));				\
		_mp->instance = (u_int) (l);				\
		_mp->time = (total_time);				\
		_mp->data[0] = (e)->start_pc;				\
		_mp->data[1] = (e)->end_pc;				\
		mbuff[_cpu]->free = (_ent+1) % mbuff_entries;		\
		if (mbuff[_cpu]->free == 0)				\
			mbuff[_cpu]->timestamp++;			\
		splx(_s);						\
		mp_enable_preemption();					\
	}								\
MACRO_END
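
/*
 * Illustrative sketch (hypothetical caller, not from the original
 * sources; WRITE_LOCK is assumed to mirror the READ_LOCK kind used
 * above): logging a lock-hold event, with its duration and start/end
 * PCs, into the current CPU's monitor buffer.
 *
 *	MON_DATA_COLLECT(l, entry, hold_time,
 *			 WRITE_LOCK, ETAP_DURATION, trace);
 */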

#define MON_CLEAR_PCS(l)						\
MACRO_BEGIN								\
	(l)->start_pc = 0;						\
	(l)->end_pc = 0;						\
MACRO_END

/* Wrapped in MACRO_BEGIN/MACRO_END so the conditional cannot capture
 * a dangling else at the expansion site. */
#define MON_ASSIGN_PC(target,source,trace)				\
MACRO_BEGIN								\
	if ((trace) & ETAP_MONITORED)					\
		(target) = (source);					\
MACRO_END

#else	/* ETAP_LOCK_MONITOR */
#define MON_DATA_COLLECT(l,le,tt,t,o,tr)
#define MON_GET_PC(pc,tr)
#define MON_CLEAR_PCS(l)
#define MON_ASSIGN_PC(t,s,tr)
#endif	/* ETAP_LOCK_MONITOR */


#if ETAP_EVENT_MONITOR

#include <mach/exception_types.h>

/* Wrapped in MACRO_BEGIN/MACRO_END for the same dangling-else
 * reason as above. */
#define ETAP_EXCEPTION_PROBE(_f, _th, _ex, _sysnum)		\
MACRO_BEGIN							\
	if ((_ex) == EXC_SYSCALL) {				\
		ETAP_PROBE_DATA(ETAP_P_SYSCALL_UNIX,		\
				_f,				\
				_th,				\
				_sysnum,			\
				sizeof(int));			\
	}							\
MACRO_END
#else	/* ETAP_EVENT_MONITOR */
#define ETAP_EXCEPTION_PROBE(_f, _th, _ex, _sysnum)
#endif	/* ETAP_EVENT_MONITOR */

#if ETAP_EVENT_MONITOR

#define ETAP_PROBE_DATA_COND(_event, _flags, _thread, _data, _size, _cond) \
MACRO_BEGIN								\
	mbuff_entry_t	_mp;						\
	int		_cpu, _ent, _s;					\
	if (event_table[_event].status && (_cond)) {			\
		mp_disable_preemption();				\
		_cpu = cpu_number();					\
		_s = splhigh();						\
		_ent = mbuff[_cpu]->free;				\
		_mp = &mbuff[_cpu]->entry[_ent];			\
		ETAP_TIMESTAMP(_mp->time);				\
		_mp->pc = etap_get_pc();				\
		_mp->event = _event;					\
		_mp->flags = KERNEL_EVENT | _flags;			\
		_mp->instance = (u_int) _thread;			\
		bcopy((char *) _data, (char *) _mp->data, _size);	\
		mbuff[_cpu]->free = (_ent+1) % mbuff_entries;		\
		if (mbuff[_cpu]->free == 0)				\
			mbuff[_cpu]->timestamp++;			\
		splx(_s);						\
		mp_enable_preemption();					\
	}								\
MACRO_END

#define ETAP_PROBE(_event, _flags, _thread)				\
	ETAP_PROBE_DATA_COND(_event, _flags, _thread, 0, 0, 1)

#define ETAP_PROBE_DATA(_event, _flags, _thread, _data, _size)		\
	ETAP_PROBE_DATA_COND(_event, _flags, _thread, _data, _size,	\
			     (_thread)->etap_trace)

#define ETAP_DATA_LOAD(ed, x)		((ed) = (u_int) (x))
#define ETAP_SET_REASON(_th, _reason)	((_th)->etap_reason = (_reason))

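/*
 * Illustrative sketch (hypothetical call sites, not from the original
 * sources; the event and flag names are assumptions): an unconditional
 * probe against the current thread, and a probe that carries a data
 * payload, as ETAP_EXCEPTION_PROBE above does for Unix syscalls.
 *
 *	ETAP_PROBE(ETAP_P_THREAD_LIFE, EVENT_BEGIN, current_thread());
 *	ETAP_PROBE_DATA(ETAP_P_SYSCALL_UNIX, EVENT_BEGIN,
 *			current_thread(), &syscall_num, sizeof(int));
 */
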
#else	/* ETAP_EVENT_MONITOR */
#define ETAP_PROBE(e,f,th)
#define ETAP_PROBE_DATA(e,f,th,d,s)
#define ETAP_PROBE_DATA_COND(e,f,th,d,s,c)
#define ETAP_DATA_LOAD(d,x)
#define ETAP_SET_REASON(t,r)
#endif	/* ETAP_EVENT_MONITOR */

/*
 * =================================
 * ETAP: general lock macros
 * =================================
 */

#if ETAP_LOCK_TRACE

#define ETAP_TOTAL_TIME(t,stop,start)					\
	ETAP_TIME_SUB((t),(stop),(start))

#define ETAP_DURATION_TIMESTAMP(e,trace)				\
MACRO_BEGIN								\
	if ((trace) & ETAP_DURATION)					\
		ETAP_TIMESTAMP((e)->start_hold_time);			\
MACRO_END

#define ETAP_COPY_START_HOLD_TIME(entry,time,trace)			\
MACRO_BEGIN								\
	if ((trace) & ETAP_DURATION)					\
		(entry)->start_hold_time = (time);			\
MACRO_END

#define ETAP_CONTENTION_TIMESTAMP(e,trace)				\
MACRO_BEGIN								\
	if ((trace) & ETAP_CONTENTION)					\
		ETAP_TIMESTAMP((e)->start_wait_time);			\
MACRO_END

#define ETAP_STAMP(event_table,trace,dynamic)				\
MACRO_BEGIN								\
	if ((event_table) != EVENT_TABLE_NULL) {			\
		(dynamic) = (event_table)->dynamic;			\
		(trace) = (event_table)->status;			\
	}								\
MACRO_END

#define ETAP_WHOLE_OP(l)						\
	(!(ETAP_TIME_IS_ZERO((l)->u.s.start_hold_time)))
#define ETAP_DURATION_ENABLED(trace)	((trace) & ETAP_DURATION)
#define ETAP_CONTENTION_ENABLED(trace)	((trace) & ETAP_CONTENTION)

/*
 * The ETAP_CLEAR_TRACE_DATA macro sets the ETAP-specific fields
 * of the simple_lock_t structure to zero.
 *
 * This is always done just before a simple lock is released.
 */

#define ETAP_CLEAR_TRACE_DATA(l)					\
MACRO_BEGIN								\
	ETAP_TIME_CLEAR((l)->u.s.start_hold_time);			\
	MON_CLEAR_PCS((l));						\
MACRO_END
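
/*
 * Illustrative sketch (hypothetical release path, not from the
 * original sources): trace data is accumulated first, then cleared
 * just before the simple lock itself is released.
 *
 *	CUM_HOLD_ACCUMULATE(cp, hold_time, dynamic, trace);
 *	ETAP_CLEAR_TRACE_DATA(l);
 *	(then the simple lock is released)
 */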


/* ==================================================
 * The ETAP_XXX_ENTRY macros manipulate a lock's
 * start_list (a linked list of start data); a typical
 * life cycle is sketched after these macros.
 * ==================================================
 */

#define ETAP_CREATE_ENTRY(entry,trace)					\
MACRO_BEGIN								\
	if ((trace) & ETAP_TRACE_ON)					\
		(entry) = get_start_data_node();			\
MACRO_END

#define ETAP_LINK_ENTRY(l,entry,trace)					\
MACRO_BEGIN								\
	if ((trace) & ETAP_TRACE_ON) {					\
		(entry)->next = (l)->u.s.start_list;			\
		(l)->u.s.start_list = (entry);				\
		(entry)->thread_id = (u_int) current_thread();		\
		ETAP_TIME_CLEAR((entry)->start_wait_time);		\
	}								\
MACRO_END

#define ETAP_FIND_ENTRY(l,entry,trace)					\
MACRO_BEGIN								\
	u_int	_ct;							\
	_ct = (u_int) current_thread();					\
	(entry) = (l)->u.s.start_list;					\
	while ((entry) != SD_ENTRY_NULL && (entry)->thread_id != _ct)	\
		(entry) = (entry)->next;				\
	if ((entry) == SD_ENTRY_NULL)					\
		(trace) = 0;						\
MACRO_END

#define ETAP_UNLINK_ENTRY(l,entry)					\
MACRO_BEGIN								\
	boolean_t		_first = TRUE;				\
	start_data_node_t	_prev;					\
	u_int			_ct;					\
	_ct = (u_int) current_thread();					\
	(entry) = (l)->u.s.start_list;					\
	while ((entry) != SD_ENTRY_NULL && (entry)->thread_id != _ct) { \
		_prev = (entry);					\
		(entry) = (entry)->next;				\
		_first = FALSE;						\
	}								\
	if ((entry) != SD_ENTRY_NULL) {					\
		if (_first)						\
			(l)->u.s.start_list = (entry)->next;		\
		else							\
			_prev->next = (entry)->next;			\
		(entry)->next = SD_ENTRY_NULL;				\
	}								\
MACRO_END

#define ETAP_DESTROY_ENTRY(entry)					\
MACRO_BEGIN								\
	if ((entry) != SD_ENTRY_NULL)					\
		free_start_data_node((entry));				\
MACRO_END
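
/*
 * Illustrative life cycle of a start-data entry (hypothetical caller,
 * not from the original sources); ETAP_STAMP first loads the trace
 * status and dynamic flag from the lock's event table entry.
 *
 *	ETAP_STAMP(lock_event_table(l), trace, dynamic);
 *	ETAP_CREATE_ENTRY(entry, trace);	(on lock request)
 *	ETAP_LINK_ENTRY(l, entry, trace);
 *	...lock is acquired, then later released...
 *	ETAP_UNLINK_ENTRY(l, entry);		(on lock release)
 *	ETAP_DESTROY_ENTRY(entry);
 */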

#else	/* ETAP_LOCK_TRACE */
#define ETAP_TOTAL_TIME(t,stop,start)
#define ETAP_DURATION_TIMESTAMP(le,tr)
#define ETAP_CONTENTION_TIMESTAMP(le,tr)
#define ETAP_COPY_START_HOLD_TIME(le,t,tr)
#define ETAP_STAMP(tt,tr,d)
#define ETAP_DURATION_ENABLED(tr)	(0)	/* always fails */
#define ETAP_CONTENTION_ENABLED(tr)	(0)	/* always fails */
#define ETAP_CLEAR_TRACE_DATA(l)
#define ETAP_CREATE_ENTRY(e,tr)
#define ETAP_LINK_ENTRY(l,e,tr)
#define ETAP_FIND_ENTRY(l,e,tr)
#define ETAP_UNLINK_ENTRY(l,e)
#define ETAP_DESTROY_ENTRY(e)
#endif	/* ETAP_LOCK_TRACE */

#endif	/* _KERN_ETAP_MACROS_H_ */