/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 * File:	etap.c
 */

#include <cpus.h>
#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/host.h>
#include <types.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#if	ETAP_MONITOR
#include <machine/machine_tables.h>
#include <mach/clock.h>
#include <mach/clock_reply.h>
#include <mach/default_pager_object.h>
#include <device/device.h>
#include <device/device_reply.h>
#include <device/device_request.h>
#include <mach_debug/mach_debug.h>
/*#include <mach/mach_host.h>*/
#include <mach/mach_norma.h>
#include <mach/mach_port.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_user.h>
#include <mach/notify_server.h>
#include <mach/prof.h>
#include <machine/unix_map.h>
#endif
#if	MACH_KDB
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_command.h>
#if 0	/* WHY?? */
#include <i386/ipl.h>
#endif
#endif

/*
 * Forwards
 */

kern_return_t
etap_get_info(host_priv_t, int*, int*, vm_offset_t*, vm_offset_t*,
	      int*, int*, int*, int*);

kern_return_t
etap_mon_reconfig(host_priv_t, int);

kern_return_t
etap_new_probe(host_priv_t, vm_address_t, vm_size_t, boolean_t, vm_address_t);

kern_return_t
etap_trace_thread(thread_act_t, boolean_t);

void
etap_trace_reset(int);

void
etap_interrupt_probe(int, int);

void
etap_machcall_probe1(int);

void
etap_machcall_probe2(void);

void
etap_print(void);

#if	ETAP

#ifndef max
#define max(x,y) (((x) > (y)) ? (x) : (y))
#endif	/* max */

event_table_t
etap_event_table_find(etap_event_t);

/* =======================
 *  ETAP Lock definitions
 * =======================
 */

#if	ETAP_LOCK_TRACE
#define etap_lock	simple_lock_no_trace
#define etap_unlock	simple_unlock_no_trace
#else	/* ETAP_LOCK_TRACE */
#define etap_lock	simple_lock
#define etap_unlock	simple_unlock
#endif	/* ETAP_LOCK_TRACE */

#define event_table_lock()	etap_lock(&event_table_lock)
#define event_table_unlock()	etap_unlock(&event_table_lock)

#define cumulative_buffer_lock(s)		\
MACRO_BEGIN					\
	s = splhigh();				\
	etap_lock(&cbuff_lock);			\
MACRO_END

#define cumulative_buffer_unlock(s)		\
MACRO_BEGIN					\
	etap_unlock(&cbuff_lock);		\
	splx(s);				\
MACRO_END

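/*
 * Added usage sketch (not in the original source): the pair of macros
 * above is meant to bracket short critical sections on cbuff, with the
 * caller supplying an spl_t variable to carry the saved interrupt level:
 *
 *	spl_t s;
 *
 *	cumulative_buffer_lock(s);
 *	...read or update cbuff fields...
 *	cumulative_buffer_unlock(s);
 *
 * Raising to splhigh() before taking cbuff_lock presumably keeps probes
 * that fire at interrupt level from spinning on a lock their own CPU
 * already holds.
 */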

#if	ETAP_LOCK_ACCUMULATE

/* ========================================
 *  ETAP Cumulative lock trace definitions
 * ========================================
 */

int cbuff_width = ETAP_CBUFF_WIDTH;

/*
 *  Cumulative buffer declaration
 *
 *  For both protection and mapping purposes, the cumulative
 *  buffer must be aligned on a page boundary.  Since the cumulative
 *  buffer must be statically defined, page boundary alignment is not
 *  guaranteed.  Instead, the buffer is allocated with 2 extra pages.
 *  The cumulative buffer pointer is rounded up to the nearest page
 *  boundary, which guarantees the required alignment.
 */

#define TWO_PAGES		16384	/* XXX does this apply ?? */
#define CBUFF_ALLOCATED_SIZE	sizeof(struct cumulative_buffer)+TWO_PAGES
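
/*
 * Added note (not in the original source): padding the static buffer
 * with TWO_PAGES guarantees that some page-aligned address inside
 * cbuff_allocated[] still has a full sizeof(struct cumulative_buffer)
 * bytes after it.  etap_init_phase1() exploits this:
 *
 *	cbuff = (cumulative_buffer_t) round_page(&cbuff_allocated);
 *
 * round_page() moves the pointer up to the next page boundary, wasting
 * at most one page of the padding.  The XXX above seems to ask whether
 * the hard-wired 16384 (two 8KB pages) matches this machine's page size.
 */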

decl_simple_lock_data(,cbuff_lock)
#if	MACH_LDEBUG
simple_lock_t		cbuff_locks;
#else
simple_lock_data_t	cbuff_locks;
#endif
char			cbuff_allocated[CBUFF_ALLOCATED_SIZE];
cumulative_buffer_t	cbuff = {0};

#endif	/* ETAP_LOCK_ACCUMULATE */

#if	ETAP_MONITOR

int mbuff_entries = ETAP_MBUFF_ENTRIES;

/*
 *  Create an array of pointers to monitor buffers.
 *  The buffers themselves are allocated at run-time.
 */

struct monitor_buffer *mbuff[NCPUS];
#endif	/* ETAP_MONITOR */

/* ==========================
 *  Event table declarations
 * ==========================
 */

decl_simple_lock_data(,event_table_lock)

const struct event_table_entry event_table_init[] =
{

  /*-----------------------------------------------------------------------*
   * ETAP EVENT               TRACE STATUS      TEXT NAME           DYNAMIC *
   *-----------------------------------------------------------------------*/

#if	ETAP_EVENT_MONITOR
   {ETAP_P_USER_EVENT0      , ETAP_TRACE_OFF , "p_user_event0"    , STATIC},
   {ETAP_P_USER_EVENT1      , ETAP_TRACE_OFF , "p_user_event1"    , STATIC},
   {ETAP_P_USER_EVENT2      , ETAP_TRACE_OFF , "p_user_event2"    , STATIC},
   {ETAP_P_USER_EVENT3      , ETAP_TRACE_OFF , "p_user_event3"    , STATIC},
   {ETAP_P_USER_EVENT4      , ETAP_TRACE_OFF , "p_user_event4"    , STATIC},
   {ETAP_P_USER_EVENT5      , ETAP_TRACE_OFF , "p_user_event5"    , STATIC},
   {ETAP_P_USER_EVENT6      , ETAP_TRACE_OFF , "p_user_event6"    , STATIC},
   {ETAP_P_USER_EVENT7      , ETAP_TRACE_OFF , "p_user_event7"    , STATIC},
   {ETAP_P_USER_EVENT8      , ETAP_TRACE_OFF , "p_user_event8"    , STATIC},
   {ETAP_P_USER_EVENT9      , ETAP_TRACE_OFF , "p_user_event9"    , STATIC},
   {ETAP_P_USER_EVENT10     , ETAP_TRACE_OFF , "p_user_event10"   , STATIC},
   {ETAP_P_USER_EVENT11     , ETAP_TRACE_OFF , "p_user_event11"   , STATIC},
   {ETAP_P_USER_EVENT12     , ETAP_TRACE_OFF , "p_user_event12"   , STATIC},
   {ETAP_P_USER_EVENT13     , ETAP_TRACE_OFF , "p_user_event13"   , STATIC},
   {ETAP_P_USER_EVENT14     , ETAP_TRACE_OFF , "p_user_event14"   , STATIC},
   {ETAP_P_USER_EVENT15     , ETAP_TRACE_OFF , "p_user_event15"   , STATIC},
   {ETAP_P_USER_EVENT16     , ETAP_TRACE_OFF , "p_user_event16"   , STATIC},
   {ETAP_P_USER_EVENT17     , ETAP_TRACE_OFF , "p_user_event17"   , STATIC},
   {ETAP_P_USER_EVENT18     , ETAP_TRACE_OFF , "p_user_event18"   , STATIC},
   {ETAP_P_USER_EVENT19     , ETAP_TRACE_OFF , "p_user_event19"   , STATIC},
   {ETAP_P_USER_EVENT20     , ETAP_TRACE_OFF , "p_user_event20"   , STATIC},
   {ETAP_P_USER_EVENT21     , ETAP_TRACE_OFF , "p_user_event21"   , STATIC},
   {ETAP_P_USER_EVENT22     , ETAP_TRACE_OFF , "p_user_event22"   , STATIC},
   {ETAP_P_USER_EVENT23     , ETAP_TRACE_OFF , "p_user_event23"   , STATIC},
   {ETAP_P_USER_EVENT24     , ETAP_TRACE_OFF , "p_user_event24"   , STATIC},
   {ETAP_P_USER_EVENT25     , ETAP_TRACE_OFF , "p_user_event25"   , STATIC},
   {ETAP_P_USER_EVENT26     , ETAP_TRACE_OFF , "p_user_event26"   , STATIC},
   {ETAP_P_USER_EVENT27     , ETAP_TRACE_OFF , "p_user_event27"   , STATIC},
   {ETAP_P_USER_EVENT28     , ETAP_TRACE_OFF , "p_user_event28"   , STATIC},
   {ETAP_P_USER_EVENT29     , ETAP_TRACE_OFF , "p_user_event29"   , STATIC},
   {ETAP_P_USER_EVENT30     , ETAP_TRACE_OFF , "p_user_event30"   , STATIC},
   {ETAP_P_USER_EVENT31     , ETAP_TRACE_OFF , "p_user_event31"   , STATIC},
   {ETAP_P_SYSCALL_MACH     , ETAP_TRACE_OFF , "p_syscall_mach"   , STATIC},
   {ETAP_P_SYSCALL_UNIX     , ETAP_TRACE_OFF , "p_syscall_unix"   , STATIC},
   {ETAP_P_THREAD_LIFE      , ETAP_TRACE_OFF , "p_thread_life"    , STATIC},
   {ETAP_P_THREAD_CTX       , ETAP_TRACE_OFF , "p_thread_ctx"     , STATIC},
   {ETAP_P_RPC              , ETAP_TRACE_OFF , "p_rpc"            , STATIC},
   {ETAP_P_INTERRUPT        , ETAP_TRACE_OFF , "p_interrupt"      , STATIC},
   {ETAP_P_ACT_ABORT        , ETAP_TRACE_OFF , "p_act_abort"      , STATIC},
   {ETAP_P_PRIORITY         , ETAP_TRACE_OFF , "p_priority"       , STATIC},
   {ETAP_P_EXCEPTION        , ETAP_TRACE_OFF , "p_exception"      , STATIC},
   {ETAP_P_DEPRESSION       , ETAP_TRACE_OFF , "p_depression"     , STATIC},
   {ETAP_P_MISC             , ETAP_TRACE_OFF , "p_misc"           , STATIC},
   {ETAP_P_DETAP            , ETAP_TRACE_OFF , "p_detap"          , STATIC},
#endif	/* ETAP_EVENT_MONITOR */

#if	ETAP_LOCK_TRACE
   {ETAP_VM_BUCKET          , ETAP_TRACE_OFF , "vm_bucket"        , STATIC},/**/
   {ETAP_VM_HIMEM           , ETAP_TRACE_OFF , "vm_himem"         , STATIC},
   {ETAP_VM_MAP             , ETAP_TRACE_OFF , "vm_map"           , 1},
   {ETAP_VM_MAP_I           , ETAP_TRACE_OFF , "vm_map_i"         , 2},
   {ETAP_VM_MEMMAN          , ETAP_TRACE_OFF , "vm_memman"        , STATIC},/**/
   {ETAP_VM_MSYNC           , ETAP_TRACE_OFF , "vm_msync"         , 3},
   {ETAP_VM_OBJ             , ETAP_TRACE_OFF , "vm_obj"           , 4},
   {ETAP_VM_OBJ_CACHE       , ETAP_TRACE_OFF , "vm_obj_cache"     , 5},
   {ETAP_VM_PAGE_ALLOC      , ETAP_TRACE_OFF , "vm_page_alloc"    , STATIC},/**/
   {ETAP_VM_PAGEOUT         , ETAP_TRACE_OFF , "vm_pageout"       , STATIC},
   {ETAP_VM_PAGEQ           , ETAP_TRACE_OFF , "vm_pageq"         , STATIC},
   {ETAP_VM_PAGEQ_FREE      , ETAP_TRACE_OFF , "vm_pageq_free"    , STATIC},
   {ETAP_VM_PMAP            , ETAP_TRACE_OFF , "vm_pmap"          , 6},
   {ETAP_VM_PMAP_CACHE      , ETAP_TRACE_OFF , "vm_pmap_cache"    , STATIC},
   {ETAP_VM_PMAP_FREE       , ETAP_TRACE_OFF , "vm_pmap_free"     , STATIC},
   {ETAP_VM_PMAP_KERNEL     , ETAP_TRACE_OFF , "vm_pmap_kern"     , STATIC},
   {ETAP_VM_PMAP_SYS        , ETAP_TRACE_OFF , "vm_pmap_sys"      , 7},
   {ETAP_VM_PMAP_SYS_I      , ETAP_TRACE_OFF , "vm_pmap_sys_i"    , 8},
   {ETAP_VM_PMAP_UPDATE     , ETAP_TRACE_OFF , "vm_pmap_update"   , STATIC},
   {ETAP_VM_PREPPIN         , ETAP_TRACE_OFF , "vm_preppin"       , STATIC},
   {ETAP_VM_RESULT          , ETAP_TRACE_OFF , "vm_result"        , 9},
   {ETAP_VM_TEST            , ETAP_TRACE_OFF , "vm_test"          , STATIC},/**/
   {ETAP_VM_PMAP_PHYSENTRIES, ETAP_TRACE_OFF , "vm_pmap_physentries", STATIC},
   {ETAP_VM_PMAP_SID        , ETAP_TRACE_OFF , "vm_pmap_sid"      , STATIC},
   {ETAP_VM_PMAP_PTE        , ETAP_TRACE_OFF , "vm_pmap_pte"      , STATIC},
   {ETAP_VM_PMAP_PTE_OVFLW  , ETAP_TRACE_OFF , "vm_pmap_pte_ovflw", STATIC},
   {ETAP_VM_PMAP_TLB        , ETAP_TRACE_OFF , "vm_pmap_tlb"      , STATIC},

   {ETAP_IPC_IHGB           , ETAP_TRACE_OFF , "ipc_ihgb"         , 10},/**/
   {ETAP_IPC_IS             , ETAP_TRACE_OFF , "ipc_is"           , 11},/**/
   {ETAP_IPC_IS_REF         , ETAP_TRACE_OFF , "ipc_is_ref"       , 12},/**/
   {ETAP_IPC_MQUEUE         , ETAP_TRACE_OFF , "ipc_mqueue"       , STATIC},/**/
   {ETAP_IPC_OBJECT         , ETAP_TRACE_OFF , "ipc_object"       , STATIC},/**/
   {ETAP_IPC_PORT_MULT      , ETAP_TRACE_OFF , "ipc_port_mult"    , 13},/**/
   {ETAP_IPC_PORT_TIME      , ETAP_TRACE_OFF , "ipc_port_time"    , 14},/**/
   {ETAP_IPC_RPC            , ETAP_TRACE_OFF , "ipc_rpc"          , 15},/**/
   {ETAP_IPC_PORT_ALLOCQ    , ETAP_TRACE_OFF , "ipc_port_allocq"  , STATIC},/**/

   {ETAP_IO_AHA             , ETAP_TRACE_OFF , "io_aha"           , STATIC},
   {ETAP_IO_CHIP            , ETAP_TRACE_OFF , "io_chip"          , STATIC},
   {ETAP_IO_DEV             , ETAP_TRACE_OFF , "io_dev"           , 16},/**/
   {ETAP_IO_DEV_NUM         , ETAP_TRACE_OFF , "io_dev_num"       , STATIC},
   {ETAP_IO_DEV_PAGEH       , ETAP_TRACE_OFF , "io_dev_pageh"     , STATIC},/**/
   {ETAP_IO_DEV_PAGER       , ETAP_TRACE_OFF , "io_dev_pager"     , STATIC},/**/
   {ETAP_IO_DEV_PORT        , ETAP_TRACE_OFF , "io_dev_port"      , STATIC},/**/
   {ETAP_IO_DEV_REF         , ETAP_TRACE_OFF , "io_dev_new"       , 17},/**/
   {ETAP_IO_DEVINS          , ETAP_TRACE_OFF , "io_devins"        , STATIC},
   {ETAP_IO_DONE_LIST       , ETAP_TRACE_OFF , "io_done_list"     , STATIC},
   {ETAP_IO_DONE_Q          , ETAP_TRACE_OFF , "io_doneq"         , 18},
   {ETAP_IO_DONE_REF        , ETAP_TRACE_OFF , "io_done_ref"      , 19},
   {ETAP_IO_EAHA            , ETAP_TRACE_OFF , "io_eaha"          , STATIC},
   {ETAP_IO_HD_PROBE        , ETAP_TRACE_OFF , "io_hd_probe"      , STATIC},
   {ETAP_IO_IOPB            , ETAP_TRACE_OFF , "io_iopb"          , STATIC},
   {ETAP_IO_KDQ             , ETAP_TRACE_OFF , "io_kdq"           , STATIC},
   {ETAP_IO_KDTTY           , ETAP_TRACE_OFF , "io_kdtty"         , STATIC},
   {ETAP_IO_REQ             , ETAP_TRACE_OFF , "io_req"           , 20},
   {ETAP_IO_TARGET          , ETAP_TRACE_OFF , "io_target"        , STATIC},
   {ETAP_IO_TTY             , ETAP_TRACE_OFF , "io_tty"           , STATIC},
   {ETAP_IO_IOP_LOCK        , ETAP_TRACE_OFF , "io_iop"           , STATIC},/**/
   {ETAP_IO_DEV_NAME        , ETAP_TRACE_OFF , "io_dev_name"      , STATIC},/**/
   {ETAP_IO_CDLI            , ETAP_TRACE_OFF , "io_cdli"          , STATIC},/**/
   {ETAP_IO_HIPPI_FILTER    , ETAP_TRACE_OFF , "io_hippi_filter"  , STATIC},/**/
   {ETAP_IO_HIPPI_SRC       , ETAP_TRACE_OFF , "io_hippi_src"     , STATIC},/**/
   {ETAP_IO_HIPPI_DST       , ETAP_TRACE_OFF , "io_hippi_dst"     , STATIC},/**/
   {ETAP_IO_HIPPI_PKT       , ETAP_TRACE_OFF , "io_hippi_pkt"     , STATIC},/**/
   {ETAP_IO_NOTIFY          , ETAP_TRACE_OFF , "io_notify"        , STATIC},/**/
   {ETAP_IO_DATADEV         , ETAP_TRACE_OFF , "io_data_device"   , STATIC},/**/
   {ETAP_IO_OPEN            , ETAP_TRACE_OFF , "io_open"          , STATIC},
   {ETAP_IO_OPEN_I          , ETAP_TRACE_OFF , "io_open_i"        , STATIC},

   {ETAP_THREAD_ACT         , ETAP_TRACE_OFF , "th_act"           , 21},
   {ETAP_THREAD_ACTION      , ETAP_TRACE_OFF , "th_action"        , STATIC},
   {ETAP_THREAD_LOCK        , ETAP_TRACE_OFF , "th_lock"          , 22},
   {ETAP_THREAD_LOCK_SET    , ETAP_TRACE_OFF , "th_lock_set"      , 23},
   {ETAP_THREAD_NEW         , ETAP_TRACE_OFF , "th_new"           , 24},
   {ETAP_THREAD_PSET        , ETAP_TRACE_OFF , "th_pset"          , STATIC},/**/
   {ETAP_THREAD_PSET_ALL    , ETAP_TRACE_OFF , "th_pset_all"      , STATIC},
   {ETAP_THREAD_PSET_RUNQ   , ETAP_TRACE_OFF , "th_pset_runq"     , STATIC},
   {ETAP_THREAD_PSET_IDLE   , ETAP_TRACE_OFF , "th_pset_idle"     , STATIC},
   {ETAP_THREAD_PSET_QUANT  , ETAP_TRACE_OFF , "th_pset_quant"    , STATIC},
   {ETAP_THREAD_PROC        , ETAP_TRACE_OFF , "th_proc"          , STATIC},
   {ETAP_THREAD_PROC_RUNQ   , ETAP_TRACE_OFF , "th_proc_runq"     , STATIC},
   {ETAP_THREAD_REAPER      , ETAP_TRACE_OFF , "th_reaper"        , STATIC},
   {ETAP_THREAD_RPC         , ETAP_TRACE_OFF , "th_rpc"           , 25},
   {ETAP_THREAD_SEMA        , ETAP_TRACE_OFF , "th_sema"          , 26},
   {ETAP_THREAD_STACK       , ETAP_TRACE_OFF , "th_stack"         , STATIC},
   {ETAP_THREAD_STACK_USAGE , ETAP_TRACE_OFF , "th_stack_usage"   , STATIC},
   {ETAP_THREAD_TASK_NEW    , ETAP_TRACE_OFF , "th_task_new"      , 27},
   {ETAP_THREAD_TASK_ITK    , ETAP_TRACE_OFF , "th_task_itk"      , 28},
   {ETAP_THREAD_ULOCK       , ETAP_TRACE_OFF , "th_ulock"         , 29},
   {ETAP_THREAD_WAIT        , ETAP_TRACE_OFF , "th_wait"          , STATIC},
   {ETAP_THREAD_WAKE        , ETAP_TRACE_OFF , "th_wake"          , 30},
   {ETAP_THREAD_ACT_LIST    , ETAP_TRACE_OFF , "th_act_list"      , 31},
   {ETAP_THREAD_TASK_SWAP   , ETAP_TRACE_OFF , "th_task_swap"     , 32},
   {ETAP_THREAD_TASK_SWAPOUT, ETAP_TRACE_OFF , "th_task_swapout"  , 33},
   {ETAP_THREAD_SWAPPER     , ETAP_TRACE_OFF , "th_swapper"       , STATIC},

   {ETAP_NET_IFQ            , ETAP_TRACE_OFF , "net_ifq"          , STATIC},
   {ETAP_NET_KMSG           , ETAP_TRACE_OFF , "net_kmsg"         , STATIC},
   {ETAP_NET_MBUF           , ETAP_TRACE_OFF , "net_mbuf"         , STATIC},/**/
   {ETAP_NET_POOL           , ETAP_TRACE_OFF , "net_pool"         , STATIC},
   {ETAP_NET_Q              , ETAP_TRACE_OFF , "net_q"            , STATIC},
   {ETAP_NET_QFREE          , ETAP_TRACE_OFF , "net_qfree"        , STATIC},
   {ETAP_NET_RCV            , ETAP_TRACE_OFF , "net_rcv"          , STATIC},
   {ETAP_NET_RCV_PLIST      , ETAP_TRACE_OFF , "net_rcv_plist"    , STATIC},/**/
   {ETAP_NET_THREAD         , ETAP_TRACE_OFF , "net_thread"       , STATIC},

   {ETAP_NORMA_XMM          , ETAP_TRACE_OFF , "norma_xmm"        , STATIC},
   {ETAP_NORMA_XMMOBJ       , ETAP_TRACE_OFF , "norma_xmmobj"     , STATIC},
   {ETAP_NORMA_XMMCACHE     , ETAP_TRACE_OFF , "norma_xmmcache"   , STATIC},
   {ETAP_NORMA_MP           , ETAP_TRACE_OFF , "norma_mp"         , STATIC},
   {ETAP_NORMA_VOR          , ETAP_TRACE_OFF , "norma_vor"        , STATIC},/**/
   {ETAP_NORMA_TASK         , ETAP_TRACE_OFF , "norma_task"       , 38},/**/

   {ETAP_DIPC_CLEANUP       , ETAP_TRACE_OFF , "dipc_cleanup"     , STATIC},/**/
   {ETAP_DIPC_MSG_PROG      , ETAP_TRACE_OFF , "dipc_msgp_prog"   , STATIC},/**/
   {ETAP_DIPC_PREP_QUEUE    , ETAP_TRACE_OFF , "dipc_prep_queue"  , STATIC},/**/
   {ETAP_DIPC_PREP_FILL     , ETAP_TRACE_OFF , "dipc_prep_fill"   , STATIC},/**/
   {ETAP_DIPC_MIGRATE       , ETAP_TRACE_OFF , "dipc_migrate"     , STATIC},/**/
   {ETAP_DIPC_DELIVER       , ETAP_TRACE_OFF , "dipc_deliver"     , STATIC},/**/
   {ETAP_DIPC_RECV_SYNC     , ETAP_TRACE_OFF , "dipc_recv_sync"   , STATIC},/**/
   {ETAP_DIPC_RPC           , ETAP_TRACE_OFF , "dipc_rpc"         , STATIC},/**/
   {ETAP_DIPC_MSG_REQ       , ETAP_TRACE_OFF , "dipc_msg_req"     , STATIC},/**/
   {ETAP_DIPC_MSG_ORDER     , ETAP_TRACE_OFF , "dipc_msg_order"   , STATIC},/**/
   {ETAP_DIPC_MSG_PREPQ     , ETAP_TRACE_OFF , "dipc_msg_prepq"   , STATIC},/**/
   {ETAP_DIPC_MSG_FREE      , ETAP_TRACE_OFF , "dipc_msg_free"    , STATIC},/**/
   {ETAP_DIPC_KMSG_AST      , ETAP_TRACE_OFF , "dipc_kmsg_ast"    , STATIC},/**/
   {ETAP_DIPC_TEST_LOCK     , ETAP_TRACE_OFF , "dipc_test_lock"   , STATIC},/**/
   {ETAP_DIPC_SPINLOCK      , ETAP_TRACE_OFF , "dipc_spinlock"    , STATIC},/**/
   {ETAP_DIPC_TRACE         , ETAP_TRACE_OFF , "dipc_trace"       , STATIC},/**/
   {ETAP_DIPC_REQ_CALLBACK  , ETAP_TRACE_OFF , "dipc_req_clbck"   , STATIC},/**/
   {ETAP_DIPC_PORT_NAME     , ETAP_TRACE_OFF , "dipc_port_name"   , STATIC},/**/
   {ETAP_DIPC_RESTART_PORT  , ETAP_TRACE_OFF , "dipc_restart_port", STATIC},/**/
   {ETAP_DIPC_ZERO_PAGE     , ETAP_TRACE_OFF , "dipc_zero_page"   , STATIC},/**/
   {ETAP_DIPC_BLOCKED_NODE  , ETAP_TRACE_OFF , "dipc_blocked_node", STATIC},/**/
   {ETAP_DIPC_TIMER         , ETAP_TRACE_OFF , "dipc_timer"       , STATIC},/**/
   {ETAP_DIPC_SPECIAL_PORT  , ETAP_TRACE_OFF , "dipc_special_port", STATIC},/**/

   {ETAP_KKT_TEST_WORK      , ETAP_TRACE_OFF , "kkt_test_work"    , STATIC},/**/
   {ETAP_KKT_TEST_MP        , ETAP_TRACE_OFF , "kkt_work_mp"      , STATIC},/**/
   {ETAP_KKT_NODE           , ETAP_TRACE_OFF , "kkt_node"         , STATIC},/**/
   {ETAP_KKT_CHANNEL_LIST   , ETAP_TRACE_OFF , "kkt_channel_list" , STATIC},/**/
   {ETAP_KKT_CHANNEL        , ETAP_TRACE_OFF , "kkt_channel"      , STATIC},/**/
   {ETAP_KKT_HANDLE         , ETAP_TRACE_OFF , "kkt_handle"       , STATIC},/**/
   {ETAP_KKT_MAP            , ETAP_TRACE_OFF , "kkt_map"          , STATIC},/**/
   {ETAP_KKT_RESOURCE       , ETAP_TRACE_OFF , "kkt_resource"     , STATIC},/**/

   {ETAP_XKERNEL_MASTER     , ETAP_TRACE_OFF , "xkernel_master"   , STATIC},/**/
   {ETAP_XKERNEL_EVENT      , ETAP_TRACE_OFF , "xkernel_event"    , STATIC},/**/
   {ETAP_XKERNEL_ETHINPUT   , ETAP_TRACE_OFF , "xkernel_input"    , STATIC},/**/

   {ETAP_MISC_AST           , ETAP_TRACE_OFF , "m_ast"            , STATIC},
   {ETAP_MISC_CLOCK         , ETAP_TRACE_OFF , "m_clock"          , STATIC},
   {ETAP_MISC_EMULATE       , ETAP_TRACE_OFF , "m_emulate"        , 34},
   {ETAP_MISC_EVENT         , ETAP_TRACE_OFF , "m_event"          , STATIC},
   {ETAP_MISC_KDB           , ETAP_TRACE_OFF , "m_kdb"            , STATIC},
   {ETAP_MISC_PCB           , ETAP_TRACE_OFF , "m_pcb"            , 35},
   {ETAP_MISC_PRINTF        , ETAP_TRACE_OFF , "m_printf"         , STATIC},
   {ETAP_MISC_Q             , ETAP_TRACE_OFF , "m_q"              , STATIC},
   {ETAP_MISC_RPC_SUBSYS    , ETAP_TRACE_OFF , "m_rpc_sub"        , 36},
   {ETAP_MISC_RT_CLOCK      , ETAP_TRACE_OFF , "m_rt_clock"       , STATIC},
   {ETAP_MISC_SD_POOL       , ETAP_TRACE_OFF , "m_sd_pool"        , STATIC},
   {ETAP_MISC_TIMER         , ETAP_TRACE_OFF , "m_timer"          , STATIC},
   {ETAP_MISC_UTIME         , ETAP_TRACE_OFF , "m_utime"          , STATIC},
   {ETAP_MISC_XPR           , ETAP_TRACE_OFF , "m_xpr"            , STATIC},
   {ETAP_MISC_ZONE          , ETAP_TRACE_OFF , "m_zone"           , 37},
   {ETAP_MISC_ZONE_ALL      , ETAP_TRACE_OFF , "m_zone_all"       , STATIC},
   {ETAP_MISC_ZONE_GET      , ETAP_TRACE_OFF , "m_zone_get"       , STATIC},
   {ETAP_MISC_ZONE_PTABLE   , ETAP_TRACE_OFF , "m_zone_ptable"    , STATIC},/**/
   {ETAP_MISC_LEDGER        , ETAP_TRACE_OFF , "m_ledger"         , STATIC},/**/
   {ETAP_MISC_SCSIT_TGT     , ETAP_TRACE_OFF , "m_scsit_tgt_lock" , STATIC},/**/
   {ETAP_MISC_SCSIT_SELF    , ETAP_TRACE_OFF , "m_scsit_self_lock", STATIC},/**/
   {ETAP_MISC_SPL           , ETAP_TRACE_OFF , "m_spl_lock"       , STATIC},/**/
   {ETAP_MISC_MASTER        , ETAP_TRACE_OFF , "m_master"         , STATIC},/**/
   {ETAP_MISC_FLOAT         , ETAP_TRACE_OFF , "m_float"          , STATIC},/**/
   {ETAP_MISC_GROUP         , ETAP_TRACE_OFF , "m_group"          , STATIC},/**/
   {ETAP_MISC_FLIPC         , ETAP_TRACE_OFF , "m_flipc"          , STATIC},/**/
   {ETAP_MISC_MP_IO         , ETAP_TRACE_OFF , "m_mp_io"          , STATIC},/**/
   {ETAP_MISC_KERNEL_TEST   , ETAP_TRACE_OFF , "m_kernel_test"    , STATIC},/**/

   {ETAP_NO_TRACE           , ETAP_TRACE_OFF , "NEVER_TRACE"      , STATIC},
#endif	/* ETAP_LOCK_TRACE */
};
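
/*
 * Added note (not in the original source): the DYNAMIC column above is
 * either STATIC or a small positive ordinal.  etap_init_phase1() records
 * the largest ordinal as cbuff->static_start, so cumulative buffer
 * entries 0 .. static_start-1 are reserved for these dynamic lock groups
 * -- etap_cbuff_reserve() returns &cbuff->entry[de-1] for an event whose
 * dynamic field is de -- and static reservations are handed out from
 * static_start onward.
 */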

/*
 * Variable initially pointing to the event table, then to its mappable
 * copy.  The cast is needed to discard the `const' qualifier; without it
 * gcc issues a warning.
 */
event_table_t event_table = (event_table_t) event_table_init;

/*
 * Linked list of pointers into event_table_init[] so they can be switched
 * into the mappable copy when it is made.
 */
struct event_table_chain *event_table_chain;

/*
 * max number of event types in the event table
 */

int event_table_max = sizeof(event_table_init)/sizeof(struct event_table_entry);

const struct subs_table_entry subs_table_init[] =
{
  /*------------------------------------------*
   * ETAP SUBSYSTEM           TEXT NAME       *
   *------------------------------------------*/

#if	ETAP_EVENT_MONITOR
   {ETAP_SUBS_PROBE          , "event_probes" },
#endif	/* ETAP_EVENT_MONITOR */

#if	ETAP_LOCK_TRACE
   {ETAP_SUBS_LOCK_DIPC      , "lock_dipc"    },
   {ETAP_SUBS_LOCK_IO        , "lock_io"      },
   {ETAP_SUBS_LOCK_IPC       , "lock_ipc"     },
   {ETAP_SUBS_LOCK_KKT       , "lock_kkt"     },
   {ETAP_SUBS_LOCK_MISC      , "lock_misc"    },
   {ETAP_SUBS_LOCK_NET       , "lock_net"     },
   {ETAP_SUBS_LOCK_NORMA     , "lock_norma"   },
   {ETAP_SUBS_LOCK_THREAD    , "lock_thread"  },
   {ETAP_SUBS_LOCK_VM        , "lock_vm"      },
   {ETAP_SUBS_LOCK_XKERNEL   , "lock_xkernel" },
#endif	/* ETAP_LOCK_TRACE */
};

/*
 * Variable initially pointing to the subsystem table, then to its
 * mappable copy.
 */
subs_table_t subs_table = (subs_table_t) subs_table_init;

/*
 * max number of subsystem types in the subsystem table
 */

int subs_table_max = sizeof(subs_table_init)/sizeof(struct subs_table_entry);

#if	ETAP_MONITOR
#define MAX_NAME_SIZE	35

#define SYS_TABLE_MACH_TRAP	0
#define SYS_TABLE_MACH_MESSAGE	1
#define SYS_TABLE_UNIX_SYSCALL	2
#define SYS_TABLE_INTERRUPT	3
#define SYS_TABLE_EXCEPTION	4


extern char *system_table_lookup(unsigned int table,
				 unsigned int number);


char	*mach_trap_names[] = {
/*   0 */	"undefined",
/*   1 */	NULL,
/*   2 */	NULL,
/*   3 */	NULL,
/*   4 */	NULL,
/*   5 */	NULL,
/*   6 */	NULL,
/*   7 */	NULL,
/*   8 */	NULL,
/*   9 */	NULL,
/*  10 */	NULL,
/*  11 */	NULL,
/*  12 */	NULL,
/*  13 */	NULL,
/*  14 */	NULL,
/*  15 */	NULL,
/*  16 */	NULL,
/*  17 */	NULL,
/*  18 */	NULL,
/*  19 */	NULL,
/*  20 */	NULL,
/*  21 */	NULL,
/*  22 */	NULL,
/*  23 */	NULL,
/*  24 */	NULL,
/*  25 */	NULL,
/*  26 */	"mach_reply_port",
/*  27 */	"mach_thread_self",
/*  28 */	"mach_task_self",
/*  29 */	"mach_host_self",
/*  30 */	"vm_read_overwrite",
/*  31 */	"vm_write",
/*  32 */	"mach_msg_overwrite_trap",
/*  33 */	NULL,
/*  34 */	NULL,
#ifdef	i386
/*  35 */	"mach_rpc_trap",
/*  36 */	"mach_rpc_return_trap",
#else
/*  35 */	NULL,
/*  36 */	NULL,
#endif	/* i386 */
/*  37 */	NULL,
/*  38 */	NULL,
/*  39 */	NULL,
/*  40 */	NULL,
/*  41 */	"init_process",
/*  42 */	NULL,
/*  43 */	"map_fd",
/*  44 */	NULL,
/*  45 */	NULL,
/*  46 */	NULL,
/*  47 */	NULL,
/*  48 */	NULL,
/*  49 */	NULL,
/*  50 */	NULL,
/*  51 */	NULL,
/*  52 */	NULL,
/*  53 */	NULL,
/*  54 */	NULL,
/*  55 */	NULL,
/*  56 */	NULL,
/*  57 */	NULL,
/*  58 */	NULL,
/*  59 */	"swtch_pri",
/*  60 */	"swtch",
/*  61 */	"thread_switch",
/*  62 */	"clock_sleep_trap",
/*  63 */	NULL,
/*  64 */	NULL,
/*  65 */	NULL,
/*  66 */	NULL,
/*  67 */	NULL,
/*  68 */	NULL,
/*  69 */	NULL,
/*  70 */	NULL,
/*  71 */	NULL,
/*  72 */	NULL,
/*  73 */	NULL,
/*  74 */	NULL,
/*  75 */	NULL,
/*  76 */	NULL,
/*  77 */	NULL,
/*  78 */	NULL,
/*  79 */	NULL,
/*  80 */	NULL,
/*  81 */	NULL,
/*  82 */	NULL,
/*  83 */	NULL,
/*  84 */	NULL,
/*  85 */	NULL,
/*  86 */	NULL,
/*  87 */	NULL,
/*  88 */	NULL,
/*  89 */	NULL,
/*  90 */	NULL,
/*  91 */	NULL,
/*  92 */	NULL,
/*  93 */	NULL,
/*  94 */	NULL,
/*  95 */	NULL,
/*  96 */	NULL,
/*  97 */	NULL,
/*  98 */	NULL,
/*  99 */	NULL,
/* 100 */	NULL,
/* 101 */	NULL,
/* 102 */	NULL,
/* 103 */	NULL,
/* 104 */	NULL,
/* 105 */	NULL,
/* 106 */	NULL,
/* 107 */	NULL,
/* 108 */	NULL,
/* 109 */	NULL,
};
#define N_MACH_TRAP_NAMES (sizeof mach_trap_names / sizeof mach_trap_names[0])
#define mach_trap_name(nu) \
	(((nu) < N_MACH_TRAP_NAMES) ? mach_trap_names[nu] : NULL)
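
/*
 * Added usage sketch (not in the original source): mach_trap_name() is a
 * bounds-checked lookup into the sparse table above, e.g.
 *
 *	mach_trap_name(28)   -> "mach_task_self"
 *	mach_trap_name(25)   -> NULL  (unnamed trap slot)
 *	mach_trap_name(5000) -> NULL  (out of range)
 *
 * so callers presumably substitute a placeholder such as "undefined"
 * when NULL comes back.
 */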

struct table_entry {
	char	name[MAX_NAME_SIZE];
	u_int	number;
};

/*
 *  Mach message table
 *
 *  Note: Most mach system calls are actually implemented as messages.
 */
struct table_entry	mach_message_table[] = {
	subsystem_to_name_map_bootstrap,
	subsystem_to_name_map_clock,
	subsystem_to_name_map_clock_reply,
	subsystem_to_name_map_default_pager_object,
	subsystem_to_name_map_device,
	subsystem_to_name_map_device_reply,
	subsystem_to_name_map_device_request,
	subsystem_to_name_map_exc,
/*	subsystem_to_name_map_mach, */
	subsystem_to_name_map_mach_debug,
/*	subsystem_to_name_map_mach_host, */
	subsystem_to_name_map_mach_norma,
	subsystem_to_name_map_mach_port,
	subsystem_to_name_map_memory_object,
	subsystem_to_name_map_memory_object_default,
	subsystem_to_name_map_notify,
	subsystem_to_name_map_prof,
	subsystem_to_name_map_sync
};

int mach_message_table_entries = sizeof(mach_message_table) /
				 sizeof(struct table_entry);


#endif

/*
 * ================================
 * Initialization routines for ETAP
 * ================================
 */

/*
 *  ROUTINE:	etap_init_phase1	[internal]
 *
 *  FUNCTION:	Event trace instrumentation initialization phase
 *		one of two.  The static phase.  The cumulative buffer
 *		is initialized.
 *
 *  NOTES:	The cumulative buffer is statically allocated and
 *		must be initialized before the first simple_lock_init()
 *		or lock_init() call is made.
 *
 *		The first lock init call is made before dynamic allocation
 *		is available.  Hence, phase one is executed before dynamic
 *		memory allocation is available.
 *
 */

void
etap_init_phase1(void)
{
#if	ETAP_LOCK_ACCUMULATE || MACH_ASSERT
	int x;
#if	MACH_ASSERT
	boolean_t out_of_order;
#endif	/* MACH_ASSERT */
#endif	/* ETAP_LOCK_ACCUMULATE || MACH_ASSERT */

#if	ETAP_LOCK_ACCUMULATE
	/*
	 * Initialize Cumulative Buffer
	 *
	 * Note: The cumulative buffer is statically allocated.
	 *	 This static allocation is necessary since most
	 *	 of the lock_init calls are made before dynamic
	 *	 allocation routines are available.
	 */

	/*
	 * Align the cumulative buffer pointer to a page boundary
	 * (so it can be mapped).
	 */

	bzero(&cbuff_allocated[0], CBUFF_ALLOCATED_SIZE);
	cbuff = (cumulative_buffer_t) round_page(&cbuff_allocated);

	simple_lock_init(&cbuff_lock, ETAP_NO_TRACE);

	/*
	 * Set the starting point for cumulative buffer entry
	 * reservations.
	 *
	 * This value must leave enough head room in the
	 * cumulative buffer to contain all dynamic events.
	 */

	for (x = 0; x < event_table_max; x++)
		if (event_table[x].dynamic > cbuff->static_start)
			cbuff->static_start = event_table[x].dynamic;

	cbuff->next = cbuff->static_start;
#endif	/* ETAP_LOCK_ACCUMULATE */

	/*
	 * Initialize the event table lock
	 */

	simple_lock_init(&event_table_lock, ETAP_NO_TRACE);

#if	MACH_ASSERT
	/*
	 * Check that events are in numerical order so we can do a binary
	 * search on them.  Even better would be to make event numbers be
	 * simple contiguous indexes into event_table[], but that would
	 * break the coding of subsystems in the event number.
	 */
	out_of_order = FALSE;
	for (x = 1; x < event_table_max; x++) {
		if (event_table[x - 1].event > event_table[x].event) {
			printf("events out of order: %s > %s\n",
			       event_table[x - 1].name, event_table[x].name);
			out_of_order = TRUE;
		}
	}
	if (out_of_order)
		panic("etap_init_phase1");
#endif	/* MACH_ASSERT */
}


/*
 *  ROUTINE:	etap_init_phase2	[internal]
 *
 *  FUNCTION:	Event trace instrumentation initialization phase
 *		two of two.  The dynamic phase.  The monitored buffers
 *		are dynamically allocated and initialized.  Cumulative
 *		dynamic entry locks are allocated and initialized.  The
 *		start_data_pool is initialized.
 *
 *  NOTES:	Phase two is executed once dynamic memory allocation
 *		is available.
 *
 */

void
etap_init_phase2(void)
{
	int		size;
	int		x;
	int		ret;
	vm_offset_t	table_copy;
	struct event_table_chain *chainp;

	/*
	 * Make mappable copies of the event_table and the subs_table.
	 * These tables were originally mapped as they appear in the
	 * kernel image, but that meant that other kernel variables could
	 * end up being mapped with them, which is ugly.  It also didn't
	 * work on the HP/PA, where pages with physical address == virtual
	 * do not have real pmap entries allocated and therefore can't be
	 * mapped elsewhere.
	 */
	size = sizeof event_table_init + sizeof subs_table_init;
	ret = kmem_alloc(kernel_map, &table_copy, size);
	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating table copies");
	event_table = (event_table_t) table_copy;
	subs_table = (subs_table_t) (table_copy + sizeof event_table_init);
	bcopy((char *) event_table_init, (char *) event_table,
	      sizeof event_table_init);
	bcopy((char *) subs_table_init, (char *) subs_table,
	      sizeof subs_table_init);

	/* Switch pointers from the old event_table to the new. */
	for (chainp = event_table_chain; chainp != NULL;
	     chainp = chainp->event_table_link) {
		x = chainp->event_tablep - event_table_init;
		assert(x < event_table_max);
		chainp->event_tablep = event_table + x;
	}

#if	ETAP_LOCK_ACCUMULATE

	/*
	 * Because several dynamic locks can point to a single
	 * cumulative buffer entry, dynamic lock writes to the
	 * entry are synchronized.
	 *
	 * The spin locks are allocated here.
	 *
	 */
#if	MACH_LDEBUG
	size = sizeof(simple_lock_t) * cbuff->static_start;
#else
	/*
	 * Note: These locks are different from traditional spin locks.
	 *	 They are of type int instead of type simple_lock_t.
	 *	 We can reduce lock size this way, since no tracing will
	 *	 EVER be performed on these locks.
	 */
	size = sizeof(simple_lock_data_t) * cbuff->static_start;
#endif

	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cbuff_locks, size);

	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating cumulative write locks");

#if	MACH_LDEBUG
	for (x = 0; x < cbuff->static_start; ++x) {
		simple_lock_init(&cbuff_locks[x], ETAP_NO_TRACE);
	}
#else
	bzero((char *) cbuff_locks, size);
#endif

#endif	/* ETAP_LOCK_ACCUMULATE */


#if	ETAP_MONITOR

	/*
	 *  monitor buffer allocation
	 */

	size = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
		sizeof(struct monitor_buffer);

	for (x = 0; x < NCPUS; x++) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *) &mbuff[x],
				 size);

		if (ret != KERN_SUCCESS)
			panic("ETAP: error allocating monitor buffer\n");

		/* zero fill buffer */
		bzero((char *) mbuff[x], size);
	}

#endif	/* ETAP_MONITOR */


#if	ETAP_LOCK_TRACE

	/*
	 *  Initialize the start_data_pool
	 */

	init_start_data_pool();

#endif	/* ETAP_LOCK_TRACE */
}


#if	ETAP_LOCK_ACCUMULATE

/*
 *  ROUTINE:	etap_cbuff_reserve	[internal]
 *
 *  FUNCTION:	The cumulative buffer operation which returns a pointer
 *		to a free entry in the cumulative buffer.
 *
 *  NOTES:	Disables interrupts.
 *
 */

cbuff_entry_t
etap_cbuff_reserve(event_table_t etp)
{
	cbuff_entry_t	avail;
	unsigned short	de;
	spl_t		s;

	/* see if the type pointer is initialized */
	if (etp == EVENT_TABLE_NULL || etp->event == ETAP_NO_TRACE)
		return (CBUFF_ENTRY_NULL);

	/* check for a DYNAMIC lock */
	if ((de = etp->dynamic) != 0) {
		if (de <= cbuff->static_start)
			return (&cbuff->entry[de-1]);
		else {
			printf("ETAP: dynamic lock index error [%u]\n", de);
			return (CBUFF_ENTRY_NULL);
		}
	}

	cumulative_buffer_lock(s);

	/* if the buffer is full, reservation requests fail */
	if (cbuff->next >= ETAP_CBUFF_ENTRIES) {
		cumulative_buffer_unlock(s);
		return (CBUFF_ENTRY_NULL);
	}

	avail = &cbuff->entry[cbuff->next++];

	cumulative_buffer_unlock(s);

	return (avail);
}

#endif	/* ETAP_LOCK_ACCUMULATE */

/*
 *  ROUTINE:	etap_event_table_find	[internal]
 *
 *  FUNCTION:	Returns a pointer to the event type's table entry,
 *		using the event type as the search key.
 *
 */

event_table_t
etap_event_table_find(etap_event_t event)
{
	int	last_before, first_after, try;

	/* Binary search for the event number.  Elements below last_before
	   are known to be < the number we're looking for; first_after is
	   the lowest-numbered element known to be >. */
	last_before = 0;
	first_after = event_table_max;
	while (last_before < first_after) {
		try = (last_before + first_after) >> 1;
		if (event_table[try].event == event)
			return (&event_table[try]);
		else if (event_table[try].event < event)
			last_before = try + 1;
		else
			first_after = try;
	}
	return EVENT_TABLE_NULL;
}
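
/*
 * Added note (not in the original source): the binary search above relies
 * on event_table[] being sorted by event number -- exactly the property
 * the MACH_ASSERT check in etap_init_phase1() panics on.  A typical
 * lookup:
 *
 *	event_table_t etp = etap_event_table_find(ETAP_P_SYSCALL_MACH);
 *	if (etp != EVENT_TABLE_NULL)
 *		...etp->status, etp->dynamic and etp->name are valid...
 */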

void
etap_event_table_assign(struct event_table_chain *chainp, etap_event_t event)
{
	event_table_t	event_tablep;

	event_tablep = etap_event_table_find(event);
	if (event_tablep == EVENT_TABLE_NULL)
		printf("\nETAP: event not found in event table: %x\n", event);
	else {
		if (event_table == event_table_init) {
			chainp->event_table_link = event_table_chain;
			event_table_chain = chainp;
		}
		chainp->event_tablep = event_tablep;
	}
}

#endif	/* ETAP */

/*
 *
 *  MESSAGE:	etap_get_info		[exported]
 *
 *  FUNCTION:	provides the server with ETAP buffer configurations.
 *
 */

kern_return_t
etap_get_info(
	host_priv_t	host_priv,
	int		*et_entries,
	int		*st_entries,
	vm_offset_t	*et_offset,
	vm_offset_t	*st_offset,
	int		*cb_width,
	int		*mb_size,
	int		*mb_entries,
	int		*mb_cpus)
{

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

#if	ETAP
	*et_entries	= event_table_max;
	*st_entries	= subs_table_max;
	*et_offset	= (vm_offset_t) ((char*) event_table -
				trunc_page((char*) event_table));
	*st_offset	= (vm_offset_t) ((char*) subs_table -
				trunc_page((char*) subs_table));
#else	/* ETAP */
	*et_entries	= 0;
	*st_entries	= 0;
	*et_offset	= 0;
	*st_offset	= 0;
#endif	/* ETAP */

#if	ETAP_LOCK_ACCUMULATE
	*cb_width	= cbuff_width;
#else	/* ETAP_LOCK_ACCUMULATE */
	*cb_width	= 0;
#endif	/* ETAP_LOCK_ACCUMULATE */

#if	ETAP_MONITOR
	*mb_size	= ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
				sizeof(struct monitor_buffer);
	*mb_entries	= mbuff_entries;
	*mb_cpus	= NCPUS;
#else	/* ETAP_MONITOR */
	*mb_size	= 0;
	*mb_entries	= 0;
	*mb_cpus	= 0;
#endif	/* ETAP_MONITOR */

	return (KERN_SUCCESS);
}
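
/*
 * Added note (not in the original source): *et_offset and *st_offset are
 * page offsets, i.e. the distance from the base of the page containing
 * each table to the table itself:
 *
 *	offset = address - trunc_page(address)
 *
 * A server that maps the table pages into its own address space adds the
 * offset to its mapped base address to locate the first entry.
 */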

/*
 *  ROUTINE:	etap_trace_event	[exported]
 *
 *  FUNCTION:	The etap_trace_event system call is the user's interface to
 *		the ETAP kernel instrumentation.
 *
 *		This call allows the user to enable and disable tracing modes
 *		on specific event types.  The call also supports a reset
 *		option, where the cumulative buffer data and all event type
 *		tracing is reset to zero.  When the reset option is used, a
 *		new interval width can also be defined using the nargs
 *		parameter.
 *
 */

kern_return_t
etap_trace_event (
	unsigned short	mode,
	unsigned short	type,
	boolean_t	enable,
	unsigned int	nargs,
	unsigned short	args[])
{
#if	ETAP
	event_table_t	event_tablep;
	kern_return_t	ret;
	int		i, args_size;
	unsigned short	status_mask;
	unsigned short	*tmp_args;

	/*
	 *  Initialize operation
	 */

	if (mode == ETAP_RESET) {
		etap_trace_reset(nargs);
		return (KERN_SUCCESS);
	}

	status_mask = mode & type;

	/*
	 *  Copy the args array from user space to kernel space
	 */

	args_size = nargs * sizeof *args;
	tmp_args = (unsigned short *) kalloc(args_size);

	if (tmp_args == NULL)
		return (KERN_NO_SPACE);

	if (copyin((const char *) args, (char *) tmp_args, args_size)) {
		kfree((vm_offset_t) tmp_args, args_size);
		return (KERN_INVALID_ADDRESS);
	}

	/*
	 *  Change the appropriate status fields in the event table
	 */

	event_table_lock();

	for (i = 0; i < nargs; i++) {
		if (tmp_args[i] != ETAP_NO_TRACE) {
			event_tablep = etap_event_table_find(tmp_args[i]);
			if (event_tablep == EVENT_TABLE_NULL)
				break;
			if (enable)
				event_tablep->status |= status_mask;
			else
				event_tablep->status &= ~status_mask;
		}
	}

	ret = (i < nargs) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	event_table_unlock();

	kfree((vm_offset_t) tmp_args, args_size);

	return (ret);

#else	/* ETAP */

	return (KERN_FAILURE);

#endif	/* ETAP */
}
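
/*
 * Added note (not in the original source): in the ETAP_RESET case above,
 * nargs is reused as the new cumulative buffer interval width rather than
 * as an argument count.  A hypothetical reset call:
 *
 *	etap_trace_event(ETAP_RESET, 0, FALSE, new_width, NULL);
 *
 * hands new_width to etap_trace_reset(), which installs it as cbuff_width
 * when it is non-zero.
 */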


#if	ETAP

/*
 *  ROUTINE:	etap_trace_reset	[internal]
 *
 *  FUNCTION:	Turns off all tracing and erases all the data accumulated
 *		in the cumulative buffer.  If the user defined a new
 *		cumulative buffer interval width, it will be assigned here.
 *
 */
void
etap_trace_reset(int new_interval)
{
	event_table_t	scan;
	int		x;
	spl_t		s;

	/*
	 *  Wipe out the trace fields in the event table
	 */

	scan = event_table;

	event_table_lock();

	for (x = 0; x < event_table_max; x++) {
		scan->status = ETAP_TRACE_OFF;
		scan++;
	}

	event_table_unlock();

#if	ETAP_LOCK_ACCUMULATE

	/*
	 *  Wipe out the cumulative buffer statistical fields for all entries
	 */

	cumulative_buffer_lock(s);

	for (x = 0; x < ETAP_CBUFF_ENTRIES; x++) {
		bzero((char *) &cbuff->entry[x].hold,
		      sizeof(struct cbuff_data));
		bzero((char *) &cbuff->entry[x].wait,
		      sizeof(struct cbuff_data));
		bzero((char *) &cbuff->entry[x].hold_interval[0],
		      sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
		bzero((char *) &cbuff->entry[x].wait_interval[0],
		      sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
	}

	/*
	 *  Assign the interval width if the user defined a new one.
	 */

	if (new_interval != 0)
		cbuff_width = new_interval;

	cumulative_buffer_unlock(s);

#endif	/* ETAP_LOCK_ACCUMULATE */
}

#endif	/* ETAP */

/*
 *  ROUTINE:	etap_probe	[exported]
 *
 *  FUNCTION:	The etap_probe system call serves as a user-level probe,
 *		allowing user-level code to store event data into
 *		the monitored buffer(s).
 */

kern_return_t
etap_probe(
	unsigned short	event_type,
	unsigned short	event_id,
	unsigned int	data_size,	/* total size in bytes */
	etap_data_t	*data)
{

#if	ETAP_MONITOR

	mbuff_entry_t	mbuff_entryp;
	int		cpu;
	int		free;
	spl_t		s;


	if (data_size > ETAP_DATA_SIZE)
		return (KERN_INVALID_ARGUMENT);

	if (event_table[event_type].status == ETAP_TRACE_OFF ||
	    event_table[event_type].event != event_type)
		return (KERN_NO_ACCESS);

	mp_disable_preemption();
	cpu = cpu_number();
	s = splhigh();

	free = mbuff[cpu]->free;
	mbuff_entryp = &mbuff[cpu]->entry[free];

	/*
	 *  Load the monitor buffer entry
	 */

	ETAP_TIMESTAMP(mbuff_entryp->time);
	mbuff_entryp->event	= event_id;
	mbuff_entryp->flags	= USER_EVENT;
	mbuff_entryp->instance	= (u_int) current_thread();
	mbuff_entryp->pc	= 0;

	if (data != ETAP_DATA_NULL)
		copyin((const char *) data,
		       (char *) mbuff_entryp->data,
		       data_size);

	mbuff[cpu]->free = (free+1) % mbuff_entries;

	if (mbuff[cpu]->free == 0)
		mbuff[cpu]->timestamp++;

	splx(s);
	mp_enable_preemption();

	return (KERN_SUCCESS);

#else	/* ETAP_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_MONITOR */
}
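
/*
 * Added usage sketch (not in the original source, hypothetical values):
 * a user-level probe firing one of the p_user_eventN events might look
 * like
 *
 *	etap_data_t data;
 *	...fill in up to ETAP_DATA_SIZE bytes...
 *	etap_probe(ETAP_P_USER_EVENT0, my_event_id, sizeof data, &data);
 *
 * The event type must index an enabled event_table[] entry whose event
 * number equals the index, otherwise the call fails with KERN_NO_ACCESS.
 */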

/*
 *  ROUTINE:	etap_trace_thread	[exported]
 *
 *  FUNCTION:	Toggles a thread's ETAP trace status bit.
 */

kern_return_t
etap_trace_thread(
	thread_act_t	thr_act,
	boolean_t	trace_status)
{
#if	ETAP_EVENT_MONITOR

	thread_t	thread;
	boolean_t	old_status;
	etap_data_t	probe_data;
	spl_t		s;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);

	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return (KERN_INVALID_ARGUMENT);
	}

	s = splsched();
	thread_lock(thread);

	old_status = thread->etap_trace;
	thread->etap_trace = trace_status;

	ETAP_DATA_LOAD(probe_data[0], thr_act->task);
	ETAP_DATA_LOAD(probe_data[1], thr_act);
	ETAP_DATA_LOAD(probe_data[2], thread->sched_pri);

	thread_unlock(thread);
	splx(s);

	act_unlock_thread(thr_act);

	/*
	 *  Thread creation (ETAP_P_THREAD_LIFE: BEGIN) is ONLY recorded
	 *  here, since a thread's trace status is disabled by default.
	 */
	if (trace_status == TRUE && old_status == FALSE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_BEGIN,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	/*
	 *  Thread termination is (falsely) recorded here if the trace
	 *  status has been disabled.  This event is recorded to allow
	 *  users the option of tracing a portion of a thread's execution.
	 */
	if (trace_status == FALSE && old_status == TRUE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_END,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	return (KERN_SUCCESS);

#else	/* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}

/*
 *  ROUTINE:	etap_mon_reconfig	[exported]
 *
 *  FUNCTION:	Reallocates the monitor buffers to hold the specified
 *		number of entries.
 *
 *  NOTES:	In the multiprocessor (SMP) case, a lock needs to be added
 *		here and in the data collection macros to protect access
 *		to mbuff_entries.
 */
kern_return_t
etap_mon_reconfig(
	host_priv_t	host_priv,
	int		nentries)
{
#if	ETAP_EVENT_MONITOR
	struct monitor_buffer *nmbuff[NCPUS], *ombuff[NCPUS];
	int	size, osize, i, ret;
	spl_t	s;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (nentries <= 0)	/* must be at least 1 */
		return (KERN_FAILURE);

	size = ((nentries-1) * sizeof(struct mbuff_entry)) +
		sizeof(struct monitor_buffer);

	for (i = 0; i < NCPUS; ++i) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *)&nmbuff[i],
				 size);
		if (ret != KERN_SUCCESS) {
			if (i > 0) {
				int j;

				for (j = 0; j < i; ++j) {
					kmem_free(kernel_map,
						  (vm_offset_t)nmbuff[j],
						  size);
				}
			}
			return (ret);
		}
		bzero((char *) nmbuff[i], size);
	}
	osize = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
		sizeof(struct monitor_buffer);

	s = splhigh();
	event_table_lock();
	for (i = 0; i < NCPUS; ++i) {
		ombuff[i] = mbuff[i];
		mbuff[i] = nmbuff[i];
	}
	mbuff_entries = nentries;
	event_table_unlock();
	splx(s);

	for (i = 0; i < NCPUS; ++i) {
		kmem_free(kernel_map,
			  (vm_offset_t)ombuff[i],
			  osize);
	}
	return (KERN_SUCCESS);
#else
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}
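
/*
 * Added note (not in the original source): etap_mon_reconfig() follows an
 * allocate-new / swap-under-lock / free-old pattern -- the new buffers
 * are fully allocated and zeroed before the pointers are switched at
 * splhigh, and the old buffers are freed only after the switch.  As the
 * NOTES above point out, the data-collection macros do not take
 * event_table_lock, so on a multiprocessor an additional lock would be
 * needed to make the swap safe against concurrent probes.
 */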

/*
 *  ROUTINE:	etap_new_probe	[exported]
 *
 *  FUNCTION:	Reallocates the monitor probe table, adding a new entry.
 *
 */
kern_return_t
etap_new_probe(
	host_priv_t	host_priv,
	vm_address_t	name,
	vm_size_t	namlen,
	boolean_t	trace_on,
	vm_address_t	id)
{
#if	ETAP_EVENT_MONITOR
	event_table_t	newtable, oldtable;
	unsigned short	i, nid;
	spl_t		s;
	vm_size_t	newsize = (event_table_max + 1) *
				  sizeof(struct event_table_entry);
	boolean_t	duplicate_name = FALSE;
	kern_return_t	ret;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (namlen > EVENT_NAME_LENGTH - 1)
		return (KERN_INVALID_ARGUMENT);

	if ((ret = kmem_alloc(kernel_map, (vm_address_t *)&newtable,
			      newsize)) != KERN_SUCCESS)
		return (ret);

	bcopy((const char *)event_table, (char *)newtable, event_table_max *
	      sizeof(struct event_table_entry));

	if (copyin((const char *)name,
		   (char *)&newtable[event_table_max].name, namlen)) {
		kmem_free(kernel_map, (vm_address_t)newtable, newsize);
		return (KERN_INVALID_ADDRESS);
	}

	newtable[event_table_max].name[EVENT_NAME_LENGTH - 1] = '\0';
	newtable[event_table_max].status = trace_on;
	newtable[event_table_max].dynamic = 0;

	for (nid = i = 0; i < event_table_max; ++i) {
		if (strcmp((char *)newtable[event_table_max].name,
			   newtable[i].name) == 0) {
			duplicate_name = TRUE;
			printf("duplicate name\n");
		}
		nid = max(nid, newtable[i].event);
	}
	++nid;

	if (nid >= ETAP_NO_TRACE || duplicate_name == TRUE) {
		kmem_free(kernel_map, (vm_address_t)newtable, newsize);
		if (nid >= ETAP_NO_TRACE) {
			printf("KERN_RESOURCE_SHORTAGE\n");
			return (KERN_RESOURCE_SHORTAGE);
		}
		else {
			printf("KERN_NAME_EXISTS\n");
			return (KERN_NAME_EXISTS);
		}
	}

	newtable[event_table_max].event = nid;

	s = splhigh();
	event_table_lock();
	oldtable = event_table;
	event_table = newtable;
	++event_table_max;
	event_table_unlock();
	splx(s);

	if (oldtable != event_table_init)
		kmem_free(kernel_map, (vm_address_t)oldtable,
			  (event_table_max - 1) *
			  sizeof(struct event_table_entry));

	*(unsigned short *)id = nid;

	return (KERN_SUCCESS);
#else
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */

}
/*
 *  ETAP trap probe hooks
 */

void
etap_interrupt_probe(int interrupt, int flag_setting)
{
	u_short	flag;

	if (flag_setting == 1)
		flag = EVENT_BEGIN;
	else
		flag = EVENT_END;

	ETAP_PROBE_DATA_COND(ETAP_P_INTERRUPT,
			     flag,
			     current_thread(),
			     &interrupt,
			     sizeof(int),
			     1);
}

void
etap_machcall_probe1(int syscall)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_BEGIN | SYSCALL_TRAP,
			current_thread(),
			&syscall,
			sizeof(int));
}

void
etap_machcall_probe2(void)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_END | SYSCALL_TRAP,
			current_thread(),
			0,
			0);
}

static void print_user_event(mbuff_entry_t);
static void print_kernel_event(mbuff_entry_t, boolean_t);
static void print_lock_event(mbuff_entry_t, const char *);
1534 | ||
#if MACH_KDB
void db_show_etap_log(db_expr_t, boolean_t, db_expr_t, char *);
/*
 *
 *	ROUTINE:	db_show_etap_log [internal]
 *
 *	FUNCTION:	print each mbuff table (for use in debugger);
 *			the 'r' modifier walks the log in reverse order,
 *			the 'd' modifier also prints each record's data words
 *
 */
void
db_show_etap_log(
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		*modif)
{
#if ETAP_MONITOR
	int	cpu = cpu_number(), last, i, first, step, end, restart;
	boolean_t show_data = FALSE;

	/* bias by mbuff_entries so the modulo never sees a negative value */
	last = (mbuff[cpu]->free + mbuff_entries - 1) % mbuff_entries;

	if (db_option(modif, 'r')) {
		first = last;
		step = -1;
		end = -1;
		restart = mbuff_entries - 1;
	} else {
		first = last + 1;
		step = 1;
		end = mbuff_entries;
		restart = 0;
	}

	if (db_option(modif, 'd'))
		show_data = TRUE;

	/* walk from the starting record to the edge of the buffer ... */
	for (i = first; i != end; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
	/* ... then wrap around and finish the remaining records */
	for (i = restart; i != first; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
#else
	printf("ETAP event monitor not configured\n");
#endif	/* ETAP_MONITOR */
}

#if ETAP_MONITOR
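/*
 *
 *	ROUTINE:	print_user_event [internal]
 *
 *	FUNCTION:	pretty-print a monitored buffer record generated
 *			by one of the user probes
 *
 */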
static
void
print_user_event(mbuff_entry_t record)
{
	char	*s, buf[256];

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);
	switch (record->pc) {
	case ETAP_P_USER_EVENT0:	s = "0";	break;
	case ETAP_P_USER_EVENT1:	s = "1";	break;
	case ETAP_P_USER_EVENT2:	s = "2";	break;
	case ETAP_P_USER_EVENT3:	s = "3";	break;
	case ETAP_P_USER_EVENT4:	s = "4";	break;
	case ETAP_P_USER_EVENT5:	s = "5";	break;
	case ETAP_P_USER_EVENT6:	s = "6";	break;
	case ETAP_P_USER_EVENT7:	s = "7";	break;
	case ETAP_P_USER_EVENT8:	s = "8";	break;
	case ETAP_P_USER_EVENT9:	s = "9";	break;
	case ETAP_P_USER_EVENT10:	s = "10";	break;
	case ETAP_P_USER_EVENT11:	s = "11";	break;
	case ETAP_P_USER_EVENT12:	s = "12";	break;
	case ETAP_P_USER_EVENT13:	s = "13";	break;
	case ETAP_P_USER_EVENT14:	s = "14";	break;
	case ETAP_P_USER_EVENT15:	s = "15";	break;
	case ETAP_P_USER_EVENT16:	s = "16";	break;
	case ETAP_P_USER_EVENT17:	s = "17";	break;
	case ETAP_P_USER_EVENT18:	s = "18";	break;
	case ETAP_P_USER_EVENT19:	s = "19";	break;
	case ETAP_P_USER_EVENT20:	s = "20";	break;
	case ETAP_P_USER_EVENT21:	s = "21";	break;
	case ETAP_P_USER_EVENT22:	s = "22";	break;
	case ETAP_P_USER_EVENT23:	s = "23";	break;
	case ETAP_P_USER_EVENT24:	s = "24";	break;
	case ETAP_P_USER_EVENT25:	s = "25";	break;
	case ETAP_P_USER_EVENT26:	s = "26";	break;
	case ETAP_P_USER_EVENT27:	s = "27";	break;
	case ETAP_P_USER_EVENT28:	s = "28";	break;
	case ETAP_P_USER_EVENT29:	s = "29";	break;
	case ETAP_P_USER_EVENT30:	s = "30";	break;
	case ETAP_P_USER_EVENT31:	s = "31";	break;
	default:
		sprintf(buf, "dynamic %x", record->pc);
		s = buf;
		break;
	}

	db_printf("user probe %s: [%x] data = %x %x %x %x\n",
		  s,
		  record->event,
		  record->data[0],
		  record->data[1],
		  record->data[2],
		  record->data[3]);
}

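/*
 *
 *	ROUTINE:	print_kernel_event [internal]
 *
 *	FUNCTION:	pretty-print a kernel-generated buffer record,
 *			decoding the event-specific data words
 *
 */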
static
void
print_kernel_event(mbuff_entry_t record, boolean_t data)
{
	char	*text_name;
	int	i;

	/* assume a zero event means the record was never written to */
	if (record->event == 0)
		return;

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);

	switch (record->event) {

	case ETAP_P_THREAD_LIFE:
		if (record->flags & EVENT_BEGIN)
			db_printf("thread created [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		else
			db_printf("thread terminated [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		break;

	case ETAP_P_SYSCALL_MACH:
		if (record->flags & SYSCALL_TRAP)
			text_name = system_table_lookup(SYS_TABLE_MACH_TRAP,
							record->data[0]);
		else
			text_name = system_table_lookup(SYS_TABLE_MACH_MESSAGE,
							record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("mach enter: %s [%x]\n",
				  text_name,
				  record->data[0]);
		else
			db_printf("mach exit :\n");
		break;

	case ETAP_P_SYSCALL_UNIX:
		text_name = system_table_lookup(SYS_TABLE_UNIX_SYSCALL,
						record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("unix enter: %s\n", text_name);
		else
			db_printf("unix exit : %s\n", text_name);
		break;

	case ETAP_P_THREAD_CTX:
		if (record->flags & EVENT_END)
			db_printf("context switch to %x ",
				  record->data[0]);
		else	/* EVENT_BEGIN */
			db_printf("context switch from %x ",
				  record->data[0]);

		switch (record->data[1]) {
		case BLOCKED_ON_SEMAPHORE:
			db_printf("R: semaphore\n"); break;
		case BLOCKED_ON_LOCK:
			db_printf("R: lock\n"); break;
		case BLOCKED_ON_MUTEX_LOCK:
			db_printf("R: mutex lock\n"); break;
		case BLOCKED_ON_COMPLEX_LOCK:
			db_printf("R: complex lock\n"); break;
		case BLOCKED_ON_PORT_RCV:
			db_printf("R: port receive\n"); break;
		case BLOCKED_ON_REAPER_DONE:
			db_printf("R: reaper thread done\n"); break;
		case BLOCKED_ON_IDLE_DONE:
			db_printf("R: idle thread done\n"); break;
		case BLOCKED_ON_TERMINATION:
			db_printf("R: termination\n"); break;
		default:
			if (record->data[2])
				db_printf("R: ast %x\n", record->data[2]);
			else
				db_printf("R: undefined block\n");
		}
		break;

	case ETAP_P_INTERRUPT:
		if (record->flags & EVENT_BEGIN) {
			text_name = system_table_lookup(SYS_TABLE_INTERRUPT,
							record->data[0]);
			db_printf("intr enter: %s\n", text_name);
		} else
			db_printf("intr exit\n");
		break;

	case ETAP_P_ACT_ABORT:
		db_printf("activation abort [A %x : S %x]\n",
			  record->data[1],
			  record->data[0]);
		break;

	case ETAP_P_PRIORITY:
		db_printf("priority changed for %x N:%d O:%d\n",
			  record->data[0],
			  record->data[1],
			  record->data[2]);
		break;

	case ETAP_P_EXCEPTION:
		text_name = system_table_lookup(SYS_TABLE_EXCEPTION,
						record->data[0]);
		db_printf("exception: %s\n", text_name);
		break;

	case ETAP_P_DEPRESSION:
		if (record->flags & EVENT_BEGIN)
			db_printf("priority depressed\n");
		else {
			if (record->data[0] == 0)
				db_printf("priority undepressed : timed out\n");
			else
				db_printf("priority undepressed : self inflicted\n");
		}
		break;

	case ETAP_P_MISC:
		db_printf("flags: %x data: %x %x %x %x\n", record->flags,
			  record->data[0], record->data[1], record->data[2],
			  record->data[3]);
		break;

	case ETAP_P_DETAP:
		db_printf("flags: %x rtc: %x %09x dtime: %x %09x\n",
			  record->flags, record->data[0], record->data[1],
			  record->data[2], record->data[3]);
		break;

	default:
		for (i = 0; event_table_init[i].event != ETAP_NO_TRACE; ++i)
			if (record->event == event_table_init[i].event) {
				print_lock_event(record, event_table_init[i].name);
				return;
			}
		db_printf("Unknown event: %d\n", record->event);
		break;
	}
	if (data)
		db_printf("  Data: %08x %08x %08x %08x\n", record->data[0],
			  record->data[1], record->data[2], record->data[3]);
}

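/*
 *
 *	ROUTINE:	print_lock_event [internal]
 *
 *	FUNCTION:	pretty-print a lock contention or duration record,
 *			resolving the recorded PCs to symbol+offset form
 *
 */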
static void
print_lock_event(mbuff_entry_t record, const char *name)
{
	char		*sym1, *sym2;
	db_addr_t	offset1, offset2;

	db_find_sym_and_offset(record->data[0], &sym1, &offset1);

	db_printf("%15s", name);
	if (record->flags & SPIN_LOCK)
		db_printf(" spin ");
	else if (record->flags & READ_LOCK)
		db_printf(" read ");
	else if (record->flags & WRITE_LOCK)
		db_printf(" write ");
	else
		db_printf(" undef ");

	if (record->flags & ETAP_CONTENTION) {
		db_printf("wait lock %s+%x\n",
			  sym1, offset1);
	}
	else if (record->flags & ETAP_DURATION) {
		db_find_sym_and_offset(record->data[1], &sym2, &offset2);
		db_printf("lock %s+%x unlock %s+%x\n",
			  sym1, offset1, sym2, offset2);
	} else {
		db_printf("illegal op: neither HOLD nor WAIT is specified\n");
	}
}

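/*
 *
 *	ROUTINE:	system_table_lookup [internal]
 *
 *	FUNCTION:	map a trap, message, syscall, interrupt, or
 *			exception number to a printable name
 *
 */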
char *
system_table_lookup(unsigned int table, unsigned int number)
{
	int		x;
	char		*name = NULL;
	db_addr_t	offset;

	switch (table) {
	case SYS_TABLE_MACH_TRAP:
		name = mach_trap_name(number >> 4);
		break;
	case SYS_TABLE_MACH_MESSAGE:
		for (x = 0; x < mach_message_table_entries; x++) {
			if (mach_message_table[x].number == number) {
				name = mach_message_table[x].name;
				break;
			}
		}
		break;
	case SYS_TABLE_UNIX_SYSCALL:
		number = -number;
		name = syscall_name(number);
		break;
	case SYS_TABLE_INTERRUPT:
		db_find_sym_and_offset((int)ivect[number], &name, &offset);
		break;
	case SYS_TABLE_EXCEPTION:
		name = exception_name(number);
		break;
	}
	return (name != NULL) ? name : "undefined";
}

#endif	/* ETAP_MONITOR */
#endif	/* MACH_KDB */