/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 * File:	etap.c
 */

#include <cpus.h>
#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/host.h>
#include <types.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#if ETAP_MONITOR
#include <machine/machine_tables.h>
#include <mach/clock.h>
#include <mach/clock_reply.h>
#include <mach/default_pager_object.h>
#include <device/device.h>
#include <device/device_reply.h>
#include <device/device_request.h>
#include <mach_debug/mach_debug.h>
/*#include <mach/mach_host.h>*/
#include <mach/mach_norma.h>
#include <mach/mach_port.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_user.h>
#include <mach/notify_server.h>
#include <mach/prof.h>
#include <machine/unix_map.h>
#endif
#if MACH_KDB
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_command.h>
#if 0	/* WHY?? */
#include <i386/ipl.h>
#endif
#endif

/*
 * Forwards
 */

kern_return_t
etap_get_info(host_priv_t, int*, int*, vm_offset_t*, vm_offset_t*,
	      int*, int*, int*, int*);

kern_return_t
etap_mon_reconfig(host_priv_t, int);

kern_return_t
etap_new_probe(host_priv_t, vm_address_t, vm_size_t, boolean_t, vm_address_t);

kern_return_t
etap_trace_thread(thread_act_t, boolean_t);

void
etap_trace_reset(int);

void
etap_interrupt_probe(int, int);

void
etap_machcall_probe1(int);

void
etap_machcall_probe2(void);

void
etap_print(void);


#if ETAP

#ifndef max
#define max(x,y) ((x > y) ? x : y)
#endif	/* max */

event_table_t
etap_event_table_find(etap_event_t);

/* =======================
 * ETAP Lock definitions
 * =======================
 */

#if ETAP_LOCK_TRACE
#define etap_lock	simple_lock_no_trace
#define etap_unlock	simple_unlock_no_trace
#else	/* ETAP_LOCK_TRACE */
#define etap_lock	simple_lock
#define etap_unlock	simple_unlock
#endif	/* ETAP_LOCK_TRACE */

#define event_table_lock()	etap_lock(&event_table_lock)
#define event_table_unlock()	etap_unlock(&event_table_lock)

#define cumulative_buffer_lock(s)	\
MACRO_BEGIN				\
	s = splhigh();			\
	etap_lock(&cbuff_lock);		\
MACRO_END

#define cumulative_buffer_unlock(s)	\
MACRO_BEGIN				\
	etap_unlock(&cbuff_lock);	\
	splx(s);			\
MACRO_END
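
/*
 * Note: cumulative_buffer_lock/unlock must be used as a matched pair
 * with the same spl variable `s', since the lock macro raises the
 * interrupt priority level and the unlock macro restores it.
 */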


#if ETAP_LOCK_ACCUMULATE

/* ========================================
 * ETAP Cumulative lock trace definitions
 * ========================================
 */

int cbuff_width = ETAP_CBUFF_WIDTH;

/*
 * Cumulative buffer declaration
 *
 * For both protection and mapping purposes, the cumulative
 * buffer must be aligned on a page boundary.  Since the cumulative
 * buffer must be statically defined, page boundary alignment is not
 * guaranteed.  Instead, the buffer is allocated with 2 extra pages.
 * The cumulative buffer pointer will round up to the nearest page.
 *
 * This will guarantee page boundary alignment.
 */

#define TWO_PAGES		16384	/* XXX does this apply ?? */
#define CBUFF_ALLOCATED_SIZE	sizeof(struct cumulative_buffer)+TWO_PAGES
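
/*
 * Note: the padding above is what allows the buffer start to be rounded
 * up to a page boundary.  The name TWO_PAGES only matches an 8K page
 * size, which is presumably what the XXX comment is questioning.
 */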

decl_simple_lock_data (,cbuff_lock)
#if MACH_LDEBUG
simple_lock_t		cbuff_locks;
#else
simple_lock_data_t	*cbuff_locks;
#endif
char			cbuff_allocated [CBUFF_ALLOCATED_SIZE];
cumulative_buffer_t	cbuff		= {0};

#endif	/* ETAP_LOCK_ACCUMULATE */

#if ETAP_MONITOR

int mbuff_entries = ETAP_MBUFF_ENTRIES;

/*
 * Create an array of pointers to monitor buffers.
 * The buffers themselves are allocated at run-time.
 */

struct monitor_buffer *mbuff[NCPUS];
#endif	/* ETAP_MONITOR */

/* ==========================
 * Event table declarations
 * ==========================
 */

decl_simple_lock_data(,event_table_lock)

const struct event_table_entry event_table_init[] =
{

  /*-----------------------------------------------------------------------*
   * ETAP EVENT               TRACE STATUS     TEXT NAME           DYNAMIC *
   *-----------------------------------------------------------------------*/

#if ETAP_EVENT_MONITOR
	{ETAP_P_USER_EVENT0       , ETAP_TRACE_OFF , "p_user_event0"     , STATIC},
	{ETAP_P_USER_EVENT1       , ETAP_TRACE_OFF , "p_user_event1"     , STATIC},
	{ETAP_P_USER_EVENT2       , ETAP_TRACE_OFF , "p_user_event2"     , STATIC},
	{ETAP_P_USER_EVENT3       , ETAP_TRACE_OFF , "p_user_event3"     , STATIC},
	{ETAP_P_USER_EVENT4       , ETAP_TRACE_OFF , "p_user_event4"     , STATIC},
	{ETAP_P_USER_EVENT5       , ETAP_TRACE_OFF , "p_user_event5"     , STATIC},
	{ETAP_P_USER_EVENT6       , ETAP_TRACE_OFF , "p_user_event6"     , STATIC},
	{ETAP_P_USER_EVENT7       , ETAP_TRACE_OFF , "p_user_event7"     , STATIC},
	{ETAP_P_USER_EVENT8       , ETAP_TRACE_OFF , "p_user_event8"     , STATIC},
	{ETAP_P_USER_EVENT9       , ETAP_TRACE_OFF , "p_user_event9"     , STATIC},
	{ETAP_P_USER_EVENT10      , ETAP_TRACE_OFF , "p_user_event10"    , STATIC},
	{ETAP_P_USER_EVENT11      , ETAP_TRACE_OFF , "p_user_event11"    , STATIC},
	{ETAP_P_USER_EVENT12      , ETAP_TRACE_OFF , "p_user_event12"    , STATIC},
	{ETAP_P_USER_EVENT13      , ETAP_TRACE_OFF , "p_user_event13"    , STATIC},
	{ETAP_P_USER_EVENT14      , ETAP_TRACE_OFF , "p_user_event14"    , STATIC},
	{ETAP_P_USER_EVENT15      , ETAP_TRACE_OFF , "p_user_event15"    , STATIC},
	{ETAP_P_USER_EVENT16      , ETAP_TRACE_OFF , "p_user_event16"    , STATIC},
	{ETAP_P_USER_EVENT17      , ETAP_TRACE_OFF , "p_user_event17"    , STATIC},
	{ETAP_P_USER_EVENT18      , ETAP_TRACE_OFF , "p_user_event18"    , STATIC},
	{ETAP_P_USER_EVENT19      , ETAP_TRACE_OFF , "p_user_event19"    , STATIC},
	{ETAP_P_USER_EVENT20      , ETAP_TRACE_OFF , "p_user_event20"    , STATIC},
	{ETAP_P_USER_EVENT21      , ETAP_TRACE_OFF , "p_user_event21"    , STATIC},
	{ETAP_P_USER_EVENT22      , ETAP_TRACE_OFF , "p_user_event22"    , STATIC},
	{ETAP_P_USER_EVENT23      , ETAP_TRACE_OFF , "p_user_event23"    , STATIC},
	{ETAP_P_USER_EVENT24      , ETAP_TRACE_OFF , "p_user_event24"    , STATIC},
	{ETAP_P_USER_EVENT25      , ETAP_TRACE_OFF , "p_user_event25"    , STATIC},
	{ETAP_P_USER_EVENT26      , ETAP_TRACE_OFF , "p_user_event26"    , STATIC},
	{ETAP_P_USER_EVENT27      , ETAP_TRACE_OFF , "p_user_event27"    , STATIC},
	{ETAP_P_USER_EVENT28      , ETAP_TRACE_OFF , "p_user_event28"    , STATIC},
	{ETAP_P_USER_EVENT29      , ETAP_TRACE_OFF , "p_user_event29"    , STATIC},
	{ETAP_P_USER_EVENT30      , ETAP_TRACE_OFF , "p_user_event30"    , STATIC},
	{ETAP_P_USER_EVENT31      , ETAP_TRACE_OFF , "p_user_event31"    , STATIC},
	{ETAP_P_SYSCALL_MACH      , ETAP_TRACE_OFF , "p_syscall_mach"    , STATIC},
	{ETAP_P_SYSCALL_UNIX      , ETAP_TRACE_OFF , "p_syscall_unix"    , STATIC},
	{ETAP_P_THREAD_LIFE       , ETAP_TRACE_OFF , "p_thread_life"     , STATIC},
	{ETAP_P_THREAD_CTX        , ETAP_TRACE_OFF , "p_thread_ctx"      , STATIC},
	{ETAP_P_RPC               , ETAP_TRACE_OFF , "p_rpc"             , STATIC},
	{ETAP_P_INTERRUPT         , ETAP_TRACE_OFF , "p_interrupt"       , STATIC},
	{ETAP_P_ACT_ABORT         , ETAP_TRACE_OFF , "p_act_abort"       , STATIC},
	{ETAP_P_PRIORITY          , ETAP_TRACE_OFF , "p_priority"        , STATIC},
	{ETAP_P_EXCEPTION         , ETAP_TRACE_OFF , "p_exception"       , STATIC},
	{ETAP_P_DEPRESSION        , ETAP_TRACE_OFF , "p_depression"      , STATIC},
	{ETAP_P_MISC              , ETAP_TRACE_OFF , "p_misc"            , STATIC},
	{ETAP_P_DETAP             , ETAP_TRACE_OFF , "p_detap"           , STATIC},
#endif	/* ETAP_EVENT_MONITOR */

#if ETAP_LOCK_TRACE
	{ETAP_VM_BUCKET           , ETAP_TRACE_OFF , "vm_bucket"         , STATIC},/**/
	{ETAP_VM_HIMEM            , ETAP_TRACE_OFF , "vm_himem"          , STATIC},
	{ETAP_VM_MAP              , ETAP_TRACE_OFF , "vm_map"            , 1},
	{ETAP_VM_MAP_I            , ETAP_TRACE_OFF , "vm_map_i"          , 2},
	{ETAP_VM_MEMMAN           , ETAP_TRACE_OFF , "vm_memman"         , STATIC},/**/
	{ETAP_VM_MSYNC            , ETAP_TRACE_OFF , "vm_msync"          , 3},
	{ETAP_VM_OBJ              , ETAP_TRACE_OFF , "vm_obj"            , 4},
	{ETAP_VM_OBJ_CACHE        , ETAP_TRACE_OFF , "vm_obj_cache"      , 5},
	{ETAP_VM_PAGE_ALLOC       , ETAP_TRACE_OFF , "vm_page_alloc"     , STATIC},/**/
	{ETAP_VM_PAGEOUT          , ETAP_TRACE_OFF , "vm_pageout"        , STATIC},
	{ETAP_VM_PAGEQ            , ETAP_TRACE_OFF , "vm_pageq"          , STATIC},
	{ETAP_VM_PAGEQ_FREE       , ETAP_TRACE_OFF , "vm_pageq_free"     , STATIC},
	{ETAP_VM_PMAP             , ETAP_TRACE_OFF , "vm_pmap"           , 6},
	{ETAP_VM_PMAP_CACHE       , ETAP_TRACE_OFF , "vm_pmap_cache"     , STATIC},
	{ETAP_VM_PMAP_FREE        , ETAP_TRACE_OFF , "vm_pmap_free"      , STATIC},
	{ETAP_VM_PMAP_KERNEL      , ETAP_TRACE_OFF , "vm_pmap_kern"      , STATIC},
	{ETAP_VM_PMAP_SYS         , ETAP_TRACE_OFF , "vm_pmap_sys"       , 7},
	{ETAP_VM_PMAP_SYS_I       , ETAP_TRACE_OFF , "vm_pmap_sys_i"     , 8},
	{ETAP_VM_PMAP_UPDATE      , ETAP_TRACE_OFF , "vm_pmap_update"    , STATIC},
	{ETAP_VM_PREPPIN          , ETAP_TRACE_OFF , "vm_preppin"        , STATIC},
	{ETAP_VM_RESULT           , ETAP_TRACE_OFF , "vm_result"         , 9},
	{ETAP_VM_TEST             , ETAP_TRACE_OFF , "vm_tes"            , STATIC},/**/
	{ETAP_VM_PMAP_PHYSENTRIES , ETAP_TRACE_OFF , "vm_pmap_physentries", STATIC},
	{ETAP_VM_PMAP_SID         , ETAP_TRACE_OFF , "vm_pmap_sid"       , STATIC},
	{ETAP_VM_PMAP_PTE         , ETAP_TRACE_OFF , "vm_pmap_pte"       , STATIC},
	{ETAP_VM_PMAP_PTE_OVFLW   , ETAP_TRACE_OFF , "vm_pmap_pte_ovflw" , STATIC},
	{ETAP_VM_PMAP_TLB         , ETAP_TRACE_OFF , "vm_pmap_tlb"       , STATIC},

	{ETAP_IPC_IHGB            , ETAP_TRACE_OFF , "ipc_ihgb"          , 10},/**/
	{ETAP_IPC_IS              , ETAP_TRACE_OFF , "ipc_is"            , 11},/**/
	{ETAP_IPC_IS_REF          , ETAP_TRACE_OFF , "ipc_is_ref"        , 12},/**/
	{ETAP_IPC_MQUEUE          , ETAP_TRACE_OFF , "ipc_mqueue"        , STATIC},/**/
	{ETAP_IPC_OBJECT          , ETAP_TRACE_OFF , "ipc_object"        , STATIC},/**/
	{ETAP_IPC_PORT_MULT       , ETAP_TRACE_OFF , "ipc_port_mult"     , 13},/**/
	{ETAP_IPC_PORT_TIME       , ETAP_TRACE_OFF , "ipc_port_time"     , 14},/**/
	{ETAP_IPC_RPC             , ETAP_TRACE_OFF , "ipc_rpc"           , 15},/**/
	{ETAP_IPC_PORT_ALLOCQ     , ETAP_TRACE_OFF , "ipc_port_allocq"   , STATIC},/**/

	{ETAP_IO_AHA              , ETAP_TRACE_OFF , "io_aha"            , STATIC},
	{ETAP_IO_CHIP             , ETAP_TRACE_OFF , "io_chip"           , STATIC},
	{ETAP_IO_DEV              , ETAP_TRACE_OFF , "io_dev"            , 16},/**/
	{ETAP_IO_DEV_NUM          , ETAP_TRACE_OFF , "io_dev_num"        , STATIC},
	{ETAP_IO_DEV_PAGEH        , ETAP_TRACE_OFF , "io_dev_pageh"      , STATIC},/**/
	{ETAP_IO_DEV_PAGER        , ETAP_TRACE_OFF , "io_dev_pager"      , STATIC},/**/
	{ETAP_IO_DEV_PORT         , ETAP_TRACE_OFF , "io_dev_port"       , STATIC},/**/
	{ETAP_IO_DEV_REF          , ETAP_TRACE_OFF , "io_dev_new"        , 17},/**/
	{ETAP_IO_DEVINS           , ETAP_TRACE_OFF , "io_devins"         , STATIC},
	{ETAP_IO_DONE_LIST        , ETAP_TRACE_OFF , "io_done_list"      , STATIC},
	{ETAP_IO_DONE_Q           , ETAP_TRACE_OFF , "io_doneq"          , 18},
	{ETAP_IO_DONE_REF         , ETAP_TRACE_OFF , "io_done_ref"       , 19},
	{ETAP_IO_EAHA             , ETAP_TRACE_OFF , "io_eaha"           , STATIC},
	{ETAP_IO_HD_PROBE         , ETAP_TRACE_OFF , "io_hd_probe"       , STATIC},
	{ETAP_IO_IOPB             , ETAP_TRACE_OFF , "io_iopb"           , STATIC},
	{ETAP_IO_KDQ              , ETAP_TRACE_OFF , "io_kdq"            , STATIC},
	{ETAP_IO_KDTTY            , ETAP_TRACE_OFF , "io_kdtty"          , STATIC},
	{ETAP_IO_REQ              , ETAP_TRACE_OFF , "io_req"            , 20},
	{ETAP_IO_TARGET           , ETAP_TRACE_OFF , "io_target"         , STATIC},
	{ETAP_IO_TTY              , ETAP_TRACE_OFF , "io_tty"            , STATIC},
	{ETAP_IO_IOP_LOCK         , ETAP_TRACE_OFF , "io_iop"            , STATIC},/**/
	{ETAP_IO_DEV_NAME         , ETAP_TRACE_OFF , "io_dev_name"       , STATIC},/**/
	{ETAP_IO_CDLI             , ETAP_TRACE_OFF , "io_cdli"           , STATIC},/**/
	{ETAP_IO_HIPPI_FILTER     , ETAP_TRACE_OFF , "io_hippi_filter"   , STATIC},/**/
	{ETAP_IO_HIPPI_SRC        , ETAP_TRACE_OFF , "io_hippi_src"      , STATIC},/**/
	{ETAP_IO_HIPPI_DST        , ETAP_TRACE_OFF , "io_hippi_dst"      , STATIC},/**/
	{ETAP_IO_HIPPI_PKT        , ETAP_TRACE_OFF , "io_hippi_pkt"      , STATIC},/**/
	{ETAP_IO_NOTIFY           , ETAP_TRACE_OFF , "io_notify"         , STATIC},/**/
	{ETAP_IO_DATADEV          , ETAP_TRACE_OFF , "io_data_device"    , STATIC},/**/
	{ETAP_IO_OPEN             , ETAP_TRACE_OFF , "io_open"           , STATIC},
	{ETAP_IO_OPEN_I           , ETAP_TRACE_OFF , "io_open_i"         , STATIC},

	{ETAP_THREAD_ACT          , ETAP_TRACE_OFF , "th_act"            , 21},
	{ETAP_THREAD_ACTION       , ETAP_TRACE_OFF , "th_action"         , STATIC},
	{ETAP_THREAD_LOCK         , ETAP_TRACE_OFF , "th_lock"           , 22},
	{ETAP_THREAD_LOCK_SET     , ETAP_TRACE_OFF , "th_lock_set"       , 23},
	{ETAP_THREAD_NEW          , ETAP_TRACE_OFF , "th_new"            , 24},
	{ETAP_THREAD_PSET         , ETAP_TRACE_OFF , "th_pset"           , STATIC},/**/
	{ETAP_THREAD_PSET_ALL     , ETAP_TRACE_OFF , "th_pset_all"       , STATIC},
	{ETAP_THREAD_PSET_RUNQ    , ETAP_TRACE_OFF , "th_pset_runq"      , STATIC},
	{ETAP_THREAD_PSET_IDLE    , ETAP_TRACE_OFF , "th_pset_idle"      , STATIC},
	{ETAP_THREAD_PSET_QUANT   , ETAP_TRACE_OFF , "th_pset_quant"     , STATIC},
	{ETAP_THREAD_PROC         , ETAP_TRACE_OFF , "th_proc"           , STATIC},
	{ETAP_THREAD_PROC_RUNQ    , ETAP_TRACE_OFF , "th_proc_runq"      , STATIC},
	{ETAP_THREAD_REAPER       , ETAP_TRACE_OFF , "th_reaper"         , STATIC},
	{ETAP_THREAD_RPC          , ETAP_TRACE_OFF , "th_rpc"            , 25},
	{ETAP_THREAD_SEMA         , ETAP_TRACE_OFF , "th_sema"           , 26},
	{ETAP_THREAD_STACK        , ETAP_TRACE_OFF , "th_stack"          , STATIC},
	{ETAP_THREAD_STACK_USAGE  , ETAP_TRACE_OFF , "th_stack_usage"    , STATIC},
	{ETAP_THREAD_TASK_NEW     , ETAP_TRACE_OFF , "th_task_new"       , 27},
	{ETAP_THREAD_TASK_ITK     , ETAP_TRACE_OFF , "th_task_itk"       , 28},
	{ETAP_THREAD_ULOCK        , ETAP_TRACE_OFF , "th_ulock"          , 29},
	{ETAP_THREAD_WAIT         , ETAP_TRACE_OFF , "th_wait"           , STATIC},
	{ETAP_THREAD_WAKE         , ETAP_TRACE_OFF , "th_wake"           , 30},
	{ETAP_THREAD_ACT_LIST     , ETAP_TRACE_OFF , "th_act_list"       , 31},
	{ETAP_THREAD_TASK_SWAP    , ETAP_TRACE_OFF , "th_task_swap"      , 32},
	{ETAP_THREAD_TASK_SWAPOUT , ETAP_TRACE_OFF , "th_task_swapout"   , 33},
	{ETAP_THREAD_SWAPPER      , ETAP_TRACE_OFF , "th_swapper"        , STATIC},

	{ETAP_NET_IFQ             , ETAP_TRACE_OFF , "net_ifq"           , STATIC},
	{ETAP_NET_KMSG            , ETAP_TRACE_OFF , "net_kmsg"          , STATIC},
	{ETAP_NET_MBUF            , ETAP_TRACE_OFF , "net_mbuf"          , STATIC},/**/
	{ETAP_NET_POOL            , ETAP_TRACE_OFF , "net_pool"          , STATIC},
	{ETAP_NET_Q               , ETAP_TRACE_OFF , "net_q"             , STATIC},
	{ETAP_NET_QFREE           , ETAP_TRACE_OFF , "net_qfree"         , STATIC},
	{ETAP_NET_RCV             , ETAP_TRACE_OFF , "net_rcv"           , STATIC},
	{ETAP_NET_RCV_PLIST       , ETAP_TRACE_OFF , "net_rcv_plist"     , STATIC},/**/
	{ETAP_NET_THREAD          , ETAP_TRACE_OFF , "net_thread"        , STATIC},

	{ETAP_NORMA_XMM           , ETAP_TRACE_OFF , "norma_xmm"         , STATIC},
	{ETAP_NORMA_XMMOBJ        , ETAP_TRACE_OFF , "norma_xmmobj"      , STATIC},
	{ETAP_NORMA_XMMCACHE      , ETAP_TRACE_OFF , "norma_xmmcache"    , STATIC},
	{ETAP_NORMA_MP            , ETAP_TRACE_OFF , "norma_mp"          , STATIC},
	{ETAP_NORMA_VOR           , ETAP_TRACE_OFF , "norma_vor"         , STATIC},/**/
	{ETAP_NORMA_TASK          , ETAP_TRACE_OFF , "norma_task"        , 38},/**/

	{ETAP_DIPC_CLEANUP        , ETAP_TRACE_OFF , "dipc_cleanup"      , STATIC},/**/
	{ETAP_DIPC_MSG_PROG       , ETAP_TRACE_OFF , "dipc_msgp_prog"    , STATIC},/**/
	{ETAP_DIPC_PREP_QUEUE     , ETAP_TRACE_OFF , "dipc_prep_queue"   , STATIC},/**/
	{ETAP_DIPC_PREP_FILL      , ETAP_TRACE_OFF , "dipc_prep_fill"    , STATIC},/**/
	{ETAP_DIPC_MIGRATE        , ETAP_TRACE_OFF , "dipc_migrate"      , STATIC},/**/
	{ETAP_DIPC_DELIVER        , ETAP_TRACE_OFF , "dipc_deliver"      , STATIC},/**/
	{ETAP_DIPC_RECV_SYNC      , ETAP_TRACE_OFF , "dipc_recv_sync"    , STATIC},/**/
	{ETAP_DIPC_RPC            , ETAP_TRACE_OFF , "dipc_rpc"          , STATIC},/**/
	{ETAP_DIPC_MSG_REQ        , ETAP_TRACE_OFF , "dipc_msg_req"      , STATIC},/**/
	{ETAP_DIPC_MSG_ORDER      , ETAP_TRACE_OFF , "dipc_msg_order"    , STATIC},/**/
	{ETAP_DIPC_MSG_PREPQ      , ETAP_TRACE_OFF , "dipc_msg_prepq"    , STATIC},/**/
	{ETAP_DIPC_MSG_FREE       , ETAP_TRACE_OFF , "dipc_msg_free"     , STATIC},/**/
	{ETAP_DIPC_KMSG_AST       , ETAP_TRACE_OFF , "dipc_kmsg_ast"     , STATIC},/**/
	{ETAP_DIPC_TEST_LOCK      , ETAP_TRACE_OFF , "dipc_test_lock"    , STATIC},/**/
	{ETAP_DIPC_SPINLOCK       , ETAP_TRACE_OFF , "dipc_spinlock"     , STATIC},/**/
	{ETAP_DIPC_TRACE          , ETAP_TRACE_OFF , "dipc_trace"        , STATIC},/**/
	{ETAP_DIPC_REQ_CALLBACK   , ETAP_TRACE_OFF , "dipc_req_clbck"    , STATIC},/**/
	{ETAP_DIPC_PORT_NAME      , ETAP_TRACE_OFF , "dipc_port_name"    , STATIC},/**/
	{ETAP_DIPC_RESTART_PORT   , ETAP_TRACE_OFF , "dipc_restart_port" , STATIC},/**/
	{ETAP_DIPC_ZERO_PAGE      , ETAP_TRACE_OFF , "dipc_zero_page"    , STATIC},/**/
	{ETAP_DIPC_BLOCKED_NODE   , ETAP_TRACE_OFF , "dipc_blocked_node" , STATIC},/**/
	{ETAP_DIPC_TIMER          , ETAP_TRACE_OFF , "dipc_timer"        , STATIC},/**/
	{ETAP_DIPC_SPECIAL_PORT   , ETAP_TRACE_OFF , "dipc_special_port" , STATIC},/**/

	{ETAP_KKT_TEST_WORK       , ETAP_TRACE_OFF , "kkt_test_work"     , STATIC},/**/
	{ETAP_KKT_TEST_MP         , ETAP_TRACE_OFF , "kkt_work_mp"       , STATIC},/**/
	{ETAP_KKT_NODE            , ETAP_TRACE_OFF , "kkt_node"          , STATIC},/**/
	{ETAP_KKT_CHANNEL_LIST    , ETAP_TRACE_OFF , "kkt_channel_list"  , STATIC},/**/
	{ETAP_KKT_CHANNEL         , ETAP_TRACE_OFF , "kkt_channel"       , STATIC},/**/
	{ETAP_KKT_HANDLE          , ETAP_TRACE_OFF , "kkt_handle"        , STATIC},/**/
	{ETAP_KKT_MAP             , ETAP_TRACE_OFF , "kkt_map"           , STATIC},/**/
	{ETAP_KKT_RESOURCE        , ETAP_TRACE_OFF , "kkt_resource"      , STATIC},/**/

	{ETAP_XKERNEL_MASTER      , ETAP_TRACE_OFF , "xkernel_master"    , STATIC},/**/
	{ETAP_XKERNEL_EVENT       , ETAP_TRACE_OFF , "xkernel_event"     , STATIC},/**/
	{ETAP_XKERNEL_ETHINPUT    , ETAP_TRACE_OFF , "xkernel_input"     , STATIC},/**/

	{ETAP_MISC_AST            , ETAP_TRACE_OFF , "m_ast"             , STATIC},
	{ETAP_MISC_CLOCK          , ETAP_TRACE_OFF , "m_clock"           , STATIC},
	{ETAP_MISC_EMULATE        , ETAP_TRACE_OFF , "m_emulate"         , 34},
	{ETAP_MISC_EVENT          , ETAP_TRACE_OFF , "m_event"           , STATIC},
	{ETAP_MISC_KDB            , ETAP_TRACE_OFF , "m_kdb"             , STATIC},
	{ETAP_MISC_PCB            , ETAP_TRACE_OFF , "m_pcb"             , 35},
	{ETAP_MISC_PRINTF         , ETAP_TRACE_OFF , "m_printf"          , STATIC},
	{ETAP_MISC_Q              , ETAP_TRACE_OFF , "m_q"               , STATIC},
	{ETAP_MISC_RPC_SUBSYS     , ETAP_TRACE_OFF , "m_rpc_sub"         , 36},
	{ETAP_MISC_RT_CLOCK       , ETAP_TRACE_OFF , "m_rt_clock"        , STATIC},
	{ETAP_MISC_SD_POOL        , ETAP_TRACE_OFF , "m_sd_pool"         , STATIC},
	{ETAP_MISC_TIMER          , ETAP_TRACE_OFF , "m_timer"           , STATIC},
	{ETAP_MISC_UTIME          , ETAP_TRACE_OFF , "m_utime"           , STATIC},
	{ETAP_MISC_XPR            , ETAP_TRACE_OFF , "m_xpr"             , STATIC},
	{ETAP_MISC_ZONE           , ETAP_TRACE_OFF , "m_zone"            , 37},
	{ETAP_MISC_ZONE_ALL       , ETAP_TRACE_OFF , "m_zone_all"        , STATIC},
	{ETAP_MISC_ZONE_GET       , ETAP_TRACE_OFF , "m_zone_get"        , STATIC},
	{ETAP_MISC_ZONE_PTABLE    , ETAP_TRACE_OFF , "m_zone_ptable"     , STATIC},/**/
	{ETAP_MISC_LEDGER         , ETAP_TRACE_OFF , "m_ledger"          , STATIC},/**/
	{ETAP_MISC_SCSIT_TGT      , ETAP_TRACE_OFF , "m_scsit_tgt_lock"  , STATIC},/**/
	{ETAP_MISC_SCSIT_SELF     , ETAP_TRACE_OFF , "m_scsit_self_lock" , STATIC},/**/
	{ETAP_MISC_SPL            , ETAP_TRACE_OFF , "m_spl_lock"        , STATIC},/**/
	{ETAP_MISC_MASTER         , ETAP_TRACE_OFF , "m_master"          , STATIC},/**/
	{ETAP_MISC_FLOAT          , ETAP_TRACE_OFF , "m_float"           , STATIC},/**/
	{ETAP_MISC_GROUP          , ETAP_TRACE_OFF , "m_group"           , STATIC},/**/
	{ETAP_MISC_FLIPC          , ETAP_TRACE_OFF , "m_flipc"           , STATIC},/**/
	{ETAP_MISC_MP_IO          , ETAP_TRACE_OFF , "m_mp_io"           , STATIC},/**/
	{ETAP_MISC_KERNEL_TEST    , ETAP_TRACE_OFF , "m_kernel_test"     , STATIC},/**/

	{ETAP_NO_TRACE            , ETAP_TRACE_OFF , "NEVER_TRACE"       , STATIC},
#endif	/* ETAP_LOCK_TRACE */
};

/*
 * Variable initially pointing to the event table, then to its mappable
 * copy.  The cast is needed to discard the `const' qualifier; without it
 * gcc issues a warning.
 */
event_table_t event_table = (event_table_t) event_table_init;

/*
 * Linked list of pointers into event_table_init[] so they can be switched
 * into the mappable copy when it is made.
 */
struct event_table_chain *event_table_chain;

/*
 * max number of event types in the event table
 */

int event_table_max = sizeof(event_table_init)/sizeof(struct event_table_entry);

const struct subs_table_entry subs_table_init[] =
{
  /*------------------------------------------*
   * ETAP SUBSYSTEM          TEXT NAME        *
   *------------------------------------------*/

#if ETAP_EVENT_MONITOR
	{ETAP_SUBS_PROBE          , "event_probes"  },
#endif	/* ETAP_EVENT_MONITOR */

#if ETAP_LOCK_TRACE
	{ETAP_SUBS_LOCK_DIPC      , "lock_dipc"     },
	{ETAP_SUBS_LOCK_IO        , "lock_io"       },
	{ETAP_SUBS_LOCK_IPC       , "lock_ipc"      },
	{ETAP_SUBS_LOCK_KKT       , "lock_kkt"      },
	{ETAP_SUBS_LOCK_MISC      , "lock_misc"     },
	{ETAP_SUBS_LOCK_NET       , "lock_net"      },
	{ETAP_SUBS_LOCK_NORMA     , "lock_norma"    },
	{ETAP_SUBS_LOCK_THREAD    , "lock_thread"   },
	{ETAP_SUBS_LOCK_VM        , "lock_vm"       },
	{ETAP_SUBS_LOCK_XKERNEL   , "lock_xkernel"  },
#endif	/* ETAP_LOCK_TRACE */
};

/*
 * Variable initially pointing to the subsystem table, then to its mappable
 * copy.
 */
subs_table_t subs_table = (subs_table_t) subs_table_init;

/*
 * max number of subsystem types in the subsystem table
 */

int subs_table_max = sizeof(subs_table_init)/sizeof(struct subs_table_entry);

#if ETAP_MONITOR
#define MAX_NAME_SIZE 35

#define SYS_TABLE_MACH_TRAP	0
#define SYS_TABLE_MACH_MESSAGE	1
#define SYS_TABLE_UNIX_SYSCALL	2
#define SYS_TABLE_INTERRUPT	3
#define SYS_TABLE_EXCEPTION	4


extern char *system_table_lookup (unsigned int table,
				  unsigned int number);


char *mach_trap_names[] = {
/* 0 */		"undefined",
/* 1 */		NULL,
/* 2 */		NULL,
/* 3 */		NULL,
/* 4 */		NULL,
/* 5 */		NULL,
/* 6 */		NULL,
/* 7 */		NULL,
/* 8 */		NULL,
/* 9 */		NULL,
/* 10 */	NULL,
/* 11 */	NULL,
/* 12 */	NULL,
/* 13 */	NULL,
/* 14 */	NULL,
/* 15 */	NULL,
/* 16 */	NULL,
/* 17 */	NULL,
/* 18 */	NULL,
/* 19 */	NULL,
/* 20 */	NULL,
/* 21 */	NULL,
/* 22 */	NULL,
/* 23 */	NULL,
/* 24 */	NULL,
/* 25 */	NULL,
/* 26 */	"mach_reply_port",
/* 27 */	"mach_thread_self",
/* 28 */	"mach_task_self",
/* 29 */	"mach_host_self",
/* 30 */	"vm_read_overwrite",
/* 31 */	"vm_write",
/* 32 */	"mach_msg_overwrite_trap",
/* 33 */	NULL,
/* 34 */	NULL,
#ifdef i386
/* 35 */	"mach_rpc_trap",
/* 36 */	"mach_rpc_return_trap",
#else
/* 35 */	NULL,
/* 36 */	NULL,
#endif	/* i386 */
/* 37 */	NULL,
/* 38 */	NULL,
/* 39 */	NULL,
/* 40 */	NULL,
/* 41 */	"init_process",
/* 42 */	NULL,
/* 43 */	"map_fd",
/* 44 */	NULL,
/* 45 */	NULL,
/* 46 */	NULL,
/* 47 */	NULL,
/* 48 */	NULL,
/* 49 */	NULL,
/* 50 */	NULL,
/* 51 */	NULL,
/* 52 */	NULL,
/* 53 */	NULL,
/* 54 */	NULL,
/* 55 */	NULL,
/* 56 */	NULL,
/* 57 */	NULL,
/* 58 */	NULL,
/* 59 */	"swtch_pri",
/* 60 */	"swtch",
/* 61 */	"thread_switch",
/* 62 */	"clock_sleep_trap",
/* 63 */	NULL,
/* 64 */	NULL,
/* 65 */	NULL,
/* 66 */	NULL,
/* 67 */	NULL,
/* 68 */	NULL,
/* 69 */	NULL,
/* 70 */	NULL,
/* 71 */	NULL,
/* 72 */	NULL,
/* 73 */	NULL,
/* 74 */	NULL,
/* 75 */	NULL,
/* 76 */	NULL,
/* 77 */	NULL,
/* 78 */	NULL,
/* 79 */	NULL,
/* 80 */	NULL,
/* 81 */	NULL,
/* 82 */	NULL,
/* 83 */	NULL,
/* 84 */	NULL,
/* 85 */	NULL,
/* 86 */	NULL,
/* 87 */	NULL,
/* 88 */	NULL,
/* 89 */	NULL,
/* 90 */	NULL,
/* 91 */	NULL,
/* 92 */	NULL,
/* 93 */	NULL,
/* 94 */	NULL,
/* 95 */	NULL,
/* 96 */	NULL,
/* 97 */	NULL,
/* 98 */	NULL,
/* 99 */	NULL,
/* 100 */	NULL,
/* 101 */	NULL,
/* 102 */	NULL,
/* 103 */	NULL,
/* 104 */	NULL,
/* 105 */	NULL,
/* 106 */	NULL,
/* 107 */	NULL,
/* 108 */	NULL,
/* 109 */	NULL,
};
#define N_MACH_TRAP_NAMES (sizeof mach_trap_names / sizeof mach_trap_names[0])
#define mach_trap_name(nu) \
	(((nu) < N_MACH_TRAP_NAMES) ? mach_trap_names[nu] : NULL)
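
/*
 * Note: mach_trap_name() is bounds-checked, so out-of-range or
 * unregistered trap numbers simply yield NULL and callers must be
 * prepared to fall back to printing the raw trap number.
 */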

struct table_entry {
	char	name[MAX_NAME_SIZE];
	u_int	number;
};

/*
 * Mach message table
 *
 * Note: Most mach system calls are actually implemented as messages.
 */
struct table_entry	mach_message_table[] = {
	subsystem_to_name_map_bootstrap,
	subsystem_to_name_map_clock,
	subsystem_to_name_map_clock_reply,
	subsystem_to_name_map_default_pager_object,
	subsystem_to_name_map_device,
	subsystem_to_name_map_device_reply,
	subsystem_to_name_map_device_request,
	subsystem_to_name_map_exc,
/*	subsystem_to_name_map_mach,*/
	subsystem_to_name_map_mach_debug,
/*	subsystem_to_name_map_mach_host,*/
	subsystem_to_name_map_mach_norma,
	subsystem_to_name_map_mach_port,
	subsystem_to_name_map_memory_object,
	subsystem_to_name_map_memory_object_default,
	subsystem_to_name_map_notify,
	subsystem_to_name_map_prof,
	subsystem_to_name_map_sync
};
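
/*
 * Note: each subsystem_to_name_map_* identifier above is a macro from a
 * MIG-generated header that expands to a comma-separated list of
 * {name, message-id} initializers, which is how a flat array of
 * table_entry structures gets built from many subsystems.
 */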

int mach_message_table_entries = sizeof(mach_message_table) /
				 sizeof(struct table_entry);


#endif

/*
 * ================================
 * Initialization routines for ETAP
 * ================================
 */

/*
 * ROUTINE:	etap_init_phase1	[internal]
 *
 * FUNCTION:	Event trace instrumentation initialization phase
 *		one of two.  The static phase.  The cumulative buffer
 *		is initialized.
 *
 * NOTES:	The cumulative buffer is statically allocated and
 *		must be initialized before the first simple_lock_init()
 *		or lock_init() call is made.
 *
 *		The first lock init call is made before dynamic allocation
 *		is available.  Hence, phase one is executed before dynamic
 *		memory allocation is available.
 *
 */

void
etap_init_phase1(void)
{
#if ETAP_LOCK_ACCUMULATE || MACH_ASSERT
	int		x;
#if MACH_ASSERT
	boolean_t	out_of_order;
#endif	/* MACH_ASSERT */
#endif	/* ETAP_LOCK_ACCUMULATE || MACH_ASSERT */

#if ETAP_LOCK_ACCUMULATE
	/*
	 * Initialize Cumulative Buffer
	 *
	 * Note: The cumulative buffer is statically allocated.
	 *	 This static allocation is necessary since most
	 *	 of the lock_init calls are made before dynamic
	 *	 allocation routines are available.
	 */

	/*
	 * Align cumulative buffer pointer to a page boundary
	 * (so it can be mapped).
	 */

	bzero(&cbuff_allocated[0], CBUFF_ALLOCATED_SIZE);
	cbuff = (cumulative_buffer_t) round_page(&cbuff_allocated);

	simple_lock_init(&cbuff_lock, ETAP_NO_TRACE);

	/*
	 * Set the starting point for cumulative buffer entry
	 * reservations.
	 *
	 * This value must leave enough head room in the
	 * cumulative buffer to contain all dynamic events.
	 */

	for (x=0; x < event_table_max; x++)
		if (event_table[x].dynamic > cbuff->static_start)
			cbuff->static_start = event_table[x].dynamic;
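
	/*
	 * Note: after this scan, static_start holds the largest dynamic
	 * index in the event table, so cumulative-buffer entries below
	 * it stay reserved for dynamic locks and runtime reservations
	 * begin immediately after them.
	 */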

	cbuff->next = cbuff->static_start;
#endif	/* ETAP_LOCK_ACCUMULATE */

	/*
	 * Initialize the event table lock
	 */

	simple_lock_init(&event_table_lock, ETAP_NO_TRACE);

#if MACH_ASSERT
	/*
	 * Check that events are in numerical order so we can do a binary
	 * search on them.  Even better would be to make event numbers be
	 * simple contiguous indexes into event_table[], but that would
	 * break the coding of subsystems in the event number.
	 */
	out_of_order = FALSE;
	for (x = 1; x < event_table_max; x++) {
		if (event_table[x - 1].event > event_table[x].event) {
			printf("events out of order: %s > %s\n",
			       event_table[x - 1].name, event_table[x].name);
			out_of_order = TRUE;
		}
	}
	if (out_of_order)
		panic("etap_init_phase1");
#endif	/* MACH_ASSERT */
}


/*
 * ROUTINE:	etap_init_phase2	[internal]
 *
 * FUNCTION:	Event trace instrumentation initialization phase
 *		two of two.  The dynamic phase.  The monitored buffers
 *		are dynamically allocated and initialized.  Cumulative
 *		dynamic entry locks are allocated and initialized.  The
 *		start_data_pool is initialized.
 *
 * NOTES:	Phase two is executed once dynamic memory allocation
 *		is available.
 *
 */

void
etap_init_phase2(void)
{
	int		size;
	int		x;
	int		ret;
	vm_offset_t	table_copy;
	struct event_table_chain *chainp;

	/*
	 * Make mappable copies of the event_table and the subs_table.
	 * These tables were originally mapped as they appear in the
	 * kernel image, but that meant that other kernel variables could
	 * end up being mapped with them, which is ugly.  It also didn't
	 * work on the HP/PA, where pages with physical address == virtual
	 * do not have real pmap entries allocated and therefore can't be
	 * mapped elsewhere.
	 */
	size = sizeof event_table_init + sizeof subs_table_init;
	ret = kmem_alloc(kernel_map, &table_copy, size);
	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating table copies");
	event_table = (event_table_t) table_copy;
	subs_table = (subs_table_t) (table_copy + sizeof event_table_init);
	bcopy((char *) event_table_init, (char *) event_table,
	      sizeof event_table_init);
	bcopy((char *) subs_table_init, (char *) subs_table,
	      sizeof subs_table_init);

	/* Switch pointers from the old event_table to the new. */
	for (chainp = event_table_chain; chainp != NULL;
	     chainp = chainp->event_table_link) {
		x = chainp->event_tablep - event_table_init;
		assert(x < event_table_max);
		chainp->event_tablep = event_table + x;
	}

#if ETAP_LOCK_ACCUMULATE

	/*
	 * Because several dynamic locks can point to a single
	 * cumulative buffer entry, dynamic lock writes to the
	 * entry are synchronized.
	 *
	 * The spin locks are allocated here.
	 *
	 */
#if MACH_LDEBUG
	size = sizeof(simple_lock_t) * cbuff->static_start;
#else
	/*
	 * Note: These locks are different from traditional spin locks.
	 *	 They are of type int instead of type simple_lock_t.
	 *	 We can reduce lock size this way, since no tracing will
	 *	 EVER be performed on these locks.
	 */
	size = sizeof(simple_lock_data_t) * cbuff->static_start;
#endif

	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cbuff_locks, size);

	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating cumulative write locks");

#if MACH_LDEBUG
	for(x = 0; x < cbuff->static_start; ++x) {
		simple_lock_init(&cbuff_locks[x], ETAP_NO_TRACE);
	}
#else
	bzero((char *) cbuff_locks, size);
#endif

#endif	/* ETAP_LOCK_ACCUMULATE */


#if ETAP_MONITOR

	/*
	 * monitor buffer allocation
	 */

	size = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
	       sizeof(struct monitor_buffer);

	for (x=0; x < NCPUS; x++) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *) &mbuff[x],
				 size);

		if (ret != KERN_SUCCESS)
			panic ("ETAP: error allocating monitor buffer\n");

		/* zero fill buffer */
		bzero((char *) mbuff[x], size);
	}

#endif	/* ETAP_MONITOR */


#if ETAP_LOCK_TRACE

	/*
	 * Initialize the start_data_pool
	 */

	init_start_data_pool();

#endif	/* ETAP_LOCK_TRACE */
}


#if ETAP_LOCK_ACCUMULATE

/*
 * ROUTINE:	etap_cbuff_reserve	[internal]
 *
 * FUNCTION:	The cumulative buffer operation which returns a pointer
 *		to a free entry in the cumulative buffer.
 *
 * NOTES:	Disables interrupts.
 *
 */

cbuff_entry_t
etap_cbuff_reserve(event_table_t etp)
{
	cbuff_entry_t	avail;
	unsigned short	de;
	spl_t		s;

	/* see if type pointer is initialized */
	if (etp == EVENT_TABLE_NULL || etp->event == ETAP_NO_TRACE)
		return (CBUFF_ENTRY_NULL);

	/* check for DYNAMIC lock */
	if ((de = etp->dynamic)) {
		if (de <= cbuff->static_start)
			return (&cbuff->entry[de-1]);
		else {
			printf("ETAP: dynamic lock index error [%u]\n", de);
			return (CBUFF_ENTRY_NULL);
		}
	}

	cumulative_buffer_lock(s);

	/* if buffer is full, reservation requests fail */
	if (cbuff->next >= ETAP_CBUFF_ENTRIES) {
		cumulative_buffer_unlock(s);
		return (CBUFF_ENTRY_NULL);
	}

	avail = &cbuff->entry[cbuff->next++];

	cumulative_buffer_unlock(s);

	return (avail);
}

#endif	/* ETAP_LOCK_ACCUMULATE */

/*
 * ROUTINE:	etap_event_table_find	[internal]
 *
 * FUNCTION:	Returns a pointer to the assigned event type table entry,
 *		using the event type as the index key.
 *
 */

event_table_t
etap_event_table_find(etap_event_t event)
{
	int	last_before, first_after, try;

	/* Binary search for the event number.  last_before is the lowest
	   index that could still hold the event; first_after is the lowest
	   index known to hold a greater event number. */
	last_before = 0;
	first_after = event_table_max;
	while (last_before < first_after) {
		try = (last_before + first_after) >> 1;
		if (event_table[try].event == event)
			return (&event_table[try]);
		else if (event_table[try].event < event)
			last_before = try + 1;
		else
			first_after = try;
	}
	return EVENT_TABLE_NULL;
}

void
etap_event_table_assign(struct event_table_chain *chainp, etap_event_t event)
{
	event_table_t	event_tablep;

	event_tablep = etap_event_table_find(event);
	if (event_tablep == EVENT_TABLE_NULL)
		printf("\nETAP: event not found in event table: %x\n", event);
	else {
		if (event_table == event_table_init) {
			chainp->event_table_link = event_table_chain;
			event_table_chain = chainp;
		}
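		/*
		 * Note: the chain link above is only needed while the
		 * static initializer table is current; once
		 * etap_init_phase2() installs the mappable copy, new
		 * assignments point at it directly and need no fixup.
		 */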
		chainp->event_tablep = event_tablep;
	}
}

#endif	/* ETAP */

/*
 *
 * MESSAGE:	etap_get_info		[exported]
 *
 * FUNCTION:	provides the server with ETAP buffer configurations.
 *
 */

kern_return_t
etap_get_info(
	host_priv_t	host_priv,
	int		*et_entries,
	int		*st_entries,
	vm_offset_t	*et_offset,
	vm_offset_t	*st_offset,
	int		*cb_width,
	int		*mb_size,
	int		*mb_entries,
	int		*mb_cpus)
{

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

#if ETAP
	*et_entries = event_table_max;
	*st_entries = subs_table_max;
	*et_offset  = (vm_offset_t) ((char*) event_table -
				     trunc_page((char*) event_table));
	*st_offset  = (vm_offset_t) ((char*) subs_table -
				     trunc_page((char*) subs_table));
#else	/* ETAP */
	*et_entries = 0;
	*st_entries = 0;
	*et_offset  = 0;
	*st_offset  = 0;
#endif	/* ETAP */

#if ETAP_LOCK_ACCUMULATE
	*cb_width = cbuff_width;
#else	/* ETAP_LOCK_ACCUMULATE */
	*cb_width = 0;
#endif	/* ETAP_LOCK_ACCUMULATE */

#if ETAP_MONITOR
	*mb_size    = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
		      sizeof(struct monitor_buffer);
	*mb_entries = mbuff_entries;
	*mb_cpus    = NCPUS;
#else	/* ETAP_MONITOR */
	*mb_size    = 0;
	*mb_entries = 0;
	*mb_cpus    = 0;
#endif	/* ETAP_MONITOR */

	return (KERN_SUCCESS);
}

/*
 * ROUTINE:	etap_trace_event	[exported]
 *
 * FUNCTION:	The etap_trace_event system call is the user's interface to
 *		the ETAP kernel instrumentation.
 *
 *		This call allows the user to enable and disable tracing modes
 *		on specific event types.  The call also supports a reset option,
 *		where the cumulative buffer data and all event type tracing
 *		is reset to zero.  When the reset option is used, a new
 *		interval width can also be defined using the op parameter.
 *
 */

kern_return_t
etap_trace_event (
	unsigned short	mode,
	unsigned short	type,
	boolean_t	enable,
	unsigned int	nargs,
	unsigned short	args[])
{
#if ETAP
	event_table_t	event_tablep;
	kern_return_t	ret;
	int		i, args_size;
	unsigned short	status_mask;
	unsigned short	*tmp_args;

	/*
	 * Initialize operation
	 */

	if (mode == ETAP_RESET) {
		etap_trace_reset(nargs);
		return (KERN_SUCCESS);
	}

	status_mask = mode & type;

	/*
	 * Copy args array from user space to kernel space
	 */

	args_size = nargs * sizeof *args;
	tmp_args  = (unsigned short *) kalloc(args_size);

	if (tmp_args == NULL)
		return (KERN_NO_SPACE);

	if (copyin((const char *) args, (char *) tmp_args, args_size)) {
		kfree((vm_offset_t) tmp_args, args_size);
		return (KERN_INVALID_ADDRESS);
	}

	/*
	 * Change appropriate status fields in the event table
	 */

	event_table_lock();

	for (i = 0; i < nargs; i++) {
		if (tmp_args[i] != ETAP_NO_TRACE) {
			event_tablep = etap_event_table_find(tmp_args[i]);
			if (event_tablep == EVENT_TABLE_NULL)
				break;
			if (enable)
				event_tablep->status |= status_mask;
			else
				event_tablep->status &= ~status_mask;
		}
	}

	ret = (i < nargs) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	event_table_unlock();

	kfree((vm_offset_t) tmp_args, args_size);

	return (ret);

#else	/* ETAP */

	return (KERN_FAILURE);

#endif	/* ETAP */
}


#if ETAP

/*
 * ROUTINE:	etap_trace_reset	[internal]
 *
 * FUNCTION:	Turns off all tracing and erases all the data accumulated
 *		in the cumulative buffer.  If the user defined a new
 *		cumulative buffer interval width, it will be assigned here.
 *
 */
void
etap_trace_reset(int new_interval)
{
	event_table_t	scan;
	int		x;
	register spl_t	s;

	/*
	 * Wipe out trace fields in event table
	 */

	scan = event_table;

	event_table_lock();

	for (x=0; x < event_table_max; x++) {
		scan->status = ETAP_TRACE_OFF;
		scan++;
	}

	event_table_unlock();

#if ETAP_LOCK_ACCUMULATE

	/*
	 * Wipe out cumulative buffer statistical fields for all entries
	 */

	cumulative_buffer_lock(s);

	for (x=0; x < ETAP_CBUFF_ENTRIES; x++) {
		bzero ((char *) &cbuff->entry[x].hold,
		       sizeof(struct cbuff_data));
		bzero ((char *) &cbuff->entry[x].wait,
		       sizeof(struct cbuff_data));
		bzero ((char *) &cbuff->entry[x].hold_interval[0],
		       sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
		bzero ((char *) &cbuff->entry[x].wait_interval[0],
		       sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
	}

	/*
	 * Assign interval width if the user defined a new one.
	 */

	if (new_interval != 0)
		cbuff_width = new_interval;

	cumulative_buffer_unlock(s);

#endif	/* ETAP_LOCK_ACCUMULATE */
}

#endif	/* ETAP */

/*
 * ROUTINE:	etap_probe	[exported]
 *
 * FUNCTION:	The etap_probe system call serves as a user-level probe,
 *		allowing user-level code to store event data into
 *		the monitored buffer(s).
 */

kern_return_t
etap_probe(
	unsigned short	event_type,
	unsigned short	event_id,
	unsigned int	data_size,	/* total size in bytes */
	etap_data_t	*data)
{

#if ETAP_MONITOR

	mbuff_entry_t	mbuff_entryp;
	int		cpu;
	int		free;
	spl_t		s;


	if (data_size > ETAP_DATA_SIZE)
		return (KERN_INVALID_ARGUMENT);

	if (event_table[event_type].status == ETAP_TRACE_OFF ||
	    event_table[event_type].event != event_type)
		return (KERN_NO_ACCESS);

	mp_disable_preemption();
	cpu = cpu_number();
	s = splhigh();

	free = mbuff[cpu]->free;
	mbuff_entryp = &mbuff[cpu]->entry[free];

	/*
	 * Load monitor buffer entry
	 */

	ETAP_TIMESTAMP(mbuff_entryp->time);
	mbuff_entryp->event	= event_id;
	mbuff_entryp->flags	= USER_EVENT;
	mbuff_entryp->instance	= (u_int) current_thread();
	mbuff_entryp->pc	= 0;

	if (data != ETAP_DATA_NULL)
		copyin((const char *) data,
		       (char *) mbuff_entryp->data,
		       data_size);

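	/*
	 * Note: the monitor buffer is used as a ring; each time `free'
	 * wraps back to zero the buffer's timestamp field is bumped,
	 * marking a new generation of entries.
	 */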
	mbuff[cpu]->free = (free+1) % mbuff_entries;

	if (mbuff[cpu]->free == 0)
		mbuff[cpu]->timestamp++;

	splx(s);
	mp_enable_preemption();

	return (KERN_SUCCESS);

#else	/* ETAP_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_MONITOR */
}

/*
 * ROUTINE:	etap_trace_thread	[exported]
 *
 * FUNCTION:	Toggles thread's ETAP trace status bit.
 */

kern_return_t
etap_trace_thread(
	thread_act_t	thr_act,
	boolean_t	trace_status)
{
#if ETAP_EVENT_MONITOR

	thread_t	thread;
	boolean_t	old_status;
	etap_data_t	probe_data;
	spl_t		s;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);

	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return (KERN_INVALID_ARGUMENT);
	}

	s = splsched();
	thread_lock(thread);

	old_status = thread->etap_trace;
	thread->etap_trace = trace_status;

	ETAP_DATA_LOAD(probe_data[0],thr_act->task);
	ETAP_DATA_LOAD(probe_data[1],thr_act);
	ETAP_DATA_LOAD(probe_data[2],thread->sched_pri);

	thread_unlock(thread);
	splx(s);

	act_unlock_thread(thr_act);

	/*
	 * Thread creation (ETAP_P_THREAD_LIFE: BEGIN) is ONLY recorded
	 * here since a thread's trace status is disabled by default.
	 */
	if (trace_status == TRUE && old_status == FALSE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_BEGIN,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	/*
	 * Thread termination is (falsely) recorded here if the trace
	 * status has been disabled.  This event is recorded to allow
	 * users the option of tracing a portion of a thread's execution.
	 */
	if (trace_status == FALSE && old_status == TRUE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_END,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	return (KERN_SUCCESS);

#else	/* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}

/*
 * ROUTINE:	etap_mon_reconfig	[exported]
 *
 * FUNCTION:	Reallocates monitor buffers to hold specified number
 *		of entries.
 *
 * NOTES:	In multiprocessor (SMP) case, a lock needs to be added
 *		here and in data collection macros to protect access
 *		to mbuff_entries.
 */
kern_return_t
etap_mon_reconfig(
	host_priv_t	host_priv,
	int		nentries)
{
#if ETAP_EVENT_MONITOR
	struct monitor_buffer *nmbuff[NCPUS], *ombuff[NCPUS];
	int	size, osize, i, ret;
	spl_t	s;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (nentries <= 0)	/* must be at least 1 */
		return (KERN_FAILURE);

	size = ((nentries-1) * sizeof(struct mbuff_entry)) +
	       sizeof(struct monitor_buffer);

	for (i = 0; i < NCPUS; ++i) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *)&nmbuff[i],
				 size);
		if (ret != KERN_SUCCESS) {
			if (i > 0) {
				int j;

				for (j = 0; j < i; ++j) {
					kmem_free(kernel_map,
						  (vm_offset_t)nmbuff[j],
						  size);
				}
			}
			return (ret);
		}
		bzero((char *) nmbuff[i], size);
	}
	osize = ((mbuff_entries-1) * sizeof (struct mbuff_entry)) +
		sizeof (struct monitor_buffer);

	s = splhigh();
	event_table_lock();
	for (i = 0; i < NCPUS; ++i) {
		ombuff[i] = mbuff[i];
		mbuff[i] = nmbuff[i];
	}
	mbuff_entries = nentries;
	event_table_unlock();
	splx(s);

	for (i = 0; i < NCPUS; ++i) {
		kmem_free(kernel_map,
			  (vm_offset_t)ombuff[i],
			  osize);
	}
	return (KERN_SUCCESS);
#else
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}

/*
 * ROUTINE:	etap_new_probe		[exported]
 *
 * FUNCTION:	Reallocates monitor probe table, adding a new entry
 *
 */
kern_return_t
etap_new_probe(
	host_priv_t	host_priv,
	vm_address_t	name,
	vm_size_t	namlen,
	boolean_t	trace_on,
	vm_address_t	id)
{
#if ETAP_EVENT_MONITOR
	event_table_t	newtable, oldtable;
	unsigned short	i, nid;
	spl_t		s;
	vm_size_t	newsize = (event_table_max + 1) *
				  sizeof (struct event_table_entry);
	boolean_t	duplicate_name = FALSE;
	kern_return_t	ret;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (namlen > EVENT_NAME_LENGTH - 1)
		return (KERN_INVALID_ARGUMENT);

	if ((ret = kmem_alloc(kernel_map, (vm_address_t *)&newtable,
			      newsize)) != KERN_SUCCESS)
		return (ret);

	bcopy((const char *)event_table, (char *)newtable, event_table_max *
	      sizeof (struct event_table_entry));

	if (copyin((const char *)name,
		   (char *)&newtable[event_table_max].name, namlen)) {
		kmem_free(kernel_map, (vm_address_t)newtable, newsize);
		return (KERN_INVALID_ADDRESS);
	}

	newtable[event_table_max].name[EVENT_NAME_LENGTH - 1] = '\0';
	newtable[event_table_max].status = trace_on;
	newtable[event_table_max].dynamic = 0;

	for (nid = i = 0; i < event_table_max; ++i) {
		if (strcmp((char *)newtable[event_table_max].name,
			   newtable[i].name) == 0) {
			duplicate_name = TRUE;
			printf("duplicate name\n");
		}
		nid = max(nid, newtable[i].event);
	}
	++nid;

	if (nid >= ETAP_NO_TRACE || duplicate_name == TRUE) {
		kmem_free(kernel_map, (vm_address_t)newtable, newsize);
		if (nid >= ETAP_NO_TRACE) {
			printf("KERN_RESOURCE_SHORTAGE\n");
			return (KERN_RESOURCE_SHORTAGE);
		}
		else {
			printf("KERN_NAME_EXISTS\n");
			return (KERN_NAME_EXISTS);
		}
	}

	newtable[event_table_max].event = nid;

	s = splhigh();
	event_table_lock();
	oldtable = event_table;
	event_table = newtable;
	++event_table_max;
	event_table_unlock();
	splx(s);

	if (oldtable != event_table_init)
		kmem_free(kernel_map, (vm_address_t)oldtable,
			  (event_table_max - 1) *
			  sizeof (struct event_table_entry));

	*(unsigned short *)id = nid;

	return (KERN_SUCCESS);
#else
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */

}
1492 | /* | |
1493 | * ETAP trap probe hooks | |
1494 | */ | |
1495 | ||
1496 | void | |
1497 | etap_interrupt_probe(int interrupt, int flag_setting) | |
1498 | { | |
1499 | u_short flag; | |
1500 | ||
1501 | if (flag_setting == 1) | |
1502 | flag = EVENT_BEGIN; | |
1503 | else | |
1504 | flag = EVENT_END; | |
1505 | ||
1506 | ETAP_PROBE_DATA_COND(ETAP_P_INTERRUPT, | |
1507 | flag, | |
1508 | current_thread(), | |
1509 | &interrupt, | |
1510 | sizeof(int), | |
1511 | 1); | |
1512 | } | |

void
etap_machcall_probe1(int syscall)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_BEGIN | SYSCALL_TRAP,
			current_thread(),
			&syscall,
			sizeof(int));
}

void
etap_machcall_probe2(void)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_END | SYSCALL_TRAP,
			current_thread(),
			0,
			0);
}
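
/*
 * Example usage (sketch): the Mach trap dispatcher would pair the two
 * probes around the trap routine so the begin/end records share the
 * SYSCALL_TRAP qualifier; `trap_no', `routine' and `args' are
 * illustrative names:
 *
 *	etap_machcall_probe1(trap_no);
 *	retval = (*routine)(args);
 *	etap_machcall_probe2();
 */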

static void print_user_event(mbuff_entry_t);
static void print_kernel_event(mbuff_entry_t, boolean_t);
static void print_lock_event(mbuff_entry_t, const char *);

#if	MACH_KDB
void db_show_etap_log(db_expr_t, boolean_t, db_expr_t, char *);
/*
 *
 *	ROUTINE:	db_show_etap_log	[internal]
 *
 *	FUNCTION:	print each mbuff table (for use in debugger)
 *
 */
void
db_show_etap_log(
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		*modif)
{
#if	ETAP_MONITOR
	int	  cpu = cpu_number(), last, i, first, step, end, restart;
	boolean_t show_data = FALSE;

	last = (mbuff[cpu]->free - 1) % mbuff_entries;

	if (db_option(modif, 'r')) {
		first	= last;
		step	= -1;
		end	= -1;
		restart	= mbuff_entries - 1;
	} else {
		first	= last + 1;
		step	= 1;
		end	= mbuff_entries;
		restart	= 0;
	}

	if (db_option(modif, 'd'))
		show_data = TRUE;

	for (i = first; i != end; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
	for (i = restart; i != first; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
#else
	printf("ETAP event monitor not configured\n");
#endif	/* ETAP_MONITOR */
}
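
/*
 * Usage note: from the debugger this dumps the current cpu's monitor
 * buffer in chronological order; the `r' modifier reverses the order
 * and the `d' modifier also prints each kernel record's data words.
 * Assuming the command is registered as "show etap_log", that is:
 *
 *	db> show etap_log/rd
 */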

#if	ETAP_MONITOR
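/*
 *
 *	ROUTINE:	print_user_event	[internal]
 *
 *	FUNCTION:	print a single user-event monitor buffer record,
 *			mapping the probe pc to its user event number
 *
 */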
static
void
print_user_event(mbuff_entry_t record)
{
	char	*s, buf[256];

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);
	switch (record->pc) {
	case ETAP_P_USER_EVENT0:	s = "0";	break;
	case ETAP_P_USER_EVENT1:	s = "1";	break;
	case ETAP_P_USER_EVENT2:	s = "2";	break;
	case ETAP_P_USER_EVENT3:	s = "3";	break;
	case ETAP_P_USER_EVENT4:	s = "4";	break;
	case ETAP_P_USER_EVENT5:	s = "5";	break;
	case ETAP_P_USER_EVENT6:	s = "6";	break;
	case ETAP_P_USER_EVENT7:	s = "7";	break;
	case ETAP_P_USER_EVENT8:	s = "8";	break;
	case ETAP_P_USER_EVENT9:	s = "9";	break;
	case ETAP_P_USER_EVENT10:	s = "10";	break;
	case ETAP_P_USER_EVENT11:	s = "11";	break;
	case ETAP_P_USER_EVENT12:	s = "12";	break;
	case ETAP_P_USER_EVENT13:	s = "13";	break;
	case ETAP_P_USER_EVENT14:	s = "14";	break;
	case ETAP_P_USER_EVENT15:	s = "15";	break;
	case ETAP_P_USER_EVENT16:	s = "16";	break;
	case ETAP_P_USER_EVENT17:	s = "17";	break;
	case ETAP_P_USER_EVENT18:	s = "18";	break;
	case ETAP_P_USER_EVENT19:	s = "19";	break;
	case ETAP_P_USER_EVENT20:	s = "20";	break;
	case ETAP_P_USER_EVENT21:	s = "21";	break;
	case ETAP_P_USER_EVENT22:	s = "22";	break;
	case ETAP_P_USER_EVENT23:	s = "23";	break;
	case ETAP_P_USER_EVENT24:	s = "24";	break;
	case ETAP_P_USER_EVENT25:	s = "25";	break;
	case ETAP_P_USER_EVENT26:	s = "26";	break;
	case ETAP_P_USER_EVENT27:	s = "27";	break;
	case ETAP_P_USER_EVENT28:	s = "28";	break;
	case ETAP_P_USER_EVENT29:	s = "29";	break;
	case ETAP_P_USER_EVENT30:	s = "30";	break;
	case ETAP_P_USER_EVENT31:	s = "31";	break;
	default:
		sprintf(buf, "dynamic %x", record->pc);
		s = buf;
		break;
	}

	db_printf("user probe %s: [%x] data = %x %x %x %x\n",
		  s,
		  record->event,
		  record->data[0],
		  record->data[1],
		  record->data[2],
		  record->data[3]);
}

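/*
 *
 *	ROUTINE:	print_kernel_event	[internal]
 *
 *	FUNCTION:	decode and print a single kernel monitor buffer
 *			record; lock events are passed on to
 *			print_lock_event()
 *
 */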
static
void
print_kernel_event(mbuff_entry_t record, boolean_t data)
{
	char	*text_name;
	int	i;

	/* assume zero event means that record was never written to */
	if (record->event == 0)
		return;

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);

	switch (record->event) {

	case ETAP_P_THREAD_LIFE:
		if (record->flags & EVENT_BEGIN)
			db_printf("thread created [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		else
			db_printf("thread terminated [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		break;

	case ETAP_P_SYSCALL_MACH:
		if (record->flags & SYSCALL_TRAP)
			text_name = system_table_lookup(SYS_TABLE_MACH_TRAP,
							record->data[0]);
		else
			text_name = system_table_lookup(SYS_TABLE_MACH_MESSAGE,
							record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("mach enter: %s [%x]\n",
				  text_name,
				  record->data[0]);
		else
			db_printf("mach exit :\n");
		break;

	case ETAP_P_SYSCALL_UNIX:
		text_name = system_table_lookup(SYS_TABLE_UNIX_SYSCALL,
						record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("unix enter: %s\n", text_name);
		else
			db_printf("unix exit : %s\n", text_name);
		break;

	case ETAP_P_THREAD_CTX:
		if (record->flags & EVENT_END)
			db_printf("context switch to %x ",
				  record->data[0]);
		else	/* EVENT_BEGIN */
			db_printf("context switch from %x ",
				  record->data[0]);

		switch (record->data[1]) {
		case BLOCKED_ON_SEMAPHORE:
			db_printf("R: semaphore\n"); break;
		case BLOCKED_ON_LOCK:
			db_printf("R: lock\n"); break;
		case BLOCKED_ON_MUTEX_LOCK:
			db_printf("R: mutex lock\n"); break;
		case BLOCKED_ON_COMPLEX_LOCK:
			db_printf("R: complex lock\n"); break;
		case BLOCKED_ON_PORT_RCV:
			db_printf("R: port receive\n"); break;
		case BLOCKED_ON_REAPER_DONE:
			db_printf("R: reaper thread done\n"); break;
		case BLOCKED_ON_IDLE_DONE:
			db_printf("R: idle thread done\n"); break;
		case BLOCKED_ON_TERMINATION:
			db_printf("R: termination\n"); break;
		default:
			if (record->data[2])
				db_printf("R: ast %x\n", record->data[2]);
			else
				db_printf("R: undefined block\n");
		}
		break;

	case ETAP_P_INTERRUPT:
		if (record->flags & EVENT_BEGIN) {
			text_name = system_table_lookup(SYS_TABLE_INTERRUPT,
							record->data[0]);
			db_printf("intr enter: %s\n", text_name);
		} else
			db_printf("intr exit\n");
		break;

	case ETAP_P_ACT_ABORT:
		db_printf("activation abort [A %x : S %x]\n",
			  record->data[1],
			  record->data[0]);
		break;

	case ETAP_P_PRIORITY:
		db_printf("priority changed for %x N:%d O:%d\n",
			  record->data[0],
			  record->data[1],
			  record->data[2]);
		break;

	case ETAP_P_EXCEPTION:
		text_name = system_table_lookup(SYS_TABLE_EXCEPTION,
						record->data[0]);
		db_printf("exception: %s\n", text_name);
		break;

	case ETAP_P_DEPRESSION:
		if (record->flags & EVENT_BEGIN)
			db_printf("priority depressed\n");
		else {
			if (record->data[0] == 0)
				db_printf("priority undepressed : timed out\n");
			else
				db_printf("priority undepressed : self inflicted\n");
		}
		break;

	case ETAP_P_MISC:
		db_printf("flags: %x data: %x %x %x %x\n", record->flags,
			  record->data[0], record->data[1], record->data[2],
			  record->data[3]);
		break;

	case ETAP_P_DETAP:
		db_printf("flags: %x rtc: %x %09x dtime: %x %09x\n",
			  record->flags, record->data[0], record->data[1],
			  record->data[2], record->data[3]);
		break;

	default:
		for (i = 0; event_table_init[i].event != ETAP_NO_TRACE; ++i)
			if (record->event == event_table_init[i].event) {
				print_lock_event(record, event_table_init[i].name);
				return;
			}
		db_printf("Unknown event: %d\n", record->event);
		break;
	}
	if (data)
		db_printf(" Data: %08x %08x %08x %08x\n", record->data[0],
			  record->data[1], record->data[2], record->data[3]);
}

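/*
 *
 *	ROUTINE:	print_lock_event	[internal]
 *
 *	FUNCTION:	print a lock contention or duration record,
 *			resolving the recorded pc values to symbol+offset
 *
 */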
static void
print_lock_event(mbuff_entry_t record, const char *name)
{
	char		*sym1, *sym2;
	db_addr_t	offset1, offset2;

	db_find_sym_and_offset(record->data[0], &sym1, &offset1);

	db_printf("%15s", name);
	if (record->flags & SPIN_LOCK)
		db_printf(" spin ");
	else if (record->flags & READ_LOCK)
		db_printf(" read ");
	else if (record->flags & WRITE_LOCK)
		db_printf(" write ");
	else
		db_printf(" undef ");

	if (record->flags & ETAP_CONTENTION) {
		db_printf("wait lock %s+%x\n",
			  sym1, offset1);
	}
	else if (record->flags & ETAP_DURATION) {
		db_find_sym_and_offset(record->data[1], &sym2, &offset2);
		db_printf("lock %s+%x unlock %s+%x\n",
			  sym1, offset1, sym2, offset2);
	} else {
		db_printf("illegal op: neither HOLD nor WAIT is specified\n");
	}
}

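/*
 *
 *	ROUTINE:	system_table_lookup	[internal]
 *
 *	FUNCTION:	map a trap, message, syscall, interrupt or
 *			exception number to a printable name; returns
 *			"undefined" if no match is found
 *
 */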
char *
system_table_lookup(unsigned int table, unsigned int number)
{
	int		x;
	char		*name = NULL;
	db_addr_t	offset;

	switch (table) {
	case SYS_TABLE_MACH_TRAP:
		name = mach_trap_name(number >> 4);
		break;
	case SYS_TABLE_MACH_MESSAGE:
		for (x = 0; x < mach_message_table_entries; x++) {
			if (mach_message_table[x].number == number) {
				name = mach_message_table[x].name;
				break;
			}
		}
		break;
	case SYS_TABLE_UNIX_SYSCALL:
		number = -number;
		name = syscall_name(number);
		break;
	case SYS_TABLE_INTERRUPT:
		db_find_sym_and_offset((int)ivect[number], &name, &offset);
		break;
	case SYS_TABLE_EXCEPTION:
		name = exception_name(number);
		break;
	}
	return (name != NULL) ? name : "undefined";
}

#endif	/* ETAP_MONITOR */
#endif	/* MACH_KDB */