/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/host.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <machine/machine_tables.h>
#include <mach/clock.h>
#include <mach/clock_reply.h>
#include <mach/default_pager_object.h>
#include <device/device.h>
#include <device/device_reply.h>
#include <device/device_request.h>
#include <mach_debug/mach_debug.h>
/*#include <mach/mach_host.h>*/
#include <mach/mach_norma.h>
#include <mach/mach_port.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_user.h>
#include <mach/notify_server.h>
#include <mach/prof.h>
#include <machine/unix_map.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_command.h>
kern_return_t	etap_get_info(host_priv_t, int*, int*, vm_offset_t*, vm_offset_t*,
			      int*, int*, int*, int*);

kern_return_t	etap_mon_reconfig(host_priv_t, int);

kern_return_t	etap_new_probe(host_priv_t, vm_address_t, vm_size_t, boolean_t,
			       vm_address_t);

kern_return_t	etap_trace_thread(thread_act_t, boolean_t);

void		etap_trace_reset(int);

void		etap_interrupt_probe(int, int);

void		etap_machcall_probe1(int);

void		etap_machcall_probe2(void);

#define max(x,y) ((x > y) ? x : y)

event_table_t	etap_event_table_find(etap_event_t);
/* =======================
 * ETAP Lock definitions
 * =======================
 */

#if ETAP_LOCK_TRACE
#define etap_lock	simple_lock_no_trace
#define etap_unlock	simple_unlock_no_trace
#else  /* ETAP_LOCK_TRACE */
#define etap_lock	simple_lock
#define etap_unlock	simple_unlock
#endif /* ETAP_LOCK_TRACE */

#define event_table_lock()	etap_lock(&event_table_lock)
#define event_table_unlock()	etap_unlock(&event_table_lock)

#define cumulative_buffer_lock(s)	\
MACRO_BEGIN				\
	s = splhigh();			\
	etap_lock(&cbuff_lock);		\
MACRO_END

#define cumulative_buffer_unlock(s)	\
MACRO_BEGIN				\
	etap_unlock(&cbuff_lock);	\
	splx(s);			\
MACRO_END
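
/*
 * Usage sketch (illustrative only): callers pair the two macros around a
 * critical section, passing an spl_t that the lock macro fills in and the
 * unlock macro restores.  Only names defined in this file are assumed.
 *
 *	spl_t s;
 *
 *	cumulative_buffer_lock(s);
 *	... examine or update cbuff fields ...
 *	cumulative_buffer_unlock(s);
 */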
#if ETAP_LOCK_ACCUMULATE

/* ========================================
 * ETAP Cumulative lock trace definitions
 * ========================================
 */

int cbuff_width = ETAP_CBUFF_WIDTH;

/*
 * Cumulative buffer declaration
 *
 * For both protection and mapping purposes, the cumulative
 * buffer must be aligned on a page boundary.  Since the cumulative
 * buffer must be statically defined, page boundary alignment is not
 * guaranteed.  Instead, the buffer is allocated with 2 extra pages.
 * The cumulative buffer pointer is rounded up to the nearest page
 * boundary, which guarantees page boundary alignment.
 */

#define TWO_PAGES		16384	/* XXX does this apply ?? */
#define CBUFF_ALLOCATED_SIZE	sizeof(struct cumulative_buffer)+TWO_PAGES
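
/*
 * Worked example (illustrative only, 4KB pages assumed): if
 * cbuff_allocated[] happens to start at address 0x10c50, round_page()
 * in etap_init_phase1() moves the working pointer up to 0x11000, the
 * next page boundary.  Rounding skips at most one page's worth of
 * bytes, so the two extra pages in CBUFF_ALLOCATED_SIZE always leave
 * sizeof(struct cumulative_buffer) aligned, usable bytes.  The
 * addresses here are made up for the example.
 */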
decl_simple_lock_data(,cbuff_lock)
#if MACH_LDEBUG
simple_lock_t		cbuff_locks;
#else  /* MACH_LDEBUG */
simple_lock_data_t	*cbuff_locks;
#endif /* MACH_LDEBUG */
char			cbuff_allocated[CBUFF_ALLOCATED_SIZE];
cumulative_buffer_t	cbuff = {0};

#endif /* ETAP_LOCK_ACCUMULATE */
#if ETAP_MONITOR

int mbuff_entries = ETAP_MBUFF_ENTRIES;

/*
 * Create an array of pointers to monitor buffers.
 * The buffers themselves are allocated at run-time.
 */

struct monitor_buffer	*mbuff[NCPUS];
#endif /* ETAP_MONITOR */
/* ==========================
 * Event table declarations
 * ==========================
 */

decl_simple_lock_data(,event_table_lock)

const struct event_table_entry event_table_init[] =
{
  /*-----------------------------------------------------------------------*
   * ETAP EVENT               TRACE STATUS    TEXT NAME            DYNAMIC *
   *-----------------------------------------------------------------------*/

#if ETAP_EVENT_MONITOR
	{ETAP_P_USER_EVENT0,      ETAP_TRACE_OFF, "p_user_event0",      STATIC},
	{ETAP_P_USER_EVENT1,      ETAP_TRACE_OFF, "p_user_event1",      STATIC},
	{ETAP_P_USER_EVENT2,      ETAP_TRACE_OFF, "p_user_event2",      STATIC},
	{ETAP_P_USER_EVENT3,      ETAP_TRACE_OFF, "p_user_event3",      STATIC},
	{ETAP_P_USER_EVENT4,      ETAP_TRACE_OFF, "p_user_event4",      STATIC},
	{ETAP_P_USER_EVENT5,      ETAP_TRACE_OFF, "p_user_event5",      STATIC},
	{ETAP_P_USER_EVENT6,      ETAP_TRACE_OFF, "p_user_event6",      STATIC},
	{ETAP_P_USER_EVENT7,      ETAP_TRACE_OFF, "p_user_event7",      STATIC},
	{ETAP_P_USER_EVENT8,      ETAP_TRACE_OFF, "p_user_event8",      STATIC},
	{ETAP_P_USER_EVENT9,      ETAP_TRACE_OFF, "p_user_event9",      STATIC},
	{ETAP_P_USER_EVENT10,     ETAP_TRACE_OFF, "p_user_event10",     STATIC},
	{ETAP_P_USER_EVENT11,     ETAP_TRACE_OFF, "p_user_event11",     STATIC},
	{ETAP_P_USER_EVENT12,     ETAP_TRACE_OFF, "p_user_event12",     STATIC},
	{ETAP_P_USER_EVENT13,     ETAP_TRACE_OFF, "p_user_event13",     STATIC},
	{ETAP_P_USER_EVENT14,     ETAP_TRACE_OFF, "p_user_event14",     STATIC},
	{ETAP_P_USER_EVENT15,     ETAP_TRACE_OFF, "p_user_event15",     STATIC},
	{ETAP_P_USER_EVENT16,     ETAP_TRACE_OFF, "p_user_event16",     STATIC},
	{ETAP_P_USER_EVENT17,     ETAP_TRACE_OFF, "p_user_event17",     STATIC},
	{ETAP_P_USER_EVENT18,     ETAP_TRACE_OFF, "p_user_event18",     STATIC},
	{ETAP_P_USER_EVENT19,     ETAP_TRACE_OFF, "p_user_event19",     STATIC},
	{ETAP_P_USER_EVENT20,     ETAP_TRACE_OFF, "p_user_event20",     STATIC},
	{ETAP_P_USER_EVENT21,     ETAP_TRACE_OFF, "p_user_event21",     STATIC},
	{ETAP_P_USER_EVENT22,     ETAP_TRACE_OFF, "p_user_event22",     STATIC},
	{ETAP_P_USER_EVENT23,     ETAP_TRACE_OFF, "p_user_event23",     STATIC},
	{ETAP_P_USER_EVENT24,     ETAP_TRACE_OFF, "p_user_event24",     STATIC},
	{ETAP_P_USER_EVENT25,     ETAP_TRACE_OFF, "p_user_event25",     STATIC},
	{ETAP_P_USER_EVENT26,     ETAP_TRACE_OFF, "p_user_event26",     STATIC},
	{ETAP_P_USER_EVENT27,     ETAP_TRACE_OFF, "p_user_event27",     STATIC},
	{ETAP_P_USER_EVENT28,     ETAP_TRACE_OFF, "p_user_event28",     STATIC},
	{ETAP_P_USER_EVENT29,     ETAP_TRACE_OFF, "p_user_event29",     STATIC},
	{ETAP_P_USER_EVENT30,     ETAP_TRACE_OFF, "p_user_event30",     STATIC},
	{ETAP_P_USER_EVENT31,     ETAP_TRACE_OFF, "p_user_event31",     STATIC},
	{ETAP_P_SYSCALL_MACH,     ETAP_TRACE_OFF, "p_syscall_mach",     STATIC},
	{ETAP_P_SYSCALL_UNIX,     ETAP_TRACE_OFF, "p_syscall_unix",     STATIC},
	{ETAP_P_THREAD_LIFE,      ETAP_TRACE_OFF, "p_thread_life",      STATIC},
	{ETAP_P_THREAD_CTX,       ETAP_TRACE_OFF, "p_thread_ctx",       STATIC},
	{ETAP_P_RPC,              ETAP_TRACE_OFF, "p_rpc",              STATIC},
	{ETAP_P_INTERRUPT,        ETAP_TRACE_OFF, "p_interrupt",        STATIC},
	{ETAP_P_ACT_ABORT,        ETAP_TRACE_OFF, "p_act_abort",        STATIC},
	{ETAP_P_PRIORITY,         ETAP_TRACE_OFF, "p_priority",         STATIC},
	{ETAP_P_EXCEPTION,        ETAP_TRACE_OFF, "p_exception",        STATIC},
	{ETAP_P_DEPRESSION,       ETAP_TRACE_OFF, "p_depression",       STATIC},
	{ETAP_P_MISC,             ETAP_TRACE_OFF, "p_misc",             STATIC},
	{ETAP_P_DETAP,            ETAP_TRACE_OFF, "p_detap",            STATIC},
#endif /* ETAP_EVENT_MONITOR */
#if ETAP_LOCK_TRACE
	{ETAP_VM_BUCKET,          ETAP_TRACE_OFF, "vm_bucket",          STATIC}, /**/
	{ETAP_VM_HIMEM,           ETAP_TRACE_OFF, "vm_himem",           STATIC},
	{ETAP_VM_MAP,             ETAP_TRACE_OFF, "vm_map",             1},
	{ETAP_VM_MAP_I,           ETAP_TRACE_OFF, "vm_map_i",           2},
	{ETAP_VM_MEMMAN,          ETAP_TRACE_OFF, "vm_memman",          STATIC}, /**/
	{ETAP_VM_MSYNC,           ETAP_TRACE_OFF, "vm_msync",           3},
	{ETAP_VM_OBJ,             ETAP_TRACE_OFF, "vm_obj",             4},
	{ETAP_VM_OBJ_CACHE,       ETAP_TRACE_OFF, "vm_obj_cache",       5},
	{ETAP_VM_PAGE_ALLOC,      ETAP_TRACE_OFF, "vm_page_alloc",      STATIC}, /**/
	{ETAP_VM_PAGEOUT,         ETAP_TRACE_OFF, "vm_pageout",         STATIC},
	{ETAP_VM_PAGEQ,           ETAP_TRACE_OFF, "vm_pageq",           STATIC},
	{ETAP_VM_PAGEQ_FREE,      ETAP_TRACE_OFF, "vm_pageq_free",      STATIC},
	{ETAP_VM_PMAP,            ETAP_TRACE_OFF, "vm_pmap",            6},
	{ETAP_VM_PMAP_CACHE,      ETAP_TRACE_OFF, "vm_pmap_cache",      STATIC},
	{ETAP_VM_PMAP_FREE,       ETAP_TRACE_OFF, "vm_pmap_free",       STATIC},
	{ETAP_VM_PMAP_KERNEL,     ETAP_TRACE_OFF, "vm_pmap_kern",       STATIC},
	{ETAP_VM_PMAP_SYS,        ETAP_TRACE_OFF, "vm_pmap_sys",        7},
	{ETAP_VM_PMAP_SYS_I,      ETAP_TRACE_OFF, "vm_pmap_sys_i",      8},
	{ETAP_VM_PMAP_UPDATE,     ETAP_TRACE_OFF, "vm_pmap_update",     STATIC},
	{ETAP_VM_PREPPIN,         ETAP_TRACE_OFF, "vm_preppin",         STATIC},
	{ETAP_VM_RESULT,          ETAP_TRACE_OFF, "vm_result",          9},
	{ETAP_VM_TEST,            ETAP_TRACE_OFF, "vm_test",            STATIC}, /**/
	{ETAP_VM_PMAP_PHYSENTRIES, ETAP_TRACE_OFF, "vm_pmap_physentries", STATIC},
	{ETAP_VM_PMAP_SID,        ETAP_TRACE_OFF, "vm_pmap_sid",        STATIC},
	{ETAP_VM_PMAP_PTE,        ETAP_TRACE_OFF, "vm_pmap_pte",        STATIC},
	{ETAP_VM_PMAP_PTE_OVFLW,  ETAP_TRACE_OFF, "vm_pmap_pte_ovflw",  STATIC},
	{ETAP_VM_PMAP_TLB,        ETAP_TRACE_OFF, "vm_pmap_tlb",        STATIC},

	{ETAP_IPC_IHGB,           ETAP_TRACE_OFF, "ipc_ihgb",           10}, /**/
	{ETAP_IPC_IS,             ETAP_TRACE_OFF, "ipc_is",             11}, /**/
	{ETAP_IPC_IS_REF,         ETAP_TRACE_OFF, "ipc_is_ref",         12}, /**/
	{ETAP_IPC_MQUEUE,         ETAP_TRACE_OFF, "ipc_mqueue",         STATIC}, /**/
	{ETAP_IPC_OBJECT,         ETAP_TRACE_OFF, "ipc_object",         STATIC}, /**/
	{ETAP_IPC_PORT_MULT,      ETAP_TRACE_OFF, "ipc_port_mult",      13}, /**/
	{ETAP_IPC_PORT_TIME,      ETAP_TRACE_OFF, "ipc_port_time",      14}, /**/
	{ETAP_IPC_RPC,            ETAP_TRACE_OFF, "ipc_rpc",            15}, /**/
	{ETAP_IPC_PORT_ALLOCQ,    ETAP_TRACE_OFF, "ipc_port_allocq",    STATIC}, /**/

	{ETAP_IO_AHA,             ETAP_TRACE_OFF, "io_aha",             STATIC},
	{ETAP_IO_CHIP,            ETAP_TRACE_OFF, "io_chip",            STATIC},
	{ETAP_IO_DEV,             ETAP_TRACE_OFF, "io_dev",             16}, /**/
	{ETAP_IO_DEV_NUM,         ETAP_TRACE_OFF, "io_dev_num",         STATIC},
	{ETAP_IO_DEV_PAGEH,       ETAP_TRACE_OFF, "io_dev_pageh",       STATIC}, /**/
	{ETAP_IO_DEV_PAGER,       ETAP_TRACE_OFF, "io_dev_pager",       STATIC}, /**/
	{ETAP_IO_DEV_PORT,        ETAP_TRACE_OFF, "io_dev_port",        STATIC}, /**/
	{ETAP_IO_DEV_REF,         ETAP_TRACE_OFF, "io_dev_new",         17}, /**/
	{ETAP_IO_DEVINS,          ETAP_TRACE_OFF, "io_devins",          STATIC},
	{ETAP_IO_DONE_LIST,       ETAP_TRACE_OFF, "io_done_list",       STATIC},
	{ETAP_IO_DONE_Q,          ETAP_TRACE_OFF, "io_doneq",           18},
	{ETAP_IO_DONE_REF,        ETAP_TRACE_OFF, "io_done_ref",        19},
	{ETAP_IO_EAHA,            ETAP_TRACE_OFF, "io_eaha",            STATIC},
	{ETAP_IO_HD_PROBE,        ETAP_TRACE_OFF, "io_hd_probe",        STATIC},
	{ETAP_IO_IOPB,            ETAP_TRACE_OFF, "io_iopb",            STATIC},
	{ETAP_IO_KDQ,             ETAP_TRACE_OFF, "io_kdq",             STATIC},
	{ETAP_IO_KDTTY,           ETAP_TRACE_OFF, "io_kdtty",           STATIC},
	{ETAP_IO_REQ,             ETAP_TRACE_OFF, "io_req",             20},
	{ETAP_IO_TARGET,          ETAP_TRACE_OFF, "io_target",          STATIC},
	{ETAP_IO_TTY,             ETAP_TRACE_OFF, "io_tty",             STATIC},
	{ETAP_IO_IOP_LOCK,        ETAP_TRACE_OFF, "io_iop",             STATIC}, /**/
	{ETAP_IO_DEV_NAME,        ETAP_TRACE_OFF, "io_dev_name",        STATIC}, /**/
	{ETAP_IO_CDLI,            ETAP_TRACE_OFF, "io_cdli",            STATIC}, /**/
	{ETAP_IO_HIPPI_FILTER,    ETAP_TRACE_OFF, "io_hippi_filter",    STATIC}, /**/
	{ETAP_IO_HIPPI_SRC,       ETAP_TRACE_OFF, "io_hippi_src",       STATIC}, /**/
	{ETAP_IO_HIPPI_DST,       ETAP_TRACE_OFF, "io_hippi_dst",       STATIC}, /**/
	{ETAP_IO_HIPPI_PKT,       ETAP_TRACE_OFF, "io_hippi_pkt",       STATIC}, /**/
	{ETAP_IO_NOTIFY,          ETAP_TRACE_OFF, "io_notify",          STATIC}, /**/
	{ETAP_IO_DATADEV,         ETAP_TRACE_OFF, "io_data_device",     STATIC}, /**/
	{ETAP_IO_OPEN,            ETAP_TRACE_OFF, "io_open",            STATIC},
	{ETAP_IO_OPEN_I,          ETAP_TRACE_OFF, "io_open_i",          STATIC},

	{ETAP_THREAD_ACT,         ETAP_TRACE_OFF, "th_act",             21},
	{ETAP_THREAD_ACTION,      ETAP_TRACE_OFF, "th_action",          STATIC},
	{ETAP_THREAD_LOCK,        ETAP_TRACE_OFF, "th_lock",            22},
	{ETAP_THREAD_LOCK_SET,    ETAP_TRACE_OFF, "th_lock_set",        23},
	{ETAP_THREAD_NEW,         ETAP_TRACE_OFF, "th_new",             24},
	{ETAP_THREAD_PSET,        ETAP_TRACE_OFF, "th_pset",            STATIC}, /**/
	{ETAP_THREAD_PSET_ALL,    ETAP_TRACE_OFF, "th_pset_all",        STATIC},
	{ETAP_THREAD_PSET_RUNQ,   ETAP_TRACE_OFF, "th_pset_runq",       STATIC},
	{ETAP_THREAD_PSET_IDLE,   ETAP_TRACE_OFF, "th_pset_idle",       STATIC},
	{ETAP_THREAD_PSET_QUANT,  ETAP_TRACE_OFF, "th_pset_quant",      STATIC},
	{ETAP_THREAD_PROC,        ETAP_TRACE_OFF, "th_proc",            STATIC},
	{ETAP_THREAD_PROC_RUNQ,   ETAP_TRACE_OFF, "th_proc_runq",       STATIC},
	{ETAP_THREAD_REAPER,      ETAP_TRACE_OFF, "th_reaper",          STATIC},
	{ETAP_THREAD_RPC,         ETAP_TRACE_OFF, "th_rpc",             25},
	{ETAP_THREAD_SEMA,        ETAP_TRACE_OFF, "th_sema",            26},
	{ETAP_THREAD_STACK,       ETAP_TRACE_OFF, "th_stack",           STATIC},
	{ETAP_THREAD_STACK_USAGE, ETAP_TRACE_OFF, "th_stack_usage",     STATIC},
	{ETAP_THREAD_TASK_NEW,    ETAP_TRACE_OFF, "th_task_new",        27},
	{ETAP_THREAD_TASK_ITK,    ETAP_TRACE_OFF, "th_task_itk",        28},
	{ETAP_THREAD_ULOCK,       ETAP_TRACE_OFF, "th_ulock",           29},
	{ETAP_THREAD_WAIT,        ETAP_TRACE_OFF, "th_wait",            STATIC},
	{ETAP_THREAD_WAKE,        ETAP_TRACE_OFF, "th_wake",            30},
	{ETAP_THREAD_ACT_LIST,    ETAP_TRACE_OFF, "th_act_list",        31},
	{ETAP_THREAD_TASK_SWAP,   ETAP_TRACE_OFF, "th_task_swap",       32},
	{ETAP_THREAD_TASK_SWAPOUT, ETAP_TRACE_OFF, "th_task_swapout",   33},
	{ETAP_THREAD_SWAPPER,     ETAP_TRACE_OFF, "th_swapper",         STATIC},

	{ETAP_NET_IFQ,            ETAP_TRACE_OFF, "net_ifq",            STATIC},
	{ETAP_NET_KMSG,           ETAP_TRACE_OFF, "net_kmsg",           STATIC},
	{ETAP_NET_MBUF,           ETAP_TRACE_OFF, "net_mbuf",           STATIC}, /**/
	{ETAP_NET_POOL,           ETAP_TRACE_OFF, "net_pool",           STATIC},
	{ETAP_NET_Q,              ETAP_TRACE_OFF, "net_q",              STATIC},
	{ETAP_NET_QFREE,          ETAP_TRACE_OFF, "net_qfree",          STATIC},
	{ETAP_NET_RCV,            ETAP_TRACE_OFF, "net_rcv",            STATIC},
	{ETAP_NET_RCV_PLIST,      ETAP_TRACE_OFF, "net_rcv_plist",      STATIC}, /**/
	{ETAP_NET_THREAD,         ETAP_TRACE_OFF, "net_thread",         STATIC},

	{ETAP_NORMA_XMM,          ETAP_TRACE_OFF, "norma_xmm",          STATIC},
	{ETAP_NORMA_XMMOBJ,       ETAP_TRACE_OFF, "norma_xmmobj",       STATIC},
	{ETAP_NORMA_XMMCACHE,     ETAP_TRACE_OFF, "norma_xmmcache",     STATIC},
	{ETAP_NORMA_MP,           ETAP_TRACE_OFF, "norma_mp",           STATIC},
	{ETAP_NORMA_VOR,          ETAP_TRACE_OFF, "norma_vor",          STATIC}, /**/
	{ETAP_NORMA_TASK,         ETAP_TRACE_OFF, "norma_task",         38}, /**/

	{ETAP_DIPC_CLEANUP,       ETAP_TRACE_OFF, "dipc_cleanup",       STATIC}, /**/
	{ETAP_DIPC_MSG_PROG,      ETAP_TRACE_OFF, "dipc_msgp_prog",     STATIC}, /**/
	{ETAP_DIPC_PREP_QUEUE,    ETAP_TRACE_OFF, "dipc_prep_queue",    STATIC}, /**/
	{ETAP_DIPC_PREP_FILL,     ETAP_TRACE_OFF, "dipc_prep_fill",     STATIC}, /**/
	{ETAP_DIPC_MIGRATE,       ETAP_TRACE_OFF, "dipc_migrate",       STATIC}, /**/
	{ETAP_DIPC_DELIVER,       ETAP_TRACE_OFF, "dipc_deliver",       STATIC}, /**/
	{ETAP_DIPC_RECV_SYNC,     ETAP_TRACE_OFF, "dipc_recv_sync",     STATIC}, /**/
	{ETAP_DIPC_RPC,           ETAP_TRACE_OFF, "dipc_rpc",           STATIC}, /**/
	{ETAP_DIPC_MSG_REQ,       ETAP_TRACE_OFF, "dipc_msg_req",       STATIC}, /**/
	{ETAP_DIPC_MSG_ORDER,     ETAP_TRACE_OFF, "dipc_msg_order",     STATIC}, /**/
	{ETAP_DIPC_MSG_PREPQ,     ETAP_TRACE_OFF, "dipc_msg_prepq",     STATIC}, /**/
	{ETAP_DIPC_MSG_FREE,      ETAP_TRACE_OFF, "dipc_msg_free",      STATIC}, /**/
	{ETAP_DIPC_KMSG_AST,      ETAP_TRACE_OFF, "dipc_kmsg_ast",      STATIC}, /**/
	{ETAP_DIPC_TEST_LOCK,     ETAP_TRACE_OFF, "dipc_test_lock",     STATIC}, /**/
	{ETAP_DIPC_SPINLOCK,      ETAP_TRACE_OFF, "dipc_spinlock",      STATIC}, /**/
	{ETAP_DIPC_TRACE,         ETAP_TRACE_OFF, "dipc_trace",         STATIC}, /**/
	{ETAP_DIPC_REQ_CALLBACK,  ETAP_TRACE_OFF, "dipc_req_clbck",     STATIC}, /**/
	{ETAP_DIPC_PORT_NAME,     ETAP_TRACE_OFF, "dipc_port_name",     STATIC}, /**/
	{ETAP_DIPC_RESTART_PORT,  ETAP_TRACE_OFF, "dipc_restart_port",  STATIC}, /**/
	{ETAP_DIPC_ZERO_PAGE,     ETAP_TRACE_OFF, "dipc_zero_page",     STATIC}, /**/
	{ETAP_DIPC_BLOCKED_NODE,  ETAP_TRACE_OFF, "dipc_blocked_node",  STATIC}, /**/
	{ETAP_DIPC_TIMER,         ETAP_TRACE_OFF, "dipc_timer",         STATIC}, /**/
	{ETAP_DIPC_SPECIAL_PORT,  ETAP_TRACE_OFF, "dipc_special_port",  STATIC}, /**/

	{ETAP_KKT_TEST_WORK,      ETAP_TRACE_OFF, "kkt_test_work",      STATIC}, /**/
	{ETAP_KKT_TEST_MP,        ETAP_TRACE_OFF, "kkt_work_mp",        STATIC}, /**/
	{ETAP_KKT_NODE,           ETAP_TRACE_OFF, "kkt_node",           STATIC}, /**/
	{ETAP_KKT_CHANNEL_LIST,   ETAP_TRACE_OFF, "kkt_channel_list",   STATIC}, /**/
	{ETAP_KKT_CHANNEL,        ETAP_TRACE_OFF, "kkt_channel",        STATIC}, /**/
	{ETAP_KKT_HANDLE,         ETAP_TRACE_OFF, "kkt_handle",         STATIC}, /**/
	{ETAP_KKT_MAP,            ETAP_TRACE_OFF, "kkt_map",            STATIC}, /**/
	{ETAP_KKT_RESOURCE,       ETAP_TRACE_OFF, "kkt_resource",       STATIC}, /**/

	{ETAP_XKERNEL_MASTER,     ETAP_TRACE_OFF, "xkernel_master",     STATIC}, /**/
	{ETAP_XKERNEL_EVENT,      ETAP_TRACE_OFF, "xkernel_event",      STATIC}, /**/
	{ETAP_XKERNEL_ETHINPUT,   ETAP_TRACE_OFF, "xkernel_input",      STATIC}, /**/

	{ETAP_MISC_AST,           ETAP_TRACE_OFF, "m_ast",              STATIC},
	{ETAP_MISC_CLOCK,         ETAP_TRACE_OFF, "m_clock",            STATIC},
	{ETAP_MISC_EMULATE,       ETAP_TRACE_OFF, "m_emulate",          34},
	{ETAP_MISC_EVENT,         ETAP_TRACE_OFF, "m_event",            STATIC},
	{ETAP_MISC_KDB,           ETAP_TRACE_OFF, "m_kdb",              STATIC},
	{ETAP_MISC_PCB,           ETAP_TRACE_OFF, "m_pcb",              35},
	{ETAP_MISC_PRINTF,        ETAP_TRACE_OFF, "m_printf",           STATIC},
	{ETAP_MISC_Q,             ETAP_TRACE_OFF, "m_q",                STATIC},
	{ETAP_MISC_RPC_SUBSYS,    ETAP_TRACE_OFF, "m_rpc_sub",          36},
	{ETAP_MISC_RT_CLOCK,      ETAP_TRACE_OFF, "m_rt_clock",         STATIC},
	{ETAP_MISC_SD_POOL,       ETAP_TRACE_OFF, "m_sd_pool",          STATIC},
	{ETAP_MISC_TIMER,         ETAP_TRACE_OFF, "m_timer",            STATIC},
	{ETAP_MISC_UTIME,         ETAP_TRACE_OFF, "m_utime",            STATIC},
	{ETAP_MISC_XPR,           ETAP_TRACE_OFF, "m_xpr",              STATIC},
	{ETAP_MISC_ZONE,          ETAP_TRACE_OFF, "m_zone",             37},
	{ETAP_MISC_ZONE_ALL,      ETAP_TRACE_OFF, "m_zone_all",         STATIC},
	{ETAP_MISC_ZONE_GET,      ETAP_TRACE_OFF, "m_zone_get",         STATIC},
	{ETAP_MISC_ZONE_PTABLE,   ETAP_TRACE_OFF, "m_zone_ptable",      STATIC}, /**/
	{ETAP_MISC_LEDGER,        ETAP_TRACE_OFF, "m_ledger",           STATIC}, /**/
	{ETAP_MISC_SCSIT_TGT,     ETAP_TRACE_OFF, "m_scsit_tgt_lock",   STATIC}, /**/
	{ETAP_MISC_SCSIT_SELF,    ETAP_TRACE_OFF, "m_scsit_self_lock",  STATIC}, /**/
	{ETAP_MISC_SPL,           ETAP_TRACE_OFF, "m_spl_lock",         STATIC}, /**/
	{ETAP_MISC_MASTER,        ETAP_TRACE_OFF, "m_master",           STATIC}, /**/
	{ETAP_MISC_FLOAT,         ETAP_TRACE_OFF, "m_float",            STATIC}, /**/
	{ETAP_MISC_GROUP,         ETAP_TRACE_OFF, "m_group",            STATIC}, /**/
	{ETAP_MISC_FLIPC,         ETAP_TRACE_OFF, "m_flipc",            STATIC}, /**/
	{ETAP_MISC_MP_IO,         ETAP_TRACE_OFF, "m_mp_io",            STATIC}, /**/
	{ETAP_MISC_KERNEL_TEST,   ETAP_TRACE_OFF, "m_kernel_test",      STATIC}, /**/

	{ETAP_NO_TRACE,           ETAP_TRACE_OFF, "NEVER_TRACE",        STATIC},
#endif /* ETAP_LOCK_TRACE */
};
/*
 * Variable initially pointing to the event table, then to its mappable
 * copy.  The cast is needed to discard the `const' qualifier; without it
 * gcc issues a warning.
 */
event_table_t event_table = (event_table_t) event_table_init;

/*
 * Linked list of pointers into event_table_init[] so they can be switched
 * into the mappable copy when it is made.
 */
struct event_table_chain *event_table_chain;

/*
 * Maximum number of event types in the event table.
 */
int event_table_max = sizeof(event_table_init)/sizeof(struct event_table_entry);
const struct subs_table_entry subs_table_init[] =
{
  /*------------------------------------------*
   * ETAP SUBSYSTEM           TEXT NAME       *
   *------------------------------------------*/

#if ETAP_EVENT_MONITOR
	{ETAP_SUBS_PROBE,         "event_probes"},
#endif /* ETAP_EVENT_MONITOR */

#if ETAP_LOCK_TRACE
	{ETAP_SUBS_LOCK_DIPC,     "lock_dipc"},
	{ETAP_SUBS_LOCK_IO,       "lock_io"},
	{ETAP_SUBS_LOCK_IPC,      "lock_ipc"},
	{ETAP_SUBS_LOCK_KKT,      "lock_kkt"},
	{ETAP_SUBS_LOCK_MISC,     "lock_misc"},
	{ETAP_SUBS_LOCK_NET,      "lock_net"},
	{ETAP_SUBS_LOCK_NORMA,    "lock_norma"},
	{ETAP_SUBS_LOCK_THREAD,   "lock_thread"},
	{ETAP_SUBS_LOCK_VM,       "lock_vm"},
	{ETAP_SUBS_LOCK_XKERNEL,  "lock_xkernel"},
#endif /* ETAP_LOCK_TRACE */
};

/*
 * Variable initially pointing to the subsystem table, then to its mappable
 * copy.
 */
subs_table_t subs_table = (subs_table_t) subs_table_init;

/*
 * Maximum number of subsystem types in the subsystem table.
 */
int subs_table_max = sizeof(subs_table_init)/sizeof(struct subs_table_entry);
#if ETAP_MONITOR

#define MAX_NAME_SIZE	35

#define SYS_TABLE_MACH_TRAP	0
#define SYS_TABLE_MACH_MESSAGE	1
#define SYS_TABLE_UNIX_SYSCALL	2
#define SYS_TABLE_INTERRUPT	3
#define SYS_TABLE_EXCEPTION	4

extern char *system_table_lookup(unsigned int table,
				 unsigned int number);
char *mach_trap_names[] = {
/* 26 */	"mach_reply_port",
/* 27 */	"mach_thread_self",
/* 28 */	"mach_task_self",
/* 29 */	"mach_host_self",
/* 30 */	"vm_read_overwrite",
/* 32 */	"mach_msg_overwrite_trap",
/* 35 */	"mach_rpc_trap",
/* 36 */	"mach_rpc_return_trap",
/* 41 */	"init_process",
/* 59 */	"swtch_pri",
/* 61 */	"thread_switch",
/* 62 */	"clock_sleep_trap",
};

#define N_MACH_TRAP_NAMES (sizeof mach_trap_names / sizeof mach_trap_names[0])
#define mach_trap_name(nu) \
	(((nu) < N_MACH_TRAP_NAMES) ? mach_trap_names[nu] : NULL)
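
/*
 * Usage sketch (illustrative only): mach_trap_name() is a bounds-checked
 * table lookup, so callers print a fallback for unnamed or out-of-range
 * numbers.  The trap number below is just a placeholder.
 *
 *	char *name = mach_trap_name(27);
 *
 *	printf("trap: %s\n", (name != NULL) ? name : "undefined");
 */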
struct table_entry {
	int	number;
	char	name[MAX_NAME_SIZE];
};
/*
 * Note: Most Mach system calls are actually implemented as messages.
 */
struct table_entry mach_message_table[] = {
	subsystem_to_name_map_bootstrap,
	subsystem_to_name_map_clock,
	subsystem_to_name_map_clock_reply,
	subsystem_to_name_map_default_pager_object,
	subsystem_to_name_map_device,
	subsystem_to_name_map_device_reply,
	subsystem_to_name_map_device_request,
	subsystem_to_name_map_exc,
/*	subsystem_to_name_map_mach,*/
	subsystem_to_name_map_mach_debug,
/*	subsystem_to_name_map_mach_host,*/
	subsystem_to_name_map_mach_norma,
	subsystem_to_name_map_mach_port,
	subsystem_to_name_map_memory_object,
	subsystem_to_name_map_memory_object_default,
	subsystem_to_name_map_notify,
	subsystem_to_name_map_prof,
	subsystem_to_name_map_sync
};

int mach_message_table_entries = sizeof(mach_message_table) /
				 sizeof(struct table_entry);

#endif /* ETAP_MONITOR */
/*
 * ================================
 * Initialization routines for ETAP
 * ================================
 */

/*
 * ROUTINE:	etap_init_phase1	[internal]
 *
 * FUNCTION:	Event trace instrumentation initialization phase
 *		one of two.  The static phase.  The cumulative buffer
 *		is initialized here.
 *
 * NOTES:	The cumulative buffer is statically allocated and
 *		must be initialized before the first simple_lock_init()
 *		or lock_init() call is made.
 *
 *		The first lock init call is made before dynamic allocation
 *		is available.  Hence, phase one is executed before dynamic
 *		memory allocation is available.
 *
 */
void
etap_init_phase1(void)
{
#if ETAP_LOCK_ACCUMULATE || MACH_ASSERT
	int	x;
#if MACH_ASSERT
	boolean_t out_of_order;
#endif /* MACH_ASSERT */
#endif /* ETAP_LOCK_ACCUMULATE || MACH_ASSERT */

#if ETAP_LOCK_ACCUMULATE
	/*
	 * Initialize Cumulative Buffer
	 *
	 * Note: The cumulative buffer is statically allocated.
	 *	 This static allocation is necessary since most
	 *	 of the lock_init calls are made before dynamic
	 *	 allocation routines are available.
	 */

	/*
	 * Align cumulative buffer pointer to a page boundary
	 * (so it can be mapped).
	 */

	bzero(&cbuff_allocated[0], CBUFF_ALLOCATED_SIZE);
	cbuff = (cumulative_buffer_t) round_page(&cbuff_allocated);

	simple_lock_init(&cbuff_lock, ETAP_NO_TRACE);

	/*
	 * Set the starting point for cumulative buffer entry
	 * reservations.
	 *
	 * This value must leave enough head room in the
	 * cumulative buffer to contain all dynamic events.
	 */

	for (x = 0; x < event_table_max; x++)
		if (event_table[x].dynamic > cbuff->static_start)
			cbuff->static_start = event_table[x].dynamic;

	cbuff->next = cbuff->static_start;
#endif /* ETAP_LOCK_ACCUMULATE */

	/*
	 * Initialize the event table lock
	 */

	simple_lock_init(&event_table_lock, ETAP_NO_TRACE);

#if MACH_ASSERT
	/*
	 * Check that events are in numerical order so we can do a binary
	 * search on them.  Even better would be to make event numbers be
	 * simple contiguous indexes into event_table[], but that would
	 * break the coding of subsystems in the event number.
	 */
	out_of_order = FALSE;
	for (x = 1; x < event_table_max; x++) {
		if (event_table[x - 1].event > event_table[x].event) {
			printf("events out of order: %s > %s\n",
			       event_table[x - 1].name, event_table[x].name);
			out_of_order = TRUE;
		}
	}

	if (out_of_order)
		panic("etap_init_phase1");
#endif /* MACH_ASSERT */
}
/*
 * ROUTINE:	etap_init_phase2	[internal]
 *
 * FUNCTION:	Event trace instrumentation initialization phase
 *		two of two.  The dynamic phase.  The monitored buffers
 *		are dynamically allocated and initialized.  Cumulative
 *		dynamic entry locks are allocated and initialized.  The
 *		start_data_pool is initialized.
 *
 * NOTES:	Phase two is executed once dynamic memory allocation
 *		is available.
 *
 */
void
etap_init_phase2(void)
{
	int		size;
	int		x;
	int		ret;
	vm_offset_t	table_copy;
	struct event_table_chain *chainp;

	/*
	 * Make mappable copies of the event_table and the subs_table.
	 * These tables were originally mapped as they appear in the
	 * kernel image, but that meant that other kernel variables could
	 * end up being mapped with them, which is ugly.  It also didn't
	 * work on the HP/PA, where pages with physical address == virtual
	 * address do not have real pmap entries allocated and therefore
	 * can't be mapped.
	 */

	size = sizeof event_table_init + sizeof subs_table_init;
	ret = kmem_alloc(kernel_map, &table_copy, size);
	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating table copies");
	event_table = (event_table_t) table_copy;
	subs_table = (subs_table_t) (table_copy + sizeof event_table_init);
	bcopy((char *) event_table_init, (char *) event_table,
	      sizeof event_table_init);
	bcopy((char *) subs_table_init, (char *) subs_table,
	      sizeof subs_table_init);

	/* Switch pointers from the old event_table to the new. */
	for (chainp = event_table_chain; chainp != NULL;
	     chainp = chainp->event_table_link) {
		x = chainp->event_tablep - event_table_init;
		assert(x < event_table_max);
		chainp->event_tablep = event_table + x;
	}
#if ETAP_LOCK_ACCUMULATE

	/*
	 * Because several dynamic locks can point to a single
	 * cumulative buffer entry, dynamic lock writes to the
	 * entry are synchronized.
	 *
	 * The spin locks are allocated here.
	 */
#if MACH_LDEBUG
	size = sizeof(simple_lock_t) * cbuff->static_start;
#else  /* MACH_LDEBUG */
	/*
	 * Note: These locks are different from traditional spin locks.
	 *	 They are of type int instead of type simple_lock_t.
	 *	 We can reduce lock size this way, since no tracing will
	 *	 EVER be performed on these locks.
	 */
	size = sizeof(simple_lock_data_t) * cbuff->static_start;
#endif /* MACH_LDEBUG */

	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cbuff_locks, size);
	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating cumulative write locks");

#if MACH_LDEBUG
	for (x = 0; x < cbuff->static_start; ++x) {
		simple_lock_init(&cbuff_locks[x], ETAP_NO_TRACE);
	}
#else  /* MACH_LDEBUG */
	bzero((const char *) cbuff_locks, size);
#endif /* MACH_LDEBUG */

#endif /* ETAP_LOCK_ACCUMULATE */
#if ETAP_MONITOR
	/*
	 * Monitor buffer allocation
	 */

	size = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
	       sizeof(struct monitor_buffer);

	for (x = 0; x < NCPUS; x++) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *) &mbuff[x],
				 size);

		if (ret != KERN_SUCCESS)
			panic ("ETAP: error allocating monitor buffer\n");

		/* zero fill buffer */
		bzero((char *) mbuff[x], size);
	}

#endif /* ETAP_MONITOR */

#if ETAP_LOCK_TRACE
	/*
	 * Initialize the start_data_pool
	 */

	init_start_data_pool();

#endif /* ETAP_LOCK_TRACE */
}
#if ETAP_LOCK_ACCUMULATE

/*
 * ROUTINE:	etap_cbuff_reserve	[internal]
 *
 * FUNCTION:	The cumulative buffer operation which returns a pointer
 *		to a free entry in the cumulative buffer.
 *
 * NOTES:	Disables interrupts.
 *
 */
cbuff_entry_t
etap_cbuff_reserve(event_table_t etp)
{
	cbuff_entry_t	avail;
	unsigned long	de;
	spl_t		s;

	/* see if type pointer is initialized */
	if (etp == EVENT_TABLE_NULL || etp->event == ETAP_NO_TRACE)
		return (CBUFF_ENTRY_NULL);

	/* check for DYNAMIC lock */
	if (de = etp->dynamic) {
		if (de <= cbuff->static_start)
			return (&cbuff->entry[de-1]);
		else {
			printf("ETAP: dynamic lock index error [%lu]\n", de);
			return (CBUFF_ENTRY_NULL);
		}
	}

	cumulative_buffer_lock(s);

	/* if buffer is full, reservation requests fail */
	if (cbuff->next >= ETAP_CBUFF_ENTRIES) {
		cumulative_buffer_unlock(s);
		return (CBUFF_ENTRY_NULL);
	}

	avail = &cbuff->entry[cbuff->next++];

	cumulative_buffer_unlock(s);

	return (avail);
}

#endif /* ETAP_LOCK_ACCUMULATE */
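
/*
 * Usage sketch (illustrative only): lock instrumentation reserves its
 * cumulative entry once and caches the result; a NULL return means the
 * buffer is exhausted (or tracing is off) and statistics are skipped.
 * The variable names below are hypothetical.
 *
 *	cbuff_entry_t entry = etap_cbuff_reserve(event_tablep);
 *
 *	if (entry != CBUFF_ENTRY_NULL)
 *		... accumulate hold/wait statistics into *entry ...
 */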
/*
 * ROUTINE:	etap_event_table_find	[internal]
 *
 * FUNCTION:	Returns a pointer to the assigned event type table entry,
 *		using the event type as the index key.
 *
 */
event_table_t
etap_event_table_find(etap_event_t event)
{
	int last_before, first_after, try;

	/* Binary search for the event number.  last_before is the highest-
	   numbered element known to be <= the number we're looking for;
	   first_after is the lowest-numbered element known to be >. */

	last_before = 0;
	first_after = event_table_max;
	while (last_before < first_after) {
		try = (last_before + first_after) >> 1;
		if (event_table[try].event == event)
			return (&event_table[try]);
		else if (event_table[try].event < event)
			last_before = try + 1;
		else
			first_after = try;
	}
	return EVENT_TABLE_NULL;
}
void
etap_event_table_assign(struct event_table_chain *chainp, etap_event_t event)
{
	event_table_t event_tablep;

	event_tablep = etap_event_table_find(event);
	if (event_tablep == EVENT_TABLE_NULL)
		printf("\nETAP: event not found in event table: %x\n", event);
	else {
		if (event_table == event_table_init) {
			chainp->event_table_link = event_table_chain;
			event_table_chain = chainp;
		}
		chainp->event_tablep = event_tablep;
	}
}
/*
 * MESSAGE:	etap_get_info	[exported]
 *
 * FUNCTION:	Provides the server with ETAP buffer configurations.
 *
 */
kern_return_t
etap_get_info(
	host_priv_t	host_priv,
	int		*et_entries,
	int		*st_entries,
	vm_offset_t	*et_offset,
	vm_offset_t	*st_offset,
	int		*cb_width,
	int		*mb_size,
	int		*mb_entries,
	int		*mb_cpus)
{
	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

#if ETAP_LOCK_TRACE
	*et_entries	= event_table_max;
	*st_entries	= subs_table_max;
	*et_offset	= (vm_offset_t) ((char*) event_table -
			  trunc_page((char*) event_table));
	*st_offset	= (vm_offset_t) ((char*) subs_table -
			  trunc_page((char*) subs_table));
#else  /* ETAP_LOCK_TRACE */
	*et_entries	= 0;
	*st_entries	= 0;
	*et_offset	= 0;
	*st_offset	= 0;
#endif /* ETAP_LOCK_TRACE */

#if ETAP_LOCK_ACCUMULATE
	*cb_width	= cbuff_width;
#else  /* ETAP_LOCK_ACCUMULATE */
	*cb_width	= 0;
#endif /* ETAP_LOCK_ACCUMULATE */

#if ETAP_MONITOR
	*mb_size	= ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
			  sizeof(struct monitor_buffer);
	*mb_entries	= mbuff_entries;
	*mb_cpus	= NCPUS;
#else  /* ETAP_MONITOR */
	*mb_size	= 0;
	*mb_entries	= 0;
	*mb_cpus	= 0;
#endif /* ETAP_MONITOR */

	return (KERN_SUCCESS);
}
/*
 * ROUTINE:	etap_trace_event [exported]
 *
 * FUNCTION:	The etap_trace_event system call is the user's interface to
 *		the ETAP kernel instrumentation.
 *
 *		This call allows the user to enable and disable tracing modes
 *		on specific event types.  The call also supports a reset option,
 *		where the cumulative buffer data and all event type tracing
 *		is reset to zero.  When the reset option is used, a new
 *		interval width can also be defined using the op parameter.
 *
 */
kern_return_t
etap_trace_event (
	unsigned short	mode,
	unsigned short	type,
	boolean_t	enable,
	unsigned int	nargs,
	unsigned short	args[])
{
#if ETAP_LOCK_TRACE
	event_table_t	event_tablep;
	kern_return_t	ret;
	int		i, args_size;
	unsigned short	status_mask;
	unsigned short	*tmp_args;

	/*
	 * Initialize operation
	 */

	if (mode == ETAP_RESET) {
		etap_trace_reset(nargs);
		return (KERN_SUCCESS);
	}

	status_mask = mode & type;

	/*
	 * Copy args array from user space to kernel space
	 */

	args_size = nargs * sizeof *args;
	tmp_args = (unsigned short *) kalloc(args_size);

	if (tmp_args == NULL)
		return (KERN_NO_SPACE);

	if (copyin((const char *) args, (char *) tmp_args, args_size)) {
		kfree((vm_offset_t) tmp_args, args_size);
		return (KERN_INVALID_ADDRESS);
	}

	/*
	 * Change appropriate status fields in the event table
	 */

	event_table_lock();

	for (i = 0; i < nargs; i++) {
		if (tmp_args[i] != ETAP_NO_TRACE) {
			event_tablep = etap_event_table_find(tmp_args[i]);
			if (event_tablep == EVENT_TABLE_NULL)
				break;
			if (enable)
				event_tablep->status |= status_mask;
			else
				event_tablep->status &= ~status_mask;
		}
	}

	ret = (i < nargs) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	event_table_unlock();

	kfree((vm_offset_t) tmp_args, args_size);

	return (ret);

#else  /* ETAP_LOCK_TRACE */
	return (KERN_FAILURE);
#endif /* ETAP_LOCK_TRACE */
}
/*
 * ROUTINE:	etap_trace_reset [internal]
 *
 * FUNCTION:	Turns off all tracing and erases all the data accumulated
 *		in the cumulative buffer.  If the user defined a new
 *		cumulative buffer interval width, it will be assigned here.
 *
 */
void
etap_trace_reset(int new_interval)
{
	event_table_t	scan;
	int		x;
	spl_t		s;

	/*
	 * Wipe out trace fields in event table
	 */

	event_table_lock();

	scan = event_table;

	for (x = 0; x < event_table_max; x++) {
		scan->status = ETAP_TRACE_OFF;
		scan++;
	}

	event_table_unlock();

#if ETAP_LOCK_ACCUMULATE

	/*
	 * Wipe out cumulative buffer statistical fields for all entries
	 */

	cumulative_buffer_lock(s);

	for (x = 0; x < ETAP_CBUFF_ENTRIES; x++) {
		bzero((char *) &cbuff->entry[x].hold,
		      sizeof(struct cbuff_data));
		bzero((char *) &cbuff->entry[x].wait,
		      sizeof(struct cbuff_data));
		bzero((char *) &cbuff->entry[x].hold_interval[0],
		      sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
		bzero((char *) &cbuff->entry[x].wait_interval[0],
		      sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
	}

	/*
	 * Assign interval width if the user defined a new one.
	 */

	if (new_interval != 0)
		cbuff_width = new_interval;

	cumulative_buffer_unlock(s);

#endif /* ETAP_LOCK_ACCUMULATE */
}
/*
 * ROUTINE:	etap_probe [exported]
 *
 * FUNCTION:	The etap_probe system call serves as a user-level probe,
 *		allowing user-level code to store event data into
 *		the monitored buffer(s).
 */
kern_return_t
etap_probe(
	unsigned short	event_type,
	unsigned short	event_id,
	unsigned int	data_size,	/* total size in bytes */
	etap_data_t	*data)
{
#if ETAP_MONITOR
	mbuff_entry_t	mbuff_entryp;
	int		cpu;
	int		free;
	spl_t		s;

	if (data_size > ETAP_DATA_SIZE)
		return (KERN_INVALID_ARGUMENT);

	if (event_table[event_type].status == ETAP_TRACE_OFF ||
	    event_table[event_type].event != event_type)
		return (KERN_NO_ACCESS);

	mp_disable_preemption();
	cpu = cpu_number();
	s = splhigh();

	free = mbuff[cpu]->free;
	mbuff_entryp = &mbuff[cpu]->entry[free];

	/*
	 * Load monitor buffer entry
	 */

	ETAP_TIMESTAMP(mbuff_entryp->time);
	mbuff_entryp->event	= event_id;
	mbuff_entryp->flags	= USER_EVENT;
	mbuff_entryp->instance	= (u_int) current_thread();
	mbuff_entryp->pc	= 0;

	if (data != ETAP_DATA_NULL)
		copyin((const char *) data,
		       (char *) mbuff_entryp->data,
		       data_size);

	mbuff[cpu]->free = (free+1) % mbuff_entries;

	if (mbuff[cpu]->free == 0)
		mbuff[cpu]->timestamp++;

	splx(s);
	mp_enable_preemption();

	return (KERN_SUCCESS);

#else  /* ETAP_MONITOR */
	return (KERN_FAILURE);
#endif /* ETAP_MONITOR */
}
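
/*
 * Usage sketch (illustrative only): a user-level task tags a code region
 * with one of the p_user_event* probes.  The payload layout is up to the
 * application; the payload value and size below are placeholders.
 *
 *	etap_data_t probe_data;
 *
 *	ETAP_DATA_LOAD(probe_data[0], 0xbeef);
 *	(void) etap_probe(ETAP_P_USER_EVENT0, 1, ETAP_DATA_ENTRY,
 *			  &probe_data);
 */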
/*
 * ROUTINE:	etap_trace_thread [exported]
 *
 * FUNCTION:	Toggles a thread's ETAP trace status bit.
 */
kern_return_t
etap_trace_thread(
	thread_act_t	thr_act,
	boolean_t	trace_status)
{
#if ETAP_EVENT_MONITOR
	thread_t	thread;
	boolean_t	old_status;
	etap_data_t	probe_data;
	spl_t		s;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);

	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return (KERN_INVALID_ARGUMENT);
	}

	s = splsched();
	thread_lock(thread);

	old_status = thread->etap_trace;
	thread->etap_trace = trace_status;

	ETAP_DATA_LOAD(probe_data[0], thr_act->task);
	ETAP_DATA_LOAD(probe_data[1], thr_act);
	ETAP_DATA_LOAD(probe_data[2], thread->sched_pri);

	thread_unlock(thread);
	splx(s);

	act_unlock_thread(thr_act);

	/*
	 * Thread creation (ETAP_P_THREAD_LIFE: BEGIN) is ONLY recorded
	 * here since a thread's trace status is disabled by default.
	 */
	if (trace_status == TRUE && old_status == FALSE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_BEGIN,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	/*
	 * Thread termination is (falsely) recorded here if the trace
	 * status has been disabled.  This event is recorded to allow
	 * users the option of tracing a portion of a thread's execution.
	 */
	if (trace_status == FALSE && old_status == TRUE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_END,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	return (KERN_SUCCESS);

#else  /* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif /* ETAP_EVENT_MONITOR */
}
/*
 * ROUTINE:	etap_mon_reconfig [exported]
 *
 * FUNCTION:	Reallocates monitor buffers to hold the specified number
 *		of entries.
 *
 * NOTES:	In the multiprocessor (SMP) case, a lock needs to be added
 *		here and in the data collection macros to protect access
 *		to the buffers.
 */
kern_return_t
etap_mon_reconfig(
	host_priv_t	host_priv,
	int		nentries)
{
#if ETAP_EVENT_MONITOR
	struct monitor_buffer *nmbuff[NCPUS], *ombuff[NCPUS];
	int s, size, osize, i, ret;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (nentries <= 0)	/* must be at least 1 */
		return (KERN_FAILURE);

	size = ((nentries-1) * sizeof(struct mbuff_entry)) +
	       sizeof(struct monitor_buffer);

	for (i = 0; i < NCPUS; ++i) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *) &nmbuff[i],
				 size);
		if (ret != KERN_SUCCESS) {
			int j;

			for (j = 0; j < i; ++j) {
				kmem_free(kernel_map,
					  (vm_offset_t) nmbuff[j],
					  size);
			}
			return (ret);
		}
		bzero((char *) nmbuff[i], size);
	}

	osize = ((mbuff_entries-1) * sizeof (struct mbuff_entry)) +
		sizeof (struct monitor_buffer);

	s = splhigh();
	event_table_lock();
	for (i = 0; i < NCPUS; ++i) {
		ombuff[i] = mbuff[i];
		mbuff[i] = nmbuff[i];
	}
	mbuff_entries = nentries;
	event_table_unlock();
	splx(s);

	for (i = 0; i < NCPUS; ++i) {
		kmem_free(kernel_map,
			  (vm_offset_t) ombuff[i],
			  osize);
	}

	return (KERN_SUCCESS);
#else  /* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif /* ETAP_EVENT_MONITOR */
}
/*
 * ROUTINE:	etap_new_probe [exported]
 *
 * FUNCTION:	Reallocates the monitor probe table, adding a new entry.
 *
 */
kern_return_t
etap_new_probe(
	host_priv_t	host_priv,
	vm_address_t	name,
	vm_size_t	namlen,
	boolean_t	trace_on,
	vm_address_t	id)
{
#if ETAP_EVENT_MONITOR
	event_table_t	newtable, oldtable;
	unsigned short	i, nid;
	int		s;
	vm_size_t	newsize = (event_table_max + 1) *
				  sizeof (struct event_table_entry);
	boolean_t	duplicate_name = FALSE;
	kern_return_t	ret;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (namlen > EVENT_NAME_LENGTH - 1)
		return (KERN_INVALID_ARGUMENT);

	if ((ret = kmem_alloc(kernel_map, (vm_address_t *) &newtable,
			      newsize)) != KERN_SUCCESS)
		return (ret);

	bcopy((const char *) event_table, (char *) newtable, event_table_max *
	      sizeof (struct event_table_entry));

	if (copyin((const char *) name,
		   (char *) &newtable[event_table_max].name, namlen)) {
		kmem_free(kernel_map, (vm_address_t) newtable, newsize);
		return (KERN_INVALID_ADDRESS);
	}

	newtable[event_table_max].name[EVENT_NAME_LENGTH - 1] = '\0';
	newtable[event_table_max].status = trace_on;
	newtable[event_table_max].dynamic = 0;

	for (nid = i = 0; i < event_table_max; ++i) {
		if (strcmp((char *) newtable[event_table_max].name,
			   newtable[i].name) == 0) {
			duplicate_name = TRUE;
			printf("duplicate name\n");
		}
		nid = max(nid, newtable[i].event);
	}
	++nid;

	if (nid >= ETAP_NO_TRACE || duplicate_name == TRUE) {
		kmem_free(kernel_map, (vm_address_t) newtable, newsize);
		if (nid >= ETAP_NO_TRACE) {
			printf("KERN_RESOURCE_SHORTAGE\n");
			return (KERN_RESOURCE_SHORTAGE);
		} else {
			printf("KERN_NAME_EXISTS\n");
			return (KERN_NAME_EXISTS);
		}
	}

	newtable[event_table_max].event = nid;

	s = splhigh();
	event_table_lock();
	oldtable = event_table;
	event_table = newtable;
	++event_table_max;
	event_table_unlock();
	splx(s);

	if (oldtable != event_table_init)
		kmem_free(kernel_map, (vm_address_t) oldtable,
			  (event_table_max - 1) *
			  sizeof (struct event_table_entry));

	*(unsigned short *) id = nid;

	return (KERN_SUCCESS);
#else  /* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif /* ETAP_EVENT_MONITOR */
}
/*
 * ETAP trap probe hooks
 */

void
etap_interrupt_probe(int interrupt, int flag_setting)
{
	u_short	flag;

	if (flag_setting == 1)
		flag = EVENT_BEGIN;
	else
		flag = EVENT_END;

	ETAP_PROBE_DATA_COND(ETAP_P_INTERRUPT,
			     flag,
			     current_thread(),
			     &interrupt,
			     sizeof(int),
			     1);
}

void
etap_machcall_probe1(int syscall)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_BEGIN | SYSCALL_TRAP,
			current_thread(),
			&syscall,
			sizeof(int));
}

void
etap_machcall_probe2(void)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_END | SYSCALL_TRAP,
			current_thread(),
			0,
			0);
}
#if ETAP_MONITOR
#if MACH_KDB

static void print_user_event(mbuff_entry_t);
static void print_kernel_event(mbuff_entry_t, boolean_t);
static void print_lock_event(mbuff_entry_t, const char *);

void db_show_etap_log(db_expr_t, boolean_t, db_expr_t, char *);
/*
 * ROUTINE:	etap_print [internal]
 *
 * FUNCTION:	Print each mbuff table (for use in the debugger).
 *
 */
void
db_show_etap_log(
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		*modif)
{
#if ETAP_MONITOR
	int		cpu = cpu_number(), last, i, first, step, end, restart;
	boolean_t	show_data = FALSE;

	last = (mbuff[cpu]->free - 1) % mbuff_entries;

	if (db_option(modif, 'r')) {
		/* print in reverse chronological order */
		first	= last;
		step	= -1;
		end	= -1;
		restart	= mbuff_entries - 1;
	} else {
		first	= last + 1;
		step	= 1;
		end	= mbuff_entries;
		restart	= 0;
	}

	if (db_option(modif, 'd'))
		show_data = TRUE;

	for (i = first; i != end; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
	for (i = restart; i != first; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
#else  /* ETAP_MONITOR */
	printf("ETAP event monitor not configured\n");
#endif /* ETAP_MONITOR */
}
void
print_user_event(mbuff_entry_t record)
{
	char	*s, buf[256];

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);

	switch (record->event) {
	case ETAP_P_USER_EVENT0:  s = "0";  break;
	case ETAP_P_USER_EVENT1:  s = "1";  break;
	case ETAP_P_USER_EVENT2:  s = "2";  break;
	case ETAP_P_USER_EVENT3:  s = "3";  break;
	case ETAP_P_USER_EVENT4:  s = "4";  break;
	case ETAP_P_USER_EVENT5:  s = "5";  break;
	case ETAP_P_USER_EVENT6:  s = "6";  break;
	case ETAP_P_USER_EVENT7:  s = "7";  break;
	case ETAP_P_USER_EVENT8:  s = "8";  break;
	case ETAP_P_USER_EVENT9:  s = "9";  break;
	case ETAP_P_USER_EVENT10: s = "10"; break;
	case ETAP_P_USER_EVENT11: s = "11"; break;
	case ETAP_P_USER_EVENT12: s = "12"; break;
	case ETAP_P_USER_EVENT13: s = "13"; break;
	case ETAP_P_USER_EVENT14: s = "14"; break;
	case ETAP_P_USER_EVENT15: s = "15"; break;
	case ETAP_P_USER_EVENT16: s = "16"; break;
	case ETAP_P_USER_EVENT17: s = "17"; break;
	case ETAP_P_USER_EVENT18: s = "18"; break;
	case ETAP_P_USER_EVENT19: s = "19"; break;
	case ETAP_P_USER_EVENT20: s = "20"; break;
	case ETAP_P_USER_EVENT21: s = "21"; break;
	case ETAP_P_USER_EVENT22: s = "22"; break;
	case ETAP_P_USER_EVENT23: s = "23"; break;
	case ETAP_P_USER_EVENT24: s = "24"; break;
	case ETAP_P_USER_EVENT25: s = "25"; break;
	case ETAP_P_USER_EVENT26: s = "26"; break;
	case ETAP_P_USER_EVENT27: s = "27"; break;
	case ETAP_P_USER_EVENT28: s = "28"; break;
	case ETAP_P_USER_EVENT29: s = "29"; break;
	case ETAP_P_USER_EVENT30: s = "30"; break;
	case ETAP_P_USER_EVENT31: s = "31"; break;
	default:
		sprintf(buf, "dynamic %x", record->pc);
		s = buf;
		break;
	}

	db_printf("user probe %s: [%x] data = %x %x %x %x\n",
		  s, record->pc,
		  record->data[0], record->data[1],
		  record->data[2], record->data[3]);
}
void
print_kernel_event(mbuff_entry_t record, boolean_t data)
{
	char	*text_name;
	int	i;

	/* assume zero event means that record was never written to */
	if (record->event == 0)
		return;

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);

	switch (record->event) {

	case ETAP_P_THREAD_LIFE:
		if (record->flags & EVENT_BEGIN)
			db_printf("thread created [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		else
			db_printf("thread terminated [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		break;

	case ETAP_P_SYSCALL_MACH:
		if (record->flags & SYSCALL_TRAP)
			text_name = system_table_lookup(SYS_TABLE_MACH_TRAP,
							record->data[0]);
		else
			text_name = system_table_lookup(SYS_TABLE_MACH_MESSAGE,
							record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("mach enter: %s [%x]\n",
				  text_name, record->data[0]);
		else
			db_printf("mach exit :\n");
		break;

	case ETAP_P_SYSCALL_UNIX:
		text_name = system_table_lookup(SYS_TABLE_UNIX_SYSCALL,
						record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("unix enter: %s\n", text_name);
		else
			db_printf("unix exit : %s\n", text_name);
		break;

	case ETAP_P_THREAD_CTX:
		if (record->flags & EVENT_END)
			db_printf("context switch to %x ",
				  record->data[0]);
		else	/* EVENT_BEGIN */
			db_printf("context switch from %x ",
				  record->data[0]);

		switch (record->data[1]) {
		case BLOCKED_ON_SEMAPHORE:
			db_printf("R: semaphore\n"); break;
		case BLOCKED_ON_LOCK:
			db_printf("R: lock\n"); break;
		case BLOCKED_ON_MUTEX_LOCK:
			db_printf("R: mutex lock\n"); break;
		case BLOCKED_ON_COMPLEX_LOCK:
			db_printf("R: complex lock\n"); break;
		case BLOCKED_ON_PORT_RCV:
			db_printf("R: port receive\n"); break;
		case BLOCKED_ON_REAPER_DONE:
			db_printf("R: reaper thread done\n"); break;
		case BLOCKED_ON_IDLE_DONE:
			db_printf("R: idle thread done\n"); break;
		case BLOCKED_ON_TERMINATION:
			db_printf("R: termination\n"); break;
		default:
			if (record->data[2])
				db_printf("R: ast %x\n", record->data[2]);
			else
				db_printf("R: undefined block\n");
		}
		break;

	case ETAP_P_INTERRUPT:
		if (record->flags & EVENT_BEGIN) {
			text_name = system_table_lookup(SYS_TABLE_INTERRUPT,
							record->data[0]);
			db_printf("intr enter: %s\n", text_name);
		} else
			db_printf("intr exit\n");
		break;

	case ETAP_P_ACT_ABORT:
		db_printf("activation abort [A %x : S %x]\n",
			  record->data[1],
			  record->data[0]);
		break;

	case ETAP_P_PRIORITY:
		db_printf("priority changed for %x N:%d O:%d\n",
			  record->data[0],
			  record->data[1],
			  record->data[2]);
		break;

	case ETAP_P_EXCEPTION:
		text_name = system_table_lookup(SYS_TABLE_EXCEPTION,
						record->data[0]);
		db_printf("exception: %s\n", text_name);
		break;

	case ETAP_P_DEPRESSION:
		if (record->flags & EVENT_BEGIN)
			db_printf("priority depressed\n");
		else {
			if (record->data[0] == 0)
				db_printf("priority undepressed : timed out\n");
			else
				db_printf("priority undepressed : self inflicted\n");
		}
		break;

	case ETAP_P_MISC:
		db_printf("flags: %x data: %x %x %x %x\n", record->flags,
			  record->data[0], record->data[1], record->data[2],
			  record->data[3]);
		break;

	case ETAP_P_DETAP:
		printf("flags: %x rtc: %x %09x dtime: %x %09x\n",
		       record->flags, record->data[0], record->data[1],
		       record->data[2], record->data[3]);
		break;

	default:
		for (i = 0; event_table_init[i].event != ETAP_NO_TRACE; ++i)
			if (record->event == event_table_init[i].event) {
				print_lock_event(record, event_table_init[i].name);
				return;
			}
		db_printf("Unknown event: %d\n", record->event);
		return;
	}

	if (data)
		db_printf("    Data: %08x %08x %08x %08x\n", record->data[0],
			  record->data[1], record->data[2], record->data[3]);
}
void
print_lock_event(mbuff_entry_t record, const char *name)
{
	char		*sym1, *sym2;
	db_addr_t	offset1, offset2;

	db_find_sym_and_offset(record->data[0], &sym1, &offset1);

	db_printf("%15s", name);

	if (record->flags & SPIN_LOCK)
		printf(" spin       ");
	else if (record->flags & READ_LOCK)
		printf(" read       ");
	else if (record->flags & WRITE_LOCK)
		printf(" write      ");
	else
		printf(" undefined  ");

	if (record->flags & ETAP_CONTENTION) {
		db_printf("wait lock %s+%x\n",
			  sym1, offset1);
	}
	else if (record->flags & ETAP_DURATION) {
		db_find_sym_and_offset(record->data[1], &sym2, &offset2);
		db_printf("lock %x+%x unlock %x+%x\n",
			  sym1, offset1, sym2, offset2);
	} else
		db_printf("illegal op: neither HOLD nor WAIT is specified\n");
}
char *
system_table_lookup(unsigned int table, unsigned int number)
{
	int		x;
	char		*name = NULL;
	unsigned int	offset;

	switch (table) {

	case SYS_TABLE_MACH_TRAP:
		name = mach_trap_name(number >> 4);
		break;

	case SYS_TABLE_MACH_MESSAGE:
		for (x = 0; x < mach_message_table_entries; x++) {
			if (mach_message_table[x].number == number) {
				name = mach_message_table[x].name;
				break;
			}
		}
		break;

	case SYS_TABLE_UNIX_SYSCALL:
		name = syscall_name(number);
		break;

	case SYS_TABLE_INTERRUPT:
		db_find_sym_and_offset((int) ivect[number], &name, &offset);
		break;

	case SYS_TABLE_EXCEPTION:
		name = exception_name(number);
		break;
	}

	return (name != NULL) ? name : "undefined";
}

#endif /* MACH_KDB */
#endif /* ETAP_MONITOR */