/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/host.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <machine/machine_tables.h>
#include <mach/clock.h>
#include <mach/clock_reply.h>
#include <mach/default_pager_object.h>
#include <device/device.h>
#include <device/device_reply.h>
#include <device/device_request.h>
#include <mach_debug/mach_debug.h>
/*#include <mach/mach_host.h>*/
#include <mach/mach_norma.h>
#include <mach/mach_port.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_user.h>
#include <mach/notify_server.h>
#include <mach/prof.h>
#include <machine/unix_map.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_command.h>
kern_return_t etap_get_info(host_priv_t, int*, int*, vm_offset_t*, vm_offset_t*,
			    int*, int*, int*, int*);

kern_return_t etap_mon_reconfig(host_priv_t, int);

kern_return_t etap_new_probe(host_priv_t, vm_address_t, vm_size_t,
			     boolean_t, vm_address_t);

kern_return_t etap_trace_thread(thread_act_t, boolean_t);

void etap_trace_reset(int);
void etap_interrupt_probe(int, int);
void etap_machcall_probe1(int);
void etap_machcall_probe2(void);

#define max(x,y) (((x) > (y)) ? (x) : (y))

event_table_t etap_event_table_find(etap_event_t);
/* =======================
 *  ETAP Lock definitions
 * =======================
 */

#if	ETAP_LOCK_TRACE
#define etap_lock	simple_lock_no_trace
#define etap_unlock	simple_unlock_no_trace
#else	/* ETAP_LOCK_TRACE */
#define etap_lock	simple_lock
#define etap_unlock	simple_unlock
#endif	/* ETAP_LOCK_TRACE */

#define event_table_lock()	etap_lock(&event_table_lock)
#define event_table_unlock()	etap_unlock(&event_table_lock)

#define cumulative_buffer_lock(s)		\
MACRO_BEGIN					\
	s = splhigh();				\
	etap_lock(&cbuff_lock);			\
MACRO_END

#define cumulative_buffer_unlock(s)		\
MACRO_BEGIN					\
	etap_unlock(&cbuff_lock);		\
	splx(s);				\
MACRO_END
#if	ETAP_LOCK_ACCUMULATE

/* ========================================
 *  ETAP Cumulative lock trace definitions
 * ========================================
 */

int	cbuff_width = ETAP_CBUFF_WIDTH;

/*
 *  Cumulative buffer declaration
 *
 *  For both protection and mapping purposes, the cumulative
 *  buffer must be aligned on a page boundary.  Since the cumulative
 *  buffer must be statically defined, page boundary alignment is not
 *  guaranteed.  Instead, the buffer is allocated with 2 extra pages.
 *  The cumulative buffer pointer will round up to the nearest page.
 *  This will guarantee page boundary alignment.
 */

#define TWO_PAGES		16384	/* XXX does this apply ?? */
#define CBUFF_ALLOCATED_SIZE	sizeof(struct cumulative_buffer)+TWO_PAGES
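
/*
 * Worked example of the alignment scheme (illustrative; assumes an 8KB
 * page size, so TWO_PAGES == 2 * 8192): if cbuff_allocated happens to
 * start at 0x...2345, round_page() advances the buffer pointer to
 * 0x...4000.  At most PAGE_SIZE - 1 bytes are skipped at the front, and
 * the two pages of padding absorb both that skip and any tail overhang,
 * so a full, page-aligned struct cumulative_buffer always fits.
 */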
decl_simple_lock_data(,cbuff_lock)
#if	MACH_LDEBUG
simple_lock_t		cbuff_locks;
#else	/* MACH_LDEBUG */
simple_lock_data_t	*cbuff_locks;
#endif	/* MACH_LDEBUG */
char			cbuff_allocated[CBUFF_ALLOCATED_SIZE];
cumulative_buffer_t	cbuff = {0};

#endif	/* ETAP_LOCK_ACCUMULATE */

#if	ETAP_MONITOR

int	mbuff_entries = ETAP_MBUFF_ENTRIES;

/*
 *  Create an array of pointers to monitor buffers.
 *  The buffers themselves are allocated at run-time.
 */
struct monitor_buffer	*mbuff[NCPUS];

#endif	/* ETAP_MONITOR */

/* ==========================
 *  Event table declarations
 * ==========================
 */

decl_simple_lock_data(,event_table_lock)

const struct event_table_entry	event_table_init[] =
{
	/*-----------------------------------------------------------------------*
	 *  ETAP EVENT                TRACE STATUS    TEXT NAME          DYNAMIC *
	 *-----------------------------------------------------------------------*/

#if	ETAP_EVENT_MONITOR
	{ETAP_P_USER_EVENT0,	ETAP_TRACE_OFF,	"p_user_event0",	STATIC},
	{ETAP_P_USER_EVENT1,	ETAP_TRACE_OFF,	"p_user_event1",	STATIC},
	{ETAP_P_USER_EVENT2,	ETAP_TRACE_OFF,	"p_user_event2",	STATIC},
	{ETAP_P_USER_EVENT3,	ETAP_TRACE_OFF,	"p_user_event3",	STATIC},
	{ETAP_P_USER_EVENT4,	ETAP_TRACE_OFF,	"p_user_event4",	STATIC},
	{ETAP_P_USER_EVENT5,	ETAP_TRACE_OFF,	"p_user_event5",	STATIC},
	{ETAP_P_USER_EVENT6,	ETAP_TRACE_OFF,	"p_user_event6",	STATIC},
	{ETAP_P_USER_EVENT7,	ETAP_TRACE_OFF,	"p_user_event7",	STATIC},
	{ETAP_P_USER_EVENT8,	ETAP_TRACE_OFF,	"p_user_event8",	STATIC},
	{ETAP_P_USER_EVENT9,	ETAP_TRACE_OFF,	"p_user_event9",	STATIC},
	{ETAP_P_USER_EVENT10,	ETAP_TRACE_OFF,	"p_user_event10",	STATIC},
	{ETAP_P_USER_EVENT11,	ETAP_TRACE_OFF,	"p_user_event11",	STATIC},
	{ETAP_P_USER_EVENT12,	ETAP_TRACE_OFF,	"p_user_event12",	STATIC},
	{ETAP_P_USER_EVENT13,	ETAP_TRACE_OFF,	"p_user_event13",	STATIC},
	{ETAP_P_USER_EVENT14,	ETAP_TRACE_OFF,	"p_user_event14",	STATIC},
	{ETAP_P_USER_EVENT15,	ETAP_TRACE_OFF,	"p_user_event15",	STATIC},
	{ETAP_P_USER_EVENT16,	ETAP_TRACE_OFF,	"p_user_event16",	STATIC},
	{ETAP_P_USER_EVENT17,	ETAP_TRACE_OFF,	"p_user_event17",	STATIC},
	{ETAP_P_USER_EVENT18,	ETAP_TRACE_OFF,	"p_user_event18",	STATIC},
	{ETAP_P_USER_EVENT19,	ETAP_TRACE_OFF,	"p_user_event19",	STATIC},
	{ETAP_P_USER_EVENT20,	ETAP_TRACE_OFF,	"p_user_event20",	STATIC},
	{ETAP_P_USER_EVENT21,	ETAP_TRACE_OFF,	"p_user_event21",	STATIC},
	{ETAP_P_USER_EVENT22,	ETAP_TRACE_OFF,	"p_user_event22",	STATIC},
	{ETAP_P_USER_EVENT23,	ETAP_TRACE_OFF,	"p_user_event23",	STATIC},
	{ETAP_P_USER_EVENT24,	ETAP_TRACE_OFF,	"p_user_event24",	STATIC},
	{ETAP_P_USER_EVENT25,	ETAP_TRACE_OFF,	"p_user_event25",	STATIC},
	{ETAP_P_USER_EVENT26,	ETAP_TRACE_OFF,	"p_user_event26",	STATIC},
	{ETAP_P_USER_EVENT27,	ETAP_TRACE_OFF,	"p_user_event27",	STATIC},
	{ETAP_P_USER_EVENT28,	ETAP_TRACE_OFF,	"p_user_event28",	STATIC},
	{ETAP_P_USER_EVENT29,	ETAP_TRACE_OFF,	"p_user_event29",	STATIC},
	{ETAP_P_USER_EVENT30,	ETAP_TRACE_OFF,	"p_user_event30",	STATIC},
	{ETAP_P_USER_EVENT31,	ETAP_TRACE_OFF,	"p_user_event31",	STATIC},
	{ETAP_P_SYSCALL_MACH,	ETAP_TRACE_OFF,	"p_syscall_mach",	STATIC},
	{ETAP_P_SYSCALL_UNIX,	ETAP_TRACE_OFF,	"p_syscall_unix",	STATIC},
	{ETAP_P_THREAD_LIFE,	ETAP_TRACE_OFF,	"p_thread_life",	STATIC},
	{ETAP_P_THREAD_CTX,	ETAP_TRACE_OFF,	"p_thread_ctx",		STATIC},
	{ETAP_P_RPC,		ETAP_TRACE_OFF,	"p_rpc",		STATIC},
	{ETAP_P_INTERRUPT,	ETAP_TRACE_OFF,	"p_interrupt",		STATIC},
	{ETAP_P_ACT_ABORT,	ETAP_TRACE_OFF,	"p_act_abort",		STATIC},
	{ETAP_P_PRIORITY,	ETAP_TRACE_OFF,	"p_priority",		STATIC},
	{ETAP_P_EXCEPTION,	ETAP_TRACE_OFF,	"p_exception",		STATIC},
	{ETAP_P_DEPRESSION,	ETAP_TRACE_OFF,	"p_depression",		STATIC},
	{ETAP_P_MISC,		ETAP_TRACE_OFF,	"p_misc",		STATIC},
	{ETAP_P_DETAP,		ETAP_TRACE_OFF,	"p_detap",		STATIC},
#endif	/* ETAP_EVENT_MONITOR */
#if	ETAP_LOCK_TRACE
	{ETAP_VM_BUCKET,	ETAP_TRACE_OFF,	"vm_bucket",		STATIC},/**/
	{ETAP_VM_HIMEM,		ETAP_TRACE_OFF,	"vm_himem",		STATIC},
	{ETAP_VM_MAP,		ETAP_TRACE_OFF,	"vm_map",		1},
	{ETAP_VM_MAP_I,		ETAP_TRACE_OFF,	"vm_map_i",		2},
	{ETAP_VM_MEMMAN,	ETAP_TRACE_OFF,	"vm_memman",		STATIC},/**/
	{ETAP_VM_MSYNC,		ETAP_TRACE_OFF,	"vm_msync",		3},
	{ETAP_VM_OBJ,		ETAP_TRACE_OFF,	"vm_obj",		4},
	{ETAP_VM_OBJ_CACHE,	ETAP_TRACE_OFF,	"vm_obj_cache",		5},
	{ETAP_VM_PAGE_ALLOC,	ETAP_TRACE_OFF,	"vm_page_alloc",	STATIC},/**/
	{ETAP_VM_PAGEOUT,	ETAP_TRACE_OFF,	"vm_pageout",		STATIC},
	{ETAP_VM_PAGEQ,		ETAP_TRACE_OFF,	"vm_pageq",		STATIC},
	{ETAP_VM_PAGEQ_FREE,	ETAP_TRACE_OFF,	"vm_pageq_free",	STATIC},
	{ETAP_VM_PMAP,		ETAP_TRACE_OFF,	"vm_pmap",		6},
	{ETAP_VM_PMAP_CACHE,	ETAP_TRACE_OFF,	"vm_pmap_cache",	STATIC},
	{ETAP_VM_PMAP_FREE,	ETAP_TRACE_OFF,	"vm_pmap_free",		STATIC},
	{ETAP_VM_PMAP_KERNEL,	ETAP_TRACE_OFF,	"vm_pmap_kern",		STATIC},
	{ETAP_VM_PMAP_SYS,	ETAP_TRACE_OFF,	"vm_pmap_sys",		7},
	{ETAP_VM_PMAP_SYS_I,	ETAP_TRACE_OFF,	"vm_pmap_sys_i",	8},
	{ETAP_VM_PMAP_UPDATE,	ETAP_TRACE_OFF,	"vm_pmap_update",	STATIC},
	{ETAP_VM_PREPPIN,	ETAP_TRACE_OFF,	"vm_preppin",		STATIC},
	{ETAP_VM_RESULT,	ETAP_TRACE_OFF,	"vm_result",		9},
	{ETAP_VM_TEST,		ETAP_TRACE_OFF,	"vm_tes",		STATIC},/**/
	{ETAP_VM_PMAP_PHYSENTRIES, ETAP_TRACE_OFF, "vm_pmap_physentries", STATIC},
	{ETAP_VM_PMAP_SID,	ETAP_TRACE_OFF,	"vm_pmap_sid",		STATIC},
	{ETAP_VM_PMAP_PTE,	ETAP_TRACE_OFF,	"vm_pmap_pte",		STATIC},
	{ETAP_VM_PMAP_PTE_OVFLW, ETAP_TRACE_OFF, "vm_pmap_pte_ovflw",	STATIC},
	{ETAP_VM_PMAP_TLB,	ETAP_TRACE_OFF,	"vm_pmap_tlb",		STATIC},
	{ETAP_IPC_IHGB,		ETAP_TRACE_OFF,	"ipc_ihgb",		10},/**/
	{ETAP_IPC_IS,		ETAP_TRACE_OFF,	"ipc_is",		11},/**/
	{ETAP_IPC_IS_REF,	ETAP_TRACE_OFF,	"ipc_is_ref",		12},/**/
	{ETAP_IPC_MQUEUE,	ETAP_TRACE_OFF,	"ipc_mqueue",		STATIC},/**/
	{ETAP_IPC_OBJECT,	ETAP_TRACE_OFF,	"ipc_object",		STATIC},/**/
	{ETAP_IPC_PORT_MULT,	ETAP_TRACE_OFF,	"ipc_port_mult",	13},/**/
	{ETAP_IPC_PORT_TIME,	ETAP_TRACE_OFF,	"ipc_port_time",	14},/**/
	{ETAP_IPC_RPC,		ETAP_TRACE_OFF,	"ipc_rpc",		15},/**/
	{ETAP_IPC_PORT_ALLOCQ,	ETAP_TRACE_OFF,	"ipc_port_allocq",	STATIC},/**/

	{ETAP_IO_AHA,		ETAP_TRACE_OFF,	"io_aha",		STATIC},
	{ETAP_IO_CHIP,		ETAP_TRACE_OFF,	"io_chip",		STATIC},
	{ETAP_IO_DEV,		ETAP_TRACE_OFF,	"io_dev",		16},/**/
	{ETAP_IO_DEV_NUM,	ETAP_TRACE_OFF,	"io_dev_num",		STATIC},
	{ETAP_IO_DEV_PAGEH,	ETAP_TRACE_OFF,	"io_dev_pageh",		STATIC},/**/
	{ETAP_IO_DEV_PAGER,	ETAP_TRACE_OFF,	"io_dev_pager",		STATIC},/**/
	{ETAP_IO_DEV_PORT,	ETAP_TRACE_OFF,	"io_dev_port",		STATIC},/**/
	{ETAP_IO_DEV_REF,	ETAP_TRACE_OFF,	"io_dev_new",		17},/**/
	{ETAP_IO_DEVINS,	ETAP_TRACE_OFF,	"io_devins",		STATIC},
	{ETAP_IO_DONE_LIST,	ETAP_TRACE_OFF,	"io_done_list",		STATIC},
	{ETAP_IO_DONE_Q,	ETAP_TRACE_OFF,	"io_doneq",		18},
	{ETAP_IO_DONE_REF,	ETAP_TRACE_OFF,	"io_done_ref",		19},
	{ETAP_IO_EAHA,		ETAP_TRACE_OFF,	"io_eaha",		STATIC},
	{ETAP_IO_HD_PROBE,	ETAP_TRACE_OFF,	"io_hd_probe",		STATIC},
	{ETAP_IO_IOPB,		ETAP_TRACE_OFF,	"io_iopb",		STATIC},
	{ETAP_IO_KDQ,		ETAP_TRACE_OFF,	"io_kdq",		STATIC},
	{ETAP_IO_KDTTY,		ETAP_TRACE_OFF,	"io_kdtty",		STATIC},
	{ETAP_IO_REQ,		ETAP_TRACE_OFF,	"io_req",		20},
	{ETAP_IO_TARGET,	ETAP_TRACE_OFF,	"io_target",		STATIC},
	{ETAP_IO_TTY,		ETAP_TRACE_OFF,	"io_tty",		STATIC},
	{ETAP_IO_IOP_LOCK,	ETAP_TRACE_OFF,	"io_iop",		STATIC},/**/
	{ETAP_IO_DEV_NAME,	ETAP_TRACE_OFF,	"io_dev_name",		STATIC},/**/
	{ETAP_IO_CDLI,		ETAP_TRACE_OFF,	"io_cdli",		STATIC},/**/
	{ETAP_IO_HIPPI_FILTER,	ETAP_TRACE_OFF,	"io_hippi_filter",	STATIC},/**/
	{ETAP_IO_HIPPI_SRC,	ETAP_TRACE_OFF,	"io_hippi_src",		STATIC},/**/
	{ETAP_IO_HIPPI_DST,	ETAP_TRACE_OFF,	"io_hippi_dst",		STATIC},/**/
	{ETAP_IO_HIPPI_PKT,	ETAP_TRACE_OFF,	"io_hippi_pkt",		STATIC},/**/
	{ETAP_IO_NOTIFY,	ETAP_TRACE_OFF,	"io_notify",		STATIC},/**/
	{ETAP_IO_DATADEV,	ETAP_TRACE_OFF,	"io_data_device",	STATIC},/**/
	{ETAP_IO_OPEN,		ETAP_TRACE_OFF,	"io_open",		STATIC},
	{ETAP_IO_OPEN_I,	ETAP_TRACE_OFF,	"io_open_i",		STATIC},
	{ETAP_THREAD_ACT,	ETAP_TRACE_OFF,	"th_act",		21},
	{ETAP_THREAD_ACTION,	ETAP_TRACE_OFF,	"th_action",		STATIC},
	{ETAP_THREAD_LOCK,	ETAP_TRACE_OFF,	"th_lock",		22},
	{ETAP_THREAD_LOCK_SET,	ETAP_TRACE_OFF,	"th_lock_set",		23},
	{ETAP_THREAD_NEW,	ETAP_TRACE_OFF,	"th_new",		24},
	{ETAP_THREAD_PSET,	ETAP_TRACE_OFF,	"th_pset",		STATIC},/**/
	{ETAP_THREAD_PSET_ALL,	ETAP_TRACE_OFF,	"th_pset_all",		STATIC},
	{ETAP_THREAD_PSET_RUNQ,	ETAP_TRACE_OFF,	"th_pset_runq",		STATIC},
	{ETAP_THREAD_PSET_IDLE,	ETAP_TRACE_OFF,	"th_pset_idle",		STATIC},
	{ETAP_THREAD_PSET_QUANT, ETAP_TRACE_OFF, "th_pset_quant",	STATIC},
	{ETAP_THREAD_PROC,	ETAP_TRACE_OFF,	"th_proc",		STATIC},
	{ETAP_THREAD_PROC_RUNQ,	ETAP_TRACE_OFF,	"th_proc_runq",		STATIC},
	{ETAP_THREAD_REAPER,	ETAP_TRACE_OFF,	"th_reaper",		STATIC},
	{ETAP_THREAD_RPC,	ETAP_TRACE_OFF,	"th_rpc",		25},
	{ETAP_THREAD_SEMA,	ETAP_TRACE_OFF,	"th_sema",		26},
	{ETAP_THREAD_STACK,	ETAP_TRACE_OFF,	"th_stack",		STATIC},
	{ETAP_THREAD_STACK_USAGE, ETAP_TRACE_OFF, "th_stack_usage",	STATIC},
	{ETAP_THREAD_TASK_NEW,	ETAP_TRACE_OFF,	"th_task_new",		27},
	{ETAP_THREAD_TASK_ITK,	ETAP_TRACE_OFF,	"th_task_itk",		28},
	{ETAP_THREAD_ULOCK,	ETAP_TRACE_OFF,	"th_ulock",		29},
	{ETAP_THREAD_WAIT,	ETAP_TRACE_OFF,	"th_wait",		STATIC},
	{ETAP_THREAD_WAKE,	ETAP_TRACE_OFF,	"th_wake",		30},
	{ETAP_THREAD_ACT_LIST,	ETAP_TRACE_OFF,	"th_act_list",		31},
	{ETAP_THREAD_TASK_SWAP,	ETAP_TRACE_OFF,	"th_task_swap",		32},
	{ETAP_THREAD_TASK_SWAPOUT, ETAP_TRACE_OFF, "th_task_swapout",	33},
	{ETAP_THREAD_SWAPPER,	ETAP_TRACE_OFF,	"th_swapper",		STATIC},

	{ETAP_NET_IFQ,		ETAP_TRACE_OFF,	"net_ifq",		STATIC},
	{ETAP_NET_KMSG,		ETAP_TRACE_OFF,	"net_kmsg",		STATIC},
	{ETAP_NET_MBUF,		ETAP_TRACE_OFF,	"net_mbuf",		STATIC},/**/
	{ETAP_NET_POOL,		ETAP_TRACE_OFF,	"net_pool",		STATIC},
	{ETAP_NET_Q,		ETAP_TRACE_OFF,	"net_q",		STATIC},
	{ETAP_NET_QFREE,	ETAP_TRACE_OFF,	"net_qfree",		STATIC},
	{ETAP_NET_RCV,		ETAP_TRACE_OFF,	"net_rcv",		STATIC},
	{ETAP_NET_RCV_PLIST,	ETAP_TRACE_OFF,	"net_rcv_plist",	STATIC},/**/
	{ETAP_NET_THREAD,	ETAP_TRACE_OFF,	"net_thread",		STATIC},

	{ETAP_NORMA_XMM,	ETAP_TRACE_OFF,	"norma_xmm",		STATIC},
	{ETAP_NORMA_XMMOBJ,	ETAP_TRACE_OFF,	"norma_xmmobj",		STATIC},
	{ETAP_NORMA_XMMCACHE,	ETAP_TRACE_OFF,	"norma_xmmcache",	STATIC},
	{ETAP_NORMA_MP,		ETAP_TRACE_OFF,	"norma_mp",		STATIC},
	{ETAP_NORMA_VOR,	ETAP_TRACE_OFF,	"norma_vor",		STATIC},/**/
	{ETAP_NORMA_TASK,	ETAP_TRACE_OFF,	"norma_task",		38},/**/
	{ETAP_DIPC_CLEANUP,	ETAP_TRACE_OFF,	"dipc_cleanup",		STATIC},/**/
	{ETAP_DIPC_MSG_PROG,	ETAP_TRACE_OFF,	"dipc_msgp_prog",	STATIC},/**/
	{ETAP_DIPC_PREP_QUEUE,	ETAP_TRACE_OFF,	"dipc_prep_queue",	STATIC},/**/
	{ETAP_DIPC_PREP_FILL,	ETAP_TRACE_OFF,	"dipc_prep_fill",	STATIC},/**/
	{ETAP_DIPC_MIGRATE,	ETAP_TRACE_OFF,	"dipc_migrate",		STATIC},/**/
	{ETAP_DIPC_DELIVER,	ETAP_TRACE_OFF,	"dipc_deliver",		STATIC},/**/
	{ETAP_DIPC_RECV_SYNC,	ETAP_TRACE_OFF,	"dipc_recv_sync",	STATIC},/**/
	{ETAP_DIPC_RPC,		ETAP_TRACE_OFF,	"dipc_rpc",		STATIC},/**/
	{ETAP_DIPC_MSG_REQ,	ETAP_TRACE_OFF,	"dipc_msg_req",		STATIC},/**/
	{ETAP_DIPC_MSG_ORDER,	ETAP_TRACE_OFF,	"dipc_msg_order",	STATIC},/**/
	{ETAP_DIPC_MSG_PREPQ,	ETAP_TRACE_OFF,	"dipc_msg_prepq",	STATIC},/**/
	{ETAP_DIPC_MSG_FREE,	ETAP_TRACE_OFF,	"dipc_msg_free",	STATIC},/**/
	{ETAP_DIPC_KMSG_AST,	ETAP_TRACE_OFF,	"dipc_kmsg_ast",	STATIC},/**/
	{ETAP_DIPC_TEST_LOCK,	ETAP_TRACE_OFF,	"dipc_test_lock",	STATIC},/**/
	{ETAP_DIPC_SPINLOCK,	ETAP_TRACE_OFF,	"dipc_spinlock",	STATIC},/**/
	{ETAP_DIPC_TRACE,	ETAP_TRACE_OFF,	"dipc_trace",		STATIC},/**/
	{ETAP_DIPC_REQ_CALLBACK, ETAP_TRACE_OFF, "dipc_req_clbck",	STATIC},/**/
	{ETAP_DIPC_PORT_NAME,	ETAP_TRACE_OFF,	"dipc_port_name",	STATIC},/**/
	{ETAP_DIPC_RESTART_PORT, ETAP_TRACE_OFF, "dipc_restart_port",	STATIC},/**/
	{ETAP_DIPC_ZERO_PAGE,	ETAP_TRACE_OFF,	"dipc_zero_page",	STATIC},/**/
	{ETAP_DIPC_BLOCKED_NODE, ETAP_TRACE_OFF, "dipc_blocked_node",	STATIC},/**/
	{ETAP_DIPC_TIMER,	ETAP_TRACE_OFF,	"dipc_timer",		STATIC},/**/
	{ETAP_DIPC_SPECIAL_PORT, ETAP_TRACE_OFF, "dipc_special_port",	STATIC},/**/

	{ETAP_KKT_TEST_WORK,	ETAP_TRACE_OFF,	"kkt_test_work",	STATIC},/**/
	{ETAP_KKT_TEST_MP,	ETAP_TRACE_OFF,	"kkt_work_mp",		STATIC},/**/
	{ETAP_KKT_NODE,		ETAP_TRACE_OFF,	"kkt_node",		STATIC},/**/
	{ETAP_KKT_CHANNEL_LIST,	ETAP_TRACE_OFF,	"kkt_channel_list",	STATIC},/**/
	{ETAP_KKT_CHANNEL,	ETAP_TRACE_OFF,	"kkt_channel",		STATIC},/**/
	{ETAP_KKT_HANDLE,	ETAP_TRACE_OFF,	"kkt_handle",		STATIC},/**/
	{ETAP_KKT_MAP,		ETAP_TRACE_OFF,	"kkt_map",		STATIC},/**/
	{ETAP_KKT_RESOURCE,	ETAP_TRACE_OFF,	"kkt_resource",		STATIC},/**/

	{ETAP_XKERNEL_MASTER,	ETAP_TRACE_OFF,	"xkernel_master",	STATIC},/**/
	{ETAP_XKERNEL_EVENT,	ETAP_TRACE_OFF,	"xkernel_event",	STATIC},/**/
	{ETAP_XKERNEL_ETHINPUT,	ETAP_TRACE_OFF,	"xkernel_input",	STATIC},/**/
	{ETAP_MISC_AST,		ETAP_TRACE_OFF,	"m_ast",		STATIC},
	{ETAP_MISC_CLOCK,	ETAP_TRACE_OFF,	"m_clock",		STATIC},
	{ETAP_MISC_EMULATE,	ETAP_TRACE_OFF,	"m_emulate",		34},
	{ETAP_MISC_EVENT,	ETAP_TRACE_OFF,	"m_event",		STATIC},
	{ETAP_MISC_KDB,		ETAP_TRACE_OFF,	"m_kdb",		STATIC},
	{ETAP_MISC_PCB,		ETAP_TRACE_OFF,	"m_pcb",		35},
	{ETAP_MISC_PRINTF,	ETAP_TRACE_OFF,	"m_printf",		STATIC},
	{ETAP_MISC_Q,		ETAP_TRACE_OFF,	"m_q",			STATIC},
	{ETAP_MISC_RPC_SUBSYS,	ETAP_TRACE_OFF,	"m_rpc_sub",		36},
	{ETAP_MISC_RT_CLOCK,	ETAP_TRACE_OFF,	"m_rt_clock",		STATIC},
	{ETAP_MISC_SD_POOL,	ETAP_TRACE_OFF,	"m_sd_pool",		STATIC},
	{ETAP_MISC_TIMER,	ETAP_TRACE_OFF,	"m_timer",		STATIC},
	{ETAP_MISC_UTIME,	ETAP_TRACE_OFF,	"m_utime",		STATIC},
	{ETAP_MISC_XPR,		ETAP_TRACE_OFF,	"m_xpr",		STATIC},
	{ETAP_MISC_ZONE,	ETAP_TRACE_OFF,	"m_zone",		37},
	{ETAP_MISC_ZONE_ALL,	ETAP_TRACE_OFF,	"m_zone_all",		STATIC},
	{ETAP_MISC_ZONE_GET,	ETAP_TRACE_OFF,	"m_zone_get",		STATIC},
	{ETAP_MISC_ZONE_PTABLE,	ETAP_TRACE_OFF,	"m_zone_ptable",	STATIC},/**/
	{ETAP_MISC_LEDGER,	ETAP_TRACE_OFF,	"m_ledger",		STATIC},/**/
	{ETAP_MISC_SCSIT_TGT,	ETAP_TRACE_OFF,	"m_scsit_tgt_lock",	STATIC},/**/
	{ETAP_MISC_SCSIT_SELF,	ETAP_TRACE_OFF,	"m_scsit_self_lock",	STATIC},/**/
	{ETAP_MISC_SPL,		ETAP_TRACE_OFF,	"m_spl_lock",		STATIC},/**/
	{ETAP_MISC_MASTER,	ETAP_TRACE_OFF,	"m_master",		STATIC},/**/
	{ETAP_MISC_FLOAT,	ETAP_TRACE_OFF,	"m_float",		STATIC},/**/
	{ETAP_MISC_GROUP,	ETAP_TRACE_OFF,	"m_group",		STATIC},/**/
	{ETAP_MISC_FLIPC,	ETAP_TRACE_OFF,	"m_flipc",		STATIC},/**/
	{ETAP_MISC_MP_IO,	ETAP_TRACE_OFF,	"m_mp_io",		STATIC},/**/
	{ETAP_MISC_KERNEL_TEST,	ETAP_TRACE_OFF,	"m_kernel_test",	STATIC},/**/

	{ETAP_NO_TRACE,		ETAP_TRACE_OFF,	"NEVER_TRACE",		STATIC},
#endif	/* ETAP_LOCK_TRACE */
};
/*
 * Variable initially pointing to the event table, then to its mappable
 * copy.  The cast is needed to discard the `const' qualifier; without it
 * gcc issues a warning.
 */
event_table_t	event_table = (event_table_t) event_table_init;

/*
 * Linked list of pointers into event_table_init[] so they can be switched
 * into the mappable copy when it is made.
 */
struct event_table_chain	*event_table_chain;

/*
 * max number of event types in the event table
 */
int	event_table_max = sizeof(event_table_init)/sizeof(struct event_table_entry);

const struct subs_table_entry	subs_table_init[] =
{
	/*------------------------------------------*
	 *  ETAP SUBSYSTEM          TEXT NAME       *
	 *------------------------------------------*/

#if	ETAP_EVENT_MONITOR
	{ETAP_SUBS_PROBE,	"event_probes"},
#endif	/* ETAP_EVENT_MONITOR */

#if	ETAP_LOCK_TRACE
	{ETAP_SUBS_LOCK_DIPC,	"lock_dipc"},
	{ETAP_SUBS_LOCK_IO,	"lock_io"},
	{ETAP_SUBS_LOCK_IPC,	"lock_ipc"},
	{ETAP_SUBS_LOCK_KKT,	"lock_kkt"},
	{ETAP_SUBS_LOCK_MISC,	"lock_misc"},
	{ETAP_SUBS_LOCK_NET,	"lock_net"},
	{ETAP_SUBS_LOCK_NORMA,	"lock_norma"},
	{ETAP_SUBS_LOCK_THREAD,	"lock_thread"},
	{ETAP_SUBS_LOCK_VM,	"lock_vm"},
	{ETAP_SUBS_LOCK_XKERNEL, "lock_xkernel"},
#endif	/* ETAP_LOCK_TRACE */
};

/*
 * Variable initially pointing to the subsystem table, then to its mappable
 * copy.
 */
subs_table_t	subs_table = (subs_table_t) subs_table_init;

/*
 * max number of subsystem types in the subsystem table
 */
int	subs_table_max = sizeof(subs_table_init)/sizeof(struct subs_table_entry);
#define MAX_NAME_SIZE	35

#define SYS_TABLE_MACH_TRAP	0
#define SYS_TABLE_MACH_MESSAGE	1
#define SYS_TABLE_UNIX_SYSCALL	2
#define SYS_TABLE_INTERRUPT	3
#define SYS_TABLE_EXCEPTION	4

extern char	*system_table_lookup(unsigned int table,
				     unsigned int number);
char	*mach_trap_names[] = {
	/* 26 */	"mach_reply_port",
	/* 27 */	"mach_thread_self",
	/* 28 */	"mach_task_self",
	/* 29 */	"mach_host_self",
	/* 30 */	"vm_read_overwrite",
	/* 32 */	"mach_msg_overwrite_trap",
	/* 35 */	"mach_rpc_trap",
	/* 36 */	"mach_rpc_return_trap",
	/* 41 */	"init_process",
	/* 59 */	"swtch_pri",
	/* 61 */	"thread_switch",
	/* 62 */	"clock_sleep_trap",
};

#define N_MACH_TRAP_NAMES	(sizeof mach_trap_names / sizeof mach_trap_names[0])
#define mach_trap_name(nu) \
	(((nu) < N_MACH_TRAP_NAMES) ? mach_trap_names[nu] : NULL)
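
/*
 * Usage sketch (illustrative, against the fully populated original
 * table): mach_trap_name(28) yields "mach_task_self", while an
 * out-of-range value such as mach_trap_name(1000) returns NULL rather
 * than indexing past the end of mach_trap_names[].
 */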
struct table_entry {
	char	name[MAX_NAME_SIZE];
	u_int	number;
};

/*
 * Note: Most mach system calls are actually implemented as messages.
 */
struct table_entry	mach_message_table[] = {
	subsystem_to_name_map_bootstrap,
	subsystem_to_name_map_clock,
	subsystem_to_name_map_clock_reply,
	subsystem_to_name_map_default_pager_object,
	subsystem_to_name_map_device,
	subsystem_to_name_map_device_reply,
	subsystem_to_name_map_device_request,
	subsystem_to_name_map_exc,
/*	subsystem_to_name_map_mach,*/
	subsystem_to_name_map_mach_debug,
/*	subsystem_to_name_map_mach_host,*/
	subsystem_to_name_map_mach_norma,
	subsystem_to_name_map_mach_port,
	subsystem_to_name_map_memory_object,
	subsystem_to_name_map_memory_object_default,
	subsystem_to_name_map_notify,
	subsystem_to_name_map_prof,
	subsystem_to_name_map_sync
};

int	mach_message_table_entries = sizeof(mach_message_table) /
				     sizeof(struct table_entry);
/*
 * ================================
 * Initialization routines for ETAP
 * ================================
 */

/*
 * ROUTINE:	etap_init_phase1	[internal]
 *
 * FUNCTION:	Event trace instrumentation initialization phase
 *		one of two.  The static phase.  The cumulative buffer
 *		is initialized.
 *
 * NOTES:	The cumulative buffer is statically allocated and
 *		must be initialized before the first simple_lock_init()
 *		or lock_init() call is made.
 *
 *		The first lock init call is made before dynamic allocation
 *		is available.  Hence, phase one is executed before dynamic
 *		memory allocation is available.
 */

void
etap_init_phase1(void)
{
#if	ETAP_LOCK_ACCUMULATE || MACH_ASSERT
	int		x;
#if	MACH_ASSERT
	boolean_t	out_of_order;
#endif	/* MACH_ASSERT */
#endif	/* ETAP_LOCK_ACCUMULATE || MACH_ASSERT */

#if	ETAP_LOCK_ACCUMULATE
	/*
	 *  Initialize Cumulative Buffer
	 *
	 *  Note: The cumulative buffer is statically allocated.
	 *	  This static allocation is necessary since most
	 *	  of the lock_init calls are made before dynamic
	 *	  allocation routines are available.
	 */

	/*
	 *  Align cumulative buffer pointer to a page boundary
	 *  (so it can be mapped).
	 */
	bzero(&cbuff_allocated[0], CBUFF_ALLOCATED_SIZE);
	cbuff = (cumulative_buffer_t) round_page(&cbuff_allocated);

	simple_lock_init(&cbuff_lock, ETAP_NO_TRACE);

	/*
	 *  Set the starting point for cumulative buffer entry
	 *  reservations.
	 *
	 *  This value must leave enough head room in the
	 *  cumulative buffer to contain all dynamic events.
	 */
	for (x=0; x < event_table_max; x++)
		if (event_table[x].dynamic > cbuff->static_start)
			cbuff->static_start = event_table[x].dynamic;

	cbuff->next = cbuff->static_start;
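
	/*
	 * Worked example (illustrative): the largest .dynamic value in
	 * event_table_init[] above is 38 (norma_task), so static_start
	 * becomes 38; cumulative buffer entries 0..37 remain reserved for
	 * dynamic locks, and etap_cbuff_reserve() hands out static
	 * entries from index 38 upward via cbuff->next.
	 */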
#endif	/* ETAP_LOCK_ACCUMULATE */

	/*
	 *  Initialize the event table lock
	 */
	simple_lock_init(&event_table_lock, ETAP_NO_TRACE);

#if	MACH_ASSERT
	/*
	 *  Check that events are in numerical order so we can do a binary
	 *  search on them.  Even better would be to make event numbers be
	 *  simple contiguous indexes into event_table[], but that would
	 *  break the coding of subsystems in the event number.
	 */
	out_of_order = FALSE;
	for (x = 1; x < event_table_max; x++) {
		if (event_table[x - 1].event > event_table[x].event) {
			printf("events out of order: %s > %s\n",
			       event_table[x - 1].name, event_table[x].name);
			out_of_order = TRUE;
		}
	}

	if (out_of_order)
		panic("etap_init_phase1");
#endif	/* MACH_ASSERT */
}
/*
 * ROUTINE:	etap_init_phase2	[internal]
 *
 * FUNCTION:	Event trace instrumentation initialization phase
 *		two of two.  The dynamic phase.  The monitored buffers
 *		are dynamically allocated and initialized.  Cumulative
 *		dynamic entry locks are allocated and initialized.  The
 *		start_data_pool is initialized.
 *
 * NOTES:	Phase two is executed once dynamic memory allocation
 *		is available.
 */

void
etap_init_phase2(void)
{
	int		size;
	int		x;
	kern_return_t	ret;
	vm_offset_t	table_copy;
	struct event_table_chain	*chainp;

	/*
	 *  Make mappable copies of the event_table and the subs_table.
	 *  These tables were originally mapped as they appear in the
	 *  kernel image, but that meant that other kernel variables could
	 *  end up being mapped with them, which is ugly.  It also didn't
	 *  work on the HP/PA, where pages with physical address == virtual
	 *  address do not have real pmap entries allocated and therefore
	 *  can't be mapped.
	 */
	size = sizeof event_table_init + sizeof subs_table_init;
	ret  = kmem_alloc(kernel_map, &table_copy, size);
	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating table copies");
	event_table = (event_table_t) table_copy;
	subs_table  = (subs_table_t) (table_copy + sizeof event_table_init);
	bcopy((char *) event_table_init, (char *) event_table,
	      sizeof event_table_init);
	bcopy((char *) subs_table_init, (char *) subs_table,
	      sizeof subs_table_init);

	/* Switch pointers from the old event_table to the new. */
	for (chainp = event_table_chain; chainp != NULL;
	     chainp = chainp->event_table_link) {
		x = chainp->event_tablep - event_table_init;
		assert(x < event_table_max);
		chainp->event_tablep = event_table + x;
	}
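
	/*
	 * The subtraction above is ordinary C pointer arithmetic: because
	 * chainp->event_tablep pointed into event_table_init[], the
	 * difference is an element index, which is then rebased onto the
	 * fresh copy.  For example, a chained pointer that equalled
	 * &event_table_init[5] becomes event_table + 5.
	 */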
#if	ETAP_LOCK_ACCUMULATE

	/*
	 *  Because several dynamic locks can point to a single
	 *  cumulative buffer entry, dynamic lock writes to the
	 *  entry are synchronized.
	 *
	 *  The spin locks are allocated here.
	 */
#if	MACH_LDEBUG
	size = sizeof(simple_lock_t) * cbuff->static_start;
#else	/* MACH_LDEBUG */
	/*
	 *  Note: These locks are different from traditional spin locks.
	 *	  They are of type int instead of type simple_lock_t.
	 *	  We can reduce lock size this way, since no tracing will
	 *	  EVER be performed on these locks.
	 */
	size = sizeof(simple_lock_data_t) * cbuff->static_start;
#endif	/* MACH_LDEBUG */

	ret = kmem_alloc(kernel_map, (vm_offset_t *) &cbuff_locks, size);

	if (ret != KERN_SUCCESS)
		panic("ETAP: error allocating cumulative write locks");

#if	MACH_LDEBUG
	for(x = 0; x < cbuff->static_start; ++x) {
		simple_lock_init(&cbuff_locks[x], ETAP_NO_TRACE);
	}
#else	/* MACH_LDEBUG */
	bzero((const char *) cbuff_locks, size);
#endif	/* MACH_LDEBUG */

#endif	/* ETAP_LOCK_ACCUMULATE */


#if	ETAP_MONITOR

	/*
	 *  monitor buffer allocation
	 */
	size = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
	       sizeof(struct monitor_buffer);

	for (x=0; x < NCPUS; x++) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *) &mbuff[x],
				 size);
		if (ret != KERN_SUCCESS)
			panic ("ETAP: error allocating monitor buffer\n");

		/* zero fill buffer */
		bzero((char *) mbuff[x], size);
	}

#endif	/* ETAP_MONITOR */


#if	ETAP_LOCK_TRACE

	/*
	 *  Initialize the start_data_pool
	 */
	init_start_data_pool();

#endif	/* ETAP_LOCK_TRACE */
}
#if	ETAP_LOCK_ACCUMULATE

/*
 * ROUTINE:	etap_cbuff_reserve	[internal]
 *
 * FUNCTION:	The cumulative buffer operation which returns a pointer
 *		to a free entry in the cumulative buffer.
 *
 * NOTES:	Disables interrupts.
 */

cbuff_entry_t
etap_cbuff_reserve(event_table_t etp)
{
	cbuff_entry_t	avail;
	unsigned short	de;
	spl_t		s;

	/* see if type pointer is initialized */
	if (etp == EVENT_TABLE_NULL || etp->event == ETAP_NO_TRACE)
		return (CBUFF_ENTRY_NULL);

	/* check for DYNAMIC lock */
	if ((de = etp->dynamic) != 0) {
		if (de <= cbuff->static_start)
			return (&cbuff->entry[de-1]);
		else {
			printf("ETAP: dynamic lock index error [%u]\n", de);
			return (CBUFF_ENTRY_NULL);
		}
	}

	cumulative_buffer_lock(s);

	/* if buffer is full, reservation requests fail */
	if (cbuff->next >= ETAP_CBUFF_ENTRIES) {
		cumulative_buffer_unlock(s);
		return (CBUFF_ENTRY_NULL);
	}

	avail = &cbuff->entry[cbuff->next++];

	cumulative_buffer_unlock(s);

	return (avail);
}

#endif	/* ETAP_LOCK_ACCUMULATE */
/*
 * ROUTINE:	etap_event_table_find	[internal]
 *
 * FUNCTION:	Returns a pointer to the assigned event type table entry,
 *		using the event type as the index key.
 */

event_table_t
etap_event_table_find(etap_event_t event)
{
	int	last_before, first_after, try;

	/* Binary search for the event number.  last_before is the highest-
	   numbered element known to be <= the number we're looking for;
	   first_after is the lowest-numbered element known to be >. */

	last_before = 0;
	first_after = event_table_max;
	while (last_before < first_after) {
		try = (last_before + first_after) >> 1;
		if (event_table[try].event == event)
			return (&event_table[try]);
		else if (event_table[try].event < event)
			last_before = try + 1;
		else
			first_after = try;
	}
	return EVENT_TABLE_NULL;
}
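
/*
 * Search walk-through (illustrative): looking up event 0x20 in a table
 * whose .event fields are {0x10, 0x20, 0x30, 0x40} first probes index 2
 * (0x30, too large, so first_after becomes 2), then index 1 (0x20,
 * match) and returns &event_table[1]; an absent number shrinks the
 * window to empty and yields EVENT_TABLE_NULL instead.
 */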
void
etap_event_table_assign(struct event_table_chain *chainp, etap_event_t event)
{
	event_table_t	event_tablep;

	event_tablep = etap_event_table_find(event);
	if (event_tablep == EVENT_TABLE_NULL)
		printf("\nETAP: event not found in event table: %x\n", event);
	else {
		if (event_table == event_table_init) {
			chainp->event_table_link = event_table_chain;
			event_table_chain = chainp;
		}
		chainp->event_tablep = event_tablep;
	}
}
/*
 * MESSAGE:	etap_get_info		[exported]
 *
 * FUNCTION:	provides the server with ETAP buffer configurations.
 */

kern_return_t
etap_get_info(
	host_priv_t	host_priv,
	int		*et_entries,
	int		*st_entries,
	vm_offset_t	*et_offset,
	vm_offset_t	*st_offset,
	int		*cb_width,
	int		*mb_size,
	int		*mb_entries,
	int		*mb_cpus)
{
	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	*et_entries = event_table_max;
	*st_entries = subs_table_max;
	*et_offset  = (vm_offset_t) ((char*) event_table -
				     trunc_page((char*) event_table));
	*st_offset  = (vm_offset_t) ((char*) subs_table -
				     trunc_page((char*) subs_table));
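
	/*
	 * Offset arithmetic example (illustrative): if subs_table sits at
	 * kernel address 0xc0081234, trunc_page() gives 0xc0081000 and
	 * *st_offset is 0x234, the table's displacement within its first
	 * page, which is what a client needs after mapping that page.
	 */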
#if	ETAP_LOCK_ACCUMULATE
	*cb_width = cbuff_width;
#else	/* ETAP_LOCK_ACCUMULATE */
	*cb_width = 0;
#endif	/* ETAP_LOCK_ACCUMULATE */

#if	ETAP_MONITOR
	*mb_size    = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) +
		      sizeof(struct monitor_buffer);
	*mb_entries = mbuff_entries;
	*mb_cpus    = NCPUS;
#else	/* ETAP_MONITOR */
	*mb_size    = 0;
	*mb_entries = 0;
	*mb_cpus    = 0;
#endif	/* ETAP_MONITOR */

	return (KERN_SUCCESS);
}
/*
 * ROUTINE:	etap_trace_event	[exported]
 *
 * FUNCTION:	The etap_trace_event system call is the user's interface to
 *		the ETAP kernel instrumentation.
 *
 *		This call allows the user to enable and disable tracing modes
 *		on specific event types.  The call also supports a reset
 *		option, where the cumulative buffer data and all event type
 *		tracing is reset to zero.  When the reset option is used, a
 *		new interval width can also be defined using the op parameter.
 */

kern_return_t
etap_trace_event (
	unsigned short	mode,
	unsigned short	type,
	boolean_t	enable,
	unsigned int	nargs,
	unsigned short	args[])
{
#if	ETAP_MONITOR || ETAP_LOCK_TRACE || ETAP_LOCK_ACCUMULATE
	event_table_t	event_tablep;
	kern_return_t	ret;
	int		i, args_size;
	unsigned short	status_mask;
	unsigned short	*tmp_args;

	/*
	 *  Initialize operation
	 */
	if (mode == ETAP_RESET) {
		etap_trace_reset(nargs);
		return (KERN_SUCCESS);
	}

	status_mask = mode & type;
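
	/*
	 * Mask illustration (hypothetical values): mode and type are both
	 * bit masks over the same status field, so with mode == 0x0c and
	 * type == 0x04 the computed status_mask is 0x04, and only that
	 * status bit is set or cleared for each requested event below.
	 */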
	/*
	 *  Copy args array from user space to kernel space
	 */
	args_size = nargs * sizeof *args;
	tmp_args  = (unsigned short *) kalloc(args_size);

	if (tmp_args == NULL)
		return (KERN_NO_SPACE);

	if (copyin((const char *) args, (char *) tmp_args, args_size)) {
		kfree((vm_offset_t) tmp_args, args_size);
		return (KERN_INVALID_ADDRESS);
	}

	/*
	 *  Change appropriate status fields in the event table
	 */
	event_table_lock();

	for (i = 0; i < nargs; i++) {
		if (tmp_args[i] != ETAP_NO_TRACE) {
			event_tablep = etap_event_table_find(tmp_args[i]);
			if (event_tablep == EVENT_TABLE_NULL)
				break;
			if (enable)
				event_tablep->status |= status_mask;
			else
				event_tablep->status &= ~status_mask;
		}
	}

	ret = (i < nargs) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	event_table_unlock();

	kfree((vm_offset_t) tmp_args, args_size);

	return (ret);

#else	/* ETAP_MONITOR || ETAP_LOCK_TRACE || ETAP_LOCK_ACCUMULATE */
	return (KERN_FAILURE);
#endif	/* ETAP_MONITOR || ETAP_LOCK_TRACE || ETAP_LOCK_ACCUMULATE */
}
/*
 * ROUTINE:	etap_trace_reset	[internal]
 *
 * FUNCTION:	Turns off all tracing and erases all the data accumulated
 *		in the cumulative buffer.  If the user defined a new
 *		cumulative buffer interval width, it will be assigned here.
 */

void
etap_trace_reset(int new_interval)
{
	event_table_t	scan;
	int		x;
#if	ETAP_LOCK_ACCUMULATE
	spl_t		s;
#endif	/* ETAP_LOCK_ACCUMULATE */

	/*
	 *  Wipe out trace fields in event table
	 */
	event_table_lock();

	scan = event_table;
	for (x=0; x < event_table_max; x++) {
		scan->status = ETAP_TRACE_OFF;
		scan++;
	}

	event_table_unlock();

#if	ETAP_LOCK_ACCUMULATE

	/*
	 *  Wipe out cumulative buffer statistical fields for all entries
	 */
	cumulative_buffer_lock(s);

	for (x=0; x < ETAP_CBUFF_ENTRIES; x++) {
		bzero ((char *) &cbuff->entry[x].hold,
		       sizeof(struct cbuff_data));
		bzero ((char *) &cbuff->entry[x].wait,
		       sizeof(struct cbuff_data));
		bzero ((char *) &cbuff->entry[x].hold_interval[0],
		       sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
		bzero ((char *) &cbuff->entry[x].wait_interval[0],
		       sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS);
	}

	/*
	 *  Assign interval width if the user defined a new one.
	 */
	if (new_interval != 0)
		cbuff_width = new_interval;

	cumulative_buffer_unlock(s);

#endif	/* ETAP_LOCK_ACCUMULATE */
}
/*
 * ROUTINE:	etap_probe	[exported]
 *
 * FUNCTION:	The etap_probe system call serves as a user-level probe,
 *		allowing user-level code to store event data into
 *		the monitored buffer(s).
 */

kern_return_t
etap_probe(
	unsigned short	event_type,
	unsigned short	event_id,
	unsigned int	data_size,	/* total size in bytes */
	etap_data_t	*data)
{
#if	ETAP_MONITOR
	mbuff_entry_t	mbuff_entryp;
	int		cpu;
	int		free;

	if (data_size > ETAP_DATA_SIZE)
		return (KERN_INVALID_ARGUMENT);

	if (event_table[event_type].status == ETAP_TRACE_OFF ||
	    event_table[event_type].event != event_type)
		return (KERN_NO_ACCESS);

	mp_disable_preemption();
	cpu = cpu_number();

	free = mbuff[cpu]->free;
	mbuff_entryp = &mbuff[cpu]->entry[free];

	/*
	 *  Load monitor buffer entry
	 */
	ETAP_TIMESTAMP(mbuff_entryp->time);
	mbuff_entryp->event = event_id;
	mbuff_entryp->flags = USER_EVENT;
	mbuff_entryp->instance = (u_int) current_thread();
	mbuff_entryp->pc = 0;

	if (data != ETAP_DATA_NULL)
		copyin((const char *) data,
		       (char *) mbuff_entryp->data,
		       data_size);

	mbuff[cpu]->free = (free+1) % mbuff_entries;

	if (mbuff[cpu]->free == 0)
		mbuff[cpu]->timestamp++;
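
	/*
	 * The monitor buffer behaves as a ring: with mbuff_entries == 4
	 * (illustrative), free advances 0, 1, 2, 3, 0, ...; each wrap back
	 * to slot 0 bumps mbuff[cpu]->timestamp, letting a reader detect
	 * how many times older entries have been overwritten.
	 */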
	mp_enable_preemption();

	return (KERN_SUCCESS);

#else	/* ETAP_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_MONITOR */
}
/*
 * ROUTINE:	etap_trace_thread	[exported]
 *
 * FUNCTION:	Toggles a thread's ETAP trace status bit.
 */

kern_return_t
etap_trace_thread(
	thread_act_t	thr_act,
	boolean_t	trace_status)
{
#if	ETAP_EVENT_MONITOR
	thread_t	thread;
	boolean_t	old_status;
	etap_data_t	probe_data;
	spl_t		s;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);

	if (thread == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return (KERN_INVALID_ARGUMENT);
	}

	s = splsched();
	thread_lock(thread);

	old_status = thread->etap_trace;
	thread->etap_trace = trace_status;

	ETAP_DATA_LOAD(probe_data[0], thr_act->task);
	ETAP_DATA_LOAD(probe_data[1], thr_act);
	ETAP_DATA_LOAD(probe_data[2], thread->sched_pri);

	thread_unlock(thread);
	splx(s);

	act_unlock_thread(thr_act);

	/*
	 *  Thread creation (ETAP_P_THREAD_LIFE: BEGIN) is ONLY recorded
	 *  here since a thread's trace status is disabled by default.
	 */
	if (trace_status == TRUE && old_status == FALSE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_BEGIN,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	/*
	 *  Thread termination is (falsely) recorded here if the trace
	 *  status has been disabled.  This event is recorded to allow
	 *  users the option of tracing a portion of a thread's execution.
	 */
	if (trace_status == FALSE && old_status == TRUE) {
		ETAP_PROBE_DATA(ETAP_P_THREAD_LIFE,
				EVENT_END,
				thread,
				&probe_data,
				ETAP_DATA_ENTRY*3);
	}

	return (KERN_SUCCESS);

#else	/* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}
/*
 * ROUTINE:	etap_mon_reconfig	[exported]
 *
 * FUNCTION:	Reallocates monitor buffers to hold the specified number
 *		of entries.
 *
 * NOTES:	In the multiprocessor (SMP) case, a lock needs to be added
 *		here and in the data collection macros to protect access
 *		to the monitor buffers.
 */

kern_return_t
etap_mon_reconfig(
	host_priv_t	host_priv,
	int		nentries)
{
#if	ETAP_EVENT_MONITOR
	struct monitor_buffer	*nmbuff[NCPUS], *ombuff[NCPUS];
	int			s, size, osize, i, ret;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (nentries <= 0)	/* must be at least 1 */
		return (KERN_FAILURE);

	size = ((nentries-1) * sizeof(struct mbuff_entry)) +
	       sizeof(struct monitor_buffer);

	for (i = 0; i < NCPUS; ++i) {
		ret = kmem_alloc(kernel_map,
				 (vm_offset_t *)&nmbuff[i],
				 size);
		if (ret != KERN_SUCCESS) {
			int	j;

			/* return the buffers allocated so far */
			for (j = 0; j < i; ++j) {
				kmem_free(kernel_map,
					  (vm_offset_t)nmbuff[j],
					  size);
			}
			return (ret);
		}
		bzero((char *) nmbuff[i], size);
	}

	osize = ((mbuff_entries-1) * sizeof (struct mbuff_entry)) +
		sizeof (struct monitor_buffer);

	s = splhigh();
	event_table_lock();
	for (i = 0; i < NCPUS; ++i) {
		ombuff[i] = mbuff[i];
		mbuff[i] = nmbuff[i];
	}
	mbuff_entries = nentries;
	event_table_unlock();
	splx(s);

	/* free the old buffers */
	for (i = 0; i < NCPUS; ++i) {
		kmem_free(kernel_map,
			  (vm_offset_t)ombuff[i],
			  osize);
	}

	return (KERN_SUCCESS);

#else	/* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}
/*
 * ROUTINE:	etap_new_probe	[exported]
 *
 * FUNCTION:	Reallocates the monitor probe table, adding a new entry
 *		and returning the new event's identifier.
 */

kern_return_t
etap_new_probe(
	host_priv_t	host_priv,
	vm_address_t	name,
	vm_size_t	namlen,
	boolean_t	trace_on,
	vm_address_t	id)
{
#if	ETAP_EVENT_MONITOR
	event_table_t	newtable, oldtable;
	unsigned short	i, nid;
	kern_return_t	ret;
	vm_size_t	newsize = (event_table_max + 1) *
				  sizeof (struct event_table_entry);
	boolean_t	duplicate_name = FALSE;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_ARGUMENT;

	if (namlen > EVENT_NAME_LENGTH - 1)
		return (KERN_INVALID_ARGUMENT);

	if ((ret = kmem_alloc(kernel_map, (vm_address_t *)&newtable,
			      newsize)) != KERN_SUCCESS)
		return (ret);

	bcopy((const char *)event_table, (char *)newtable, event_table_max *
	      sizeof (struct event_table_entry));

	if (copyin((const char *)name,
		   (char *)&newtable[event_table_max].name, namlen)) {
		kmem_free(kernel_map, (vm_address_t)newtable, newsize);
		return (KERN_INVALID_ADDRESS);
	}

	newtable[event_table_max].name[EVENT_NAME_LENGTH - 1] = '\0';
	newtable[event_table_max].status = trace_on;
	newtable[event_table_max].dynamic = 0;

	for (nid = i = 0; i < event_table_max; ++i) {
		if (strcmp((char *)newtable[event_table_max].name,
			   newtable[i].name) == 0) {
			duplicate_name = TRUE;
			printf("duplicate name\n");
		}
		nid = max(nid, newtable[i].event);
	}
	++nid;
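
	/*
	 * Identifier assignment sketch (illustrative): if the existing
	 * events carry numbers {1, 5, 9}, the loop leaves nid == 9 and the
	 * increment makes the new probe's identifier 10, one past the
	 * largest id in use; duplicate_name flags any name collision found
	 * during the same pass.
	 */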
	if (nid >= ETAP_NO_TRACE || duplicate_name == TRUE) {
		kmem_free(kernel_map, (vm_address_t)newtable, newsize);
		if (nid >= ETAP_NO_TRACE) {
			printf("KERN_RESOURCE_SHORTAGE\n");
			return (KERN_RESOURCE_SHORTAGE);
		}
		else {
			printf("KERN_NAME_EXISTS\n");
			return (KERN_NAME_EXISTS);
		}
	}

	newtable[event_table_max].event = nid;

	event_table_lock();
	oldtable = event_table;
	event_table = newtable;
	++event_table_max;
	event_table_unlock();

	if (oldtable != event_table_init)
		kmem_free(kernel_map, (vm_address_t)oldtable,
			  (event_table_max - 1) *
			  sizeof (struct event_table_entry));

	*(unsigned short *)id = nid;

	return (KERN_SUCCESS);

#else	/* ETAP_EVENT_MONITOR */
	return (KERN_FAILURE);
#endif	/* ETAP_EVENT_MONITOR */
}
/*
 * ETAP trap probe hooks
 */

void
etap_interrupt_probe(int interrupt, int flag_setting)
{
	u_short	flag;

	if (flag_setting == 1)
		flag = EVENT_BEGIN;
	else
		flag = EVENT_END;

	ETAP_PROBE_DATA_COND(ETAP_P_INTERRUPT,
			     flag,
			     current_thread(),
			     &interrupt,
			     sizeof(int),
			     1);
}

void
etap_machcall_probe1(int syscall)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_BEGIN | SYSCALL_TRAP,
			current_thread(),
			&syscall,
			sizeof(int));
}

void
etap_machcall_probe2(void)
{
	ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
			EVENT_END | SYSCALL_TRAP,
			current_thread(),
			0,
			0);
}
#if	ETAP_MONITOR

static void print_user_event(mbuff_entry_t);
static void print_kernel_event(mbuff_entry_t, boolean_t);
static void print_lock_event(mbuff_entry_t, const char *);

#if	MACH_KDB
void db_show_etap_log(db_expr_t, boolean_t, db_expr_t, char *);

/*
 * ROUTINE:	db_show_etap_log	[internal]
 *
 * FUNCTION:	print each mbuff table (for use in debugger)
 */

void
db_show_etap_log(
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		*modif)
{
#if	ETAP_MONITOR
	int		cpu = cpu_number(), last, i, first, step, end, restart;
	boolean_t	show_data = FALSE;

	last = (mbuff[cpu]->free - 1) % mbuff_entries;

	if(db_option(modif, 'r')) {
		first = last;
		step = -1;
		end = -1;
		restart = mbuff_entries - 1;
	} else {
		first = mbuff[cpu]->free;
		step = 1;
		end = mbuff_entries;
		restart = 0;
	}
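
	/*
	 * Iteration sketch (illustrative): with mbuff_entries == 8 and
	 * free == 3, the default walk below visits slots 3..7 and then
	 * 0..2, i.e. oldest entry to newest; under the 'r' modifier the
	 * same slots are visited in the opposite order, newest first.
	 */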
	if(db_option(modif, 'd'))
		show_data = TRUE;

	for(i = first; i != end; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}
	for(i = restart; i != first; i += step) {
		if (mbuff[cpu]->entry[i].flags & USER_EVENT)
			print_user_event(&mbuff[cpu]->entry[i]);
		else
			print_kernel_event(&mbuff[cpu]->entry[i], show_data);
	}

#else	/* ETAP_MONITOR */
	printf("ETAP event monitor not configured\n");
#endif	/* ETAP_MONITOR */
}
static void
print_user_event(mbuff_entry_t record)
{
	char	buf[32], *s;

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);

	switch (record->event) {
	case ETAP_P_USER_EVENT0:  s = "0";  break;
	case ETAP_P_USER_EVENT1:  s = "1";  break;
	case ETAP_P_USER_EVENT2:  s = "2";  break;
	case ETAP_P_USER_EVENT3:  s = "3";  break;
	case ETAP_P_USER_EVENT4:  s = "4";  break;
	case ETAP_P_USER_EVENT5:  s = "5";  break;
	case ETAP_P_USER_EVENT6:  s = "6";  break;
	case ETAP_P_USER_EVENT7:  s = "7";  break;
	case ETAP_P_USER_EVENT8:  s = "8";  break;
	case ETAP_P_USER_EVENT9:  s = "9";  break;
	case ETAP_P_USER_EVENT10: s = "10"; break;
	case ETAP_P_USER_EVENT11: s = "11"; break;
	case ETAP_P_USER_EVENT12: s = "12"; break;
	case ETAP_P_USER_EVENT13: s = "13"; break;
	case ETAP_P_USER_EVENT14: s = "14"; break;
	case ETAP_P_USER_EVENT15: s = "15"; break;
	case ETAP_P_USER_EVENT16: s = "16"; break;
	case ETAP_P_USER_EVENT17: s = "17"; break;
	case ETAP_P_USER_EVENT18: s = "18"; break;
	case ETAP_P_USER_EVENT19: s = "19"; break;
	case ETAP_P_USER_EVENT20: s = "20"; break;
	case ETAP_P_USER_EVENT21: s = "21"; break;
	case ETAP_P_USER_EVENT22: s = "22"; break;
	case ETAP_P_USER_EVENT23: s = "23"; break;
	case ETAP_P_USER_EVENT24: s = "24"; break;
	case ETAP_P_USER_EVENT25: s = "25"; break;
	case ETAP_P_USER_EVENT26: s = "26"; break;
	case ETAP_P_USER_EVENT27: s = "27"; break;
	case ETAP_P_USER_EVENT28: s = "28"; break;
	case ETAP_P_USER_EVENT29: s = "29"; break;
	case ETAP_P_USER_EVENT30: s = "30"; break;
	case ETAP_P_USER_EVENT31: s = "31"; break;
	default:
		sprintf(buf, "dynamic %x", record->pc);
		s = buf;
		break;
	}

	db_printf("user probe %s: [%x] data = %x %x %x %x\n",
		  s, record->pc, record->data[0], record->data[1],
		  record->data[2], record->data[3]);
}
static void
print_kernel_event(mbuff_entry_t record, boolean_t data)
{
	char	*text_name;
	int	i;

	/* assume zero event means that record was never written to */
	if(record->event == 0)
		return;

	db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec,
		  record->time.tv_nsec);

	switch (record->event) {

	case ETAP_P_THREAD_LIFE:
		if (record->flags & EVENT_BEGIN)
			db_printf("thread created [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		else
			db_printf("thread terminated [T:%x A:%x] P:%d\n",
				  record->data[0],
				  record->data[1],
				  record->data[2]);
		break;

	case ETAP_P_SYSCALL_MACH:
		if (record->flags & SYSCALL_TRAP)
			text_name = system_table_lookup(SYS_TABLE_MACH_TRAP,
							record->data[0]);
		else
			text_name = system_table_lookup(SYS_TABLE_MACH_MESSAGE,
							record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("mach enter: %s [%x]\n",
				  text_name,
				  record->data[0]);
		else
			db_printf("mach exit :\n");
		break;

	case ETAP_P_SYSCALL_UNIX:
		text_name = system_table_lookup(SYS_TABLE_UNIX_SYSCALL,
						record->data[0]);

		if (record->flags & EVENT_BEGIN)
			db_printf("unix enter: %s\n", text_name);
		else
			db_printf("unix exit : %s\n", text_name);
		break;

	case ETAP_P_THREAD_CTX:
		if (record->flags & EVENT_END)
			db_printf("context switch to %x ",
				  record->data[0]);
		else	/* EVENT_BEGIN */
			db_printf("context switch from %x ",
				  record->data[0]);

		switch (record->data[1]) {
		case BLOCKED_ON_SEMAPHORE:
			db_printf("R: semaphore\n"); break;
		case BLOCKED_ON_LOCK:
			db_printf("R: lock\n"); break;
		case BLOCKED_ON_MUTEX_LOCK:
			db_printf("R: mutex lock\n"); break;
		case BLOCKED_ON_COMPLEX_LOCK:
			db_printf("R: complex lock\n"); break;
		case BLOCKED_ON_PORT_RCV:
			db_printf("R: port receive\n"); break;
		case BLOCKED_ON_REAPER_DONE:
			db_printf("R: reaper thread done\n"); break;
		case BLOCKED_ON_IDLE_DONE:
			db_printf("R: idle thread done\n"); break;
		case BLOCKED_ON_TERMINATION:
			db_printf("R: termination\n"); break;
		default:
			if (record->data[2])
				db_printf("R: ast %x\n", record->data[2]);
			else
				db_printf("R: undefined block\n");
		}
		break;

	case ETAP_P_INTERRUPT:
		if (record->flags & EVENT_BEGIN) {
			text_name = system_table_lookup(SYS_TABLE_INTERRUPT,
							record->data[0]);
			db_printf("intr enter: %s\n", text_name);
		} else
			db_printf("intr exit\n");
		break;

	case ETAP_P_ACT_ABORT:
		db_printf("activation abort [A %x : S %x]\n",
			  record->data[0],
			  record->data[1]);
		break;

	case ETAP_P_PRIORITY:
		db_printf("priority changed for %x N:%d O:%d\n",
			  record->data[0],
			  record->data[1],
			  record->data[2]);
		break;

	case ETAP_P_EXCEPTION:
		text_name = system_table_lookup(SYS_TABLE_EXCEPTION,
						record->data[0]);
		db_printf("exception: %s\n", text_name);
		break;

	case ETAP_P_DEPRESSION:
		if (record->flags & EVENT_BEGIN)
			db_printf("priority depressed\n");
		else {
			if (record->data[0] == 0)
				db_printf("priority undepressed : timed out\n");
			else
				db_printf("priority undepressed : self inflicted\n");
		}
		break;

	case ETAP_P_MISC:
		db_printf("flags: %x data: %x %x %x %x\n", record->flags,
			  record->data[0], record->data[1], record->data[2],
			  record->data[3]);
		break;

	case ETAP_P_DETAP:
		printf("flags: %x rtc: %x %09x dtime: %x %09x\n",
		       record->flags, record->data[0], record->data[1],
		       record->data[2], record->data[3]);
		break;

	default:
		for(i = 0; event_table_init[i].event != ETAP_NO_TRACE; ++i)
			if(record->event == event_table_init[i].event) {
				print_lock_event(record, event_table_init[i].name);
				return;
			}
		db_printf("Unknown event: %d\n", record->event);
		break;
	}

	if (data)
		db_printf("    Data: %08x %08x %08x %08x\n", record->data[0],
			  record->data[1], record->data[2], record->data[3]);
}
void print_lock_event(mbuff_entry_t record, const char *name)
{
	char		*sym1, *sym2;
	db_addr_t	offset1, offset2;

	db_find_sym_and_offset(record->data[0], &sym1, &offset1);

	db_printf("%15s", name);
	if (record->flags & SPIN_LOCK)
		db_printf(" spin  ");
	else if (record->flags & READ_LOCK)
		db_printf(" read  ");
	else if (record->flags & WRITE_LOCK)
		db_printf(" write ");
	else
		db_printf("       ");

	if (record->flags & ETAP_CONTENTION) {
		db_printf("wait lock %s+%x\n",
			  sym1, offset1);
	}
	else if (record->flags & ETAP_DURATION) {
		db_find_sym_and_offset(record->data[1], &sym2, &offset2);
		db_printf("lock %x+%x unlock %x+%x\n",
			  sym1, offset1, sym2, offset2);
	} else {
		db_printf("illegal op: neither HOLD or WAIT are specified\n");
	}
}
char *
system_table_lookup(unsigned int table, unsigned int number)
{
	int		x;
	char		*name = NULL;
	unsigned int	offset;

	switch (table) {
	case SYS_TABLE_MACH_TRAP:
		name = mach_trap_name(number >> 4);
		break;
	case SYS_TABLE_MACH_MESSAGE:
		for (x=0; x < mach_message_table_entries; x++) {
			if (mach_message_table[x].number == number) {
				name = mach_message_table[x].name;
				break;
			}
		}
		break;
	case SYS_TABLE_UNIX_SYSCALL:
		name = syscall_name(number);
		break;
	case SYS_TABLE_INTERRUPT:
		db_find_sym_and_offset((int)ivect[number], &name, &offset);
		break;
	case SYS_TABLE_EXCEPTION:
		name = exception_name(number);
		break;
	}

	return (name != NULL) ? name : "undefined";
}

#endif	/* MACH_KDB */
#endif	/* ETAP_MONITOR */