2  * Copyright (c) 2000-2013 Apple Inc. All rights reserved. 
   4  * @Apple_LICENSE_HEADER_START@ 
   6  * The contents of this file constitute Original Code as defined in and 
   7  * are subject to the Apple Public Source License Version 1.1 (the 
   8  * "License").  You may not use this file except in compliance with the 
   9  * License.  Please obtain a copy of the License at 
  10  * http://www.apple.com/publicsource and read it before using this file. 
  12  * This Original Code and all software distributed under the License are 
  13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
  14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
  15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
  16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the 
  17  * License for the specific language governing rights and limitations 
  20  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 
  24 #include <machine/spl.h> 
  26 #include <sys/errno.h> 
  27 #include <sys/param.h> 
  28 #include <sys/systm.h> 
  29 #include <sys/proc_internal.h> 
  31 #include <sys/sysctl.h> 
  32 #include <sys/kdebug.h> 
  33 #include <sys/sysproto.h> 
  34 #include <sys/bsdtask_info.h> 
  35 #include <sys/random.h> 
  38 #include <mach/clock_types.h> 
  39 #include <mach/mach_types.h> 
  40 #include <mach/mach_time.h> 
  41 #include <machine/machine_routines.h> 
  43 #if defined(__i386__) || defined(__x86_64__) 
  44 #include <i386/rtclock_protos.h> 
  46 #include <i386/machine_routines.h> 
  49 #include <kern/clock.h> 
  51 #include <kern/thread.h> 
  52 #include <kern/task.h> 
  53 #include <kern/debug.h> 
  54 #include <kern/kalloc.h> 
  55 #include <kern/cpu_data.h> 
  56 #include <kern/assert.h> 
  57 #include <kern/telemetry.h> 
  58 #include <vm/vm_kern.h> 
  61 #include <sys/malloc.h> 
  62 #include <sys/mcache.h> 
  63 #include <sys/kauth.h> 
  65 #include <sys/vnode.h> 
  66 #include <sys/vnode_internal.h> 
  67 #include <sys/fcntl.h> 
  68 #include <sys/file_internal.h> 
  70 #include <sys/param.h>                  /* for isset() */ 
  72 #include <mach/mach_host.h>             /* for host_info() */ 
  73 #include <libkern/OSAtomic.h> 
  75 #include <machine/pal_routines.h> 
  80  * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html 
  82  * IOP(s) are auxiliary cores that want to participate in kdebug event logging. 
  83  * They are registered dynamically. Each is assigned a cpu_id at registration. 
  85  * NOTE: IOP trace events may not use the same clock hardware as "normal" 
  86  * cpus. There is an effort made to synchronize the IOP timebase with the 
  87  * AP, but it should be understood that there may be discrepancies. 
  89  * Once registered, an IOP is permanent, it cannot be unloaded/unregistered. 
  90  * The current implementation depends on this for thread safety. 
  92  * New registrations occur by allocating an kd_iop struct and assigning 
  93  * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the 
  94  * list_head pointer resolves any races. 
  96  * You may safely walk the kd_iops list at any time, without holding locks. 
  98  * When allocating buffers, the current kd_iops head is captured. Any operations 
  99  * that depend on the buffer state (such as flushing IOP traces on reads, 
 100  * etc.) should use the captured list head. This will allow registrations to 
 101  * take place while trace is in use. 
 104 typedef struct kd_iop 
{ 
 105         kd_callback_t   callback
; 
 107         uint64_t        last_timestamp
; /* Prevent timer rollback */ 
 111 static kd_iop_t
* kd_iops 
= NULL
; 
 113 /* XXX should have prototypes, but Mach does not provide one */ 
 114 void task_act_iterate_wth_args(task_t
, void(*)(thread_t
, void *), void *); 
 115 int cpu_number(void);   /* XXX <machine/...> include path broken */ 
 117 /* XXX should probably be static, but it's debugging code... */ 
 118 int kdbg_read(user_addr_t
, size_t *, vnode_t
, vfs_context_t
); 
 119 void kdbg_control_chud(int, void *); 
 120 int kdbg_control(int *, u_int
, user_addr_t
, size_t *); 
 121 int kdbg_readcpumap(user_addr_t
, size_t *); 
 122 int kdbg_readcurcpumap(user_addr_t
, size_t *); 
 123 int kdbg_readthrmap(user_addr_t
, size_t *, vnode_t
, vfs_context_t
); 
 124 int kdbg_readcurthrmap(user_addr_t
, size_t *); 
 125 int kdbg_getreg(kd_regtype 
*); 
 126 int kdbg_setreg(kd_regtype 
*); 
 127 int kdbg_setrtcdec(kd_regtype 
*); 
 128 int kdbg_setpidex(kd_regtype 
*); 
 129 int kdbg_setpid(kd_regtype 
*); 
 130 void kdbg_thrmap_init(void); 
 131 int kdbg_reinit(boolean_t
); 
 132 int kdbg_bootstrap(boolean_t
); 
 134 int kdbg_cpumap_init_internal(kd_iop_t
* iops
, uint32_t cpu_count
, uint8_t** cpumap
, uint32_t* cpumap_size
); 
 135 kd_threadmap
* kdbg_thrmap_init_internal(unsigned int count
, unsigned int *mapsize
, unsigned int *mapcount
); 
 137 static int kdbg_enable_typefilter(void); 
 138 static int kdbg_disable_typefilter(void); 
 140 static int create_buffers(boolean_t
); 
 141 static void delete_buffers(void); 
 143 extern void IOSleep(int); 
 145 /* trace enable status */ 
 146 unsigned int kdebug_enable 
= 0; 
 148 /* A static buffer to record events prior to the start of regular logging */ 
 149 #define KD_EARLY_BUFFER_MAX      64 
 150 static kd_buf           kd_early_buffer
[KD_EARLY_BUFFER_MAX
]; 
 151 static int              kd_early_index 
= 0; 
 152 static boolean_t        kd_early_overflow 
= FALSE
; 
 154 #define SLOW_NOLOG      0x01 
 155 #define SLOW_CHECKS     0x02 
 156 #define SLOW_ENTROPY    0x04                    /* Obsolescent */ 
 157 #define SLOW_CHUD       0x08 
 159 #define EVENTS_PER_STORAGE_UNIT         2048 
 160 #define MIN_STORAGE_UNITS_PER_CPU       4 
 162 #define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset]) 
 166                 uint32_t buffer_index
:21; 
 173         union   kds_ptr kds_next
; 
 174         uint32_t kds_bufindx
; 
 176         uint32_t kds_readlast
; 
 177         boolean_t kds_lostevents
; 
 178         uint64_t  kds_timestamp
; 
 180         kd_buf  kds_records
[EVENTS_PER_STORAGE_UNIT
]; 
 183 #define MAX_BUFFER_SIZE                 (1024 * 1024 * 128) 
 184 #define N_STORAGE_UNITS_PER_BUFFER      (MAX_BUFFER_SIZE / sizeof(struct kd_storage)) 
 186 struct kd_storage_buffers 
{ 
 187         struct  kd_storage      
*kdsb_addr
; 
 191 #define KDS_PTR_NULL 0xffffffff 
 192 struct kd_storage_buffers 
*kd_bufs 
= NULL
; 
 193 int     n_storage_units 
= 0; 
 194 int     n_storage_buffers 
= 0; 
 195 int     n_storage_threshold 
= 0; 
 200         union  kds_ptr kd_list_head
; 
 201         union  kds_ptr kd_list_tail
; 
 202         boolean_t kd_lostevents
; 
 204         uint64_t kd_prev_timebase
; 
 206 } __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE
) )); 
 208 struct kd_ctrl_page_t 
{ 
 209         union kds_ptr kds_free_list
; 
 213         uint32_t kdebug_flags
; 
 214         uint32_t kdebug_slowcheck
; 
 216          * The number of kd_bufinfo structs allocated may not match the current 
 217          * number of active cpus. We capture the iops list head at initialization 
 218          * which we could use to calculate the number of cpus we allocated data for, 
 219          * unless it happens to be null. To avoid that case, we explicitly also 
 220          * capture a cpu count. 
 222         kd_iop_t
* kdebug_iops
; 
 223         uint32_t kdebug_cpus
; 
 224 } kd_ctrl_page 
= { .kds_free_list 
= {.raw 
= KDS_PTR_NULL
}, .kdebug_slowcheck 
= SLOW_NOLOG 
}; 
 228 struct kd_bufinfo 
*kdbip 
= NULL
; 
 230 #define KDCOPYBUF_COUNT 8192 
 231 #define KDCOPYBUF_SIZE  (KDCOPYBUF_COUNT * sizeof(kd_buf)) 
 232 kd_buf 
*kdcopybuf 
= NULL
; 
 234 boolean_t kdlog_bg_trace 
= FALSE
; 
 235 boolean_t kdlog_bg_trace_running 
= FALSE
; 
 236 unsigned int bg_nkdbufs 
= 0; 
 238 unsigned int nkdbufs 
= 0; 
 239 unsigned int kdlog_beg
=0; 
 240 unsigned int kdlog_end
=0; 
 241 unsigned int kdlog_value1
=0; 
 242 unsigned int kdlog_value2
=0; 
 243 unsigned int kdlog_value3
=0; 
 244 unsigned int kdlog_value4
=0; 
 246 static lck_spin_t 
* kdw_spin_lock
; 
 247 static lck_spin_t 
* kds_spin_lock
; 
 248 static lck_mtx_t  
* kd_trace_mtx_sysctl
; 
 249 static lck_grp_t  
* kd_trace_mtx_sysctl_grp
; 
 250 static lck_attr_t 
* kd_trace_mtx_sysctl_attr
; 
 251 static lck_grp_attr_t   
*kd_trace_mtx_sysctl_grp_attr
; 
 253 static lck_grp_t       
*stackshot_subsys_lck_grp
; 
 254 static lck_grp_attr_t  
*stackshot_subsys_lck_grp_attr
; 
 255 static lck_attr_t      
*stackshot_subsys_lck_attr
; 
 256 static lck_mtx_t        stackshot_subsys_mutex
; 
 258 void *stackshot_snapbuf 
= NULL
; 
 261 stack_snapshot2(pid_t pid
, user_addr_t tracebuf
, uint32_t tracebuf_size
, uint32_t flags
, uint32_t dispatch_offset
, int32_t *retval
); 
 264 stack_snapshot_from_kernel(pid_t pid
, void *buf
, uint32_t size
, uint32_t flags
, unsigned *bytesTraced
); 
 266 kdp_snapshot_preflight(int pid
, void  *tracebuf
, uint32_t tracebuf_size
, uint32_t flags
, uint32_t dispatch_offset
); 
 269 kdp_stack_snapshot_geterror(void); 
 271 kdp_stack_snapshot_bytes_traced(void); 
 273 kd_threadmap 
*kd_mapptr 
= 0; 
 274 unsigned int kd_mapsize 
= 0; 
 275 unsigned int kd_mapcount 
= 0; 
 277 off_t   RAW_file_offset 
= 0; 
 278 int     RAW_file_written 
= 0; 
 280 #define RAW_FLUSH_SIZE  (2 * 1024 * 1024) 
 282 pid_t global_state_pid 
= -1;       /* Used to control exclusive use of kd_buffer */ 
 284 #define DBG_FUNC_MASK   0xfffffffc 
 286 /*  TODO: move to kdebug.h */ 
 287 #define CLASS_MASK      0xff000000 
 288 #define CLASS_OFFSET    24 
 289 #define SUBCLASS_MASK   0x00ff0000 
 290 #define SUBCLASS_OFFSET 16 
 291 #define CSC_MASK        0xffff0000      /*  class and subclass mask */ 
 292 #define CSC_OFFSET      SUBCLASS_OFFSET 
 294 #define EXTRACT_CLASS(debugid)          ( (uint8_t) ( ((debugid) & CLASS_MASK   ) >> CLASS_OFFSET    ) ) 
 295 #define EXTRACT_SUBCLASS(debugid)       ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) ) 
 296 #define EXTRACT_CSC(debugid)            ( (uint16_t)( ((debugid) & CSC_MASK     ) >> CSC_OFFSET      ) ) 
 298 #define INTERRUPT       0x01050000 
 299 #define MACH_vmfault    0x01300008 
 300 #define BSC_SysCall     0x040c0000 
 301 #define MACH_SysCall    0x010c0000 
 302 #define DBG_SCALL_MASK  0xffff0000 
 305 /* task to string structure */ 
 308   task_t    task
;            /* from procs task */ 
 309   pid_t     pid
;             /* from procs p_pid  */ 
 310   char      task_comm
[20];   /* from procs p_comm */ 
 313 typedef struct tts tts_t
; 
 317         kd_threadmap 
*map
;    /* pointer to the map buffer */ 
 323 typedef struct krt krt_t
; 
 325 /* This is for the CHUD toolkit call */ 
 326 typedef void (*kd_chudhook_fn
) (uint32_t debugid
, uintptr_t arg1
, 
 327                                 uintptr_t arg2
, uintptr_t arg3
, 
 328                                 uintptr_t arg4
, uintptr_t arg5
); 
 330 volatile kd_chudhook_fn kdebug_chudhook 
= 0;   /* pointer to CHUD toolkit function */ 
 332 __private_extern__ 
void stackshot_lock_init( void ); 
 334 static uint8_t *type_filter_bitmap
; 
 337  * This allows kperf to swap out the global state pid when kperf ownership is 
 338  * passed from one process to another. It checks the old global state pid so 
 339  * that kperf can't accidentally steal control of trace when a non-kperf trace user has 
 343 kdbg_swap_global_state_pid(pid_t old_pid
, pid_t new_pid
); 
 346 kdbg_swap_global_state_pid(pid_t old_pid
, pid_t new_pid
) 
 348         if (!(kd_ctrl_page
.kdebug_flags 
& KDBG_LOCKINIT
)) 
 351         lck_mtx_lock(kd_trace_mtx_sysctl
); 
 353         if (old_pid 
== global_state_pid
) 
 354                 global_state_pid 
= new_pid
; 
 356         lck_mtx_unlock(kd_trace_mtx_sysctl
); 
 360 kdbg_cpu_count(boolean_t early_trace
) 
 364                  * we've started tracing before the IOKit has even 
 365                  * started running... just use the static max value 
 370         host_basic_info_data_t hinfo
; 
 371         mach_msg_type_number_t count 
= HOST_BASIC_INFO_COUNT
; 
 372         host_info((host_t
)1 /* BSD_HOST */, HOST_BASIC_INFO
, (host_info_t
)&hinfo
, &count
); 
 373         assert(hinfo
.logical_cpu_max 
> 0); 
 374         return hinfo
.logical_cpu_max
; 
 378 #endif /* MACH_ASSERT */ 
 381 kdbg_iop_list_callback(kd_iop_t
* iop
, kd_callback_type type
, void* arg
) 
 384                 iop
->callback
.func(iop
->callback
.context
, type
, arg
); 
 390 kdbg_set_tracing_enabled(boolean_t enabled
, uint32_t trace_type
) 
 392         int s 
= ml_set_interrupts_enabled(FALSE
); 
 393         lck_spin_lock(kds_spin_lock
); 
 396                 kdebug_enable 
|= trace_type
; 
 397                 kd_ctrl_page
.kdebug_slowcheck 
&= ~SLOW_NOLOG
; 
 398                 kd_ctrl_page
.enabled 
= 1; 
 400                 kdebug_enable 
&= ~(KDEBUG_ENABLE_TRACE
|KDEBUG_ENABLE_PPT
); 
 401                 kd_ctrl_page
.kdebug_slowcheck 
|= SLOW_NOLOG
; 
 402                 kd_ctrl_page
.enabled 
= 0; 
 404         lck_spin_unlock(kds_spin_lock
); 
 405         ml_set_interrupts_enabled(s
); 
 408                 kdbg_iop_list_callback(kd_ctrl_page
.kdebug_iops
, KD_CALLBACK_KDEBUG_ENABLED
, NULL
); 
 411                  * If you do not flush the IOP trace buffers, they can linger 
 412                  * for a considerable period; consider code which disables and 
 413                  * deallocates without a final sync flush. 
 415                 kdbg_iop_list_callback(kd_ctrl_page
.kdebug_iops
, KD_CALLBACK_KDEBUG_DISABLED
, NULL
); 
 416                 kdbg_iop_list_callback(kd_ctrl_page
.kdebug_iops
, KD_CALLBACK_SYNC_FLUSH
, NULL
); 
 421 kdbg_set_flags(int slowflag
, int enableflag
, boolean_t enabled
) 
 423         int s 
= ml_set_interrupts_enabled(FALSE
); 
 424         lck_spin_lock(kds_spin_lock
); 
 427                 kd_ctrl_page
.kdebug_slowcheck 
|= slowflag
; 
 428                 kdebug_enable 
|= enableflag
; 
 430                 kd_ctrl_page
.kdebug_slowcheck 
&= ~slowflag
; 
 431                 kdebug_enable 
&= ~enableflag
; 
 434         lck_spin_unlock(kds_spin_lock
); 
 435         ml_set_interrupts_enabled(s
); 
 439 disable_wrap(uint32_t *old_slowcheck
, uint32_t *old_flags
) 
 441         int s 
= ml_set_interrupts_enabled(FALSE
); 
 442         lck_spin_lock(kds_spin_lock
); 
 444         *old_slowcheck 
= kd_ctrl_page
.kdebug_slowcheck
; 
 445         *old_flags 
= kd_ctrl_page
.kdebug_flags
; 
 447         kd_ctrl_page
.kdebug_flags 
&= ~KDBG_WRAPPED
; 
 448         kd_ctrl_page
.kdebug_flags 
|= KDBG_NOWRAP
; 
 450         lck_spin_unlock(kds_spin_lock
); 
 451         ml_set_interrupts_enabled(s
); 
 455 enable_wrap(uint32_t old_slowcheck
, boolean_t lostevents
) 
 457         int s 
= ml_set_interrupts_enabled(FALSE
); 
 458         lck_spin_lock(kds_spin_lock
); 
 460         kd_ctrl_page
.kdebug_flags 
&= ~KDBG_NOWRAP
; 
 462         if ( !(old_slowcheck 
& SLOW_NOLOG
)) 
 463                 kd_ctrl_page
.kdebug_slowcheck 
&= ~SLOW_NOLOG
; 
 465         if (lostevents 
== TRUE
) 
 466                 kd_ctrl_page
.kdebug_flags 
|= KDBG_WRAPPED
; 
 468         lck_spin_unlock(kds_spin_lock
); 
 469         ml_set_interrupts_enabled(s
); 
 473 create_buffers(boolean_t early_trace
) 
 482          * For the duration of this allocation, trace code will only reference 
 483          * kdebug_iops. Any iops registered after this enabling will not be 
 484          * messaged until the buffers are reallocated. 
 486          * TLDR; Must read kd_iops once and only once! 
 488         kd_ctrl_page
.kdebug_iops 
= kd_iops
; 
 492          * If the list is valid, it is sorted, newest -> oldest. Each iop entry 
 493          * has a cpu_id of "the older entry + 1", so the highest cpu_id will 
 494          * be the list head + 1. 
 497         kd_ctrl_page
.kdebug_cpus 
= kd_ctrl_page
.kdebug_iops 
? kd_ctrl_page
.kdebug_iops
->cpu_id 
+ 1 : kdbg_cpu_count(early_trace
); 
 499         if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&kdbip
, sizeof(struct kd_bufinfo
) * kd_ctrl_page
.kdebug_cpus
) != KERN_SUCCESS
) { 
 504         if (nkdbufs 
< (kd_ctrl_page
.kdebug_cpus 
* EVENTS_PER_STORAGE_UNIT 
* MIN_STORAGE_UNITS_PER_CPU
)) 
 505                 n_storage_units 
= kd_ctrl_page
.kdebug_cpus 
* MIN_STORAGE_UNITS_PER_CPU
; 
 507                 n_storage_units 
= nkdbufs 
/ EVENTS_PER_STORAGE_UNIT
; 
 509         nkdbufs 
= n_storage_units 
* EVENTS_PER_STORAGE_UNIT
; 
 511         f_buffers 
= n_storage_units 
/ N_STORAGE_UNITS_PER_BUFFER
; 
 512         n_storage_buffers 
= f_buffers
; 
 514         f_buffer_size 
= N_STORAGE_UNITS_PER_BUFFER 
* sizeof(struct kd_storage
); 
 515         p_buffer_size 
= (n_storage_units 
% N_STORAGE_UNITS_PER_BUFFER
) * sizeof(struct kd_storage
); 
 522         if (kdcopybuf 
== 0) { 
 523                 if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&kdcopybuf
, (vm_size_t
)KDCOPYBUF_SIZE
) != KERN_SUCCESS
) { 
 528         if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&kd_bufs
, (vm_size_t
)(n_storage_buffers 
* sizeof(struct kd_storage_buffers
))) != KERN_SUCCESS
) { 
 532         bzero(kd_bufs
, n_storage_buffers 
* sizeof(struct kd_storage_buffers
)); 
 534         for (i 
= 0; i 
< f_buffers
; i
++) { 
 535                 if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&kd_bufs
[i
].kdsb_addr
, (vm_size_t
)f_buffer_size
) != KERN_SUCCESS
) { 
 539                 bzero(kd_bufs
[i
].kdsb_addr
, f_buffer_size
); 
 541                 kd_bufs
[i
].kdsb_size 
= f_buffer_size
; 
 544                 if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&kd_bufs
[i
].kdsb_addr
, (vm_size_t
)p_buffer_size
) != KERN_SUCCESS
) { 
 548                 bzero(kd_bufs
[i
].kdsb_addr
, p_buffer_size
); 
 550                 kd_bufs
[i
].kdsb_size 
= p_buffer_size
; 
 554         for (i 
= 0; i 
< n_storage_buffers
; i
++) { 
 555                 struct kd_storage 
*kds
; 
 559                 n_elements 
= kd_bufs
[i
].kdsb_size 
/ sizeof(struct kd_storage
); 
 560                 kds 
= kd_bufs
[i
].kdsb_addr
; 
 562                 for (n 
= 0; n 
< n_elements
; n
++) { 
 563                         kds
[n
].kds_next
.buffer_index 
= kd_ctrl_page
.kds_free_list
.buffer_index
; 
 564                         kds
[n
].kds_next
.offset 
= kd_ctrl_page
.kds_free_list
.offset
; 
 566                         kd_ctrl_page
.kds_free_list
.buffer_index 
= i
; 
 567                         kd_ctrl_page
.kds_free_list
.offset 
= n
; 
 569                 n_storage_units 
+= n_elements
; 
 572         bzero((char *)kdbip
, sizeof(struct kd_bufinfo
) * kd_ctrl_page
.kdebug_cpus
); 
 574         for (i 
= 0; i 
< (int)kd_ctrl_page
.kdebug_cpus
; i
++) { 
 575                 kdbip
[i
].kd_list_head
.raw 
= KDS_PTR_NULL
; 
 576                 kdbip
[i
].kd_list_tail
.raw 
= KDS_PTR_NULL
; 
 577                 kdbip
[i
].kd_lostevents 
= FALSE
; 
 578                 kdbip
[i
].num_bufs 
= 0; 
 581         kd_ctrl_page
.kdebug_flags 
|= KDBG_BUFINIT
; 
 583         kd_ctrl_page
.kds_inuse_count 
= 0; 
 584         n_storage_threshold 
= n_storage_units 
/ 2; 
 598                 for (i 
= 0; i 
< n_storage_buffers
; i
++) { 
 599                         if (kd_bufs
[i
].kdsb_addr
) { 
 600                                 kmem_free(kernel_map
, (vm_offset_t
)kd_bufs
[i
].kdsb_addr
, (vm_size_t
)kd_bufs
[i
].kdsb_size
); 
 603                 kmem_free(kernel_map
, (vm_offset_t
)kd_bufs
, (vm_size_t
)(n_storage_buffers 
* sizeof(struct kd_storage_buffers
))); 
 606                 n_storage_buffers 
= 0; 
 609                 kmem_free(kernel_map
, (vm_offset_t
)kdcopybuf
, KDCOPYBUF_SIZE
); 
 613         kd_ctrl_page
.kds_free_list
.raw 
= KDS_PTR_NULL
; 
 616                 kmem_free(kernel_map
, (vm_offset_t
)kdbip
, sizeof(struct kd_bufinfo
) * kd_ctrl_page
.kdebug_cpus
); 
 620         kd_ctrl_page
.kdebug_iops 
= NULL
; 
 621         kd_ctrl_page
.kdebug_cpus 
= 0; 
 622         kd_ctrl_page
.kdebug_flags 
&= ~KDBG_BUFINIT
; 
 626 release_storage_unit(int cpu
, uint32_t kdsp_raw
) 
 629         struct  kd_storage 
*kdsp_actual
; 
 630         struct kd_bufinfo 
*kdbp
; 
 635         s 
= ml_set_interrupts_enabled(FALSE
); 
 636         lck_spin_lock(kds_spin_lock
); 
 640         if (kdsp
.raw 
== kdbp
->kd_list_head
.raw
) { 
 642                  * it's possible for the storage unit pointed to 
 643                  * by kdsp to have already been stolen... so 
 644                  * check to see if it's still the head of the list 
 645                  * now that we're behind the lock that protects  
 646                  * adding and removing from the queue... 
 647                  * since we only ever release and steal units from 
 648                  * that position, if it's no longer the head 
 649                  * we having nothing to do in this context 
 651                 kdsp_actual 
= POINTER_FROM_KDS_PTR(kdsp
); 
 652                 kdbp
->kd_list_head 
= kdsp_actual
->kds_next
; 
 654                 kdsp_actual
->kds_next 
= kd_ctrl_page
.kds_free_list
; 
 655                 kd_ctrl_page
.kds_free_list 
= kdsp
; 
 657                 kd_ctrl_page
.kds_inuse_count
--; 
 659         lck_spin_unlock(kds_spin_lock
); 
 660         ml_set_interrupts_enabled(s
); 
 665 allocate_storage_unit(int cpu
) 
 668         struct  kd_storage 
*kdsp_actual
, *kdsp_next_actual
; 
 669         struct  kd_bufinfo 
*kdbp
, *kdbp_vict
, *kdbp_try
; 
 670         uint64_t        oldest_ts
, ts
; 
 671         boolean_t       retval 
= TRUE
; 
 674         s 
= ml_set_interrupts_enabled(FALSE
); 
 675         lck_spin_lock(kds_spin_lock
); 
 679         /* If someone beat us to the allocate, return success */ 
 680         if (kdbp
->kd_list_tail
.raw 
!= KDS_PTR_NULL
) { 
 681                 kdsp_actual 
= POINTER_FROM_KDS_PTR(kdbp
->kd_list_tail
); 
 683                 if (kdsp_actual
->kds_bufindx 
< EVENTS_PER_STORAGE_UNIT
) 
 687         if ((kdsp 
= kd_ctrl_page
.kds_free_list
).raw 
!= KDS_PTR_NULL
) { 
 688                 kdsp_actual 
= POINTER_FROM_KDS_PTR(kdsp
); 
 689                 kd_ctrl_page
.kds_free_list 
= kdsp_actual
->kds_next
; 
 691                 kd_ctrl_page
.kds_inuse_count
++; 
 693                 if (kd_ctrl_page
.kdebug_flags 
& KDBG_NOWRAP
) { 
 694                         kd_ctrl_page
.kdebug_slowcheck 
|= SLOW_NOLOG
; 
 695                         kdbp
->kd_lostevents 
= TRUE
; 
 700                 oldest_ts 
= (uint64_t)-1; 
 702                 for (kdbp_try 
= &kdbip
[0]; kdbp_try 
< &kdbip
[kd_ctrl_page
.kdebug_cpus
]; kdbp_try
++) { 
 704                         if (kdbp_try
->kd_list_head
.raw 
== KDS_PTR_NULL
) { 
 706                                  * no storage unit to steal 
 711                         kdsp_actual 
= POINTER_FROM_KDS_PTR(kdbp_try
->kd_list_head
); 
 713                         if (kdsp_actual
->kds_bufcnt 
< EVENTS_PER_STORAGE_UNIT
) { 
 715                                  * make sure we don't steal the storage unit 
 716                                  * being actively recorded to...  need to 
 717                                  * move on because we don't want an out-of-order 
 718                                  * set of events showing up later 
 722                         ts 
= kdbg_get_timestamp(&kdsp_actual
->kds_records
[0]); 
 724                         if (ts 
< oldest_ts
) { 
 726                                  * when 'wrapping', we want to steal the 
 727                                  * storage unit that has the 'earliest' time 
 728                                  * associated with it (first event time) 
 731                                 kdbp_vict 
= kdbp_try
; 
 734                 if (kdbp_vict 
== NULL
) { 
 736                         kd_ctrl_page
.enabled 
= 0; 
 740                 kdsp 
= kdbp_vict
->kd_list_head
; 
 741                 kdsp_actual 
= POINTER_FROM_KDS_PTR(kdsp
); 
 742                 kdbp_vict
->kd_list_head 
= kdsp_actual
->kds_next
; 
 744                 if (kdbp_vict
->kd_list_head
.raw 
!= KDS_PTR_NULL
) { 
 745                         kdsp_next_actual 
= POINTER_FROM_KDS_PTR(kdbp_vict
->kd_list_head
); 
 746                         kdsp_next_actual
->kds_lostevents 
= TRUE
; 
 748                         kdbp_vict
->kd_lostevents 
= TRUE
; 
 750                 kd_ctrl_page
.kdebug_flags 
|= KDBG_WRAPPED
; 
 752         kdsp_actual
->kds_timestamp 
= mach_absolute_time(); 
 753         kdsp_actual
->kds_next
.raw 
= KDS_PTR_NULL
; 
 754         kdsp_actual
->kds_bufcnt   
= 0; 
 755         kdsp_actual
->kds_readlast 
= 0; 
 757         kdsp_actual
->kds_lostevents 
= kdbp
->kd_lostevents
; 
 758         kdbp
->kd_lostevents 
= FALSE
; 
 759         kdsp_actual
->kds_bufindx  
= 0; 
 761         if (kdbp
->kd_list_head
.raw 
== KDS_PTR_NULL
) 
 762                 kdbp
->kd_list_head 
= kdsp
; 
 764                 POINTER_FROM_KDS_PTR(kdbp
->kd_list_tail
)->kds_next 
= kdsp
; 
 765         kdbp
->kd_list_tail 
= kdsp
; 
 767         lck_spin_unlock(kds_spin_lock
); 
 768         ml_set_interrupts_enabled(s
); 
 774 kernel_debug_register_callback(kd_callback_t callback
) 
 777         if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&iop
, sizeof(kd_iop_t
)) == KERN_SUCCESS
) { 
 778                 memcpy(&iop
->callback
, &callback
, sizeof(kd_callback_t
)); 
 781                  * <rdar://problem/13351477> Some IOP clients are not providing a name. 
 786                         boolean_t is_valid_name 
= FALSE
; 
 787                         for (uint32_t length
=0; length
<sizeof(callback
.iop_name
); ++length
) { 
 788                                 /* This is roughly isprintable(c) */ 
 789                                 if (callback
.iop_name
[length
] > 0x20 && callback
.iop_name
[length
] < 0x7F) 
 791                                 if (callback
.iop_name
[length
] == 0) { 
 793                                                 is_valid_name 
= TRUE
; 
 798                         if (!is_valid_name
) { 
 799                                 strlcpy(iop
->callback
.iop_name
, "IOP-???", sizeof(iop
->callback
.iop_name
)); 
 803                 iop
->last_timestamp 
= 0; 
 807                          * We use two pieces of state, the old list head 
 808                          * pointer, and the value of old_list_head->cpu_id. 
 809                          * If we read kd_iops more than once, it can change 
 812                          * TLDR; Must not read kd_iops more than once per loop. 
 815                         iop
->cpu_id 
= iop
->next 
? (iop
->next
->cpu_id
+1) : kdbg_cpu_count(FALSE
); 
 818                          * Header says OSCompareAndSwapPtr has a memory barrier 
 820                 } while (!OSCompareAndSwapPtr(iop
->next
, iop
, (void* volatile*)&kd_iops
)); 
 842         struct kd_bufinfo 
*kdbp
; 
 843         struct kd_storage 
*kdsp_actual
; 
 844         union  kds_ptr kds_raw
; 
 846         if (kd_ctrl_page
.kdebug_slowcheck
) { 
 848                 if ( (kd_ctrl_page
.kdebug_slowcheck 
& SLOW_NOLOG
) || !(kdebug_enable 
& (KDEBUG_ENABLE_TRACE
|KDEBUG_ENABLE_PPT
))) 
 851                 if (kd_ctrl_page
.kdebug_flags 
& KDBG_TYPEFILTER_CHECK
) { 
 852                         if (isset(type_filter_bitmap
, EXTRACT_CSC(debugid
)))  
 856                 else if (kd_ctrl_page
.kdebug_flags 
& KDBG_RANGECHECK
) { 
 857                         if (debugid 
>= kdlog_beg 
&& debugid 
<= kdlog_end
) 
 861                 else if (kd_ctrl_page
.kdebug_flags 
& KDBG_VALCHECK
) { 
 862                         if ((debugid 
& DBG_FUNC_MASK
) != kdlog_value1 
&& 
 863                                 (debugid 
& DBG_FUNC_MASK
) != kdlog_value2 
&& 
 864                                 (debugid 
& DBG_FUNC_MASK
) != kdlog_value3 
&& 
 865                                 (debugid 
& DBG_FUNC_MASK
) != kdlog_value4
) 
 872         disable_preemption(); 
 874         if (kd_ctrl_page
.enabled 
== 0) 
 877         kdbp 
= &kdbip
[coreid
]; 
 878         timestamp 
&= KDBG_TIMESTAMP_MASK
; 
 881         kds_raw 
= kdbp
->kd_list_tail
; 
 883         if (kds_raw
.raw 
!= KDS_PTR_NULL
) { 
 884                 kdsp_actual 
= POINTER_FROM_KDS_PTR(kds_raw
); 
 885                 bindx 
= kdsp_actual
->kds_bufindx
; 
 889         if (kdsp_actual 
== NULL 
|| bindx 
>= EVENTS_PER_STORAGE_UNIT
) { 
 890                 if (allocate_storage_unit(coreid
) == FALSE
) { 
 892                          * this can only happen if wrapping 
 899         if ( !OSCompareAndSwap(bindx
, bindx 
+ 1, &kdsp_actual
->kds_bufindx
)) 
 902         // IOP entries can be allocated before xnu allocates and inits the buffer 
 903         if (timestamp 
< kdsp_actual
->kds_timestamp
) 
 904                 kdsp_actual
->kds_timestamp 
= timestamp
; 
 906         kd 
= &kdsp_actual
->kds_records
[bindx
]; 
 908         kd
->debugid 
= debugid
; 
 915         kdbg_set_timestamp_and_cpu(kd
, timestamp
, coreid
); 
 917         OSAddAtomic(1, &kdsp_actual
->kds_bufcnt
); 
 921         if ((kds_waiter 
&& kd_ctrl_page
.kds_inuse_count 
>= n_storage_threshold
)) { 
 922                 boolean_t need_kds_wakeup 
= FALSE
; 
 926                  * try to take the lock here to synchronize with the 
 927                  * waiter entering the blocked state... use the try 
 928                  * mode to prevent deadlocks caused by re-entering this 
 929                  * routine due to various trace points triggered in the 
 930                  * lck_spin_sleep_xxxx routines used to actually enter 
 931                  * our wait condition... no problem if we fail, 
 932                  * there will be lots of additional events coming in that 
 933                  * will eventually succeed in grabbing this lock 
 935                 s 
= ml_set_interrupts_enabled(FALSE
); 
 937                 if (lck_spin_try_lock(kdw_spin_lock
)) { 
 939                         if (kds_waiter 
&& kd_ctrl_page
.kds_inuse_count 
>= n_storage_threshold
) { 
 941                                 need_kds_wakeup 
= TRUE
; 
 943                         lck_spin_unlock(kdw_spin_lock
); 
 945                         ml_set_interrupts_enabled(s
); 
 947                         if (need_kds_wakeup 
== TRUE
) 
 956 kernel_debug_internal( 
 964 __attribute__((always_inline
)) void 
 965 kernel_debug_internal( 
 973         struct proc     
*curproc
; 
 979         struct kd_bufinfo 
*kdbp
; 
 980         struct kd_storage 
*kdsp_actual
; 
 981         union  kds_ptr kds_raw
; 
 985         if (kd_ctrl_page
.kdebug_slowcheck
) { 
 987                 if (kdebug_enable 
& KDEBUG_ENABLE_CHUD
) { 
 988                         kd_chudhook_fn chudhook
; 
 990                          * Mask interrupts to minimize the interval across 
 991                          * which the driver providing the hook could be 
 994                         s 
= ml_set_interrupts_enabled(FALSE
); 
 995                         chudhook 
= kdebug_chudhook
; 
 997                                 chudhook(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
); 
 998                         ml_set_interrupts_enabled(s
); 
1000                 if ( (kd_ctrl_page
.kdebug_slowcheck 
& SLOW_NOLOG
) || !(kdebug_enable 
& (KDEBUG_ENABLE_TRACE
|KDEBUG_ENABLE_PPT
))) 
1003                 if ( !ml_at_interrupt_context()) { 
1004                         if (kd_ctrl_page
.kdebug_flags 
& KDBG_PIDCHECK
) { 
1006                                  * If kdebug flag is not set for current proc, return 
1008                                 curproc 
= current_proc(); 
1010                                 if ((curproc 
&& !(curproc
->p_kdebug
)) && 
1011                                     ((debugid 
& 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)) && 
1012                                       (debugid 
>> 24 != DBG_TRACE
)) 
1015                         else if (kd_ctrl_page
.kdebug_flags 
& KDBG_PIDEXCLUDE
) { 
1017                                  * If kdebug flag is set for current proc, return 
1019                                 curproc 
= current_proc(); 
1021                                 if ((curproc 
&& curproc
->p_kdebug
) && 
1022                                     ((debugid 
& 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)) && 
1023                                       (debugid 
>> 24 != DBG_TRACE
)) 
1028                 if (kd_ctrl_page
.kdebug_flags 
& KDBG_TYPEFILTER_CHECK
) { 
1029                         /* Always record trace system info */ 
1030                         if (EXTRACT_CLASS(debugid
) == DBG_TRACE
) 
1033                         if (isset(type_filter_bitmap
, EXTRACT_CSC(debugid
)))  
1037                 else if (kd_ctrl_page
.kdebug_flags 
& KDBG_RANGECHECK
) { 
1038                         /* Always record trace system info */ 
1039                         if (EXTRACT_CLASS(debugid
) == DBG_TRACE
) 
1042                         if (debugid 
< kdlog_beg 
|| debugid 
> kdlog_end
) 
1045                 else if (kd_ctrl_page
.kdebug_flags 
& KDBG_VALCHECK
) { 
1046                         /* Always record trace system info */ 
1047                         if (EXTRACT_CLASS(debugid
) == DBG_TRACE
) 
1050                         if ((debugid 
& DBG_FUNC_MASK
) != kdlog_value1 
&& 
1051                             (debugid 
& DBG_FUNC_MASK
) != kdlog_value2 
&& 
1052                             (debugid 
& DBG_FUNC_MASK
) != kdlog_value3 
&& 
1053                             (debugid 
& DBG_FUNC_MASK
) != kdlog_value4
) 
1058         disable_preemption(); 
1060         if (kd_ctrl_page
.enabled 
== 0) 
1066         kds_raw 
= kdbp
->kd_list_tail
; 
1068         if (kds_raw
.raw 
!= KDS_PTR_NULL
) { 
1069                 kdsp_actual 
= POINTER_FROM_KDS_PTR(kds_raw
); 
1070                 bindx 
= kdsp_actual
->kds_bufindx
; 
1074         if (kdsp_actual 
== NULL 
|| bindx 
>= EVENTS_PER_STORAGE_UNIT
) { 
1075                 if (allocate_storage_unit(cpu
) == FALSE
) { 
1077                          * this can only happen if wrapping 
1084         now 
= mach_absolute_time() & KDBG_TIMESTAMP_MASK
; 
1086         if ( !OSCompareAndSwap(bindx
, bindx 
+ 1, &kdsp_actual
->kds_bufindx
)) 
1089         kd 
= &kdsp_actual
->kds_records
[bindx
]; 
1091         kd
->debugid 
= debugid
; 
1098         kdbg_set_timestamp_and_cpu(kd
, now
, cpu
); 
1100         OSAddAtomic(1, &kdsp_actual
->kds_bufcnt
); 
1102         enable_preemption(); 
1104         if (kds_waiter 
&& kd_ctrl_page
.kds_inuse_count 
>= n_storage_threshold
) { 
1108                 etype 
= debugid 
& DBG_FUNC_MASK
; 
1109                 stype 
= debugid 
& DBG_SCALL_MASK
; 
1111                 if (etype 
== INTERRUPT 
|| etype 
== MACH_vmfault 
|| 
1112                     stype 
== BSC_SysCall 
|| stype 
== MACH_SysCall
) { 
1114                         boolean_t need_kds_wakeup 
= FALSE
; 
1117                          * try to take the lock here to synchronize with the 
1118                          * waiter entering the blocked state... use the try 
1119                          * mode to prevent deadlocks caused by re-entering this 
1120                          * routine due to various trace points triggered in the 
1121                          * lck_spin_sleep_xxxx routines used to actually enter 
1122                          * one of our 2 wait conditions... no problem if we fail, 
1123                          * there will be lots of additional events coming in that 
1124                          * will eventually succeed in grabbing this lock 
1126                         s 
= ml_set_interrupts_enabled(FALSE
); 
1128                         if (lck_spin_try_lock(kdw_spin_lock
)) { 
1130                                 if (kds_waiter 
&& kd_ctrl_page
.kds_inuse_count 
>= n_storage_threshold
) { 
1132                                         need_kds_wakeup 
= TRUE
; 
1134                                 lck_spin_unlock(kdw_spin_lock
); 
1136                         ml_set_interrupts_enabled(s
); 
1138                         if (need_kds_wakeup 
== TRUE
) 
1139                                 wakeup(&kds_waiter
); 
1151         __unused 
uintptr_t arg5
) 
1153         kernel_debug_internal(debugid
, arg1
, arg2
, arg3
, arg4
, (uintptr_t)thread_tid(current_thread())); 
1165         kernel_debug_internal(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
); 
1169 kernel_debug_string(const char *message
) 
1171         uintptr_t arg
[4] = {0, 0, 0, 0}; 
1173         /* Stuff the message string in the args and log it. */ 
1174         strncpy((char *)arg
, message
, MIN(sizeof(arg
), strlen(message
))); 
1176                 (TRACEDBG_CODE(DBG_TRACE_INFO
, 4)) | DBG_FUNC_NONE
, 
1177                 arg
[0], arg
[1], arg
[2], arg
[3]); 
1180 extern int      master_cpu
;             /* MACH_KERNEL_PRIVATE */ 
1182  * Used prior to start_kern_tracing() being called. 
1183  * Log temporarily into a static buffer. 
1193         /* If tracing is already initialized, use it */ 
1195                 KERNEL_DEBUG_CONSTANT(debugid
, arg1
, arg2
, arg3
, arg4
, 0); 
1197         /* Do nothing if the buffer is full or we're not on the boot cpu */  
1198         kd_early_overflow 
= kd_early_index 
>= KD_EARLY_BUFFER_MAX
; 
1199         if (kd_early_overflow 
|| 
1200             cpu_number() != master_cpu
) 
1203         kd_early_buffer
[kd_early_index
].debugid 
= debugid
; 
1204         kd_early_buffer
[kd_early_index
].timestamp 
= mach_absolute_time(); 
1205         kd_early_buffer
[kd_early_index
].arg1 
= arg1
; 
1206         kd_early_buffer
[kd_early_index
].arg2 
= arg2
; 
1207         kd_early_buffer
[kd_early_index
].arg3 
= arg3
; 
1208         kd_early_buffer
[kd_early_index
].arg4 
= arg4
; 
1209         kd_early_buffer
[kd_early_index
].arg5 
= 0; 
1214  * Transfer the contents of the temporary buffer into the trace buffers. 
1215  * Precede that by logging the rebase time (offset) - the TSC-based time (in ns) 
1216  * when mach_absolute_time is set to 0. 
1219 kernel_debug_early_end(void) 
1223         if (cpu_number() != master_cpu
) 
1224                 panic("kernel_debug_early_end() not call on boot processor"); 
1226         /* Fake sentinel marking the start of kernel time relative to TSC */ 
1229                 (TRACEDBG_CODE(DBG_TRACE_INFO
, 1)) | DBG_FUNC_NONE
, 
1231                 (uint32_t)(tsc_rebase_abs_time 
>> 32), 
1232                 (uint32_t)tsc_rebase_abs_time
, 
1236         for (i 
= 0; i 
< kd_early_index
; i
++) { 
1239                         kd_early_buffer
[i
].debugid
, 
1240                         kd_early_buffer
[i
].timestamp
, 
1241                         kd_early_buffer
[i
].arg1
, 
1242                         kd_early_buffer
[i
].arg2
, 
1243                         kd_early_buffer
[i
].arg3
, 
1244                         kd_early_buffer
[i
].arg4
, 
1248         /* Cut events-lost event on overflow */ 
1249         if (kd_early_overflow
) 
1250                 KERNEL_DEBUG_CONSTANT( 
1251                         TRACEDBG_CODE(DBG_TRACE_INFO
, 2), 0, 0, 0, 0, 0); 
1253         /* This trace marks the start of kernel tracing */ 
1254         kernel_debug_string("early trace done"); 
1258  * Support syscall SYS_kdebug_trace 
1261 kdebug_trace(__unused 
struct proc 
*p
, struct kdebug_trace_args 
*uap
, __unused 
int32_t *retval
) 
1263         if ( __probable(kdebug_enable 
== 0) ) 
1266         kernel_debug_internal(uap
->code
, uap
->arg1
, uap
->arg2
, uap
->arg3
, uap
->arg4
, (uintptr_t)thread_tid(current_thread())); 
1273 kdbg_lock_init(void) 
1275         if (kd_ctrl_page
.kdebug_flags 
& KDBG_LOCKINIT
) 
1279          * allocate lock group attribute and group 
1281         kd_trace_mtx_sysctl_grp_attr 
= lck_grp_attr_alloc_init(); 
1282         kd_trace_mtx_sysctl_grp 
= lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr
); 
1285          * allocate the lock attribute 
1287         kd_trace_mtx_sysctl_attr 
= lck_attr_alloc_init(); 
1291          * allocate and initialize mutex's 
1293         kd_trace_mtx_sysctl 
= lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp
, kd_trace_mtx_sysctl_attr
); 
1294         kds_spin_lock 
= lck_spin_alloc_init(kd_trace_mtx_sysctl_grp
, kd_trace_mtx_sysctl_attr
); 
1295         kdw_spin_lock 
= lck_spin_alloc_init(kd_trace_mtx_sysctl_grp
, kd_trace_mtx_sysctl_attr
); 
1297         kd_ctrl_page
.kdebug_flags 
|= KDBG_LOCKINIT
; 
1302 kdbg_bootstrap(boolean_t early_trace
) 
1304         kd_ctrl_page
.kdebug_flags 
&= ~KDBG_WRAPPED
; 
1306         return (create_buffers(early_trace
)); 
1310 kdbg_reinit(boolean_t early_trace
) 
1315          * Disable trace collecting 
1316          * First make sure we're not in 
1317          * the middle of cutting a trace 
1319         kdbg_set_tracing_enabled(FALSE
, KDEBUG_ENABLE_TRACE
); 
1322          * make sure the SLOW_NOLOG is seen 
1323          * by everyone that might be trying 
1330         if ((kd_ctrl_page
.kdebug_flags 
& KDBG_MAPINIT
) && kd_mapsize 
&& kd_mapptr
) { 
1331                 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
); 
1332                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_MAPINIT
; 
1334                 kd_mapptr 
= (kd_threadmap 
*) 0; 
1337         ret 
= kdbg_bootstrap(early_trace
); 
1339         RAW_file_offset 
= 0; 
1340         RAW_file_written 
= 0; 
1346 kdbg_trace_data(struct proc 
*proc
, long *arg_pid
) 
1351                 *arg_pid 
= proc
->p_pid
; 
1356 kdbg_trace_string(struct proc 
*proc
, long *arg1
, long *arg2
, long *arg3
, long *arg4
) 
1370          * Collect the pathname for tracing 
1372         dbg_nameptr 
= proc
->p_comm
; 
1373         dbg_namelen 
= (int)strlen(proc
->p_comm
); 
1379         if(dbg_namelen 
> (int)sizeof(dbg_parms
)) 
1380                 dbg_namelen 
= (int)sizeof(dbg_parms
); 
1382         strncpy((char *)dbg_parms
, dbg_nameptr
, dbg_namelen
); 
1391 kdbg_resolve_map(thread_t th_act
, void *opaque
) 
1393         kd_threadmap 
*mapptr
; 
1394         krt_t 
*t 
= (krt_t 
*)opaque
; 
1396         if (t
->count 
< t
->maxcount
) { 
1397                 mapptr 
= &t
->map
[t
->count
]; 
1398                 mapptr
->thread  
= (uintptr_t)thread_tid(th_act
); 
1400                 (void) strlcpy (mapptr
->command
, t
->atts
->task_comm
, 
1401                                 sizeof(t
->atts
->task_comm
)); 
1403                  * Some kernel threads have no associated pid. 
1404                  * We still need to mark the entry as valid. 
1407                         mapptr
->valid 
= t
->atts
->pid
; 
1417  * Writes a cpumap for the given iops_list/cpu_count to the provided buffer. 
1419  * You may provide a buffer and size, or if you set the buffer to NULL, a 
1420  * buffer of sufficient size will be allocated. 
1422  * If you provide a buffer and it is too small, sets cpumap_size to the number 
1423  * of bytes required and returns EINVAL. 
1425  * On success, if you provided a buffer, cpumap_size is set to the number of 
1426  * bytes written. If you did not provide a buffer, cpumap is set to the newly 
1427  * allocated buffer and cpumap_size is set to the number of bytes allocated. 
1429  * NOTE: It may seem redundant to pass both iops and a cpu_count. 
1431  * We may be reporting data from "now", or from the "past". 
1433  * The "now" data would be for something like kdbg_readcurcpumap(). 
1434  * The "past" data would be for kdbg_readcpumap(). 
1436  * If we do not pass both iops and cpu_count, and iops is NULL, this function 
1437  * will need to read "now" state to get the number of cpus, which would be in 
1438  * error if we were reporting "past" state. 
1442 kdbg_cpumap_init_internal(kd_iop_t
* iops
, uint32_t cpu_count
, uint8_t** cpumap
, uint32_t* cpumap_size
) 
1445         assert(cpumap_size
); 
1447         assert(!iops 
|| iops
->cpu_id 
+ 1 == cpu_count
); 
1449         uint32_t bytes_needed 
= sizeof(kd_cpumap_header
) + cpu_count 
* sizeof(kd_cpumap
); 
1450         uint32_t bytes_available 
= *cpumap_size
; 
1451         *cpumap_size 
= bytes_needed
; 
1453         if (*cpumap 
== NULL
) { 
1454                 if (kmem_alloc(kernel_map
, (vm_offset_t
*)cpumap
, (vm_size_t
)*cpumap_size
) != KERN_SUCCESS
) { 
1457         } else if (bytes_available 
< bytes_needed
) { 
1461         kd_cpumap_header
* header 
= (kd_cpumap_header
*)(uintptr_t)*cpumap
; 
1463         header
->version_no 
= RAW_VERSION1
; 
1464         header
->cpu_count 
= cpu_count
; 
1466         kd_cpumap
* cpus 
= (kd_cpumap
*)&header
[1]; 
1468         int32_t index 
= cpu_count 
- 1; 
1470                 cpus
[index
].cpu_id 
= iops
->cpu_id
; 
1471                 cpus
[index
].flags 
= KDBG_CPUMAP_IS_IOP
; 
1472                 bzero(cpus
[index
].name
, sizeof(cpus
->name
)); 
1473                 strlcpy(cpus
[index
].name
, iops
->callback
.iop_name
, sizeof(cpus
->name
)); 
1479         while (index 
>= 0) { 
1480                 cpus
[index
].cpu_id 
= index
; 
1481                 cpus
[index
].flags 
= 0; 
1482                 bzero(cpus
[index
].name
, sizeof(cpus
->name
)); 
1483                 strlcpy(cpus
[index
].name
, "AP", sizeof(cpus
->name
)); 
1488         return KERN_SUCCESS
; 
1492 kdbg_thrmap_init(void) 
1494         if (kd_ctrl_page
.kdebug_flags 
& KDBG_MAPINIT
) 
1497         kd_mapptr 
= kdbg_thrmap_init_internal(0, &kd_mapsize
, &kd_mapcount
); 
1500                 kd_ctrl_page
.kdebug_flags 
|= KDBG_MAPINIT
; 
1504 kd_threadmap
* kdbg_thrmap_init_internal(unsigned int count
, unsigned int *mapsize
, unsigned int *mapcount
) 
1506         kd_threadmap    
*mapptr
; 
1509         int             tts_count
;    /* number of task-to-string structures */ 
1510         struct tts      
*tts_mapptr
; 
1511         unsigned int    tts_mapsize 
= 0; 
1516          * need to use PROC_SCANPROCLIST with proc_iterate 
1521          * Calculate the sizes of map buffers 
1523         for (p 
= allproc
.lh_first
, *mapcount
=0, tts_count
=0; p
; p 
= p
->p_list
.le_next
) { 
1524                 *mapcount 
+= get_task_numacts((task_t
)p
->task
); 
1530          * The proc count could change during buffer allocation, 
1531          * so introduce a small fudge factor to bump up the 
1532          * buffer sizes. This gives new tasks some chance of  
1533          * making into the tables.  Bump up by 25%. 
1535         *mapcount 
+= *mapcount
/4; 
1536         tts_count 
+= tts_count
/4; 
1538         *mapsize 
= *mapcount 
* sizeof(kd_threadmap
); 
1540         if (count 
&& count 
< *mapcount
) 
1543         if ((kmem_alloc(kernel_map
, &kaddr
, (vm_size_t
)*mapsize
) == KERN_SUCCESS
)) { 
1544                 bzero((void *)kaddr
, *mapsize
); 
1545                 mapptr 
= (kd_threadmap 
*)kaddr
; 
1549         tts_mapsize 
= tts_count 
* sizeof(struct tts
); 
1551         if ((kmem_alloc(kernel_map
, &kaddr
, (vm_size_t
)tts_mapsize
) == KERN_SUCCESS
)) { 
1552                 bzero((void *)kaddr
, tts_mapsize
); 
1553                 tts_mapptr 
= (struct tts 
*)kaddr
; 
1555                 kmem_free(kernel_map
, (vm_offset_t
)mapptr
, *mapsize
); 
1560          * We need to save the procs command string 
1561          * and take a reference for each task associated 
1562          * with a valid process 
1568          * should use proc_iterate 
1570         for (p 
= allproc
.lh_first
, i
=0; p 
&& i 
< tts_count
; p 
= p
->p_list
.le_next
) { 
1571                 if (p
->p_lflag 
& P_LEXIT
) 
1575                         task_reference(p
->task
); 
1576                         tts_mapptr
[i
].task 
= p
->task
; 
1577                         tts_mapptr
[i
].pid  
= p
->p_pid
; 
1578                         (void)strlcpy(tts_mapptr
[i
].task_comm
, p
->p_comm
, sizeof(tts_mapptr
[i
].task_comm
)); 
1587          * Initialize thread map data 
1591         akrt
.maxcount 
= *mapcount
; 
1593         for (i 
= 0; i 
< tts_count
; i
++) { 
1594                 akrt
.atts 
= &tts_mapptr
[i
]; 
1595                 task_act_iterate_wth_args(tts_mapptr
[i
].task
, kdbg_resolve_map
, &akrt
); 
1596                 task_deallocate((task_t
) tts_mapptr
[i
].task
); 
1598         kmem_free(kernel_map
, (vm_offset_t
)tts_mapptr
, tts_mapsize
); 
1600         *mapcount 
= akrt
.count
; 
1609          * Clean up the trace buffer 
1610          * First make sure we're not in 
1611          * the middle of cutting a trace 
1613         kdbg_set_tracing_enabled(FALSE
, KDEBUG_ENABLE_TRACE
); 
1616          * make sure the SLOW_NOLOG is seen 
1617          * by everyone that might be trying 
1622         global_state_pid 
= -1; 
1623         kd_ctrl_page
.kdebug_flags 
&= (unsigned int)~KDBG_CKTYPES
; 
1624         kd_ctrl_page
.kdebug_flags 
&= ~(KDBG_NOWRAP 
| KDBG_RANGECHECK 
| KDBG_VALCHECK
); 
1625         kd_ctrl_page
.kdebug_flags 
&= ~(KDBG_PIDCHECK 
| KDBG_PIDEXCLUDE
); 
1627         kdbg_disable_typefilter(); 
1632         /* Clean up the thread map buffer */ 
1633         kd_ctrl_page
.kdebug_flags 
&= ~KDBG_MAPINIT
; 
1635                 kmem_free(kernel_map
, (vm_offset_t
)kd_mapptr
, kd_mapsize
); 
1636                 kd_mapptr 
= (kd_threadmap 
*) 0; 
1641         RAW_file_offset 
= 0; 
1642         RAW_file_written 
= 0; 
1646 kdbg_setpid(kd_regtype 
*kdr
) 
1652         pid 
= (pid_t
)kdr
->value1
; 
1653         flag 
= (int)kdr
->value2
; 
1656                 if ((p 
= proc_find(pid
)) == NULL
) 
1661                                  * turn on pid check for this and all pids 
1663                                 kd_ctrl_page
.kdebug_flags 
|= KDBG_PIDCHECK
; 
1664                                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_PIDEXCLUDE
; 
1665                                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1670                                  * turn off pid check for this pid value 
1671                                  * Don't turn off all pid checking though 
1673                                  * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK; 
1686 /* This is for pid exclusion in the trace buffer */ 
1688 kdbg_setpidex(kd_regtype 
*kdr
) 
1694         pid 
= (pid_t
)kdr
->value1
; 
1695         flag 
= (int)kdr
->value2
; 
1698                 if ((p 
= proc_find(pid
)) == NULL
) 
1703                                  * turn on pid exclusion 
1705                                 kd_ctrl_page
.kdebug_flags 
|= KDBG_PIDEXCLUDE
; 
1706                                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_PIDCHECK
; 
1707                                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1713                                  * turn off pid exclusion for this pid value 
1714                                  * Don't turn off all pid exclusion though 
1716                                  * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE; 
1730  * This is for setting a maximum decrementer value 
1733 kdbg_setrtcdec(kd_regtype 
*kdr
) 
1738         decval 
= (natural_t
)kdr
->value1
; 
1740         if (decval 
&& decval 
< KDBG_MINRTCDEC
) 
1749 kdbg_enable_typefilter(void) 
1751         if (kd_ctrl_page
.kdebug_flags 
& KDBG_TYPEFILTER_CHECK
) { 
1752                 /* free the old filter */ 
1753                 kdbg_disable_typefilter(); 
1756         if (kmem_alloc(kernel_map
, (vm_offset_t 
*)&type_filter_bitmap
, KDBG_TYPEFILTER_BITMAP_SIZE
) != KERN_SUCCESS
) { 
1760         bzero(type_filter_bitmap
, KDBG_TYPEFILTER_BITMAP_SIZE
); 
1762         /* Turn off range and value checks */ 
1763         kd_ctrl_page
.kdebug_flags 
&= ~(KDBG_RANGECHECK 
| KDBG_VALCHECK
); 
1765         /* Enable filter checking */ 
1766         kd_ctrl_page
.kdebug_flags 
|= KDBG_TYPEFILTER_CHECK
; 
1767         kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1772 kdbg_disable_typefilter(void) 
1774         /*  Disable filter checking */   
1775         kd_ctrl_page
.kdebug_flags 
&= ~KDBG_TYPEFILTER_CHECK
; 
1777         /*  Turn off slow checks unless pid checks are using them */ 
1778         if ( (kd_ctrl_page
.kdebug_flags 
& (KDBG_PIDCHECK 
| KDBG_PIDEXCLUDE
)) ) 
1779                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1781                 kdbg_set_flags(SLOW_CHECKS
, 0, FALSE
); 
1783         if(type_filter_bitmap 
== NULL
) 
1786         vm_offset_t old_bitmap 
= (vm_offset_t
)type_filter_bitmap
; 
1787         type_filter_bitmap 
= NULL
; 
1789         kmem_free(kernel_map
, old_bitmap
, KDBG_TYPEFILTER_BITMAP_SIZE
); 
1794 kdbg_setreg(kd_regtype 
* kdr
) 
1797         unsigned int val_1
, val_2
, val
; 
1798         switch (kdr
->type
) { 
1800         case KDBG_CLASSTYPE 
: 
1801                 val_1 
= (kdr
->value1 
& 0xff); 
1802                 val_2 
= (kdr
->value2 
& 0xff); 
1803                 kdlog_beg 
= (val_1
<<24); 
1804                 kdlog_end 
= (val_2
<<24); 
1805                 kd_ctrl_page
.kdebug_flags 
&= (unsigned int)~KDBG_CKTYPES
; 
1806                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_VALCHECK
;       /* Turn off specific value check  */ 
1807                 kd_ctrl_page
.kdebug_flags 
|= (KDBG_RANGECHECK 
| KDBG_CLASSTYPE
); 
1808                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1810         case KDBG_SUBCLSTYPE 
: 
1811                 val_1 
= (kdr
->value1 
& 0xff); 
1812                 val_2 
= (kdr
->value2 
& 0xff); 
1814                 kdlog_beg 
= ((val_1
<<24) | (val_2 
<< 16)); 
1815                 kdlog_end 
= ((val_1
<<24) | (val 
<< 16)); 
1816                 kd_ctrl_page
.kdebug_flags 
&= (unsigned int)~KDBG_CKTYPES
; 
1817                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_VALCHECK
;       /* Turn off specific value check  */ 
1818                 kd_ctrl_page
.kdebug_flags 
|= (KDBG_RANGECHECK 
| KDBG_SUBCLSTYPE
); 
1819                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1821         case KDBG_RANGETYPE 
: 
1822                 kdlog_beg 
= (kdr
->value1
); 
1823                 kdlog_end 
= (kdr
->value2
); 
1824                 kd_ctrl_page
.kdebug_flags 
&= (unsigned int)~KDBG_CKTYPES
; 
1825                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_VALCHECK
;       /* Turn off specific value check  */ 
1826                 kd_ctrl_page
.kdebug_flags 
|= (KDBG_RANGECHECK 
| KDBG_RANGETYPE
); 
1827                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1830                 kdlog_value1 
= (kdr
->value1
); 
1831                 kdlog_value2 
= (kdr
->value2
); 
1832                 kdlog_value3 
= (kdr
->value3
); 
1833                 kdlog_value4 
= (kdr
->value4
); 
1834                 kd_ctrl_page
.kdebug_flags 
&= (unsigned int)~KDBG_CKTYPES
; 
1835                 kd_ctrl_page
.kdebug_flags 
&= ~KDBG_RANGECHECK
;    /* Turn off range check */ 
1836                 kd_ctrl_page
.kdebug_flags 
|= KDBG_VALCHECK
;       /* Turn on specific value check  */ 
1837                 kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1839         case KDBG_TYPENONE 
: 
1840                 kd_ctrl_page
.kdebug_flags 
&= (unsigned int)~KDBG_CKTYPES
; 
1842                 if ( (kd_ctrl_page
.kdebug_flags 
& (KDBG_RANGECHECK 
| KDBG_VALCHECK   
|  
1843                                                    KDBG_PIDCHECK   
| KDBG_PIDEXCLUDE 
|  
1844                                                    KDBG_TYPEFILTER_CHECK
)) ) 
1845                         kdbg_set_flags(SLOW_CHECKS
, 0, TRUE
); 
1847                         kdbg_set_flags(SLOW_CHECKS
, 0, FALSE
); 
int
kdbg_getreg(__unused kd_regtype * kdr)
{
	int ret = 0;
	unsigned int val_1, val_2, val;

	switch (kdr->type) {
	case KDBG_CLASSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = val_1 + 1;
		kdlog_beg = (val_1<<24);
		kdlog_end = (val_2<<24);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
		break;
	case KDBG_SUBCLSTYPE :
		val_1 = (kdr->value1 & 0xff);
		val_2 = (kdr->value2 & 0xff);
		val = val_2 + 1;
		kdlog_beg = ((val_1<<24) | (val_2 << 16));
		kdlog_end = ((val_1<<24) | (val << 16));
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
		break;
	case KDBG_RANGETYPE :
		kdlog_beg = (kdr->value1);
		kdlog_end = (kdr->value2);
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
		break;
	case KDBG_TYPENONE :
		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
		kdlog_beg = 0;
		kdlog_end = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	return(ret);
}
int
kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
{
	uint8_t* cpumap = NULL;
	uint32_t cpumap_size = 0;
	int ret = KERN_SUCCESS;

	if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
		if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
			if (user_cpumap) {
				size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
				if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
					ret = EFAULT;
				}
			}
			*user_cpumap_size = cpumap_size;
			kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
		} else
			ret = EINVAL;
	} else
		ret = EINVAL;

	return (ret);
}
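/*
 * Usage sketch (added for clarity, not part of the original source):
 * a caller can first pass a small (or zero-length) buffer to learn the
 * required size, since the routine always writes the full cpumap size
 * back through user_cpumap_size and only copies out as many bytes as
 * the caller provided room for; a second call with a buffer of at
 * least that size then receives the whole cpumap.
 */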
int
kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
{
	kd_threadmap *mapptr;
	unsigned int mapsize;
	unsigned int mapcount;
	unsigned int count = 0;
	int ret = 0;

	count = *bufsize/sizeof(kd_threadmap);

	if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
		if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
			ret = EFAULT;
		else
			*bufsize = (mapcount * sizeof(kd_threadmap));

		kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
	} else
		ret = EINVAL;

	return (ret);
}
int
kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	int avail = *number;
	int ret = 0;
	uint32_t count = 0;
	unsigned int mapsize;

	count = avail/sizeof (kd_threadmap);

	mapsize = kd_mapcount * sizeof(kd_threadmap);

	if (count && (count <= kd_mapcount))
	{
		if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
		{
			if (*number < mapsize)
				ret = EINVAL;
			else
			{
				if (vp)
				{
					RAW_header	header;
					clock_sec_t	secs;
					clock_usec_t	usecs;
					char		*pad_buf;
					uint32_t	pad_size;
					uint32_t	extra_thread_count = 0;
					uint32_t	cpumap_size;

					/*
					 * To write a RAW_VERSION1+ file, we
					 * must embed a cpumap in the "padding"
					 * used to page align the events following
					 * the threadmap. If the threadmap happens
					 * to not require enough padding, we
					 * artificially increase its footprint
					 * until it needs enough padding.
					 */

					pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
					cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);

					if (cpumap_size > pad_size) {
						/* Force an overflow onto the next page, we get a full page of padding */
						extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
					}

					header.version_no = RAW_VERSION1;
					header.thread_count = count + extra_thread_count;

					clock_get_calendar_microtime(&secs, &usecs);
					header.TOD_secs = secs;
					header.TOD_usecs = usecs;

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += sizeof(RAW_header);

					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
					if (ret)
						goto write_error;
					RAW_file_offset += mapsize;

					if (extra_thread_count) {
						pad_size = extra_thread_count * sizeof(kd_threadmap);
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);
						if (ret)
							goto write_error;

						RAW_file_offset += pad_size;
					}

					pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
					if (pad_size) {
						pad_buf = (char *)kalloc(pad_size);
						memset(pad_buf, 0, pad_size);

						/*
						 * embed a cpumap in the padding bytes.
						 * older code will skip this.
						 * newer code will know how to read it.
						 */
						uint32_t temp = pad_size;
						if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
							memset(pad_buf, 0, pad_size);
						}

						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
						kfree(pad_buf, pad_size);
						if (ret)
							goto write_error;

						RAW_file_offset += pad_size;
					}
					RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;
				}
				else
				{
					if (copyout(kd_mapptr, buffer, mapsize))
						ret = EINVAL;
				}
			}
		}
		else
			ret = EINVAL;
	}
	else
		ret = EINVAL;

	if (ret && vp)
	{
		count = 0;

		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
		RAW_file_offset += sizeof(uint32_t);
		RAW_file_written += sizeof(uint32_t);
	}
write_error:
	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
	{
		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
		kd_mapsize = 0;
		kd_mapptr = (kd_threadmap *) 0;
		kd_mapcount = 0;
	}
	return(ret);
}
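/*
 * Layout note (added for clarity, not part of the original source): a
 * RAW_VERSION1 file produced by the vnode path above is laid out as
 *
 *     RAW_header | threadmap entries (+ zero-filled extras) | padding
 *
 * where the padding brings the file to a page boundary so the events
 * that follow are page aligned, and those padding bytes carry an
 * embedded cpumap that newer readers decode and older readers skip.
 */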
unsigned int
kdbg_set_nkdbufs(unsigned int value)
{
	/*
	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
	 * 'value' is the desired number of trace entries
	 */
	unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);

	if (value <= max_entries)
		return (value);
	else
		return (max_entries);
}
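/*
 * Worked example (added for clarity, not part of the original source):
 * assuming a 64-byte kd_buf on a machine with 8 GB of usable memory,
 * max_entries = (8 GB / 2) / 64 = 67,108,864 trace entries; any larger
 * request is silently clamped to that value.
 */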
static int
kdbg_enable_bg_trace(void)
{
	int ret = 0;

	if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
		nkdbufs = bg_nkdbufs;
		ret = kdbg_reinit(FALSE);
		if (0 == ret) {
			kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
			kdlog_bg_trace_running = TRUE;
		}
	}
	return(ret);
}

static void
kdbg_disable_bg_trace(void)
{
	if (kdlog_bg_trace_running == TRUE) {
		kdlog_bg_trace_running = FALSE;
		kdbg_clear();
	}
}
/*
 * This function is provided for the CHUD toolkit only.
 *    int val:
 *        zero disables kdebug_chudhook function call
 *        non-zero enables kdebug_chudhook function call
 *    char *fn:
 *        address of the enabled kdebug_chudhook function
 */
void
kdbg_control_chud(int val, void *fn)
{
	kdbg_lock_init();

	if (val) {
		/* enable chudhook */
		kdebug_chudhook = fn;
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
	}
	else {
		/* disable chudhook */
		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
		kdebug_chudhook = 0;
	}
}
int
kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
{
	int ret = 0;
	size_t size = *sizep;
	unsigned int value = 0;
	kd_regtype kd_Reg;
	kbufinfo_t kd_bufinfo;
	pid_t curpid;
	proc_t p, curproc;

	if (name[0] == KERN_KDGETENTROPY ||
		name[0] == KERN_KDWRITETR ||
		name[0] == KERN_KDWRITEMAP ||
		name[0] == KERN_KDEFLAGS ||
		name[0] == KERN_KDDFLAGS ||
		name[0] == KERN_KDENABLE ||
		name[0] == KERN_KDENABLE_BG_TRACE ||
		name[0] == KERN_KDSETBUF) {

		if ( namelen < 2 )
			return(EINVAL);
		value = name[1];
	}

	kdbg_lock_init();

	if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
		return(ENOSPC);

	lck_mtx_lock(kd_trace_mtx_sysctl);

	switch(name[0]) {
		case KERN_KDGETBUF:
			/*
			 * Does not alter the global_state_pid
			 * This is a passive request.
			 */
			if (size < sizeof(kd_bufinfo.nkdbufs)) {
				/*
				 * There is not enough room to return even
				 * the first element of the info structure.
				 */
				ret = EINVAL;
				goto out;
			}
			kd_bufinfo.nkdbufs = nkdbufs;
			kd_bufinfo.nkdthreads = kd_mapcount;

			if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
				kd_bufinfo.nolog = 1;
			else
				kd_bufinfo.nolog = 0;

			kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
#if defined(__LP64__)
			kd_bufinfo.flags |= KDBG_LP64;
#endif
			kd_bufinfo.bufid = global_state_pid;

			if (size >= sizeof(kd_bufinfo)) {
				/*
				 * Provide all the info we have
				 */
				if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
					ret = EINVAL;
			} else {
				/*
				 * For backwards compatibility, only provide
				 * as much info as there is room for.
				 */
				if (copyout(&kd_bufinfo, where, size))
					ret = EINVAL;
			}
			goto out;
		case KERN_KDGETENTROPY: {
			/* Obsolescent - just fake with a random buffer */
			char	*buffer = (char *) kalloc(size);

			read_frandom((void *) buffer, size);
			ret = copyout(buffer, where, size);
			kfree(buffer, size);
			goto out;
		}

		case KERN_KDENABLE_BG_TRACE:
			bg_nkdbufs = kdbg_set_nkdbufs(value);
			kdlog_bg_trace = TRUE;
			ret = kdbg_enable_bg_trace();
			goto out;

		case KERN_KDDISABLE_BG_TRACE:
			kdlog_bg_trace = FALSE;
			kdbg_disable_bg_trace();
			goto out;
	}
	if ((curproc = current_proc()) != NULL)
		curpid = curproc->p_pid;
	else {
		ret = ESRCH;
		goto out;
	}
	if (global_state_pid == -1)
		global_state_pid = curpid;
	else if (global_state_pid != curpid) {
		if ((p = proc_find(global_state_pid)) == NULL) {
			/*
			 * The global pid no longer exists
			 */
			global_state_pid = curpid;
		} else {
			/*
			 * The global pid exists, deny this request
			 */
			proc_rele(p);

			ret = EBUSY;
			goto out;
		}
	}

	switch(name[0]) {
		case KERN_KDEFLAGS:
			kdbg_disable_bg_trace();

			value &= KDBG_USERFLAGS;
			kd_ctrl_page.kdebug_flags |= value;
			break;
		case KERN_KDDFLAGS:
			kdbg_disable_bg_trace();

			value &= KDBG_USERFLAGS;
			kd_ctrl_page.kdebug_flags &= ~value;
			break;
		case KERN_KDENABLE:
			/*
			 * Enable tracing mechanism.  Two types:
			 * KDEBUG_TRACE is the standard one,
			 * and KDEBUG_PPT which is a carefully
			 * chosen subset to avoid performance impact.
			 */
			if (value) {
				/*
				 * enable only if buffer is initialized
				 */
				if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
				    !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
					ret = EINVAL;
					break;
				}
				kdbg_thrmap_init();

				kdbg_set_tracing_enabled(TRUE, value);
			}
			else
			{
				kdbg_set_tracing_enabled(FALSE, 0);
			}
			break;
		case KERN_KDSETBUF:
			kdbg_disable_bg_trace();

			nkdbufs = kdbg_set_nkdbufs(value);
			break;
		case KERN_KDSETUP:
			kdbg_disable_bg_trace();

			ret = kdbg_reinit(FALSE);
			break;
		case KERN_KDREMOVE:
			kdbg_clear();

			ret = kdbg_enable_bg_trace();
			break;
		case KERN_KDSETREG:
			if(size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			kdbg_disable_bg_trace();

			ret = kdbg_setreg(&kd_Reg);
			break;
		case KERN_KDGETREG:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			ret = kdbg_getreg(&kd_Reg);
			if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
				ret = EINVAL;
			}
			kdbg_disable_bg_trace();

			break;
		case KERN_KDREADTR:
			ret = kdbg_read(where, sizep, NULL, NULL);
			break;
		case KERN_KDWRITETR:
		case KERN_KDWRITEMAP:
		{
			struct	vfs_context context;
			struct	fileproc *fp;
			size_t	number;
			vnode_t	vp;
			int	fd;

			kdbg_disable_bg_trace();

			if (name[0] == KERN_KDWRITETR) {
				int s;
				int wait_result = THREAD_AWAKENED;
				u_int64_t abstime;
				u_int64_t ns;

				if (*sizep) {
					ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
					nanoseconds_to_absolutetime(ns,  &abstime );
					clock_absolutetime_interval_to_deadline( abstime, &abstime );
				} else
					abstime = 0;

				s = ml_set_interrupts_enabled(FALSE);
				lck_spin_lock(kdw_spin_lock);

				while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

					kds_waiter = 1;

					if (abstime)
						wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
					else
						wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

					kds_waiter = 0;
				}
				lck_spin_unlock(kdw_spin_lock);
				ml_set_interrupts_enabled(s);
			}
			p = current_proc();
			fd = value;

			if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
				break;
			}
			context.vc_thread = current_thread();
			context.vc_ucred = fp->f_fglob->fg_cred;

			if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
				fp_drop(p, fd, fp, 1);

				ret = EBADF;
				break;
			}
			vp = (struct vnode *)fp->f_fglob->fg_data;

			if ((ret = vnode_getwithref(vp)) == 0) {
				RAW_file_offset = fp->f_fglob->fg_offset;
				if (name[0] == KERN_KDWRITETR) {
					number = nkdbufs * sizeof(kd_buf);

					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
					ret = kdbg_read(0, &number, vp, &context);
					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);

					*sizep = number;
				} else {
					number = kd_mapcount * sizeof(kd_threadmap);
					kdbg_readthrmap(0, &number, vp, &context);
				}
				fp->f_fglob->fg_offset = RAW_file_offset;
				vnode_put(vp);
			}
			fp_drop(p, fd, fp, 0);

			break;
		}
		case KERN_KDBUFWAIT:
		{
			/* WRITETR lite -- just block until there's data */
			int s;
			int wait_result = THREAD_AWAKENED;
			u_int64_t abstime;
			u_int64_t ns;
			size_t	number = 0;

			kdbg_disable_bg_trace();

			if (*sizep) {
				ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
				nanoseconds_to_absolutetime(ns,  &abstime );
				clock_absolutetime_interval_to_deadline( abstime, &abstime );
			} else
				abstime = 0;

			s = ml_set_interrupts_enabled(FALSE);
			if ( !s )
				panic("trying to wait with interrupts off");
			lck_spin_lock(kdw_spin_lock);

			/* drop the mutex so don't exclude others from
			 * accessing trace
			 */
			lck_mtx_unlock(kd_trace_mtx_sysctl);

			while (wait_result == THREAD_AWAKENED &&
				kd_ctrl_page.kds_inuse_count < n_storage_threshold) {

				kds_waiter = 1;

				if (abstime)
					wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
				else
					wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);

				kds_waiter = 0;
			}
			/* check the count under the spinlock */
			number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);

			lck_spin_unlock(kdw_spin_lock);
			ml_set_interrupts_enabled(s);

			/* pick the mutex back up again */
			lck_mtx_lock(kd_trace_mtx_sysctl);

			/* write out whether we've exceeded the threshold */
			*sizep = number;
			break;
		}
		case KERN_KDPIDTR:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			kdbg_disable_bg_trace();

			ret = kdbg_setpid(&kd_Reg);
			break;
		case KERN_KDPIDEX:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			kdbg_disable_bg_trace();

			ret = kdbg_setpidex(&kd_Reg);
			break;
		case KERN_KDCPUMAP:
			ret = kdbg_readcpumap(where, sizep);
			break;
		case KERN_KDTHRMAP:
			ret = kdbg_readthrmap(where, sizep, NULL, NULL);
			break;
		case KERN_KDREADCURTHRMAP:
			ret = kdbg_readcurthrmap(where, sizep);
			break;
		case KERN_KDSETRTCDEC:
			if (size < sizeof(kd_regtype)) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
				ret = EINVAL;
				break;
			}
			kdbg_disable_bg_trace();

			ret = kdbg_setrtcdec(&kd_Reg);
			break;
		case KERN_KDSET_TYPEFILTER:
			kdbg_disable_bg_trace();

			if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
				if ((ret = kdbg_enable_typefilter()))
					break;
			}
			if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
				ret = EINVAL;
				break;
			}
			if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
				ret = EINVAL;
				break;
			}
			kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
			break;
		default:
			ret = EINVAL;
	}
out:
	lck_mtx_unlock(kd_trace_mtx_sysctl);

	return(ret);
}
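/*
 * Usage sketch (added for clarity, not part of the original source):
 * these handlers are reached via sysctl(CTL_KERN, KERN_KDEBUG, ...),
 * where name[0] selects the operation and name[1] carries the value for
 * the commands checked at the top of kdbg_control().  A typical capture
 * sequence from a tool such as fs_usage or trace is roughly
 *
 *     int mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, nbufs };
 *     sysctl(mib, 4, NULL, &needed, NULL, 0);
 *     mib[2] = KERN_KDSETUP;  sysctl(mib, 3, NULL, &needed, NULL, 0);
 *     mib[2] = KERN_KDENABLE; mib[3] = KDEBUG_ENABLE_TRACE;
 *     sysctl(mib, 4, NULL, &needed, NULL, 0);
 *     mib[2] = KERN_KDREADTR; sysctl(mib, 3, buf, &size, NULL, 0);
 *
 * exact argument conventions vary between tools and releases.
 */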
/*
 * This code can run for the most part concurrently with kernel_debug_internal()...
 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
 * synchronize with the recording side of this puzzle... otherwise, we are able to
 * move through the lists w/o use of any locks
 */
int
kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
{
	unsigned int count;
	unsigned int cpu, min_cpu;
	uint64_t  mintime, t, barrier = 0;
	int error = 0;
	kd_buf *tempbuf;
	uint32_t rcursor;
	kd_buf lostevent;
	union kds_ptr kdsp;
	struct kd_storage *kdsp_actual;
	struct kd_bufinfo *kdbp;
	struct kd_bufinfo *min_kdbp;
	uint32_t tempbuf_count;
	uint32_t tempbuf_number;
	uint32_t old_kdebug_flags;
	uint32_t old_kdebug_slowcheck;
	boolean_t lostevents = FALSE;
	boolean_t out_of_events = FALSE;

	count = *number/sizeof(kd_buf);
	*number = 0;

	if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
		return EINVAL;

	memset(&lostevent, 0, sizeof(lostevent));
	lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);

	/* Capture timestamp. Only sort events that have occurred before the timestamp.
	 * Since the iop is being flushed here, it's possible that events occur on the AP
	 * while running live tracing. If we are disabled, no new events should
	 * occur on the AP.
	 */
	if (kd_ctrl_page.enabled)
	{
		// timestamp is non-zero value
		barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
	}

	// Request each IOP to provide us with up to date entries before merging buffers together.
	kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);

	/*
	 * because we hold kd_trace_mtx_sysctl, no other control threads can
	 * be playing with kdebug_flags... the code that cuts new events could
	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
	 * storage chunk which is where it examines kdebug_flags... if it's adding
	 * to the same chunk we're reading from, no problem...
	 */
	disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);

	if (count > nkdbufs)
		count = nkdbufs;

	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
		tempbuf_count = KDCOPYBUF_COUNT;
	while (count) {
		tempbuf = kdcopybuf;
		tempbuf_number = 0;

		// While space remains in the copy buffer
		while (tempbuf_count) {
			mintime = 0xffffffffffffffffULL;
			min_kdbp = NULL;
			min_cpu = 0;

			// Scan each CPU's list for its oldest unread event
			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {

				// Find one with raw data
				if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
					continue;
				/* Debugging aid: maintain a copy of the "kdsp"
				 * index.
				 */
				volatile union kds_ptr kdsp_shadow;

				kdsp_shadow = kdsp;

				// Get from cpu data to buffer header to buffer
				kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

				volatile struct kd_storage *kdsp_actual_shadow;

				kdsp_actual_shadow = kdsp_actual;

				// See if there are actual data left in this buffer
				rcursor = kdsp_actual->kds_readlast;

				if (rcursor == kdsp_actual->kds_bufindx)
					continue;

				t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);

				if ((t > barrier) && (barrier > 0)) {
					/*
					 * Need to wait to flush iop again before we
					 * sort any more data from the buffers
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < kdsp_actual->kds_timestamp) {
					/*
					 * indicates we've not yet completed filling
					 * in this event...
					 * this should only occur when we're looking
					 * at the buf that the record head is utilizing
					 * we'll pick these events up on the next
					 * call to kdbg_read
					 * we bail at this point so that we don't
					 * get an out-of-order timestream by continuing
					 * to read events from the other CPUs' timestream(s)
					 */
					out_of_events = TRUE;
					break;
				}
				if (t < mintime) {
					mintime = t;
					min_kdbp = kdbp;
					min_cpu = cpu;
				}
			}
			if (min_kdbp == NULL || out_of_events == TRUE) {
				/*
				 * all buffers ran empty
				 */
				out_of_events = TRUE;
				break;
			}
			kdsp = min_kdbp->kd_list_head;
			kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);

			if (kdsp_actual->kds_lostevents == TRUE) {
				kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
				*tempbuf = lostevent;

				kdsp_actual->kds_lostevents = FALSE;
				lostevents = TRUE;

				goto nextevent;
			}
			*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];

			if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
				release_storage_unit(min_cpu, kdsp.raw);

			/*
			 * Watch for out of order timestamps
			 */
			if (mintime < min_kdbp->kd_prev_timebase) {
				/*
				 * if so, use the previous timestamp + 1 cycle
				 */
				min_kdbp->kd_prev_timebase++;
				kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
			} else
				min_kdbp->kd_prev_timebase = mintime;
nextevent:
			tempbuf_count--;
			tempbuf_number++;
			tempbuf++;

			if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
				break;
		}
		if (tempbuf_number) {

			if (vp) {
				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));

				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));

				if (RAW_file_written >= RAW_FLUSH_SIZE) {
					cluster_push(vp, 0);

					RAW_file_written = 0;
				}
			} else {
				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
				buffer += (tempbuf_number * sizeof(kd_buf));
			}
			if (error) {
				*number = 0;
				error = EINVAL;
				break;
			}
			count   -= tempbuf_number;
			*number += tempbuf_number;
		}
		if (out_of_events == TRUE)
			/*
			 * all trace buffers are empty
			 */
			break;

		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
			tempbuf_count = KDCOPYBUF_COUNT;
	}
	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
		enable_wrap(old_kdebug_slowcheck, lostevents);
	}
	return (error);
}
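/*
 * Summary note (added for clarity, not part of the original source):
 * the loop above performs an N-way merge of the per-CPU storage lists,
 * repeatedly emitting the event with the smallest timestamp that is
 * older than the barrier, fixing up any timestamps that would go
 * backwards, and flushing up to KDCOPYBUF_COUNT events at a time either
 * to the supplied vnode or out to user space.
 */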
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc) {

	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */

}
#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3");
#else
#error No TRAP_DEBUGGER definition for this architecture
#endif

#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)

/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

	stackshot_subsys_lck_attr = lck_attr_alloc_init();

	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
/*
 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
 *                   on the system, tracing both kernel and user stacks
 *                   where available. Uses machine specific trace routines
 *                   for ppc, ppc64 and x86.
 * Inputs:           uap->pid - process id of process to be traced, or -1
 *                   for the entire system
 *                   uap->tracebuf - address of the user space destination
 *                   buffer
 *                   uap->tracebuf_size - size of the user space trace buffer
 *                   uap->options - various options, including the maximum
 *                   number of frames to trace.
 * Outputs:          EPERM if the caller is not privileged
 *                   EINVAL if the supplied trace buffer isn't sanely sized
 *                   ENOMEM if we don't have enough memory to satisfy the
 *                   request
 *                   ENOENT if the target pid isn't found
 *                   ENOSPC if the supplied buffer is insufficient
 *                   *retval contains the number of bytes traced, if successful
 *                   and -1 otherwise. If the request failed due to
 *                   tracebuffer exhaustion, we copyout as much as possible.
 */
int
stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
	int error = 0;

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
	    uap->flags, uap->dispatch_offset, retval);
}
int
stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
{
	int error = 0;
	boolean_t istate;

	if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {
		return -1;
	}

	/* cap in individual stackshot to SANE_TRACEBUF_SIZE */
	if (size > SANE_TRACEBUF_SIZE) {
		size = SANE_TRACEBUF_SIZE;
	}

/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();
	istate = ml_set_interrupts_enabled(FALSE);

/* Preload trace parameters*/
	kdp_snapshot_preflight(pid, buf, size, flags, 0);

/* Trap to the debugger to obtain a coherent stack snapshot; this populates
 * the trace buffer
 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	*bytesTraced = kdp_stack_snapshot_bytes_traced();

	error = kdp_stack_snapshot_geterror();

	STACKSHOT_SUBSYS_UNLOCK();

	return error;
}
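/*
 * In-kernel usage sketch (added for clarity, not part of the original
 * source):
 *
 *     char buf[16 * 1024];
 *     unsigned bytes = 0;
 *     if (stack_snapshot_from_kernel(-1, buf, sizeof(buf), 0, &bytes) == 0) {
 *             // 'bytes' bytes of snapshot data are now in buf
 *     }
 *
 * pid -1 requests the entire system, mirroring the uap->pid convention
 * documented above stack_snapshot().
 */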
int
stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
{
	boolean_t istate;
	int error = 0;
	unsigned bytesTraced = 0;

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
		telemetry_global_ctl(1);
		*retval = 0;
		return (0);
	} else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
		telemetry_global_ctl(0);
		*retval = 0;
		return (0);
	}

	if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
		error = telemetry_enable_window();

		if (error != KERN_SUCCESS) {
			/* We are probably out of memory */
			*retval = -1;
			return ENOMEM;
		}

		*retval = 0;
		return (0);
	} else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
		telemetry_disable_window();
		*retval = 0;
		return (0);
	}
#endif

	*retval = -1;
/* Serialize tracing */
	STACKSHOT_SUBSYS_LOCK();

	if (tracebuf_size <= 0) {
		error = EINVAL;
		goto error_exit;
	}

#if CONFIG_TELEMETRY
	if (flags & STACKSHOT_GET_MICROSTACKSHOT) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = telemetry_gather(tracebuf, &bytesTraced,
		                         (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}

	if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {

		if (tracebuf_size > SANE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = telemetry_gather_windowed(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}
#endif

	if (flags & STACKSHOT_GET_BOOT_PROFILE) {

		if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
			error = EINVAL;
			goto error_exit;
		}

		bytesTraced = tracebuf_size;
		error = bootprofile_gather(tracebuf, &bytesTraced);
		if (error == KERN_NO_SPACE) {
			error = ENOSPC;
		}

		*retval = (int)bytesTraced;
		goto error_exit;
	}

	if (tracebuf_size > SANE_TRACEBUF_SIZE) {
		error = EINVAL;
		goto error_exit;
	}

	assert(stackshot_snapbuf == NULL);
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
		error = ENOMEM;
		goto error_exit;
	}

	if (panic_active()) {
		error = ENOMEM;
		goto error_exit;
	}

	istate = ml_set_interrupts_enabled(FALSE);
/* Preload trace parameters*/
	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);

/* Trap to the debugger to obtain a coherent stack snapshot; this populates
 * the trace buffer
 */
	TRAP_DEBUGGER;

	ml_set_interrupts_enabled(istate);

	bytesTraced = kdp_stack_snapshot_bytes_traced();

	if (bytesTraced > 0) {
		if ((error = copyout(stackshot_snapbuf, tracebuf,
			((bytesTraced < tracebuf_size) ?
			    bytesTraced : tracebuf_size))))
			goto error_exit;
		*retval = bytesTraced;
	}
	else {
		error = ENOENT;
		goto error_exit;
	}

	error = kdp_stack_snapshot_geterror();
	if (error == -1) {
		error = ENOSPC;
		*retval = -1;
		goto error_exit;
	}

error_exit:
	if (stackshot_snapbuf != NULL)
		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
	stackshot_snapbuf = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
void
start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
{
	nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);

	kernel_debug_string("start_kern_tracing");

	if (0 == kdbg_reinit(TRUE)) {

		if (need_map == TRUE) {
			uint32_t old1, old2;

			kdbg_thrmap_init();

			disable_wrap(&old1, &old2);
		}

		/* Hold off interrupts until the early traces are cut */
		boolean_t	s = ml_set_interrupts_enabled(FALSE);

		kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);

		/*
		 * Transfer all very early events from the static buffer
		 * into the real buffers.
		 */
		kernel_debug_early_end();

		ml_set_interrupts_enabled(s);

		printf("kernel tracing started\n");
	} else {
		printf("error from kdbg_reinit, kernel tracing not started\n");
	}
}
void
start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
                                   boolean_t need_map,
                                   unsigned int typefilter)
{
	/* startup tracing */
	start_kern_tracing(new_nkdbufs, need_map);

	/* check that tracing was actually enabled */
	if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	/* setup the typefiltering */
	if (0 == kdbg_enable_typefilter())
		setbit(type_filter_bitmap, typefilter & (CSC_MASK >> CSC_OFFSET));
}
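/*
 * Illustrative note (added for clarity, not part of the original source):
 * assuming the usual kdebug encoding in which CSC_MASK/CSC_OFFSET select
 * the 16-bit class/subclass field of a debugid, the typefilter boot value
 * is masked down to that 16-bit index, so class 0x01 / subclass 0x90
 * would set bit 0x0190 of type_filter_bitmap and only events from that
 * subclass pass once the typefilter check is enabled.
 */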
void
kdbg_dump_trace_to_file(const char *filename)
{
	vfs_context_t	ctx;
	vnode_t		vp;
	int		error;
	size_t		number;

	if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
		return;

	if (global_state_pid != -1) {
		if ((proc_find(global_state_pid)) != NULL) {
			/*
			 * The global pid exists, we're running
			 * due to fs_usage, latency, etc...
			 * don't cut the panic/shutdown trace file
			 * Disable tracing from this point to avoid
			 * perturbing state.
			 */
			kdebug_enable = 0;
			kd_ctrl_page.enabled = 0;
			return;
		}
	}
	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);

	kdebug_enable = 0;
	kd_ctrl_page.enabled = 0;

	ctx = vfs_context_kernel();

	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
		return;

	number = kd_mapcount * sizeof(kd_threadmap);
	kdbg_readthrmap(0, &number, vp, ctx);

	number = nkdbufs*sizeof(kd_buf);
	kdbg_read(0, &number, vp, ctx);

	vnode_close(vp, FWRITE, ctx);

	sync(current_proc(), (void *)NULL, (int *)NULL);
}
/* Helper function for filling in the BSD name for an address space
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
void kdbg_get_task_name(char* name_buf, int len, task_t task)
{
	proc_t proc;

	/* Note: we can't use thread->task (and functions that rely on it) here
	 * because it hasn't been initialized yet when this function is called.
	 * We use the explicitly-passed task parameter instead.
	 */
	proc = get_bsdtask_info(task);
	if (proc != PROC_NULL)
		snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
	else
		snprintf(name_buf, len, "%p [!bsd]", task);
}