[apple/xnu.git] / bsd / kern / kdebug.c
1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
21 */
22
23
24 #include <machine/spl.h>
25
26 #include <sys/errno.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/proc_internal.h>
30 #include <sys/vm.h>
31 #include <sys/sysctl.h>
32 #include <sys/kdebug.h>
33 #include <sys/sysproto.h>
34 #include <sys/bsdtask_info.h>
35 #include <sys/random.h>
36
37 #define HZ 100
38 #include <mach/clock_types.h>
39 #include <mach/mach_types.h>
40 #include <mach/mach_time.h>
41 #include <machine/machine_routines.h>
42
43 #if defined(__i386__) || defined(__x86_64__)
44 #include <i386/rtclock_protos.h>
45 #include <i386/mp.h>
46 #include <i386/machine_routines.h>
47 #endif
48
49 #include <kern/clock.h>
50
51 #include <kern/thread.h>
52 #include <kern/task.h>
53 #include <kern/debug.h>
54 #include <kern/kalloc.h>
55 #include <kern/cpu_data.h>
56 #include <kern/assert.h>
57 #include <kern/telemetry.h>
58 #include <vm/vm_kern.h>
59 #include <sys/lock.h>
60
61 #include <sys/malloc.h>
62 #include <sys/mcache.h>
63 #include <sys/kauth.h>
64
65 #include <sys/vnode.h>
66 #include <sys/vnode_internal.h>
67 #include <sys/fcntl.h>
68 #include <sys/file_internal.h>
69 #include <sys/ubc.h>
70 #include <sys/param.h> /* for isset() */
71
72 #include <mach/mach_host.h> /* for host_info() */
73 #include <libkern/OSAtomic.h>
74
75 #include <machine/pal_routines.h>
76
77 /*
78 * IOP(s)
79 *
80 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
81 *
82 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
83 * They are registered dynamically. Each is assigned a cpu_id at registration.
84 *
85 * NOTE: IOP trace events may not use the same clock hardware as "normal"
86 * cpus. There is an effort made to synchronize the IOP timebase with the
87 * AP, but it should be understood that there may be discrepancies.
88 *
 89  * Once registered, an IOP is permanent; it cannot be unloaded or unregistered.
90 * The current implementation depends on this for thread safety.
91 *
 92  * New registrations occur by allocating a kd_iop struct and assigning
93 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
94 * list_head pointer resolves any races.
95 *
96 * You may safely walk the kd_iops list at any time, without holding locks.
97 *
98 * When allocating buffers, the current kd_iops head is captured. Any operations
99 * that depend on the buffer state (such as flushing IOP traces on reads,
100 * etc.) should use the captured list head. This will allow registrations to
101 * take place while trace is in use.
102 */
103
104 typedef struct kd_iop {
105 kd_callback_t callback;
106 uint32_t cpu_id;
107 uint64_t last_timestamp; /* Prevent timer rollback */
108 struct kd_iop* next;
109 } kd_iop_t;
110
111 static kd_iop_t* kd_iops = NULL;
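/*
 * Illustrative sketch (not part of the original source): because the IOP
 * list is append-only and each node is immutable once published, a reader
 * may capture the head once and walk it without holding any locks, as the
 * comment block above states. The helper name is hypothetical.
 */
#if 0
static uint32_t
kdbg_iop_count_sketch(void)
{
	/* capture kd_iops exactly once; later registrations are simply not seen */
	kd_iop_t* iop = kd_iops;
	uint32_t count = 0;

	while (iop) {
		count++;
		iop = iop->next;
	}
	return count;
}
#endif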
112
113 /* XXX should have prototypes, but Mach does not provide one */
114 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
115 int cpu_number(void); /* XXX <machine/...> include path broken */
116 void commpage_update_kdebug_enable(void); /* XXX sign */
117
118 /* XXX should probably be static, but it's debugging code... */
119 int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
120 void kdbg_control_chud(int, void *);
121 int kdbg_control(int *, u_int, user_addr_t, size_t *);
122 int kdbg_readcpumap(user_addr_t, size_t *);
123 int kdbg_readcurcpumap(user_addr_t, size_t *);
124 int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
125 int kdbg_readcurthrmap(user_addr_t, size_t *);
126 int kdbg_getreg(kd_regtype *);
127 int kdbg_setreg(kd_regtype *);
128 int kdbg_setrtcdec(kd_regtype *);
129 int kdbg_setpidex(kd_regtype *);
130 int kdbg_setpid(kd_regtype *);
131 void kdbg_thrmap_init(void);
132 int kdbg_reinit(boolean_t);
133 int kdbg_bootstrap(boolean_t);
134
135 int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size);
136 kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount);
137
138 static int kdbg_enable_typefilter(void);
139 static int kdbg_disable_typefilter(void);
140
141 static int create_buffers(boolean_t);
142 static void delete_buffers(void);
143
144 extern void IOSleep(int);
145
146 /* trace enable status */
147 unsigned int kdebug_enable = 0;
148
149 /* A static buffer to record events prior to the start of regular logging */
150 #define KD_EARLY_BUFFER_MAX 64
151 static kd_buf kd_early_buffer[KD_EARLY_BUFFER_MAX];
152 static int kd_early_index = 0;
153 static boolean_t kd_early_overflow = FALSE;
154
155 #define SLOW_NOLOG 0x01
156 #define SLOW_CHECKS 0x02
157 #define SLOW_ENTROPY 0x04 /* Obsolescent */
158 #define SLOW_CHUD 0x08
159
160 #define EVENTS_PER_STORAGE_UNIT 2048
161 #define MIN_STORAGE_UNITS_PER_CPU 4
162
163 #define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
164
165 union kds_ptr {
166 struct {
167 uint32_t buffer_index:21;
168 uint16_t offset:11;
169 };
170 uint32_t raw;
171 };
172
173 struct kd_storage {
174 union kds_ptr kds_next;
175 uint32_t kds_bufindx;
176 uint32_t kds_bufcnt;
177 uint32_t kds_readlast;
178 boolean_t kds_lostevents;
179 uint64_t kds_timestamp;
180
181 kd_buf kds_records[EVENTS_PER_STORAGE_UNIT];
182 };
183
184 #define MAX_BUFFER_SIZE (1024 * 1024 * 128)
185 #define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage))
186
187 struct kd_storage_buffers {
188 struct kd_storage *kdsb_addr;
189 uint32_t kdsb_size;
190 };
191
192 #define KDS_PTR_NULL 0xffffffff
193 struct kd_storage_buffers *kd_bufs = NULL;
194 int n_storage_units = 0;
195 int n_storage_buffers = 0;
196 int n_storage_threshold = 0;
197 int kds_waiter = 0;
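/*
 * Illustrative sketch (not part of the original source): a kds_ptr packs a
 * storage-buffer index and an offset within that buffer into 32 bits, so
 * the free list and the per-cpu lists can be linked without full pointers.
 * Under that assumption, resolving one is just the POINTER_FROM_KDS_PTR
 * arithmetic; the function name below is hypothetical.
 */
#if 0
static struct kd_storage *
kds_ptr_resolve_sketch(union kds_ptr p)
{
	if (p.raw == KDS_PTR_NULL)
		return NULL;
	/* same arithmetic as POINTER_FROM_KDS_PTR(p) */
	return &kd_bufs[p.buffer_index].kdsb_addr[p.offset];
}
#endif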
198
199 #pragma pack(0)
200 struct kd_bufinfo {
201 union kds_ptr kd_list_head;
202 union kds_ptr kd_list_tail;
203 boolean_t kd_lostevents;
204 uint32_t _pad;
205 uint64_t kd_prev_timebase;
206 uint32_t num_bufs;
207 } __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
208
209 struct kd_ctrl_page_t {
210 union kds_ptr kds_free_list;
211 uint32_t enabled :1;
212 uint32_t _pad0 :31;
213 int kds_inuse_count;
214 uint32_t kdebug_flags;
215 uint32_t kdebug_slowcheck;
216 /*
217 * The number of kd_bufinfo structs allocated may not match the current
218 * number of active cpus. We capture the iops list head at initialization
219 * which we could use to calculate the number of cpus we allocated data for,
220 * unless it happens to be null. To avoid that case, we explicitly also
221 * capture a cpu count.
222 */
223 kd_iop_t* kdebug_iops;
224 uint32_t kdebug_cpus;
225 } kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };
226
227 #pragma pack()
228
229 struct kd_bufinfo *kdbip = NULL;
230
231 #define KDCOPYBUF_COUNT 8192
232 #define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf))
233 kd_buf *kdcopybuf = NULL;
234
235 boolean_t kdlog_bg_trace = FALSE;
236 boolean_t kdlog_bg_trace_running = FALSE;
237 unsigned int bg_nkdbufs = 0;
238
239 unsigned int nkdbufs = 0;
240 unsigned int kdlog_beg=0;
241 unsigned int kdlog_end=0;
242 unsigned int kdlog_value1=0;
243 unsigned int kdlog_value2=0;
244 unsigned int kdlog_value3=0;
245 unsigned int kdlog_value4=0;
246
247 static lck_spin_t * kdw_spin_lock;
248 static lck_spin_t * kds_spin_lock;
249 static lck_mtx_t * kd_trace_mtx_sysctl;
250 static lck_grp_t * kd_trace_mtx_sysctl_grp;
251 static lck_attr_t * kd_trace_mtx_sysctl_attr;
252 static lck_grp_attr_t *kd_trace_mtx_sysctl_grp_attr;
253
254 static lck_grp_t *stackshot_subsys_lck_grp;
255 static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
256 static lck_attr_t *stackshot_subsys_lck_attr;
257 static lck_mtx_t stackshot_subsys_mutex;
258
259 void *stackshot_snapbuf = NULL;
260
261 int
262 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);
263
264 int
265 stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced);
266 extern void
267 kdp_snapshot_preflight(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);
268
269 extern int
270 kdp_stack_snapshot_geterror(void);
271 extern unsigned int
272 kdp_stack_snapshot_bytes_traced(void);
273
274 kd_threadmap *kd_mapptr = 0;
275 unsigned int kd_mapsize = 0;
276 unsigned int kd_mapcount = 0;
277
278 off_t RAW_file_offset = 0;
279 int RAW_file_written = 0;
280
281 #define RAW_FLUSH_SIZE (2 * 1024 * 1024)
282
283 pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */
284
285 #define DBG_FUNC_MASK 0xfffffffc
286
287 /* TODO: move to kdebug.h */
288 #define CLASS_MASK 0xff000000
289 #define CLASS_OFFSET 24
290 #define SUBCLASS_MASK 0x00ff0000
291 #define SUBCLASS_OFFSET 16
292 #define CSC_MASK 0xffff0000 /* class and subclass mask */
293 #define CSC_OFFSET SUBCLASS_OFFSET
294
295 #define EXTRACT_CLASS(debugid) ( (uint8_t) ( ((debugid) & CLASS_MASK ) >> CLASS_OFFSET ) )
296 #define EXTRACT_SUBCLASS(debugid) ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
297 #define EXTRACT_CSC(debugid) ( (uint16_t)( ((debugid) & CSC_MASK ) >> CSC_OFFSET ) )
298
299 #define INTERRUPT 0x01050000
300 #define MACH_vmfault 0x01300008
301 #define BSC_SysCall 0x040c0000
302 #define MACH_SysCall 0x010c0000
303 #define DBG_SCALL_MASK 0xffff0000
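/*
 * Worked example (added for illustration, not in the original source):
 * applying the extraction macros above to MACH_vmfault (0x01300008) gives
 * class 0x01, subclass 0x30 and a class/subclass (csc) code of 0x0130,
 * while (debugid & DBG_FUNC_MASK) strips the DBG_FUNC_START/END bits.
 */
#if 0
static void
kdbg_debugid_decode_sketch(void)
{
	uint32_t debugid  = MACH_vmfault;			/* 0x01300008 */

	uint8_t  class    = EXTRACT_CLASS(debugid);		/* 0x01       */
	uint8_t  subclass = EXTRACT_SUBCLASS(debugid);		/* 0x30       */
	uint16_t csc      = EXTRACT_CSC(debugid);		/* 0x0130     */
	uint32_t code     = debugid & DBG_FUNC_MASK;		/* 0x01300008 */

	(void)class; (void)subclass; (void)csc; (void)code;
}
#endif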
304
305
306 /* task to string structure */
307 struct tts
308 {
309 	task_t task; /* from proc's task */
310 	pid_t pid; /* from proc's p_pid */
311 	char task_comm[20]; /* from proc's p_comm */
312 };
313
314 typedef struct tts tts_t;
315
316 struct krt
317 {
318 kd_threadmap *map; /* pointer to the map buffer */
319 int count;
320 int maxcount;
321 struct tts *atts;
322 };
323
324 typedef struct krt krt_t;
325
326 /* This is for the CHUD toolkit call */
327 typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
328 uintptr_t arg2, uintptr_t arg3,
329 uintptr_t arg4, uintptr_t arg5);
330
331 volatile kd_chudhook_fn kdebug_chudhook = 0; /* pointer to CHUD toolkit function */
332
333 __private_extern__ void stackshot_lock_init( void );
334
335 static uint8_t *type_filter_bitmap;
336
337 /*
338 * This allows kperf to swap out the global state pid when kperf ownership is
339 * passed from one process to another. It checks the old global state pid so
340  * that kperf can't accidentally steal control of trace from a non-kperf
341  * trace user.
342 */
343 void
344 kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid);
345
346 void
347 kdbg_swap_global_state_pid(pid_t old_pid, pid_t new_pid)
348 {
349 if (!(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
350 return;
351
352 lck_mtx_lock(kd_trace_mtx_sysctl);
353
354 if (old_pid == global_state_pid)
355 global_state_pid = new_pid;
356
357 lck_mtx_unlock(kd_trace_mtx_sysctl);
358 }
359
360 static uint32_t
361 kdbg_cpu_count(boolean_t early_trace)
362 {
363 if (early_trace) {
364 /*
365 		 * we've started tracing before IOKit has even
366 		 * started running... just use the static max value
367 */
368 return max_ncpus;
369 }
370
371 host_basic_info_data_t hinfo;
372 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
373 host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
374 assert(hinfo.logical_cpu_max > 0);
375 return hinfo.logical_cpu_max;
376 }
377
378 #if MACH_ASSERT
379 #endif /* MACH_ASSERT */
380
381 static void
382 kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
383 {
384 while (iop) {
385 iop->callback.func(iop->callback.context, type, arg);
386 iop = iop->next;
387 }
388 }
389
390 static void
391 kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
392 {
393 int s = ml_set_interrupts_enabled(FALSE);
394 lck_spin_lock(kds_spin_lock);
395
396 if (enabled) {
397 kdebug_enable |= trace_type;
398 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
399 kd_ctrl_page.enabled = 1;
400 commpage_update_kdebug_enable();
401 } else {
402 kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
403 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
404 kd_ctrl_page.enabled = 0;
405 commpage_update_kdebug_enable();
406 }
407 lck_spin_unlock(kds_spin_lock);
408 ml_set_interrupts_enabled(s);
409
410 if (enabled) {
411 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
412 } else {
413 /*
414 * If you do not flush the IOP trace buffers, they can linger
415 * for a considerable period; consider code which disables and
416 * deallocates without a final sync flush.
417 */
418 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
419 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
420 }
421 }
422
423 static void
424 kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
425 {
426 int s = ml_set_interrupts_enabled(FALSE);
427 lck_spin_lock(kds_spin_lock);
428
429 if (enabled) {
430 kd_ctrl_page.kdebug_slowcheck |= slowflag;
431 kdebug_enable |= enableflag;
432 } else {
433 kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
434 kdebug_enable &= ~enableflag;
435 }
436
437 lck_spin_unlock(kds_spin_lock);
438 ml_set_interrupts_enabled(s);
439 }
440
441 void
442 disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
443 {
444 int s = ml_set_interrupts_enabled(FALSE);
445 lck_spin_lock(kds_spin_lock);
446
447 *old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
448 *old_flags = kd_ctrl_page.kdebug_flags;
449
450 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
451 kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
452
453 lck_spin_unlock(kds_spin_lock);
454 ml_set_interrupts_enabled(s);
455 }
456
457 void
458 enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
459 {
460 int s = ml_set_interrupts_enabled(FALSE);
461 lck_spin_lock(kds_spin_lock);
462
463 kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
464
465 if ( !(old_slowcheck & SLOW_NOLOG))
466 kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
467
468 if (lostevents == TRUE)
469 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
470
471 lck_spin_unlock(kds_spin_lock);
472 ml_set_interrupts_enabled(s);
473 }
474
475 static int
476 create_buffers(boolean_t early_trace)
477 {
478 int i;
479 int p_buffer_size;
480 int f_buffer_size;
481 int f_buffers;
482 int error = 0;
483
484 /*
485 * For the duration of this allocation, trace code will only reference
486 * kdebug_iops. Any iops registered after this enabling will not be
487 * messaged until the buffers are reallocated.
488 *
489 * TLDR; Must read kd_iops once and only once!
490 */
491 kd_ctrl_page.kdebug_iops = kd_iops;
492
493
494 /*
495 	 * If the list is valid, it is sorted newest -> oldest. Each iop entry
496 	 * has a cpu_id one greater than the next (older) entry's, so the list
497 	 * head holds the highest cpu_id and the cpu count is the head's cpu_id + 1.
498 */
499
500 kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);
501
502 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus) != KERN_SUCCESS) {
503 error = ENOSPC;
504 goto out;
505 }
506
507 if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
508 n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
509 else
510 n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
511
512 nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;
513
514 f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
515 n_storage_buffers = f_buffers;
516
517 f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
518 p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
519
520 if (p_buffer_size)
521 n_storage_buffers++;
522
523 kd_bufs = NULL;
524
525 if (kdcopybuf == 0) {
526 if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
527 error = ENOSPC;
528 goto out;
529 }
530 }
531 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
532 error = ENOSPC;
533 goto out;
534 }
535 bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));
536
537 for (i = 0; i < f_buffers; i++) {
538 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
539 error = ENOSPC;
540 goto out;
541 }
542 bzero(kd_bufs[i].kdsb_addr, f_buffer_size);
543
544 kd_bufs[i].kdsb_size = f_buffer_size;
545 }
546 if (p_buffer_size) {
547 if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
548 error = ENOSPC;
549 goto out;
550 }
551 bzero(kd_bufs[i].kdsb_addr, p_buffer_size);
552
553 kd_bufs[i].kdsb_size = p_buffer_size;
554 }
555 n_storage_units = 0;
556
557 for (i = 0; i < n_storage_buffers; i++) {
558 struct kd_storage *kds;
559 int n_elements;
560 int n;
561
562 n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
563 kds = kd_bufs[i].kdsb_addr;
564
565 for (n = 0; n < n_elements; n++) {
566 kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
567 kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
568
569 kd_ctrl_page.kds_free_list.buffer_index = i;
570 kd_ctrl_page.kds_free_list.offset = n;
571 }
572 n_storage_units += n_elements;
573 }
574
575 bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
576
577 for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
578 kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
579 kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
580 kdbip[i].kd_lostevents = FALSE;
581 kdbip[i].num_bufs = 0;
582 }
583
584 kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
585
586 kd_ctrl_page.kds_inuse_count = 0;
587 n_storage_threshold = n_storage_units / 2;
588 out:
589 if (error)
590 delete_buffers();
591
592 return(error);
593 }
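/*
 * Sizing sketch (added for illustration, not in the original source): this
 * mirrors the arithmetic create_buffers() just performed for a hypothetical
 * request. With EVENTS_PER_STORAGE_UNIT == 2048 and
 * MIN_STORAGE_UNITS_PER_CPU == 4, 4 cpus impose a floor of 32768 events, and
 * a request for 100000 events is rounded down to 48 units (98304 events).
 */
#if 0
static int
kdbg_storage_units_for_sketch(unsigned int requested_bufs, uint32_t cpus)
{
	int units;

	if (requested_bufs < (cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
		units = cpus * MIN_STORAGE_UNITS_PER_CPU;
	else
		units = requested_bufs / EVENTS_PER_STORAGE_UNIT;

	return units;
}
#endif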
594
595 static void
596 delete_buffers(void)
597 {
598 int i;
599
600 if (kd_bufs) {
601 for (i = 0; i < n_storage_buffers; i++) {
602 if (kd_bufs[i].kdsb_addr) {
603 kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
604 }
605 }
606 kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
607
608 kd_bufs = NULL;
609 n_storage_buffers = 0;
610 }
611 if (kdcopybuf) {
612 kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
613
614 kdcopybuf = NULL;
615 }
616 kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
617
618 if (kdbip) {
619 kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
620
621 kdbip = NULL;
622 }
623 kd_ctrl_page.kdebug_iops = NULL;
624 kd_ctrl_page.kdebug_cpus = 0;
625 kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
626 }
627
628 void
629 release_storage_unit(int cpu, uint32_t kdsp_raw)
630 {
631 int s = 0;
632 struct kd_storage *kdsp_actual;
633 struct kd_bufinfo *kdbp;
634 union kds_ptr kdsp;
635
636 kdsp.raw = kdsp_raw;
637
638 s = ml_set_interrupts_enabled(FALSE);
639 lck_spin_lock(kds_spin_lock);
640
641 kdbp = &kdbip[cpu];
642
643 if (kdsp.raw == kdbp->kd_list_head.raw) {
644 /*
645 * it's possible for the storage unit pointed to
646 * by kdsp to have already been stolen... so
647 * check to see if it's still the head of the list
648 * now that we're behind the lock that protects
649 * adding and removing from the queue...
650 * since we only ever release and steal units from
651 		 * that position, if it's no longer the head,
652 		 * we have nothing to do in this context
653 */
654 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
655 kdbp->kd_list_head = kdsp_actual->kds_next;
656
657 kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
658 kd_ctrl_page.kds_free_list = kdsp;
659
660 kd_ctrl_page.kds_inuse_count--;
661 }
662 lck_spin_unlock(kds_spin_lock);
663 ml_set_interrupts_enabled(s);
664 }
665
666
667 boolean_t
668 allocate_storage_unit(int cpu)
669 {
670 union kds_ptr kdsp;
671 struct kd_storage *kdsp_actual, *kdsp_next_actual;
672 struct kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
673 uint64_t oldest_ts, ts;
674 boolean_t retval = TRUE;
675 int s = 0;
676
677 s = ml_set_interrupts_enabled(FALSE);
678 lck_spin_lock(kds_spin_lock);
679
680 kdbp = &kdbip[cpu];
681
682 /* If someone beat us to the allocate, return success */
683 if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
684 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
685
686 if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
687 goto out;
688 }
689
690 if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
691 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
692 kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
693
694 kd_ctrl_page.kds_inuse_count++;
695 } else {
696 if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
697 kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
698 kdbp->kd_lostevents = TRUE;
699 retval = FALSE;
700 goto out;
701 }
702 kdbp_vict = NULL;
703 oldest_ts = (uint64_t)-1;
704
705 for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {
706
707 if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
708 /*
709 * no storage unit to steal
710 */
711 continue;
712 }
713
714 kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
715
716 if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
717 /*
718 * make sure we don't steal the storage unit
719 * being actively recorded to... need to
720 * move on because we don't want an out-of-order
721 * set of events showing up later
722 */
723 continue;
724 }
725 ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
726
727 if (ts < oldest_ts) {
728 /*
729 * when 'wrapping', we want to steal the
730 * storage unit that has the 'earliest' time
731 * associated with it (first event time)
732 */
733 oldest_ts = ts;
734 kdbp_vict = kdbp_try;
735 }
736 }
737 if (kdbp_vict == NULL) {
738 kdebug_enable = 0;
739 kd_ctrl_page.enabled = 0;
740 commpage_update_kdebug_enable();
741 retval = FALSE;
742 goto out;
743 }
744 kdsp = kdbp_vict->kd_list_head;
745 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
746 kdbp_vict->kd_list_head = kdsp_actual->kds_next;
747
748 if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
749 kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
750 kdsp_next_actual->kds_lostevents = TRUE;
751 } else
752 kdbp_vict->kd_lostevents = TRUE;
753
754 kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
755 }
756 kdsp_actual->kds_timestamp = mach_absolute_time();
757 kdsp_actual->kds_next.raw = KDS_PTR_NULL;
758 kdsp_actual->kds_bufcnt = 0;
759 kdsp_actual->kds_readlast = 0;
760
761 kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
762 kdbp->kd_lostevents = FALSE;
763 kdsp_actual->kds_bufindx = 0;
764
765 if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
766 kdbp->kd_list_head = kdsp;
767 else
768 POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
769 kdbp->kd_list_tail = kdsp;
770 out:
771 lck_spin_unlock(kds_spin_lock);
772 ml_set_interrupts_enabled(s);
773
774 return (retval);
775 }
776
777 int
778 kernel_debug_register_callback(kd_callback_t callback)
779 {
780 kd_iop_t* iop;
781 if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t)) == KERN_SUCCESS) {
782 memcpy(&iop->callback, &callback, sizeof(kd_callback_t));
783
784 /*
785 * <rdar://problem/13351477> Some IOP clients are not providing a name.
786 *
787 * Remove when fixed.
788 */
789 {
790 boolean_t is_valid_name = FALSE;
791 for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
792 /* This is roughly isprintable(c) */
793 if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
794 continue;
795 if (callback.iop_name[length] == 0) {
796 if (length)
797 is_valid_name = TRUE;
798 break;
799 }
800 }
801
802 if (!is_valid_name) {
803 strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
804 }
805 }
806
807 iop->last_timestamp = 0;
808
809 do {
810 /*
811 * We use two pieces of state, the old list head
812 * pointer, and the value of old_list_head->cpu_id.
813 * If we read kd_iops more than once, it can change
814 * between reads.
815 *
816 * TLDR; Must not read kd_iops more than once per loop.
817 */
818 iop->next = kd_iops;
819 iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);
820
821 /*
822 * Header says OSCompareAndSwapPtr has a memory barrier
823 */
824 } while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));
825
826 return iop->cpu_id;
827 }
828
829 return 0;
830 }
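/*
 * Usage sketch (added for illustration, not in the original source): a
 * hypothetical IOP driver registers once and then stamps its own events via
 * kernel_debug_enter() using the cpu_id it was handed back. The kd_callback_t
 * field names follow their usage in this file; the driver-side identifiers
 * and the debugid are made up for the example.
 */
#if 0
static void
example_iop_callback(void *context, kd_callback_type type, void *arg)
{
	/* KD_CALLBACK_SYNC_FLUSH asks the IOP to push any buffered events now */
	(void)context; (void)type; (void)arg;
}

static void
example_iop_attach(void)
{
	kd_callback_t cb = {
		.func     = example_iop_callback,
		.context  = NULL,
		.iop_name = "EX-IOP",
	};
	int cpu_id = kernel_debug_register_callback(cb);

	/* later, when the IOP produces an event: */
	kernel_debug_enter((uint32_t)cpu_id, 0x1f000000 /* hypothetical debugid */,
	    mach_absolute_time(), 0, 0, 0, 0, /* threadid */ 0);
}
#endif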
831
832 void
833 kernel_debug_enter(
834 uint32_t coreid,
835 uint32_t debugid,
836 uint64_t timestamp,
837 uintptr_t arg1,
838 uintptr_t arg2,
839 uintptr_t arg3,
840 uintptr_t arg4,
841 uintptr_t threadid
842 )
843 {
844 uint32_t bindx;
845 kd_buf *kd;
846 struct kd_bufinfo *kdbp;
847 struct kd_storage *kdsp_actual;
848 union kds_ptr kds_raw;
849
850 if (kd_ctrl_page.kdebug_slowcheck) {
851
852 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
853 goto out1;
854
855 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
856 if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
857 goto record_event;
858 goto out1;
859 }
860 else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
861 if (debugid >= kdlog_beg && debugid <= kdlog_end)
862 goto record_event;
863 goto out1;
864 }
865 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
866 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
867 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
868 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
869 (debugid & DBG_FUNC_MASK) != kdlog_value4)
870 goto out1;
871 }
872 }
873
874 record_event:
875
876 disable_preemption();
877
878 if (kd_ctrl_page.enabled == 0)
879 goto out;
880
881 kdbp = &kdbip[coreid];
882 timestamp &= KDBG_TIMESTAMP_MASK;
883
884 retry_q:
885 kds_raw = kdbp->kd_list_tail;
886
887 if (kds_raw.raw != KDS_PTR_NULL) {
888 kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
889 bindx = kdsp_actual->kds_bufindx;
890 } else
891 kdsp_actual = NULL;
892
893 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
894 if (allocate_storage_unit(coreid) == FALSE) {
895 /*
896 * this can only happen if wrapping
897 * has been disabled
898 */
899 goto out;
900 }
901 goto retry_q;
902 }
903 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
904 goto retry_q;
905
906 // IOP entries can be allocated before xnu allocates and inits the buffer
907 if (timestamp < kdsp_actual->kds_timestamp)
908 kdsp_actual->kds_timestamp = timestamp;
909
910 kd = &kdsp_actual->kds_records[bindx];
911
912 kd->debugid = debugid;
913 kd->arg1 = arg1;
914 kd->arg2 = arg2;
915 kd->arg3 = arg3;
916 kd->arg4 = arg4;
917 kd->arg5 = threadid;
918
919 kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
920
921 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
922 out:
923 enable_preemption();
924 out1:
925 if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
926 boolean_t need_kds_wakeup = FALSE;
927 int s;
928
929 /*
930 * try to take the lock here to synchronize with the
931 * waiter entering the blocked state... use the try
932 * mode to prevent deadlocks caused by re-entering this
933 * routine due to various trace points triggered in the
934 * lck_spin_sleep_xxxx routines used to actually enter
935 * our wait condition... no problem if we fail,
936 * there will be lots of additional events coming in that
937 * will eventually succeed in grabbing this lock
938 */
939 s = ml_set_interrupts_enabled(FALSE);
940
941 if (lck_spin_try_lock(kdw_spin_lock)) {
942
943 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
944 kds_waiter = 0;
945 need_kds_wakeup = TRUE;
946 }
947 lck_spin_unlock(kdw_spin_lock);
948
949 ml_set_interrupts_enabled(s);
950
951 if (need_kds_wakeup == TRUE)
952 wakeup(&kds_waiter);
953 }
954 }
955 }
956
957
958
959 static void
960 kernel_debug_internal(
961 uint32_t debugid,
962 uintptr_t arg1,
963 uintptr_t arg2,
964 uintptr_t arg3,
965 uintptr_t arg4,
966 uintptr_t arg5)
967 {
968 struct proc *curproc;
969 uint64_t now;
970 uint32_t bindx;
971 boolean_t s;
972 kd_buf *kd;
973 int cpu;
974 struct kd_bufinfo *kdbp;
975 struct kd_storage *kdsp_actual;
976 union kds_ptr kds_raw;
977
978
979
980 if (kd_ctrl_page.kdebug_slowcheck) {
981
982 if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
983 kd_chudhook_fn chudhook;
984 /*
985 * Mask interrupts to minimize the interval across
986 * which the driver providing the hook could be
987 * unloaded.
988 */
989 s = ml_set_interrupts_enabled(FALSE);
990 chudhook = kdebug_chudhook;
991 if (chudhook)
992 chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
993 ml_set_interrupts_enabled(s);
994 }
995 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
996 goto out1;
997
998 if ( !ml_at_interrupt_context()) {
999 if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
1000 /*
1001 * If kdebug flag is not set for current proc, return
1002 */
1003 curproc = current_proc();
1004
1005 if ((curproc && !(curproc->p_kdebug)) &&
1006 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1007 (debugid >> 24 != DBG_TRACE))
1008 goto out1;
1009 }
1010 else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
1011 /*
1012 * If kdebug flag is set for current proc, return
1013 */
1014 curproc = current_proc();
1015
1016 if ((curproc && curproc->p_kdebug) &&
1017 ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1018 (debugid >> 24 != DBG_TRACE))
1019 goto out1;
1020 }
1021 }
1022
1023 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1024 /* Always record trace system info */
1025 if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1026 goto record_event;
1027
1028 if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
1029 goto record_event;
1030 goto out1;
1031 }
1032 else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
1033 /* Always record trace system info */
1034 if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1035 goto record_event;
1036
1037 if (debugid < kdlog_beg || debugid > kdlog_end)
1038 goto out1;
1039 }
1040 else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
1041 /* Always record trace system info */
1042 if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1043 goto record_event;
1044
1045 if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
1046 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
1047 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
1048 (debugid & DBG_FUNC_MASK) != kdlog_value4)
1049 goto out1;
1050 }
1051 }
1052 record_event:
1053 disable_preemption();
1054
1055 if (kd_ctrl_page.enabled == 0)
1056 goto out;
1057
1058 cpu = cpu_number();
1059 kdbp = &kdbip[cpu];
1060 retry_q:
1061 kds_raw = kdbp->kd_list_tail;
1062
1063 if (kds_raw.raw != KDS_PTR_NULL) {
1064 kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
1065 bindx = kdsp_actual->kds_bufindx;
1066 } else
1067 kdsp_actual = NULL;
1068
1069 if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
1070 if (allocate_storage_unit(cpu) == FALSE) {
1071 /*
1072 * this can only happen if wrapping
1073 * has been disabled
1074 */
1075 goto out;
1076 }
1077 goto retry_q;
1078 }
1079 now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
1080
1081 if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
1082 goto retry_q;
1083
1084 kd = &kdsp_actual->kds_records[bindx];
1085
1086 kd->debugid = debugid;
1087 kd->arg1 = arg1;
1088 kd->arg2 = arg2;
1089 kd->arg3 = arg3;
1090 kd->arg4 = arg4;
1091 kd->arg5 = arg5;
1092
1093 kdbg_set_timestamp_and_cpu(kd, now, cpu);
1094
1095 OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
1096 out:
1097 enable_preemption();
1098 out1:
1099 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
1100 uint32_t etype;
1101 uint32_t stype;
1102
1103 etype = debugid & DBG_FUNC_MASK;
1104 stype = debugid & DBG_SCALL_MASK;
1105
1106 if (etype == INTERRUPT || etype == MACH_vmfault ||
1107 stype == BSC_SysCall || stype == MACH_SysCall) {
1108
1109 boolean_t need_kds_wakeup = FALSE;
1110
1111 /*
1112 * try to take the lock here to synchronize with the
1113 * waiter entering the blocked state... use the try
1114 * mode to prevent deadlocks caused by re-entering this
1115 * routine due to various trace points triggered in the
1116 * lck_spin_sleep_xxxx routines used to actually enter
1117 * one of our 2 wait conditions... no problem if we fail,
1118 * there will be lots of additional events coming in that
1119 * will eventually succeed in grabbing this lock
1120 */
1121 s = ml_set_interrupts_enabled(FALSE);
1122
1123 if (lck_spin_try_lock(kdw_spin_lock)) {
1124
1125 if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
1126 kds_waiter = 0;
1127 need_kds_wakeup = TRUE;
1128 }
1129 lck_spin_unlock(kdw_spin_lock);
1130 }
1131 ml_set_interrupts_enabled(s);
1132
1133 if (need_kds_wakeup == TRUE)
1134 wakeup(&kds_waiter);
1135 }
1136 }
1137 }
1138
1139 void
1140 kernel_debug(
1141 uint32_t debugid,
1142 uintptr_t arg1,
1143 uintptr_t arg2,
1144 uintptr_t arg3,
1145 uintptr_t arg4,
1146 __unused uintptr_t arg5)
1147 {
1148 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()));
1149 }
1150
1151 void
1152 kernel_debug1(
1153 uint32_t debugid,
1154 uintptr_t arg1,
1155 uintptr_t arg2,
1156 uintptr_t arg3,
1157 uintptr_t arg4,
1158 uintptr_t arg5)
1159 {
1160 kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5);
1161 }
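/*
 * Usage sketch (added for illustration, not in the original source): kernel
 * code normally brackets an operation with DBG_FUNC_START/DBG_FUNC_END so a
 * matching begin/end pair lands in the trace; the debugid below is a
 * hypothetical placeholder, not a code defined by this file.
 */
#if 0
static void
example_traced_operation(void)
{
	const uint32_t code = 0x1f000000;	/* hypothetical class/subclass/code */

	KERNEL_DEBUG_CONSTANT(code | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/* ... the work being measured ... */
	KERNEL_DEBUG_CONSTANT(code | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
#endif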
1162
1163 void
1164 kernel_debug_string(const char *message)
1165 {
1166 uintptr_t arg[4] = {0, 0, 0, 0};
1167
1168 /* Stuff the message string in the args and log it. */
1169 strncpy((char *)arg, message, MIN(sizeof(arg), strlen(message)));
1170 KERNEL_DEBUG_EARLY(
1171 (TRACEDBG_CODE(DBG_TRACE_INFO, 4)) | DBG_FUNC_NONE,
1172 arg[0], arg[1], arg[2], arg[3]);
1173 }
1174
1175 extern int master_cpu; /* MACH_KERNEL_PRIVATE */
1176 /*
1177 * Used prior to start_kern_tracing() being called.
1178 * Log temporarily into a static buffer.
1179 */
1180 void
1181 kernel_debug_early(
1182 uint32_t debugid,
1183 uintptr_t arg1,
1184 uintptr_t arg2,
1185 uintptr_t arg3,
1186 uintptr_t arg4)
1187 {
1188 /* If tracing is already initialized, use it */
1189 if (nkdbufs)
1190 KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0);
1191
1192 /* Do nothing if the buffer is full or we're not on the boot cpu */
1193 kd_early_overflow = kd_early_index >= KD_EARLY_BUFFER_MAX;
1194 if (kd_early_overflow ||
1195 cpu_number() != master_cpu)
1196 return;
1197
1198 kd_early_buffer[kd_early_index].debugid = debugid;
1199 kd_early_buffer[kd_early_index].timestamp = mach_absolute_time();
1200 kd_early_buffer[kd_early_index].arg1 = arg1;
1201 kd_early_buffer[kd_early_index].arg2 = arg2;
1202 kd_early_buffer[kd_early_index].arg3 = arg3;
1203 kd_early_buffer[kd_early_index].arg4 = arg4;
1204 kd_early_buffer[kd_early_index].arg5 = 0;
1205 kd_early_index++;
1206 }
1207
1208 /*
1209 * Transfer the contents of the temporary buffer into the trace buffers.
1210 * Precede that by logging the rebase time (offset) - the TSC-based time (in ns)
1211 * when mach_absolute_time is set to 0.
1212 */
1213 static void
1214 kernel_debug_early_end(void)
1215 {
1216 int i;
1217
1218 if (cpu_number() != master_cpu)
1219 		panic("kernel_debug_early_end() not called on boot processor");
1220
1221 /* Fake sentinel marking the start of kernel time relative to TSC */
1222 kernel_debug_enter(
1223 0,
1224 (TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
1225 0,
1226 (uint32_t)(tsc_rebase_abs_time >> 32),
1227 (uint32_t)tsc_rebase_abs_time,
1228 0,
1229 0,
1230 0);
1231 for (i = 0; i < kd_early_index; i++) {
1232 kernel_debug_enter(
1233 0,
1234 kd_early_buffer[i].debugid,
1235 kd_early_buffer[i].timestamp,
1236 kd_early_buffer[i].arg1,
1237 kd_early_buffer[i].arg2,
1238 kd_early_buffer[i].arg3,
1239 kd_early_buffer[i].arg4,
1240 0);
1241 }
1242
1243 /* Cut events-lost event on overflow */
1244 if (kd_early_overflow)
1245 KERNEL_DEBUG_CONSTANT(
1246 TRACEDBG_CODE(DBG_TRACE_INFO, 2), 0, 0, 0, 0, 0);
1247
1248 /* This trace marks the start of kernel tracing */
1249 kernel_debug_string("early trace done");
1250 }
1251
1252 /*
1253 * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
1254 */
1255 int
1256 kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
1257 {
1258 struct kdebug_trace64_args uap64;
1259
1260 uap64.code = uap->code;
1261 uap64.arg1 = uap->arg1;
1262 uap64.arg2 = uap->arg2;
1263 uap64.arg3 = uap->arg3;
1264 uap64.arg4 = uap->arg4;
1265
1266 return kdebug_trace64(p, &uap64, retval);
1267 }
1268
1269 /*
1270 * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
1271 */
1272 int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
1273 {
1274 uint8_t code_class;
1275
1276 /*
1277 	 * Not all classes are supported for injection from userspace, especially ones used by the core
1278 * kernel tracing infrastructure.
1279 */
1280 code_class = EXTRACT_CLASS(uap->code);
1281
1282 switch (code_class) {
1283 case DBG_TRACE:
1284 return EPERM;
1285 }
1286
1287 if ( __probable(kdebug_enable == 0) )
1288 return(0);
1289
1290 kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
1291
1292 return(0);
1293 }
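/*
 * Userspace sketch (added for illustration, not in the original source): a
 * process can inject its own events through this syscall, subject to the
 * DBG_TRACE restriction above. The raw syscall() form shown here is an
 * assumption about the userspace side, not something defined in this file.
 */
#if 0
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
example_userspace_trace(void)
{
	uint32_t code = 0x25010004;	/* hypothetical app-defined debugid */

	/* 64-bit args are truncated on K32 kernels, as noted above */
	syscall(SYS_kdebug_trace, code, 1, 2, 3, 4);
}
#endif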
1294
1295 static void
1296 kdbg_lock_init(void)
1297 {
1298 if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
1299 return;
1300
1301 /*
1302 * allocate lock group attribute and group
1303 */
1304 kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
1305 kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
1306
1307 /*
1308 * allocate the lock attribute
1309 */
1310 kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
1311
1312
1313 /*
1314 	 * allocate and initialize the mutex and spin locks
1315 */
1316 kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1317 kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1318 kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1319
1320 kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
1321 }
1322
1323
1324 int
1325 kdbg_bootstrap(boolean_t early_trace)
1326 {
1327 kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
1328
1329 return (create_buffers(early_trace));
1330 }
1331
1332 int
1333 kdbg_reinit(boolean_t early_trace)
1334 {
1335 int ret = 0;
1336
1337 /*
1338 * Disable trace collecting
1339 * First make sure we're not in
1340 * the middle of cutting a trace
1341 */
1342 kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1343
1344 /*
1345 * make sure the SLOW_NOLOG is seen
1346 * by everyone that might be trying
1347 * to cut a trace..
1348 */
1349 IOSleep(100);
1350
1351 delete_buffers();
1352
1353 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
1354 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1355 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1356 kd_mapsize = 0;
1357 kd_mapptr = (kd_threadmap *) 0;
1358 kd_mapcount = 0;
1359 }
1360 ret = kdbg_bootstrap(early_trace);
1361
1362 RAW_file_offset = 0;
1363 RAW_file_written = 0;
1364
1365 return(ret);
1366 }
1367
1368 void
1369 kdbg_trace_data(struct proc *proc, long *arg_pid)
1370 {
1371 if (!proc)
1372 *arg_pid = 0;
1373 else
1374 *arg_pid = proc->p_pid;
1375 }
1376
1377
1378 void
1379 kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
1380 {
1381 char *dbg_nameptr;
1382 int dbg_namelen;
1383 long dbg_parms[4];
1384
1385 if (!proc) {
1386 *arg1 = 0;
1387 *arg2 = 0;
1388 *arg3 = 0;
1389 *arg4 = 0;
1390 return;
1391 }
1392 /*
1393 * Collect the pathname for tracing
1394 */
1395 dbg_nameptr = proc->p_comm;
1396 dbg_namelen = (int)strlen(proc->p_comm);
1397 dbg_parms[0]=0L;
1398 dbg_parms[1]=0L;
1399 dbg_parms[2]=0L;
1400 dbg_parms[3]=0L;
1401
1402 if(dbg_namelen > (int)sizeof(dbg_parms))
1403 dbg_namelen = (int)sizeof(dbg_parms);
1404
1405 strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
1406
1407 *arg1=dbg_parms[0];
1408 *arg2=dbg_parms[1];
1409 *arg3=dbg_parms[2];
1410 *arg4=dbg_parms[3];
1411 }
1412
1413 static void
1414 kdbg_resolve_map(thread_t th_act, void *opaque)
1415 {
1416 kd_threadmap *mapptr;
1417 krt_t *t = (krt_t *)opaque;
1418
1419 if (t->count < t->maxcount) {
1420 mapptr = &t->map[t->count];
1421 mapptr->thread = (uintptr_t)thread_tid(th_act);
1422
1423 (void) strlcpy (mapptr->command, t->atts->task_comm,
1424 sizeof(t->atts->task_comm));
1425 /*
1426 * Some kernel threads have no associated pid.
1427 * We still need to mark the entry as valid.
1428 */
1429 if (t->atts->pid)
1430 mapptr->valid = t->atts->pid;
1431 else
1432 mapptr->valid = 1;
1433
1434 t->count++;
1435 }
1436 }
1437
1438 /*
1439 *
1440 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
1441 *
1442 * You may provide a buffer and size, or if you set the buffer to NULL, a
1443 * buffer of sufficient size will be allocated.
1444 *
1445 * If you provide a buffer and it is too small, sets cpumap_size to the number
1446 * of bytes required and returns EINVAL.
1447 *
1448 * On success, if you provided a buffer, cpumap_size is set to the number of
1449 * bytes written. If you did not provide a buffer, cpumap is set to the newly
1450 * allocated buffer and cpumap_size is set to the number of bytes allocated.
1451 *
1452 * NOTE: It may seem redundant to pass both iops and a cpu_count.
1453 *
1454 * We may be reporting data from "now", or from the "past".
1455 *
1456 * The "now" data would be for something like kdbg_readcurcpumap().
1457 * The "past" data would be for kdbg_readcpumap().
1458 *
1459 * If we do not pass both iops and cpu_count, and iops is NULL, this function
1460 * will need to read "now" state to get the number of cpus, which would be in
1461 * error if we were reporting "past" state.
1462 */
1463
1464 int
1465 kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
1466 {
1467 assert(cpumap);
1468 assert(cpumap_size);
1469 assert(cpu_count);
1470 assert(!iops || iops->cpu_id + 1 == cpu_count);
1471
1472 uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
1473 uint32_t bytes_available = *cpumap_size;
1474 *cpumap_size = bytes_needed;
1475
1476 if (*cpumap == NULL) {
1477 if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size) != KERN_SUCCESS) {
1478 return ENOMEM;
1479 }
1480 } else if (bytes_available < bytes_needed) {
1481 return EINVAL;
1482 }
1483
1484 kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
1485
1486 header->version_no = RAW_VERSION1;
1487 header->cpu_count = cpu_count;
1488
1489 kd_cpumap* cpus = (kd_cpumap*)&header[1];
1490
1491 int32_t index = cpu_count - 1;
1492 while (iops) {
1493 cpus[index].cpu_id = iops->cpu_id;
1494 cpus[index].flags = KDBG_CPUMAP_IS_IOP;
1495 bzero(cpus[index].name, sizeof(cpus->name));
1496 strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
1497
1498 iops = iops->next;
1499 index--;
1500 }
1501
1502 while (index >= 0) {
1503 cpus[index].cpu_id = index;
1504 cpus[index].flags = 0;
1505 bzero(cpus[index].name, sizeof(cpus->name));
1506 strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
1507
1508 index--;
1509 }
1510
1511 return KERN_SUCCESS;
1512 }
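/*
 * Consumer sketch (added for illustration, not in the original source): the
 * blob built above is a kd_cpumap_header immediately followed by
 * header->cpu_count kd_cpumap entries; IOPs carry KDBG_CPUMAP_IS_IOP and
 * occupy the highest cpu_ids. The walking function below is hypothetical.
 */
#if 0
static void
example_walk_cpumap(const uint8_t *cpumap)
{
	const kd_cpumap_header *header = (const kd_cpumap_header *)(uintptr_t)cpumap;
	const kd_cpumap *cpus = (const kd_cpumap *)&header[1];
	uint32_t i;

	for (i = 0; i < header->cpu_count; i++) {
		/* cpus[i].cpu_id, cpus[i].flags and cpus[i].name describe one cpu */
		if (cpus[i].flags & KDBG_CPUMAP_IS_IOP) {
			/* records tagged with this cpu_id came from an IOP */
		}
	}
}
#endif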
1513
1514 void
1515 kdbg_thrmap_init(void)
1516 {
1517 if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
1518 return;
1519
1520 kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
1521
1522 if (kd_mapptr)
1523 kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
1524 }
1525
1526
1527 kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
1528 {
1529 kd_threadmap *mapptr;
1530 struct proc *p;
1531 struct krt akrt;
1532 int tts_count; /* number of task-to-string structures */
1533 struct tts *tts_mapptr;
1534 unsigned int tts_mapsize = 0;
1535 int i;
1536 vm_offset_t kaddr;
1537
1538 /*
1539 * need to use PROC_SCANPROCLIST with proc_iterate
1540 */
1541 proc_list_lock();
1542
1543 /*
1544 * Calculate the sizes of map buffers
1545 */
1546 for (p = allproc.lh_first, *mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
1547 *mapcount += get_task_numacts((task_t)p->task);
1548 tts_count++;
1549 }
1550 proc_list_unlock();
1551
1552 /*
1553 * The proc count could change during buffer allocation,
1554 * so introduce a small fudge factor to bump up the
1555 	 * buffer sizes. This gives new tasks some chance of
1556 	 * making it into the tables. Bump up by 25%.
1557 */
1558 *mapcount += *mapcount/4;
1559 tts_count += tts_count/4;
1560
1561 *mapsize = *mapcount * sizeof(kd_threadmap);
1562
1563 if (count && count < *mapcount)
1564 return (0);
1565
1566 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize) == KERN_SUCCESS)) {
1567 bzero((void *)kaddr, *mapsize);
1568 mapptr = (kd_threadmap *)kaddr;
1569 } else
1570 return (0);
1571
1572 tts_mapsize = tts_count * sizeof(struct tts);
1573
1574 if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
1575 bzero((void *)kaddr, tts_mapsize);
1576 tts_mapptr = (struct tts *)kaddr;
1577 } else {
1578 kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
1579
1580 return (0);
1581 }
1582 /*
1583 	 * We need to save each proc's command string
1584 * and take a reference for each task associated
1585 * with a valid process
1586 */
1587
1588 proc_list_lock();
1589
1590 /*
1591 * should use proc_iterate
1592 */
1593 for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
1594 if (p->p_lflag & P_LEXIT)
1595 continue;
1596
1597 if (p->task) {
1598 task_reference(p->task);
1599 tts_mapptr[i].task = p->task;
1600 tts_mapptr[i].pid = p->p_pid;
1601 (void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
1602 i++;
1603 }
1604 }
1605 tts_count = i;
1606
1607 proc_list_unlock();
1608
1609 /*
1610 * Initialize thread map data
1611 */
1612 akrt.map = mapptr;
1613 akrt.count = 0;
1614 akrt.maxcount = *mapcount;
1615
1616 for (i = 0; i < tts_count; i++) {
1617 akrt.atts = &tts_mapptr[i];
1618 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
1619 task_deallocate((task_t) tts_mapptr[i].task);
1620 }
1621 kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
1622
1623 *mapcount = akrt.count;
1624
1625 return (mapptr);
1626 }
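/*
 * Consumer sketch (added for illustration, not in the original source): each
 * kd_threadmap entry pairs a thread id with the owning process's pid (kept
 * in 'valid') and command name, so resolving a trace record's thread id to
 * a name is a linear scan; the helper below is hypothetical.
 */
#if 0
static const char *
example_thrmap_lookup(kd_threadmap *map, unsigned int mapcount, uintptr_t tid)
{
	unsigned int i;

	for (i = 0; i < mapcount; i++) {
		if (map[i].thread == tid)
			return map[i].command;
	}
	return NULL;
}
#endif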
1627
1628 static void
1629 kdbg_clear(void)
1630 {
1631 /*
1632 * Clean up the trace buffer
1633 * First make sure we're not in
1634 * the middle of cutting a trace
1635 */
1636 kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1637
1638 /*
1639 * make sure the SLOW_NOLOG is seen
1640 * by everyone that might be trying
1641 * to cut a trace..
1642 */
1643 IOSleep(100);
1644
1645 global_state_pid = -1;
1646 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1647 kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
1648 kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
1649
1650 kdbg_disable_typefilter();
1651
1652 delete_buffers();
1653 nkdbufs = 0;
1654
1655 /* Clean up the thread map buffer */
1656 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1657 if (kd_mapptr) {
1658 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1659 kd_mapptr = (kd_threadmap *) 0;
1660 }
1661 kd_mapsize = 0;
1662 kd_mapcount = 0;
1663
1664 RAW_file_offset = 0;
1665 RAW_file_written = 0;
1666 }
1667
1668 int
1669 kdbg_setpid(kd_regtype *kdr)
1670 {
1671 pid_t pid;
1672 int flag, ret=0;
1673 struct proc *p;
1674
1675 pid = (pid_t)kdr->value1;
1676 flag = (int)kdr->value2;
1677
1678 if (pid > 0) {
1679 if ((p = proc_find(pid)) == NULL)
1680 ret = ESRCH;
1681 else {
1682 if (flag == 1) {
1683 /*
1684 * turn on pid check for this and all pids
1685 */
1686 kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
1687 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1688 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1689
1690 p->p_kdebug = 1;
1691 } else {
1692 /*
1693 * turn off pid check for this pid value
1694 * Don't turn off all pid checking though
1695 *
1696 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1697 */
1698 p->p_kdebug = 0;
1699 }
1700 proc_rele(p);
1701 }
1702 }
1703 else
1704 ret = EINVAL;
1705
1706 return(ret);
1707 }
1708
1709 /* This is for pid exclusion in the trace buffer */
1710 int
1711 kdbg_setpidex(kd_regtype *kdr)
1712 {
1713 pid_t pid;
1714 int flag, ret=0;
1715 struct proc *p;
1716
1717 pid = (pid_t)kdr->value1;
1718 flag = (int)kdr->value2;
1719
1720 if (pid > 0) {
1721 if ((p = proc_find(pid)) == NULL)
1722 ret = ESRCH;
1723 else {
1724 if (flag == 1) {
1725 /*
1726 * turn on pid exclusion
1727 */
1728 kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
1729 kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1730 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1731
1732 p->p_kdebug = 1;
1733 }
1734 else {
1735 /*
1736 * turn off pid exclusion for this pid value
1737 * Don't turn off all pid exclusion though
1738 *
1739 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1740 */
1741 p->p_kdebug = 0;
1742 }
1743 proc_rele(p);
1744 }
1745 } else
1746 ret = EINVAL;
1747
1748 return(ret);
1749 }
1750
1751
1752 /*
1753 * This is for setting a maximum decrementer value
1754 */
1755 int
1756 kdbg_setrtcdec(kd_regtype *kdr)
1757 {
1758 int ret = 0;
1759 natural_t decval;
1760
1761 decval = (natural_t)kdr->value1;
1762
1763 if (decval && decval < KDBG_MINRTCDEC)
1764 ret = EINVAL;
1765 else
1766 ret = ENOTSUP;
1767
1768 return(ret);
1769 }
1770
1771 int
1772 kdbg_enable_typefilter(void)
1773 {
1774 if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1775 /* free the old filter */
1776 kdbg_disable_typefilter();
1777 }
1778
1779 if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
1780 return ENOSPC;
1781 }
1782
1783 bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
1784
1785 /* Turn off range and value checks */
1786 kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
1787
1788 /* Enable filter checking */
1789 kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
1790 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1791 return 0;
1792 }
1793
1794 int
1795 kdbg_disable_typefilter(void)
1796 {
1797 /* Disable filter checking */
1798 kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
1799
1800 /* Turn off slow checks unless pid checks are using them */
1801 if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
1802 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1803 else
1804 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1805
1806 if(type_filter_bitmap == NULL)
1807 return 0;
1808
1809 vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
1810 type_filter_bitmap = NULL;
1811
1812 kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
1813 return 0;
1814 }
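/*
 * Filter sketch (added for illustration, not in the original source): the
 * typefilter is a bitmap indexed by the 16-bit class/subclass code, so
 * admitting one class/subclass and testing a debugid against it looks like
 * this (setbit()/isset() come from <sys/param.h>; the bitmap is assumed to
 * have been allocated by kdbg_enable_typefilter()).
 */
#if 0
static boolean_t
example_typefilter_admits(uint32_t debugid)
{
	/* e.g. admit all class 0x01 / subclass 0x30 events: csc 0x0130 */
	setbit(type_filter_bitmap, EXTRACT_CSC(MACH_vmfault));

	/* the same test kernel_debug_internal() performs */
	return isset(type_filter_bitmap, EXTRACT_CSC(debugid)) ? TRUE : FALSE;
}
#endif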
1815
1816 int
1817 kdbg_setreg(kd_regtype * kdr)
1818 {
1819 int ret=0;
1820 unsigned int val_1, val_2, val;
1821 switch (kdr->type) {
1822
1823 case KDBG_CLASSTYPE :
1824 val_1 = (kdr->value1 & 0xff);
1825 val_2 = (kdr->value2 & 0xff);
1826 kdlog_beg = (val_1<<24);
1827 kdlog_end = (val_2<<24);
1828 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1829 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1830 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1831 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1832 break;
1833 case KDBG_SUBCLSTYPE :
1834 val_1 = (kdr->value1 & 0xff);
1835 val_2 = (kdr->value2 & 0xff);
1836 val = val_2 + 1;
1837 kdlog_beg = ((val_1<<24) | (val_2 << 16));
1838 kdlog_end = ((val_1<<24) | (val << 16));
1839 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1840 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1841 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1842 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1843 break;
1844 case KDBG_RANGETYPE :
1845 kdlog_beg = (kdr->value1);
1846 kdlog_end = (kdr->value2);
1847 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1848 kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */
1849 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1850 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1851 break;
1852 case KDBG_VALCHECK:
1853 kdlog_value1 = (kdr->value1);
1854 kdlog_value2 = (kdr->value2);
1855 kdlog_value3 = (kdr->value3);
1856 kdlog_value4 = (kdr->value4);
1857 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1858 kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */
1859 kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */
1860 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1861 break;
1862 case KDBG_TYPENONE :
1863 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1864
1865 if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK |
1866 KDBG_PIDCHECK | KDBG_PIDEXCLUDE |
1867 KDBG_TYPEFILTER_CHECK)) )
1868 kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1869 else
1870 kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1871
1872 kdlog_beg = 0;
1873 kdlog_end = 0;
1874 break;
1875 default :
1876 ret = EINVAL;
1877 break;
1878 }
1879 return(ret);
1880 }
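/*
 * Worked example (added for illustration, not in the original source): a
 * KDBG_SUBCLSTYPE request with value1 = 0x01 and value2 = 0x30 makes
 * kdbg_setreg() arm a range check of kdlog_beg = 0x01300000 through
 * kdlog_end = 0x01310000, i.e. every code in class 0x01 / subclass 0x30.
 */
#if 0
static int
example_filter_one_subclass(void)
{
	kd_regtype kdr;

	kdr.type   = KDBG_SUBCLSTYPE;
	kdr.value1 = 0x01;	/* class    */
	kdr.value2 = 0x30;	/* subclass */

	return kdbg_setreg(&kdr);
}
#endif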
1881
1882 int
1883 kdbg_getreg(__unused kd_regtype * kdr)
1884 {
1885 #if 0
1886 int i,j, ret=0;
1887 unsigned int val_1, val_2, val;
1888
1889 switch (kdr->type) {
1890 case KDBG_CLASSTYPE :
1891 val_1 = (kdr->value1 & 0xff);
1892 val_2 = val_1 + 1;
1893 kdlog_beg = (val_1<<24);
1894 kdlog_end = (val_2<<24);
1895 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1896 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1897 break;
1898 case KDBG_SUBCLSTYPE :
1899 val_1 = (kdr->value1 & 0xff);
1900 val_2 = (kdr->value2 & 0xff);
1901 val = val_2 + 1;
1902 kdlog_beg = ((val_1<<24) | (val_2 << 16));
1903 kdlog_end = ((val_1<<24) | (val << 16));
1904 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1905 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1906 break;
1907 case KDBG_RANGETYPE :
1908 kdlog_beg = (kdr->value1);
1909 kdlog_end = (kdr->value2);
1910 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1911 kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1912 break;
1913 case KDBG_TYPENONE :
1914 kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1915 kdlog_beg = 0;
1916 kdlog_end = 0;
1917 break;
1918 default :
1919 ret = EINVAL;
1920 break;
1921 }
1922 #endif /* 0 */
1923 return(EINVAL);
1924 }
1925
1926 int
1927 kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
1928 {
1929 uint8_t* cpumap = NULL;
1930 uint32_t cpumap_size = 0;
1931 int ret = KERN_SUCCESS;
1932
1933 if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
1934 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
1935 if (user_cpumap) {
1936 size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
1937 if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
1938 ret = EFAULT;
1939 }
1940 }
1941 *user_cpumap_size = cpumap_size;
1942 kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
1943 } else
1944 ret = EINVAL;
1945 } else
1946 ret = EINVAL;
1947
1948 return (ret);
1949 }
1950
1951 int
1952 kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
1953 {
1954 kd_threadmap *mapptr;
1955 unsigned int mapsize;
1956 unsigned int mapcount;
1957 unsigned int count = 0;
1958 int ret = 0;
1959
1960 count = *bufsize/sizeof(kd_threadmap);
1961 *bufsize = 0;
1962
1963 if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
1964 if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
1965 ret = EFAULT;
1966 else
1967 *bufsize = (mapcount * sizeof(kd_threadmap));
1968
1969 kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
1970 } else
1971 ret = EINVAL;
1972
1973 return (ret);
1974 }
1975
1976 int
1977 kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
1978 {
1979 int avail = *number;
1980 int ret = 0;
1981 uint32_t count = 0;
1982 unsigned int mapsize;
1983
1984 count = avail/sizeof (kd_threadmap);
1985
1986 mapsize = kd_mapcount * sizeof(kd_threadmap);
1987
1988 if (count && (count <= kd_mapcount))
1989 {
1990 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
1991 {
1992 if (*number < mapsize)
1993 ret = EINVAL;
1994 else
1995 {
1996 if (vp)
1997 {
1998 RAW_header header;
1999 clock_sec_t secs;
2000 clock_usec_t usecs;
2001 char *pad_buf;
2002 uint32_t pad_size;
2003 uint32_t extra_thread_count = 0;
2004 uint32_t cpumap_size;
2005
2006 /*
2007 * To write a RAW_VERSION1+ file, we
2008 * must embed a cpumap in the "padding"
2009 * used to page align the events following
2010 * the threadmap. If the threadmap does not
2011 * leave enough padding for the cpumap, we
2012 * artificially grow the threadmap's footprint
2013 * until the padding that follows it is large enough.
2014 */
2015
2016 pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
2017 cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
2018
2019 if (cpumap_size > pad_size) {
2020 /* Force an overflow onto the next page so we get a full page of padding */
2021 extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
2022 }
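/*
 * Worked example (illustrative sizes; assumes 4 KiB pages and a 32-byte
 * kd_threadmap): if the header plus threadmap occupy 0x6fc0 bytes, then
 * pad_size = 0x1000 - 0xfc0 = 0x40.  A cpumap needing ~0x90 bytes does not
 * fit, so extra_thread_count = 0x40/32 + 1 = 3 zero-filled entries are
 * appended, pushing the end of the maps to 0x7020 and leaving 0xfe0 bytes of
 * padding on the next page for the cpumap.
 */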
2023
2024 header.version_no = RAW_VERSION1;
2025 header.thread_count = count + extra_thread_count;
2026
2027 clock_get_calendar_microtime(&secs, &usecs);
2028 header.TOD_secs = secs;
2029 header.TOD_usecs = usecs;
2030
2031 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
2032 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2033 if (ret)
2034 goto write_error;
2035 RAW_file_offset += sizeof(RAW_header);
2036
2037 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
2038 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2039 if (ret)
2040 goto write_error;
2041 RAW_file_offset += mapsize;
2042
2043 if (extra_thread_count) {
2044 pad_size = extra_thread_count * sizeof(kd_threadmap);
2045 pad_buf = (char *)kalloc(pad_size);
2046 memset(pad_buf, 0, pad_size);
2047
2048 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2049 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2050 kfree(pad_buf, pad_size);
2051
2052 if (ret)
2053 goto write_error;
2054 RAW_file_offset += pad_size;
2055
2056 }
2057
2058 pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
2059 if (pad_size) {
2060 pad_buf = (char *)kalloc(pad_size);
2061 memset(pad_buf, 0, pad_size);
2062
2063 /*
2064 * embed a cpumap in the padding bytes.
2065 * older code will skip this.
2066 * newer code will know how to read it.
2067 */
2068 uint32_t temp = pad_size;
2069 if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
2070 memset(pad_buf, 0, pad_size);
2071 }
2072
2073 ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2074 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2075 kfree(pad_buf, pad_size);
2076
2077 if (ret)
2078 goto write_error;
2079 RAW_file_offset += pad_size;
2080 }
2081 RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;
2082
2083 } else {
2084 if (copyout(kd_mapptr, buffer, mapsize))
2085 ret = EINVAL;
2086 }
2087 }
2088 }
2089 else
2090 ret = EINVAL;
2091 }
2092 else
2093 ret = EINVAL;
2094
2095 if (ret && vp)
2096 {
2097 count = 0;
2098
2099 vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
2100 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2101 RAW_file_offset += sizeof(uint32_t);
2102 RAW_file_written += sizeof(uint32_t);
2103 }
2104 write_error:
2105 if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
2106 {
2107 kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
2108 kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
2109 kd_mapsize = 0;
2110 kd_mapptr = (kd_threadmap *) 0;
2111 kd_mapcount = 0;
2112 }
2113 return(ret);
2114 }
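/*
 * Illustrative reader-side sketch (not compiled): locating the event records in
 * a RAW_VERSION1 file produced by the writer above.  Assumes the producer's
 * 4 KiB page size and the RAW_header/kd_threadmap layouts from sys/kdebug.h;
 * "fd" is an open trace file.
 *
 *	#include <sys/kdebug.h>
 *	#include <unistd.h>
 *
 *	RAW_header h;
 *	read(fd, &h, sizeof(h));		// version_no, thread_count, TOD
 *	off_t map_end = sizeof(h) + (off_t)h.thread_count * sizeof(kd_threadmap);
 *	// The writer always emits a pad region (which may carry a kd_cpumap_header
 *	// plus kd_cpumap entries), so events begin at the first page boundary
 *	// strictly beyond the maps:
 *	off_t events = (map_end & ~0xfffLL) + 0x1000;
 *	lseek(fd, events, SEEK_SET);
 *	// ... kd_buf records follow from here ...
 */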
2115
2116
2117 static int
2118 kdbg_set_nkdbufs(unsigned int value)
2119 {
2120 /*
2121 * We allow a maximum buffer size of 50% of either RAM or the max mapped address, whichever is smaller.
2122 * 'value' is the desired number of trace entries.
2123 */
2124 unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);
2125
2126 if (value <= max_entries)
2127 return (value);
2128 else
2129 return (max_entries);
2130 }
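/*
 * Worked example (assumes a 64-byte kd_buf on LP64): with sane_size of 8 GiB,
 * max_entries = (8 GiB / 2) / 64 = 67,108,864, so a request for, say,
 * 100,000,000 entries is silently clamped to that.  Note this routine only
 * clamps the count; the buffers themselves are allocated later by kdbg_reinit().
 */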
2131
2132
2133 static int
2134 kdbg_enable_bg_trace(void)
2135 {
2136 int ret = 0;
2137
2138 if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
2139 nkdbufs = bg_nkdbufs;
2140 ret = kdbg_reinit(FALSE);
2141 if (0 == ret) {
2142 kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
2143 kdlog_bg_trace_running = TRUE;
2144 }
2145 }
2146 return ret;
2147 }
2148
2149 static void
2150 kdbg_disable_bg_trace(void)
2151 {
2152 if (kdlog_bg_trace_running == TRUE) {
2153 kdlog_bg_trace_running = FALSE;
2154 kdbg_clear();
2155 }
2156 }
2157
2158
2159
2160 /*
2161 * This function is provided for the CHUD toolkit only.
2162 * int val:
2163 * zero disables kdebug_chudhook function call
2164 * non-zero enables kdebug_chudhook function call
2165 * void *fn:
2166 * address of the kdebug_chudhook function to install
2167 */
2168
2169 void
2170 kdbg_control_chud(int val, void *fn)
2171 {
2172 kdbg_lock_init();
2173
2174 if (val) {
2175 /* enable chudhook */
2176 kdebug_chudhook = fn;
2177 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
2178 }
2179 else {
2180 /* disable chudhook */
2181 kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
2182 kdebug_chudhook = 0;
2183 }
2184 }
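/*
 * Illustrative sketch (not compiled): registering a CHUD-style callout.  The
 * hook signature shown mirrors how kdebug_chudhook is invoked from the event
 * path (debugid plus five args); check kd_chudhook_fn in sys/kdebug.h for the
 * authoritative type.
 *
 *	static void
 *	my_chud_hook(uint32_t debugid, uintptr_t arg1, uintptr_t arg2,
 *	             uintptr_t arg3, uintptr_t arg4, uintptr_t arg5)
 *	{
 *		// runs on the tracepoint fast path -- keep this cheap
 *	}
 *
 *	kdbg_control_chud(1, (void *)my_chud_hook);	// enable
 *	...
 *	kdbg_control_chud(0, NULL);			// disable (fn is ignored)
 */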
2185
2186
2187 int
2188 kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
2189 {
2190 int ret = 0;
2191 size_t size = *sizep;
2192 unsigned int value = 0;
2193 kd_regtype kd_Reg;
2194 kbufinfo_t kd_bufinfo;
2195 pid_t curpid;
2196 proc_t p, curproc;
2197
2198 if (name[0] == KERN_KDGETENTROPY ||
2199 name[0] == KERN_KDWRITETR ||
2200 name[0] == KERN_KDWRITEMAP ||
2201 name[0] == KERN_KDEFLAGS ||
2202 name[0] == KERN_KDDFLAGS ||
2203 name[0] == KERN_KDENABLE ||
2204 name[0] == KERN_KDENABLE_BG_TRACE ||
2205 name[0] == KERN_KDSETBUF) {
2206
2207 if ( namelen < 2 )
2208 return(EINVAL);
2209 value = name[1];
2210 }
2211
2212 kdbg_lock_init();
2213
2214 if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
2215 return(ENOSPC);
2216
2217 lck_mtx_lock(kd_trace_mtx_sysctl);
2218
2219 switch(name[0]) {
2220 case KERN_KDGETBUF:
2221 /*
2222 * Does not alter the global_state_pid
2223 * This is a passive request.
2224 */
2225 if (size < sizeof(kd_bufinfo.nkdbufs)) {
2226 /*
2227 * There is not enough room to return even
2228 * the first element of the info structure.
2229 */
2230 ret = EINVAL;
2231 goto out;
2232 }
2233 kd_bufinfo.nkdbufs = nkdbufs;
2234 kd_bufinfo.nkdthreads = kd_mapcount;
2235
2236 if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
2237 kd_bufinfo.nolog = 1;
2238 else
2239 kd_bufinfo.nolog = 0;
2240
2241 kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
2242 #if defined(__LP64__)
2243 kd_bufinfo.flags |= KDBG_LP64;
2244 #endif
2245 kd_bufinfo.bufid = global_state_pid;
2246
2247 if (size >= sizeof(kd_bufinfo)) {
2248 /*
2249 * Provide all the info we have
2250 */
2251 if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
2252 ret = EINVAL;
2253 } else {
2254 /*
2255 * For backwards compatibility, only provide
2256 * as much info as there is room for.
2257 */
2258 if (copyout(&kd_bufinfo, where, size))
2259 ret = EINVAL;
2260 }
2261 goto out;
2262
2263 case KERN_KDGETENTROPY: {
2264 /* Obsolescent - just fake with a random buffer */
2265 char *buffer = (char *) kalloc(size);
2266 read_frandom((void *) buffer, size);
2267 ret = copyout(buffer, where, size);
2268 kfree(buffer, size);
2269 goto out;
2270 }
2271
2272 case KERN_KDENABLE_BG_TRACE:
2273 bg_nkdbufs = kdbg_set_nkdbufs(value);
2274 kdlog_bg_trace = TRUE;
2275 ret = kdbg_enable_bg_trace();
2276 goto out;
2277
2278 case KERN_KDDISABLE_BG_TRACE:
2279 kdlog_bg_trace = FALSE;
2280 kdbg_disable_bg_trace();
2281 goto out;
2282 }
2283
2284 if ((curproc = current_proc()) != NULL)
2285 curpid = curproc->p_pid;
2286 else {
2287 ret = ESRCH;
2288 goto out;
2289 }
2290 if (global_state_pid == -1)
2291 global_state_pid = curpid;
2292 else if (global_state_pid != curpid) {
2293 if ((p = proc_find(global_state_pid)) == NULL) {
2294 /*
2295 * The global pid no longer exists
2296 */
2297 global_state_pid = curpid;
2298 } else {
2299 /*
2300 * The global pid exists, deny this request
2301 */
2302 proc_rele(p);
2303
2304 ret = EBUSY;
2305 goto out;
2306 }
2307 }
2308
2309 switch(name[0]) {
2310 case KERN_KDEFLAGS:
2311 kdbg_disable_bg_trace();
2312
2313 value &= KDBG_USERFLAGS;
2314 kd_ctrl_page.kdebug_flags |= value;
2315 break;
2316 case KERN_KDDFLAGS:
2317 kdbg_disable_bg_trace();
2318
2319 value &= KDBG_USERFLAGS;
2320 kd_ctrl_page.kdebug_flags &= ~value;
2321 break;
2322 case KERN_KDENABLE:
2323 /*
2324 * Enable tracing mechanism. Two types:
2325 * KDEBUG_TRACE is the standard one,
2326 * and KDEBUG_PPT which is a carefully
2327 * chosen subset to avoid performance impact.
2328 */
2329 if (value) {
2330 /*
2331 * enable only if buffer is initialized
2332 */
2333 if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
2334 !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
2335 ret = EINVAL;
2336 break;
2337 }
2338 kdbg_thrmap_init();
2339
2340 kdbg_set_tracing_enabled(TRUE, value);
2341 }
2342 else
2343 {
2344 kdbg_set_tracing_enabled(FALSE, 0);
2345 }
2346 break;
2347 case KERN_KDSETBUF:
2348 kdbg_disable_bg_trace();
2349
2350 nkdbufs = kdbg_set_nkdbufs(value);
2351 break;
2352 case KERN_KDSETUP:
2353 kdbg_disable_bg_trace();
2354
2355 ret = kdbg_reinit(FALSE);
2356 break;
2357 case KERN_KDREMOVE:
2358 kdbg_clear();
2359 ret = kdbg_enable_bg_trace();
2360 break;
2361 case KERN_KDSETREG:
2362 if (size < sizeof(kd_regtype)) {
2363 ret = EINVAL;
2364 break;
2365 }
2366 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2367 ret = EINVAL;
2368 break;
2369 }
2370 kdbg_disable_bg_trace();
2371
2372 ret = kdbg_setreg(&kd_Reg);
2373 break;
2374 case KERN_KDGETREG:
2375 if (size < sizeof(kd_regtype)) {
2376 ret = EINVAL;
2377 break;
2378 }
2379 ret = kdbg_getreg(&kd_Reg);
2380 if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
2381 ret = EINVAL;
2382 }
2383 kdbg_disable_bg_trace();
2384
2385 break;
2386 case KERN_KDREADTR:
2387 ret = kdbg_read(where, sizep, NULL, NULL);
2388 break;
2389 case KERN_KDWRITETR:
2390 case KERN_KDWRITEMAP:
2391 {
2392 struct vfs_context context;
2393 struct fileproc *fp;
2394 size_t number;
2395 vnode_t vp;
2396 int fd;
2397
2398 kdbg_disable_bg_trace();
2399
2400 if (name[0] == KERN_KDWRITETR) {
2401 int s;
2402 int wait_result = THREAD_AWAKENED;
2403 u_int64_t abstime;
2404 u_int64_t ns;
2405
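/*
 * Note: for KERN_KDWRITETR (and KERN_KDBUFWAIT below), *sizep on input is a
 * timeout in milliseconds -- e.g. *sizep == 1000 yields ns = 1000 * 1000 * 1000
 * and a deadline roughly one second out, while 0 means wait (abortably) until
 * the storage threshold is reached.
 */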
2406 if (*sizep) {
2407 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
2408 nanoseconds_to_absolutetime(ns, &abstime );
2409 clock_absolutetime_interval_to_deadline( abstime, &abstime );
2410 } else
2411 abstime = 0;
2412
2413 s = ml_set_interrupts_enabled(FALSE);
2414 lck_spin_lock(kdw_spin_lock);
2415
2416 while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
2417
2418 kds_waiter = 1;
2419
2420 if (abstime)
2421 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
2422 else
2423 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
2424
2425 kds_waiter = 0;
2426 }
2427 lck_spin_unlock(kdw_spin_lock);
2428 ml_set_interrupts_enabled(s);
2429 }
2430 p = current_proc();
2431 fd = value;
2432
2433 proc_fdlock(p);
2434 if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
2435 proc_fdunlock(p);
2436 break;
2437 }
2438 context.vc_thread = current_thread();
2439 context.vc_ucred = fp->f_fglob->fg_cred;
2440
2441 if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
2442 fp_drop(p, fd, fp, 1);
2443 proc_fdunlock(p);
2444
2445 ret = EBADF;
2446 break;
2447 }
2448 vp = (struct vnode *)fp->f_fglob->fg_data;
2449 proc_fdunlock(p);
2450
2451 if ((ret = vnode_getwithref(vp)) == 0) {
2452 RAW_file_offset = fp->f_fglob->fg_offset;
2453 if (name[0] == KERN_KDWRITETR) {
2454 number = nkdbufs * sizeof(kd_buf);
2455
2456 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
2457 ret = kdbg_read(0, &number, vp, &context);
2458 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);
2459
2460 *sizep = number;
2461 } else {
2462 number = kd_mapcount * sizeof(kd_threadmap);
2463 kdbg_readthrmap(0, &number, vp, &context);
2464 }
2465 fp->f_fglob->fg_offset = RAW_file_offset;
2466 vnode_put(vp);
2467 }
2468 fp_drop(p, fd, fp, 0);
2469
2470 break;
2471 }
2472 case KERN_KDBUFWAIT:
2473 {
2474 /* WRITETR lite -- just block until there's data */
2475 int s;
2476 int wait_result = THREAD_AWAKENED;
2477 u_int64_t abstime;
2478 u_int64_t ns;
2479 size_t number = 0;
2480
2481 kdbg_disable_bg_trace();
2482
2483
2484 if (*sizep) {
2485 ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
2486 nanoseconds_to_absolutetime(ns, &abstime );
2487 clock_absolutetime_interval_to_deadline( abstime, &abstime );
2488 } else
2489 abstime = 0;
2490
2491 s = ml_set_interrupts_enabled(FALSE);
2492 if( !s )
2493 panic("trying to wait with interrupts off");
2494 lck_spin_lock(kdw_spin_lock);
2495
2496 /* drop the mutex so we don't exclude others from
2497 * accessing the trace facility
2498 */
2499 lck_mtx_unlock(kd_trace_mtx_sysctl);
2500
2501 while (wait_result == THREAD_AWAKENED &&
2502 kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
2503
2504 kds_waiter = 1;
2505
2506 if (abstime)
2507 wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
2508 else
2509 wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
2510
2511 kds_waiter = 0;
2512 }
2513
2514 /* check the count under the spinlock */
2515 number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
2516
2517 lck_spin_unlock(kdw_spin_lock);
2518 ml_set_interrupts_enabled(s);
2519
2520 /* pick the mutex back up again */
2521 lck_mtx_lock(kd_trace_mtx_sysctl);
2522
2523 /* write out whether we've exceeded the threshold */
2524 *sizep = number;
2525 break;
2526 }
2527 case KERN_KDPIDTR:
2528 if (size < sizeof(kd_regtype)) {
2529 ret = EINVAL;
2530 break;
2531 }
2532 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2533 ret = EINVAL;
2534 break;
2535 }
2536 kdbg_disable_bg_trace();
2537
2538 ret = kdbg_setpid(&kd_Reg);
2539 break;
2540 case KERN_KDPIDEX:
2541 if (size < sizeof(kd_regtype)) {
2542 ret = EINVAL;
2543 break;
2544 }
2545 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2546 ret = EINVAL;
2547 break;
2548 }
2549 kdbg_disable_bg_trace();
2550
2551 ret = kdbg_setpidex(&kd_Reg);
2552 break;
2553 case KERN_KDCPUMAP:
2554 ret = kdbg_readcpumap(where, sizep);
2555 break;
2556 case KERN_KDTHRMAP:
2557 ret = kdbg_readthrmap(where, sizep, NULL, NULL);
2558 break;
2559 case KERN_KDREADCURTHRMAP:
2560 ret = kdbg_readcurthrmap(where, sizep);
2561 break;
2562 case KERN_KDSETRTCDEC:
2563 if (size < sizeof(kd_regtype)) {
2564 ret = EINVAL;
2565 break;
2566 }
2567 if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2568 ret = EINVAL;
2569 break;
2570 }
2571 kdbg_disable_bg_trace();
2572
2573 ret = kdbg_setrtcdec(&kd_Reg);
2574 break;
2575 case KERN_KDSET_TYPEFILTER:
2576 kdbg_disable_bg_trace();
2577
2578 if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
2579 if ((ret = kdbg_enable_typefilter()))
2580 break;
2581 }
2582
2583 if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
2584 ret = EINVAL;
2585 break;
2586 }
2587
2588 if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
2589 ret = EINVAL;
2590 break;
2591 }
2592 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
2593 break;
2594 default:
2595 ret = EINVAL;
2596 }
2597 out:
2598 lck_mtx_unlock(kd_trace_mtx_sysctl);
2599
2600 return(ret);
2601 }
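/*
 * Illustrative user-space flow (a sketch of how a tracing tool might drive the
 * ops above; the MIB layout and op semantics are inferred from this switch
 * rather than taken from any tool's source, so treat the details as
 * assumptions).  The first controlling process becomes the owner
 * (global_state_pid); others get EBUSY.
 *
 *	#include <sys/sysctl.h>
 *	#include <sys/kdebug.h>
 *	#include <stdlib.h>
 *
 *	static void kdctl(int op, int val)		// hypothetical helper
 *	{
 *		int    mib[4]  = { CTL_KERN, KERN_KDEBUG, op, val };
 *		size_t dummy   = 0;
 *		sysctl(mib, 4, NULL, &dummy, NULL, 0);
 *	}
 *
 *	kdctl(KERN_KDSETBUF, 1000000);		// request ~1M kd_buf entries (clamped)
 *	kdctl(KERN_KDSETUP, 0);			// allocate buffers and the threadmap
 *	kdctl(KERN_KDENABLE, KDEBUG_ENABLE_TRACE);
 *	// ... let events accumulate ...
 *	size_t  needed = 1000000 * sizeof(kd_buf);
 *	kd_buf *buf    = malloc(needed);
 *	int     mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDREADTR };
 *	sysctl(mib, 3, buf, &needed, NULL, 0);	// on return, 'needed' = event count
 *	kdctl(KERN_KDENABLE, 0);		// stop tracing
 *	kdctl(KERN_KDREMOVE, 0);		// tear down (may re-arm background trace)
 */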
2602
2603
2604 /*
2605 * This code can run for the most part concurrently with kernel_debug_internal()...
2606 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
2607 * synchronize with the recording side of this puzzle... otherwise, we are able to
2608 * move through the lists w/o use of any locks
2609 */
2610 int
2611 kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
2612 {
2613 unsigned int count;
2614 unsigned int cpu, min_cpu;
2615 uint64_t mintime, t, barrier = 0;
2616 int error = 0;
2617 kd_buf *tempbuf;
2618 uint32_t rcursor;
2619 kd_buf lostevent;
2620 union kds_ptr kdsp;
2621 struct kd_storage *kdsp_actual;
2622 struct kd_bufinfo *kdbp;
2623 struct kd_bufinfo *min_kdbp;
2624 uint32_t tempbuf_count;
2625 uint32_t tempbuf_number;
2626 uint32_t old_kdebug_flags;
2627 uint32_t old_kdebug_slowcheck;
2628 boolean_t lostevents = FALSE;
2629 boolean_t out_of_events = FALSE;
2630
2631 count = *number/sizeof(kd_buf);
2632 *number = 0;
2633
2634 if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
2635 return EINVAL;
2636
2637 memset(&lostevent, 0, sizeof(lostevent));
2638 lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);
2639
2640 /* Capture timestamp. Only sort events that have occurred before the timestamp.
2641 * Since the IOP is being flushed here, it's possible that events occur on the AP
2642 * while running live tracing. If we are disabled, no new events should
2643 * occur on the AP.
2644 */
2645
2646 if (kd_ctrl_page.enabled)
2647 {
2648 // timestamp is non-zero value
2649 barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
2650 }
2651
2652 // Request each IOP to provide us with up to date entries before merging buffers together.
2653 kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
2654
2655 /*
2656 * Because we hold kd_trace_mtx_sysctl, no other control threads can
2657 * be playing with kdebug_flags... the code that cuts new events could
2658 * be running, but it grabs kds_spin_lock if it needs to acquire a new
2659 * storage chunk, which is where it examines kdebug_flags... if it's adding
2660 * to the same chunk we're reading from, no problem...
2661 */
2662
2663 disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
2664
2665 if (count > nkdbufs)
2666 count = nkdbufs;
2667
2668 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2669 tempbuf_count = KDCOPYBUF_COUNT;
2670
2671 while (count) {
2672 tempbuf = kdcopybuf;
2673 tempbuf_number = 0;
2674
2675 // While space
2676 while (tempbuf_count) {
2677 mintime = 0xffffffffffffffffULL;
2678 min_kdbp = NULL;
2679 min_cpu = 0;
2680
2681 // Check all CPUs
2682 for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
2683
2684 // Find one with raw data
2685 if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
2686 continue;
2687 /* Debugging aid: maintain a copy of the "kdsp"
2688 * index.
2689 */
2690 volatile union kds_ptr kdsp_shadow;
2691
2692 kdsp_shadow = kdsp;
2693
2694 // Get from cpu data to buffer header to buffer
2695 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2696
2697 volatile struct kd_storage *kdsp_actual_shadow;
2698
2699 kdsp_actual_shadow = kdsp_actual;
2700
2701 // See if there is any data left in this buffer
2702 rcursor = kdsp_actual->kds_readlast;
2703
2704 if (rcursor == kdsp_actual->kds_bufindx)
2705 continue;
2706
2707 t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
2708
2709 if ((t > barrier) && (barrier > 0)) {
2710 /*
2711 * Need to wait to flush iop again before we
2712 * sort any more data from the buffers
2713 */
2714 out_of_events = TRUE;
2715 break;
2716 }
2717 if (t < kdsp_actual->kds_timestamp) {
2718 /*
2719 * indicates we've not yet completed filling
2720 * in this event...
2721 * this should only occur when we're looking
2722 * at the buf that the record head is utilizing.
2723 * we'll pick these events up on the next
2724 * call to kdbg_read.
2725 * we bail at this point so that we don't
2726 * get an out-of-order timestream by continuing
2727 * to read events from the other CPUs' timestream(s)
2728 */
2729 out_of_events = TRUE;
2730 break;
2731 }
2732 if (t < mintime) {
2733 mintime = t;
2734 min_kdbp = kdbp;
2735 min_cpu = cpu;
2736 }
2737 }
2738 if (min_kdbp == NULL || out_of_events == TRUE) {
2739 /*
2740 * all buffers ran empty
2741 */
2742 out_of_events = TRUE;
2743 break;
2744 }
2745
2746 // Get data
2747 kdsp = min_kdbp->kd_list_head;
2748 kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2749
2750 if (kdsp_actual->kds_lostevents == TRUE) {
2751 kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
2752 *tempbuf = lostevent;
2753
2754 kdsp_actual->kds_lostevents = FALSE;
2755 lostevents = TRUE;
2756
2757 goto nextevent;
2758 }
2759
2760 // Copy into buffer
2761 *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
2762
2763 if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
2764 release_storage_unit(min_cpu, kdsp.raw);
2765
2766 /*
2767 * Watch for out of order timestamps
2768 */
2769 if (mintime < min_kdbp->kd_prev_timebase) {
2770 /*
2771 * if so, use the previous timestamp + 1 cycle
2772 */
2773 min_kdbp->kd_prev_timebase++;
2774 kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
2775 } else
2776 min_kdbp->kd_prev_timebase = mintime;
2777 nextevent:
2778 tempbuf_count--;
2779 tempbuf_number++;
2780 tempbuf++;
2781
2782 if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
2783 break;
2784 }
2785 if (tempbuf_number) {
2786
2787 if (vp) {
2788 error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
2789 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2790
2791 RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
2792
2793 if (RAW_file_written >= RAW_FLUSH_SIZE) {
2794 cluster_push(vp, 0);
2795
2796 RAW_file_written = 0;
2797 }
2798 } else {
2799 error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
2800 buffer += (tempbuf_number * sizeof(kd_buf));
2801 }
2802 if (error) {
2803 *number = 0;
2804 error = EINVAL;
2805 break;
2806 }
2807 count -= tempbuf_number;
2808 *number += tempbuf_number;
2809 }
2810 if (out_of_events == TRUE)
2811 /*
2812 * all trace buffers are empty
2813 */
2814 break;
2815
2816 if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2817 tempbuf_count = KDCOPYBUF_COUNT;
2818 }
2819 if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
2820 enable_wrap(old_kdebug_slowcheck, lostevents);
2821 }
2822 return (error);
2823 }
2824
2825
2826 unsigned char *getProcName(struct proc *proc);
2827 unsigned char *getProcName(struct proc *proc) {
2828
2829 return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
2830
2831 }
2832
2833 #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
2834 #define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
2835 #if defined(__i386__) || defined (__x86_64__)
2836 #define TRAP_DEBUGGER __asm__ volatile("int3");
2837 #else
2838 #error No TRAP_DEBUGGER definition for this architecture
2839 #endif
2840
2841 #define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
2842 #define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)
2843
2844 /* Initialize the mutex governing access to the stack snapshot subsystem */
2845 __private_extern__ void
2846 stackshot_lock_init( void )
2847 {
2848 stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
2849
2850 stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
2851
2852 stackshot_subsys_lck_attr = lck_attr_alloc_init();
2853
2854 lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
2855 }
2856
2857 /*
2858 * stack_snapshot: Obtains a coherent set of stack traces for all threads
2859 * on the system, tracing both kernel and user stacks
2860 * where available. Uses machine specific trace routines
2861 * for the architectures supported here (x86/x86_64; see TRAP_DEBUGGER above).
2862 * Inputs: uap->pid - process id of process to be traced, or -1
2863 * for the entire system
2864 * uap->tracebuf - address of the user space destination
2865 * buffer
2866 * uap->tracebuf_size - size of the user space trace buffer
2867 * uap->options - various options, including the maximum
2868 * number of frames to trace.
2869 * Outputs: EPERM if the caller is not privileged
2870 * EINVAL if the supplied trace buffer isn't sanely sized
2871 * ENOMEM if we don't have enough memory to satisfy the
2872 * request
2873 * ENOENT if the target pid isn't found
2874 * ENOSPC if the supplied buffer is insufficient
2875 * *retval contains the number of bytes traced if successful,
2876 * and -1 otherwise. If the request failed due to
2877 * trace buffer exhaustion, we copy out as much as possible.
2878 */
2879 int
2880 stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
2881 int error = 0;
2882
2883 if ((error = suser(kauth_cred_get(), &p->p_acflag)))
2884 return(error);
2885
2886 return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
2887 uap->flags, uap->dispatch_offset, retval);
2888 }
2889
2890 int
2891 stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
2892 {
2893 int error = 0;
2894 boolean_t istate;
2895
2896 if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {
2897 return -1;
2898 }
2899
2900 /* cap an individual stackshot to SANE_TRACEBUF_SIZE */
2901 if (size > SANE_TRACEBUF_SIZE) {
2902 size = SANE_TRACEBUF_SIZE;
2903 }
2904
2905 /* Serialize tracing */
2906 STACKSHOT_SUBSYS_LOCK();
2907 istate = ml_set_interrupts_enabled(FALSE);
2908
2909
2910 /* Preload trace parameters */
2911 kdp_snapshot_preflight(pid, buf, size, flags, 0);
2912
2913 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
2914 * the trace buffer
2915 */
2916 TRAP_DEBUGGER;
2917
2918 ml_set_interrupts_enabled(istate);
2919
2920 *bytesTraced = kdp_stack_snapshot_bytes_traced();
2921
2922 error = kdp_stack_snapshot_geterror();
2923
2924 STACKSHOT_SUBSYS_UNLOCK();
2925
2926 return error;
2927
2928 }
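/*
 * Illustrative in-kernel usage sketch (hypothetical caller, not from any
 * existing client).  A flags value of 0 requests a default snapshot; see the
 * STACKSHOT_* flags for the options.
 *
 *	unsigned bytes = 0;
 *	// 'buf'/'buf_size' are a caller-provided buffer of at most
 *	// SANE_TRACEBUF_SIZE bytes (larger requests are silently capped).
 *	if (stack_snapshot_from_kernel(-1, buf, buf_size, 0, &bytes) == 0) {
 *		// 'bytes' bytes of snapshot data are now valid in 'buf';
 *		// pid -1 means snapshot the entire system.
 *	}
 */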
2929
2930 int
2931 stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
2932 {
2933 boolean_t istate;
2934 int error = 0;
2935 unsigned bytesTraced = 0;
2936
2937 #if CONFIG_TELEMETRY
2938 if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
2939 telemetry_global_ctl(1);
2940 *retval = 0;
2941 return (0);
2942 } else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
2943 telemetry_global_ctl(0);
2944 *retval = 0;
2945 return (0);
2946 }
2947
2948 if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
2949 error = telemetry_enable_window();
2950
2951 if (error != KERN_SUCCESS) {
2952 /* We are probably out of memory */
2953 *retval = -1;
2954 return ENOMEM;
2955 }
2956
2957 *retval = 0;
2958 return (0);
2959 } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
2960 telemetry_disable_window();
2961 *retval = 0;
2962 return (0);
2963 }
2964 #endif
2965
2966 *retval = -1;
2967 /* Serialize tracing */
2968 STACKSHOT_SUBSYS_LOCK();
2969
2970 if (tracebuf_size <= 0) {
2971 error = EINVAL;
2972 goto error_exit;
2973 }
2974
2975 #if CONFIG_TELEMETRY
2976 if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
2977
2978 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
2979 error = EINVAL;
2980 goto error_exit;
2981 }
2982
2983 bytesTraced = tracebuf_size;
2984 error = telemetry_gather(tracebuf, &bytesTraced,
2985 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
2986 if (error == KERN_NO_SPACE) {
2987 error = ENOSPC;
2988 }
2989
2990 *retval = (int)bytesTraced;
2991 goto error_exit;
2992 }
2993
2994 if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {
2995
2996 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
2997 error = EINVAL;
2998 goto error_exit;
2999 }
3000
3001 bytesTraced = tracebuf_size;
3002 error = telemetry_gather_windowed(tracebuf, &bytesTraced);
3003 if (error == KERN_NO_SPACE) {
3004 error = ENOSPC;
3005 }
3006
3007 *retval = (int)bytesTraced;
3008 goto error_exit;
3009 }
3010
3011 if (flags & STACKSHOT_GET_BOOT_PROFILE) {
3012
3013 if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
3014 error = EINVAL;
3015 goto error_exit;
3016 }
3017
3018 bytesTraced = tracebuf_size;
3019 error = bootprofile_gather(tracebuf, &bytesTraced);
3020 if (error == KERN_NO_SPACE) {
3021 error = ENOSPC;
3022 }
3023
3024 *retval = (int)bytesTraced;
3025 goto error_exit;
3026 }
3027 #endif
3028
3029 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
3030 error = EINVAL;
3031 goto error_exit;
3032 }
3033
3034 assert(stackshot_snapbuf == NULL);
3035 if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
3036 error = ENOMEM;
3037 goto error_exit;
3038 }
3039
3040 if (panic_active()) {
3041 error = ENOMEM;
3042 goto error_exit;
3043 }
3044
3045 istate = ml_set_interrupts_enabled(FALSE);
3046 /* Preload trace parameters */
3047 kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);
3048
3049 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
3050 * the trace buffer
3051 */
3052
3053 TRAP_DEBUGGER;
3054
3055 ml_set_interrupts_enabled(istate);
3056
3057 bytesTraced = kdp_stack_snapshot_bytes_traced();
3058
3059 if (bytesTraced > 0) {
3060 if ((error = copyout(stackshot_snapbuf, tracebuf,
3061 ((bytesTraced < tracebuf_size) ?
3062 bytesTraced : tracebuf_size))))
3063 goto error_exit;
3064 *retval = bytesTraced;
3065 }
3066 else {
3067 error = ENOENT;
3068 goto error_exit;
3069 }
3070
3071 error = kdp_stack_snapshot_geterror();
3072 if (error == -1) {
3073 error = ENOSPC;
3074 *retval = -1;
3075 goto error_exit;
3076 }
3077
3078 error_exit:
3079 if (stackshot_snapbuf != NULL)
3080 kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
3081 stackshot_snapbuf = NULL;
3082 STACKSHOT_SUBSYS_UNLOCK();
3083 return error;
3084 }
3085
3086 void
3087 start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
3088 {
3089
3090 if (!new_nkdbufs)
3091 return;
3092 nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
3093 kdbg_lock_init();
3094
3095 kernel_debug_string("start_kern_tracing");
3096
3097 if (0 == kdbg_reinit(TRUE)) {
3098
3099 if (need_map == TRUE) {
3100 uint32_t old1, old2;
3101
3102 kdbg_thrmap_init();
3103
3104 disable_wrap(&old1, &old2);
3105 }
3106
3107 /* Hold off interrupts until the early traces are cut */
3108 boolean_t s = ml_set_interrupts_enabled(FALSE);
3109
3110 kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
3111
3112 /*
3113 * Transfer all very early events from the static buffer
3114 * into the real buffers.
3115 */
3116 kernel_debug_early_end();
3117
3118 ml_set_interrupts_enabled(s);
3119
3120 printf("kernel tracing started\n");
3121 } else {
3122 printf("error from kdbg_reinit,kernel tracing not started\n");
3123 }
3124 }
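/*
 * These startup entry points are normally reached from early BSD init when
 * tracing is requested via boot-args (the exact argument names are an
 * assumption here; see the PE_parse_boot_argn() calls in bsd_init.c), e.g.:
 *
 *	nvram boot-args="trace=1000000"
 *
 * which sizes the buffers and starts tracing before user space comes up.
 */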
3125
3126 void
3127 start_kern_tracing_with_typefilter(unsigned int new_nkdbufs,
3128 boolean_t need_map,
3129 unsigned int typefilter)
3130 {
3131 /* startup tracing */
3132 start_kern_tracing(new_nkdbufs, need_map);
3133
3134 /* check that tracing was actually enabled */
3135 if (!(kdebug_enable & KDEBUG_ENABLE_TRACE))
3136 return;
3137
3138 /* setup the typefiltering */
3139 if (0 == kdbg_enable_typefilter())
3140 setbit(type_filter_bitmap, typefilter & (CSC_MASK >> CSC_OFFSET));
3141 }
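/*
 * Worked example of the bit set above (assuming CSC_MASK/CSC_OFFSET select the
 * upper 16 class/subclass bits, and DBG_MACH == 1, DBG_MACH_SCHED == 0x40):
 *
 *	typefilter = (DBG_MACH << 8) | DBG_MACH_SCHED;	// 0x0140
 *	typefilter & (CSC_MASK >> CSC_OFFSET)		// 0x0140 & 0xffff == 320
 *
 * so bit 320 of type_filter_bitmap is set and only DBG_MACH/DBG_MACH_SCHED
 * events pass the typefilter check.  Only a single class/subclass pair can be
 * seeded this way; richer filters are installed later via KERN_KDSET_TYPEFILTER.
 */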
3142
3143 void
3144 kdbg_dump_trace_to_file(const char *filename)
3145 {
3146 vfs_context_t ctx;
3147 vnode_t vp;
3148 int error;
3149 size_t number;
3150
3151
3152 if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
3153 return;
3154
3155 if (global_state_pid != -1) {
3156 if ((proc_find(global_state_pid)) != NULL) {
3157 /*
3158 * The global pid exists, we're running
3159 * due to fs_usage, latency, etc...
3160 * don't cut the panic/shutdown trace file
3161 * Disable tracing from this point to avoid
3162 * perturbing state.
3163 */
3164 kdebug_enable = 0;
3165 kd_ctrl_page.enabled = 0;
3166 commpage_update_kdebug_enable();
3167 return;
3168 }
3169 }
3170 KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
3171
3172 kdebug_enable = 0;
3173 kd_ctrl_page.enabled = 0;
3174 commpage_update_kdebug_enable();
3175
3176 ctx = vfs_context_kernel();
3177
3178 if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
3179 return;
3180
3181 number = kd_mapcount * sizeof(kd_threadmap);
3182 kdbg_readthrmap(0, &number, vp, ctx);
3183
3184 number = nkdbufs*sizeof(kd_buf);
3185 kdbg_read(0, &number, vp, ctx);
3186
3187 vnode_close(vp, FWRITE, ctx);
3188
3189 sync(current_proc(), (void *)NULL, (int *)NULL);
3190 }
3191
3192 /* Helper function for filling in the BSD name for an address space
3193 * Defined here because the machine bindings know only Mach threads
3194 * and nothing about BSD processes.
3195 *
3196 * FIXME: need to grab a lock during this?
3197 */
3198 void kdbg_get_task_name(char* name_buf, int len, task_t task)
3199 {
3200 proc_t proc;
3201
3202 /* Note: we can't use thread->task (and functions that rely on it) here
3203 * because it hasn't been initialized yet when this function is called.
3204 * We use the explicitly-passed task parameter instead.
3205 */
3206 proc = get_bsdtask_info(task);
3207 if (proc != PROC_NULL)
3208 snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
3209 else
3210 snprintf(name_buf, len, "%p [!bsd]", task);
3211 }