/* osfmk/kern/kern_stackshot.c (apple/xnu, xnu-3248.50.21) */
1/*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/vm_param.h>
31#include <mach/mach_vm.h>
32#include <sys/errno.h>
33#include <sys/stackshot.h>
34#ifdef IMPORTANCE_INHERITANCE
35#include <ipc/ipc_importance.h>
36#endif
37#include <sys/appleapiopts.h>
38#include <kern/debug.h>
39#include <uuid/uuid.h>
40
41#include <kdp/kdp_dyld.h>
42#include <kdp/kdp_en_debugger.h>
43
44#include <libsa/types.h>
45#include <libkern/version.h>
46
47#include <string.h> /* bcopy */
48
49#include <kern/processor.h>
50#include <kern/thread.h>
51#include <kern/telemetry.h>
52#include <kern/clock.h>
53#include <vm/vm_map.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_pageout.h>
56#include <vm/vm_fault.h>
57#include <vm/vm_shared_region.h>
58#include <libkern/OSKextLibPrivate.h>
59
60#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
61#include <pexpert/pexpert.h> /* For gPanicBase/gPanicSize */
62#endif
63
64extern unsigned int not_in_kdp;
65
66/*
67 * TODO: Even hackier than the other pieces. This should really
68 * be moved off of kdp_pmap, and we should probably separate
69 * machine_trace_thread out of the kdp code.
70 */
71extern pmap_t kdp_pmap;
72extern addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
73
74int kdp_snapshot = 0;
75static int stack_snapshot_ret = 0;
76static uint32_t stack_snapshot_bytes_traced = 0;
77
78static kcdata_descriptor_t stackshot_kcdata_p = NULL;
79static void *stack_snapshot_buf;
80static uint32_t stack_snapshot_bufsize;
81int stack_snapshot_pid;
82static uint32_t stack_snapshot_flags;
83static unsigned int old_debugger;
84static boolean_t stack_enable_faulting;
85
86void *kernel_stackshot_buf = NULL; /* Pointer to buffer for stackshots triggered from the kernel and retrieved later */
87int kernel_stackshot_buf_size = 0;
88
89void *stackshot_snapbuf = NULL; /* Used by stack_snapshot2 (to be removed) */
90
91__private_extern__ void stackshot_lock_init( void );
92static boolean_t memory_iszero(void *addr, size_t size);
93kern_return_t stack_snapshot2(int pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
94kern_return_t stack_snapshot_from_kernel_internal(int pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced);
95#if CONFIG_TELEMETRY
96kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
97#endif
98uint32_t get_stackshot_estsize(uint32_t prev_size_hint);
99kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config,
100 size_t stackshot_config_size, boolean_t stackshot_from_user);
101void do_stackshot(void);
102void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, kcdata_descriptor_t data_p, boolean_t enable_faulting);
103void kdp_snapshot_postflight(void);
104static int kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t *pbytesTraced);
105static int kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced);
106int kdp_stack_snapshot_geterror(void);
107uint32_t kdp_stack_snapshot_bytes_traced(void);
108int kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t *pbytesTraced);
109static int pid_from_task(task_t task);
110static uint64_t proc_uniqueid_from_task(task_t task);
111static void kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap);
112static boolean_t kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size);
113static uint64_t proc_was_throttled_from_task(task_t task);
114
115extern int proc_pid(void *p);
116extern uint64_t proc_uniqueid(void *p);
117extern uint64_t proc_was_throttled(void *p);
118extern uint64_t proc_did_throttle(void *p);
119static uint64_t proc_did_throttle_from_task(task_t task);
120extern void proc_name_kdp(task_t task, char *buf, int size);
121extern int proc_threadname_kdp(void *uth, char *buf, size_t size);
122extern void proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec);
123extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
124static uint64_t proc_dispatchqueue_serialno_offset_from_task(task_t task);
125extern int memorystatus_get_pressure_status_kdp(void);
126
127extern int count_busy_buffers(void); /* must track with declaration in bsd/sys/buf_internal.h */
128extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
129extern int machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p, uint32_t *thread_trace_flags);
130extern int machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p, uint32_t *thread_trace_flags);
131
132/* Validates that the given address is both a valid page and has
133 * default caching attributes for the current kdp_pmap. Returns
134 * 0 if the address is invalid, and a kernel virtual address for
135 * the given address if it is valid.
136 */
137vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
138
139/* Clears caching information used by the above validation routine
140 * (in case the kdp_pmap has been changed or cleared).
141 */
142void machine_trace_thread_clear_validation_cache(void);
143
144#define MAX_FRAMES 1000
145#define MAX_LOADINFOS 500
146#define USECSPERSEC 1000000
147#define TASK_IMP_WALK_LIMIT 20
148
149typedef struct thread_snapshot *thread_snapshot_t;
150typedef struct task_snapshot *task_snapshot_t;
151
152#if CONFIG_KDP_INTERACTIVE_DEBUGGING
153extern kdp_send_t kdp_en_send_pkt;
154#endif
155
156/*
157 * Globals to support machine_trace_thread_get_kva.
158 */
159static vm_offset_t prev_target_page = 0;
160static vm_offset_t prev_target_kva = 0;
161static boolean_t validate_next_addr = TRUE;
162
163/*
164 * Stackshot locking and other defines.
165 */
166static lck_grp_t *stackshot_subsys_lck_grp;
167static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
168static lck_attr_t *stackshot_subsys_lck_attr;
169static lck_mtx_t stackshot_subsys_mutex;
170
171#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
172#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
173#if defined(__i386__) || defined (__x86_64__)
174#define TRAP_DEBUGGER __asm__ volatile("int3")
175#else
176#error No TRAP_DEBUGGER definition for this architecture
177#endif
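
/*
 * TRAP_DEBUGGER drops the calling CPU into the kernel debugger path. The
 * stackshot entry points below first stash their parameters via
 * kdp_snapshot_preflight() (which bumps kdp_snapshot); the debugger trap
 * handler, outside this file, then sees kdp_snapshot set and performs the
 * capture with interrupts off and the other CPUs held in the debugger,
 * which is what makes the resulting snapshot coherent.
 */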
178
179/* Initialize the mutex governing access to the stack snapshot subsystem */
180__private_extern__ void
181stackshot_lock_init( void )
182{
183 stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
184
185 stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
186
187 stackshot_subsys_lck_attr = lck_attr_alloc_init();
188
189 lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
190}
191
192#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)
193#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
194
195#define STACKSHOT_SUPP_SIZE (16 * 1024) /* Minimum stackshot size */
196#define TASK_UUID_AVG_SIZE (16 * sizeof(uuid_t)) /* Average space consumed by UUIDs/task */
197
198/*
199 * Method for grabbing timer values safely, in the sense that no infinite loop will occur.
200 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
201 * can loop infinitely if called while the timer is in the process of being updated.
202 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
203 * the timer using this method. This seems insoluble, since stackshot runs in a context
204 * where the timer might be half-updated, and has no way of yielding control just long
205 * enough to finish the update.
206 */
207
208static uint64_t safe_grab_timer_value(struct timer *t)
209{
210#if defined(__LP64__)
211 return t->all_bits;
212#else
213 uint64_t time = t->high_bits; /* endian independent grab */
214 time = (time << 32) | t->low_bits;
215 return time;
216#endif
217}
218
219/*
220 * Old, inefficient stackshot call. This will be removed in the next release and is being replaced with
221 * two syscalls -- stack_snapshot_with_config and stack_microstackshot.
222 */
223kern_return_t
224stack_snapshot2(int pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval)
225{
226 boolean_t istate;
227 int error = KERN_SUCCESS;
228 unsigned bytesTraced = 0;
229
230#if CONFIG_TELEMETRY
231 if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
232 telemetry_global_ctl(1);
233 *retval = 0;
234 return (0);
235 } else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
236 telemetry_global_ctl(0);
237 *retval = 0;
238 return (0);
239 }
240
241 if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
242 error = telemetry_enable_window();
243
244 if (error != KERN_SUCCESS) {
245 /* We are probably out of memory */
246 *retval = -1;
247 return KERN_RESOURCE_SHORTAGE;
248 }
249
250 *retval = 0;
251 return (0);
252 } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
253 telemetry_disable_window();
254 *retval = 0;
255 return (0);
256 }
257#endif
258
259 *retval = -1;
260 /* Serialize tracing */
261 STACKSHOT_SUBSYS_LOCK();
262
263 if (tracebuf_size <= 0) {
264 error = KERN_INVALID_ARGUMENT;
265 goto error_exit;
266 }
267
268#if CONFIG_TELEMETRY
269 if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
270
271 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
272 error = KERN_INVALID_ARGUMENT;
273 goto error_exit;
274 }
275
276 bytesTraced = tracebuf_size;
277 error = telemetry_gather(tracebuf, &bytesTraced,
278 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
279 *retval = (int)bytesTraced;
280 goto error_exit;
281 }
282
283 if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {
284
285 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
286 error = KERN_INVALID_ARGUMENT;
287 goto error_exit;
288 }
289
290 bytesTraced = tracebuf_size;
291 error = telemetry_gather_windowed(tracebuf, &bytesTraced);
292 *retval = (int)bytesTraced;
293 goto error_exit;
294 }
295
296 if (flags & STACKSHOT_GET_BOOT_PROFILE) {
297
298 if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
299 error = KERN_INVALID_ARGUMENT;
300 goto error_exit;
301 }
302
303 bytesTraced = tracebuf_size;
304 error = bootprofile_gather(tracebuf, &bytesTraced);
305 *retval = (int)bytesTraced;
306 goto error_exit;
307 }
308#endif
309
310 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
311 error = KERN_INVALID_ARGUMENT;
312 goto error_exit;
313 }
314
315 assert(stackshot_snapbuf == NULL);
316 if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
317 error = KERN_RESOURCE_SHORTAGE;
318 goto error_exit;
319 }
320
321 if (panic_active()) {
322 error = KERN_RESOURCE_SHORTAGE;
323 goto error_exit;
324 }
325
326 istate = ml_set_interrupts_enabled(FALSE);
327 /* Preload trace parameters */
328 kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, NULL, FALSE);
329
330 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
331 * the trace buffer
332 */
333
334 TRAP_DEBUGGER;
335
336 ml_set_interrupts_enabled(istate);
337
338 bytesTraced = kdp_stack_snapshot_bytes_traced();
339
340 if (bytesTraced > 0) {
341 if ((error = copyout(stackshot_snapbuf, tracebuf,
342 ((bytesTraced < tracebuf_size) ?
343 bytesTraced : tracebuf_size))))
344 goto error_exit;
345 *retval = bytesTraced;
346 }
347 else {
348 error = KERN_NOT_IN_SET;
349 goto error_exit;
350 }
351
352 error = kdp_stack_snapshot_geterror();
353 if (error == -1) {
354 error = KERN_NO_SPACE;
355 *retval = -1;
356 goto error_exit;
357 }
358
359error_exit:
360 if (stackshot_snapbuf != NULL)
361 kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
362 stackshot_snapbuf = NULL;
363 STACKSHOT_SUBSYS_UNLOCK();
364 return error;
365}
366
367kern_return_t
368stack_snapshot_from_kernel_internal(int pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced)
369{
370 int error = 0;
371 boolean_t istate;
372
373 if ((buf == NULL) || (size <= 0) || (bytes_traced == NULL)) {
374 return KERN_INVALID_ARGUMENT;
375 }
376
377 /* cap an individual stackshot to SANE_TRACEBUF_SIZE */
378 if (size > SANE_TRACEBUF_SIZE) {
379 size = SANE_TRACEBUF_SIZE;
380 }
381
382 /* Serialize tracing */
383 STACKSHOT_SUBSYS_LOCK();
384 istate = ml_set_interrupts_enabled(FALSE);
385
386
387 /* Preload trace parameters*/
388 kdp_snapshot_preflight(pid, buf, size, flags, NULL, FALSE);
389
390 /* Trap to the debugger to obtain a coherent stack snapshot; this populates
391 * the trace buffer
392 */
393 TRAP_DEBUGGER;
394
395 ml_set_interrupts_enabled(istate);
396
397 *bytes_traced = kdp_stack_snapshot_bytes_traced();
398
399 error = kdp_stack_snapshot_geterror();
400
401 STACKSHOT_SUBSYS_UNLOCK();
402
403 return error;
404}
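
/*
 * Illustrative sketch (not compiled here) of how an in-kernel caller might use
 * stack_snapshot_from_kernel_internal(). The buffer size and flags are example
 * values only; a pid of -1 requests all tasks, as in the traversal code below.
 *
 *    void *snap_buf = NULL;
 *    unsigned snap_bytes = 0;
 *    kern_return_t kr;
 *
 *    kr = kmem_alloc(kernel_map, (vm_offset_t *)&snap_buf,
 *        SANE_TRACEBUF_SIZE, VM_KERN_MEMORY_DIAG);
 *    if (kr == KERN_SUCCESS) {
 *        kr = stack_snapshot_from_kernel_internal(-1, snap_buf,
 *            SANE_TRACEBUF_SIZE, STACKSHOT_SAVE_LOADINFO, &snap_bytes);
 *        kmem_free(kernel_map, (vm_offset_t)snap_buf, SANE_TRACEBUF_SIZE);
 *    }
 */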
405
406#if CONFIG_TELEMETRY
407kern_return_t
408stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval)
409{
410 int error = KERN_SUCCESS;
411 uint32_t bytes_traced = 0;
412
413 *retval = -1;
414
415 /*
416 * Control related operations
417 */
418 if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
419 telemetry_global_ctl(1);
420 *retval = 0;
421 goto exit;
422 } else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
423 telemetry_global_ctl(0);
424 *retval = 0;
425 goto exit;
426 }
427
428 if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
429 error = telemetry_enable_window();
430
431 if (error != KERN_SUCCESS) {
432 /*
433 * We are probably out of memory
434 */
435 *retval = -1;
436 error = KERN_RESOURCE_SHORTAGE;
437 goto exit;
438 }
439
440 *retval = 0;
441 goto exit;
442 } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
443 telemetry_disable_window();
444 *retval = 0;
445 goto exit;
446 }
447
448 /*
449 * Data related operations
450 */
451 *retval = -1;
452
453 if ((((void*)tracebuf) == NULL) || (tracebuf_size == 0)) {
454 error = KERN_INVALID_ARGUMENT;
455 goto exit;
456 }
457
458 STACKSHOT_SUBSYS_LOCK();
459
460 if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
461 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
462 error = KERN_INVALID_ARGUMENT;
463 goto unlock_exit;
464 }
465
466 bytes_traced = tracebuf_size;
467 error = telemetry_gather(tracebuf, &bytes_traced,
468 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
469 *retval = (int)bytes_traced;
470 goto unlock_exit;
471 }
472
473 if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {
474
475 if (tracebuf_size > SANE_TRACEBUF_SIZE) {
476 error = KERN_INVALID_ARGUMENT;
477 goto unlock_exit;
478 }
479
480 bytes_traced = tracebuf_size;
481 error = telemetry_gather_windowed(tracebuf, &bytes_traced);
482 *retval = (int)bytes_traced;
483 goto unlock_exit;
484 }
485
486 if (flags & STACKSHOT_GET_BOOT_PROFILE) {
487
488 if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
489 error = KERN_INVALID_ARGUMENT;
490 goto unlock_exit;
491 }
492
493 bytes_traced = tracebuf_size;
494 error = bootprofile_gather(tracebuf, &bytes_traced);
495 *retval = (int)bytes_traced;
496 }
497
498unlock_exit:
499 STACKSHOT_SUBSYS_UNLOCK();
500exit:
501 return error;
502}
503#endif /* CONFIG_TELEMETRY */
504
505/*
506 * Return the estimated size of a stackshot based on the
507 * number of currently running threads and tasks.
508 */
509uint32_t
510get_stackshot_estsize(uint32_t prev_size_hint)
511{
512 vm_size_t thread_total;
513 vm_size_t task_total;
514 uint32_t estimated_size;
515
516 thread_total = (threads_count * sizeof(struct thread_snapshot));
517 task_total = (tasks_count * (sizeof(struct task_snapshot) + TASK_UUID_AVG_SIZE));
518
519 estimated_size = (uint32_t) VM_MAP_ROUND_PAGE((thread_total + task_total + STACKSHOT_SUPP_SIZE), PAGE_MASK);
520 if (estimated_size < prev_size_hint) {
521 estimated_size = (uint32_t) VM_MAP_ROUND_PAGE(prev_size_hint, PAGE_MASK);
522 }
523
524 return estimated_size;
525}
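
/*
 * For example (illustrative numbers only): get_stackshot_estsize() with 1,000
 * threads and 300 tasks estimates
 * 1000 * sizeof(struct thread_snapshot) + 300 * (sizeof(struct task_snapshot) + 256) + 16 KiB,
 * rounded up to a page boundary; 256 bytes is TASK_UUID_AVG_SIZE
 * (16 * sizeof(uuid_t)).
 */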
526
527/*
528 * stackshot_remap_buffer: Utility function to remap bytes_traced bytes starting at stackshotbuf
529 * into the current task's user space and subsequently copy out the address
530 * at which the buffer has been mapped in user space to out_buffer_addr.
531 *
532 * Inputs: stackshotbuf - pointer to the original buffer in the kernel's address space
533 * bytes_traced - length of the buffer to remap starting from stackshotbuf
534 * out_buffer_addr - user address to which the address of the newly mapped buffer is copied out
535 * out_size_addr - user address to which the size of the remapped buffer is copied out
536 *
537 * Outputs: ENOSPC if there is not enough free space in the task's address space to remap the buffer
538 * EINVAL for all other errors returned by task_remap_buffer/mach_vm_remap
539 * an error from copyout
540 */
541static kern_return_t
542stackshot_remap_buffer(void *stackshotbuf, uint32_t bytes_traced, uint64_t out_buffer_addr, uint64_t out_size_addr)
543{
544 int error = 0;
545 mach_vm_offset_t stackshotbuf_user_addr = (mach_vm_offset_t)NULL;
546 vm_prot_t cur_prot, max_prot;
547
548 error = mach_vm_remap(get_task_map(current_task()), &stackshotbuf_user_addr, bytes_traced, 0,
549 VM_FLAGS_ANYWHERE, kernel_map, (mach_vm_offset_t)stackshotbuf, FALSE, &cur_prot, &max_prot, VM_INHERIT_DEFAULT);
550 /*
551 * If the call to mach_vm_remap fails, we return the appropriate converted error
552 */
553 if (error == KERN_SUCCESS) {
554 /*
555 * If we fail to copy out the address or size of the new buffer, we remove the buffer mapping that
556 * we just made in the task's user space.
557 */
558 error = copyout(CAST_DOWN(void *, &stackshotbuf_user_addr), (user_addr_t)out_buffer_addr, sizeof(stackshotbuf_user_addr));
559 if (error != KERN_SUCCESS) {
560 mach_vm_deallocate(get_task_map(current_task()), stackshotbuf_user_addr, (mach_vm_size_t)bytes_traced);
561 return error;
562 }
563 error = copyout(&bytes_traced, (user_addr_t)out_size_addr, sizeof(bytes_traced));
564 if (error != KERN_SUCCESS) {
565 mach_vm_deallocate(get_task_map(current_task()), stackshotbuf_user_addr, (mach_vm_size_t)bytes_traced);
566 return error;
567 }
568 }
569 return error;
570}
571
572kern_return_t
573kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user)
574{
575 int error = 0;
576 boolean_t prev_interrupt_state;
577 uint32_t bytes_traced = 0;
578 uint32_t stackshotbuf_size = 0;
579 void * stackshotbuf = NULL;
580 kcdata_descriptor_t kcdata_p = NULL;
581
582 void * buf_to_free = NULL;
583 int size_to_free = 0;
584
585 /* Parsed arguments */
586 uint64_t out_buffer_addr;
587 uint64_t out_size_addr;
588 int pid = -1;
589 uint32_t flags;
590 uint64_t since_timestamp;
591 boolean_t enable_faulting = FALSE;
592 uint32_t size_hint = 0;
593
594 if(stackshot_config == NULL) {
595 return KERN_INVALID_ARGUMENT;
596 }
597
598 switch (stackshot_config_version) {
599 case STACKSHOT_CONFIG_TYPE:
600 if (stackshot_config_size != sizeof(stackshot_config_t)) {
601 return KERN_INVALID_ARGUMENT;
602 }
603 stackshot_config_t *config = (stackshot_config_t *) stackshot_config;
604 out_buffer_addr = config->sc_out_buffer_addr;
605 out_size_addr = config->sc_out_size_addr;
606 pid = config->sc_pid;
607 flags = config->sc_flags;
608 since_timestamp = config->sc_since_timestamp;
609 if (config->sc_size <= SANE_TRACEBUF_SIZE) {
610 size_hint = config->sc_size;
611 }
612 break;
613 default:
614 return KERN_NOT_SUPPORTED;
615 }
616
617 /*
618 * Currently saving a kernel buffer is only supported from the internal/KEXT API.
619 */
620 if (stackshot_from_user) {
621 if (flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER) {
622 return KERN_NO_ACCESS;
623 }
624 } else {
625 if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
626 return KERN_NOT_SUPPORTED;
627 }
628 }
629
630 if (flags & STACKSHOT_ENABLE_FAULTING) {
631 return KERN_NOT_SUPPORTED;
632 }
633
634 /*
635 * If we're not saving the buffer in the kernel, we need user addresses to copy the remapped buffer's address and size into.
636 */
637 if ((!out_buffer_addr || !out_size_addr) && !(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
638 return KERN_INVALID_ARGUMENT;
639 }
640
641 if (since_timestamp != 0) {
642 return KERN_NOT_SUPPORTED;
643 }
644
645 STACKSHOT_SUBSYS_LOCK();
646
647 if (flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER) {
648 /*
649 * Don't overwrite an existing stackshot
650 */
651 if (kernel_stackshot_buf != NULL) {
652 error = KERN_MEMORY_PRESENT;
653 goto error_exit;
654 }
655 } else if (flags & STACKSHOT_RETRIEVE_EXISTING_BUFFER) {
656 if ((kernel_stackshot_buf == NULL) || (kernel_stackshot_buf_size <= 0)) {
657 error = KERN_NOT_IN_SET;
658 goto error_exit;
659 }
660 error = stackshot_remap_buffer(kernel_stackshot_buf, kernel_stackshot_buf_size,
661 out_buffer_addr, out_size_addr);
662 /*
663 * If we successfully remapped the buffer into the user's address space, we
664 * set buf_to_free and size_to_free so the prior kernel mapping will be removed
665 * and then clear the kernel stackshot pointer and associated size.
666 */
667 if (error == KERN_SUCCESS) {
668 buf_to_free = kernel_stackshot_buf;
669 size_to_free = (int) VM_MAP_ROUND_PAGE(kernel_stackshot_buf_size, PAGE_MASK);
670 kernel_stackshot_buf = NULL;
671 kernel_stackshot_buf_size = 0;
672 }
673
674 goto error_exit;
675 }
676
677 stackshotbuf_size = get_stackshot_estsize(size_hint);
678
679 for (; stackshotbuf_size <= SANE_TRACEBUF_SIZE; stackshotbuf_size <<= 1) {
680 if (kmem_alloc(kernel_map, (vm_offset_t *)&stackshotbuf, stackshotbuf_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
681 error = KERN_RESOURCE_SHORTAGE;
682 goto error_exit;
683 }
684
685 /*
686 * If someone has panicked, don't try and enter the debugger
687 */
688 if (panic_active()) {
689 error = KERN_RESOURCE_SHORTAGE;
690 goto error_exit;
691 }
692
693 if (flags & STACKSHOT_KCDATA_FORMAT) {
694 kcdata_p = kcdata_memory_alloc_init((mach_vm_address_t)stackshotbuf, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshotbuf_size, KCFLAG_USE_MEMCOPY);
695 }
696
697
698 /*
699 * Disable interrupts and save the current interrupt state.
700 */
701 prev_interrupt_state = ml_set_interrupts_enabled(FALSE);
702
703 /*
704 * Load stackshot parameters.
705 */
706 kdp_snapshot_preflight(pid, stackshotbuf, stackshotbuf_size, flags, kcdata_p, enable_faulting);
707
708 /*
709 * Trap to the debugger to obtain a stackshot (this will populate the buffer).
710 */
711 TRAP_DEBUGGER;
712
713 ml_set_interrupts_enabled(prev_interrupt_state);
714
715 /*
716 * If we didn't allocate a big enough buffer, deallocate and try again.
717 */
718 error = kdp_stack_snapshot_geterror();
719 if (error == -1) {
720 if (kcdata_p != NULL) {
721 kcdata_memory_destroy(kcdata_p);
722 kcdata_p = NULL;
723 stackshot_kcdata_p = NULL;
724 }
725 kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
726 stackshotbuf = NULL;
727 continue;
728 }
729
730 bytes_traced = kdp_stack_snapshot_bytes_traced();
731
732 if (bytes_traced <= 0) {
733 error = KERN_NOT_IN_SET;
734 goto error_exit;
735 }
736
737 assert(bytes_traced <= stackshotbuf_size);
738 if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
739 error = stackshot_remap_buffer(stackshotbuf, bytes_traced, out_buffer_addr, out_size_addr);
740 goto error_exit;
741 }
742
743 /*
744 * Save the stackshot in the kernel buffer.
745 */
746 kernel_stackshot_buf = stackshotbuf;
747 kernel_stackshot_buf_size = bytes_traced;
748 /*
749 * If we didn't use all the pages in the buffer, point buf_to_free at the first unused page
750 * after the end of the stackshot and set size_to_free so that the kmem_free below releases
751 * only that unused tail of the buffer.
752 */
753 size_to_free = stackshotbuf_size - (int) VM_MAP_ROUND_PAGE(bytes_traced, PAGE_MASK);
754
755 assert(size_to_free >= 0);
756
757 if (size_to_free != 0) {
758 buf_to_free = (void *)((uint64_t)stackshotbuf + stackshotbuf_size - size_to_free);
759 }
760
761 stackshotbuf = NULL;
762 stackshotbuf_size = 0;
763 goto error_exit;
764 }
765
766 if (stackshotbuf_size > SANE_TRACEBUF_SIZE) {
767 error = KERN_RESOURCE_SHORTAGE;
768 }
769
770error_exit:
771 if (kcdata_p != NULL) {
772 kcdata_memory_destroy(kcdata_p);
773 kcdata_p = NULL;
774 stackshot_kcdata_p = NULL;
775 }
776
777 if (stackshotbuf != NULL) {
778 kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
779 }
780 if (buf_to_free != NULL) {
781 kmem_free(kernel_map, (vm_offset_t)buf_to_free, size_to_free);
782 }
783 STACKSHOT_SUBSYS_UNLOCK();
784 return error;
785}
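
/*
 * Illustrative sketch (not compiled here) of the user-space side of the
 * config-based path above. The user-level entry point name
 * (stack_snapshot_with_config, per the comment above stack_snapshot2) and its
 * exact signature are assumptions; the sc_* fields are the ones
 * kern_stack_snapshot_internal() consumes. sc_pid of -1 requests all tasks,
 * sc_since_timestamp must currently be 0, and the kernel copies the remapped
 * buffer's address and size back out through sc_out_buffer_addr /
 * sc_out_size_addr.
 *
 *    stackshot_config_t config = { 0 };
 *    uint64_t out_buf = 0;
 *    uint32_t out_size = 0;
 *
 *    config.sc_pid = -1;
 *    config.sc_flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO;
 *    config.sc_since_timestamp = 0;
 *    config.sc_out_buffer_addr = (uint64_t)&out_buf;
 *    config.sc_out_size_addr = (uint64_t)&out_size;
 *
 *    int err = stack_snapshot_with_config(STACKSHOT_CONFIG_TYPE, &config,
 *        sizeof(config));
 */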
786
787/* Cache stack snapshot parameters in preparation for a trace */
788void
789kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags,
790 kcdata_descriptor_t data_p, boolean_t enable_faulting)
791{
792 stack_snapshot_pid = pid;
793 stack_snapshot_buf = tracebuf;
794 stack_snapshot_bufsize = tracebuf_size;
795 stack_snapshot_flags = flags;
796 stack_enable_faulting = enable_faulting;
797 if (data_p != NULL) {
798 stackshot_kcdata_p = data_p;
799 }
800 kdp_snapshot++;
801 /* Mark this debugger as active, since the polled mode driver that
802 * ordinarily does this may not be enabled (yet), or since KDB may be
803 * the primary debugger.
804 */
805 old_debugger = current_debugger;
806 if (old_debugger != KDP_CUR_DB) {
807 current_debugger = KDP_CUR_DB;
808 }
809}
810
811void
812kdp_snapshot_postflight(void)
813{
814 kdp_snapshot--;
815#if CONFIG_KDP_INTERACTIVE_DEBUGGING
816 if (
817 (kdp_en_send_pkt == NULL) || (old_debugger == KDB_CUR_DB))
818 current_debugger = old_debugger;
819#else
820 current_debugger = old_debugger;
821#endif
822}
823
824int
825kdp_stack_snapshot_geterror(void)
826{
827 return stack_snapshot_ret;
828}
829
830uint32_t
831kdp_stack_snapshot_bytes_traced(void)
832{
833 return stack_snapshot_bytes_traced;
834}
835
836static boolean_t memory_iszero(void *addr, size_t size)
837{
838 char *data = (char *)addr;
839 for (size_t i = 0; i < size; i++){
840 if (data[i] != 0)
841 return FALSE;
842 }
843 return TRUE;
844}
845
846static int
847kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced)
848{
849 /* convenience macros specific only for this function */
850#define kcd_end_address(kcd) ((void *)((uint64_t)((kcd)->kcd_addr_begin) + kcdata_memory_get_used_bytes((kcd))))
851#define kcd_max_address(kcd) ((void *)((kcd)->kcd_addr_begin + (kcd)->kcd_length))
852#define kcd_exit_on_error(action) \
853 do { \
854 if (KERN_SUCCESS != (error = (action))) { \
855 if (error == KERN_RESOURCE_SHORTAGE) { \
856 error = -1; \
857 } \
858 goto error_exit; \
859 } \
860 } while (0); /* end kcd_exit_on_error */
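
/*
 * Note: kcd_exit_on_error() converts KERN_RESOURCE_SHORTAGE into -1 before
 * jumping to error_exit; kern_stack_snapshot_internal() treats a -1 from
 * kdp_stack_snapshot_geterror() as "buffer too small" and retries with a
 * buffer twice the size.
 */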
861
862 int error = 0;
863 mach_vm_address_t out_addr = 0;
864 uint64_t abs_time;
865 struct task_snapshot_v2 *cur_tsnap;
866 uint64_t system_state_flags = 0;
867 int saved_count = 0;
868 task_t task = TASK_NULL;
869 thread_t thread = THREAD_NULL;
870 mach_timebase_info_data_t timebase = {0, 0};
871 uint64_t microsecs = 0, secs = 0;
872 uint32_t length_to_copy, tmp32;
873
874 abs_time = mach_absolute_time();
875 clock_get_calendar_microtime((clock_sec_t*)&secs, (clock_usec_t*)&microsecs);
876
877 /* process the flags */
878 boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
879 boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
880 boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
881 boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);
882 boolean_t save_donating_pids_p = ((trace_flags & STACKSHOT_SAVE_IMP_DONATION_PIDS) != 0);
883
884 if (sizeof(void *) == 8)
885 system_state_flags |= kKernel64_p;
886
887 if (stackshot_kcdata_p == NULL || pBytesTraced == NULL) {
888 error = -1;
889 goto error_exit;
890 }
891
892 /* begin saving data into the buffer */
893 *pBytesTraced = 0;
894 kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, trace_flags, "stackshot_in_flags"));
895 kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, (uint32_t)pid, "stackshot_in_pid"));
896 kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, system_state_flags, "system_state_flags"));
897 tmp32 = PAGE_SIZE;
898 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_KERN_PAGE_SIZE, sizeof(uint32_t), &out_addr));
899 memcpy((void *)out_addr, &tmp32, sizeof(tmp32));
900
901#if CONFIG_JETSAM
902 tmp32 = memorystatus_get_pressure_status_kdp();
903 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL, sizeof(uint32_t), &out_addr));
904 memcpy((void *)out_addr, &tmp32, sizeof(tmp32));
905#endif
906
907 /* save boot-args and osversion string */
908 length_to_copy = MIN((uint32_t)(strlen(version) + 1), OSVERSIZE);
909 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, &out_addr));
910 strlcpy((char*)out_addr, &version[0], length_to_copy);
911
912 length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), OSVERSIZE);
913 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, &out_addr));
914 strlcpy((char*)out_addr, PE_boot_args(), length_to_copy);
915
916 /* setup mach_absolute_time and timebase info */
917 clock_timebase_info(&timebase);
918 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_TIMEBASE, sizeof(timebase), &out_addr));
919 memcpy((void *)out_addr, &timebase, sizeof(timebase));
920
921 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &out_addr));
922 memcpy((void *)out_addr, &abs_time, sizeof(uint64_t));
923
924 microsecs = microsecs + (secs * USECSPERSEC);
925 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_USECS_SINCE_EPOCH, sizeof(uint64_t), &out_addr));
926 memcpy((void *)out_addr, &microsecs, sizeof(uint64_t));
927
928 /* reserve space for the system-level shared cache load info */
929 struct dyld_uuid_info_64 *sys_shared_cache_loadinfo;
930 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(kernel_uuid_info), &out_addr));
931 sys_shared_cache_loadinfo = (struct dyld_uuid_info_64 *)out_addr;
932 bzero((void *)sys_shared_cache_loadinfo, sizeof(struct dyld_uuid_info_64));
933
934 /* Add requested information first */
935 if (trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
936 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_GLOBAL_MEM_STATS, sizeof(struct mem_and_io_snapshot), &out_addr));
937 kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)out_addr);
938 }
939
940 /* Iterate over tasks */
941 queue_head_t *task_list = &tasks;
942 queue_iterate(task_list, task, task_t, tasks) {
943 int task_pid;
944 if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
945 goto error_exit;
946
947 task_pid = pid_from_task(task);
948 if (!task->active) {
949 /*
950 * Not interested in terminated tasks without threads, and
951 * at the moment, stackshot can't handle a task without a name.
952 */
953 if (queue_empty(&task->threads) || task_pid == -1) {
954 continue;
955 }
956 }
957
958 /* Trace everything, unless a process was specified */
959 if ((pid == -1) || (pid == task_pid)) {
960
961 uint64_t task_uniqueid = proc_uniqueid_from_task(task);
962 boolean_t task64 = task_has_64BitAddr(task);
963 boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
964 boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
965
966 /* add task snapshot marker */
967 kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_TASK, task_uniqueid));
968
969 /* add task_snapshot_v2 struct data */
970 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_TASK_SNAPSHOT, sizeof(struct task_snapshot_v2), &out_addr));
971 cur_tsnap = (struct task_snapshot_v2 *)out_addr;
972 bzero(cur_tsnap, sizeof(struct task_snapshot_v2));
973
974 cur_tsnap->ts_pid = task_pid;
975 cur_tsnap->ts_unique_pid = task_uniqueid;
976
977 /* Add the BSD process identifiers */
978 if (task_pid != -1 && task->bsd_info != NULL)
979 proc_name_kdp(task, cur_tsnap->ts_p_comm, sizeof(cur_tsnap->ts_p_comm));
980 else {
981 cur_tsnap->ts_p_comm[0] = '\0';
982#if IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG)
983 if (task->task_imp_base != NULL) {
984 strlcpy(cur_tsnap->ts_p_comm, &task->task_imp_base->iit_procname[0],
985 MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm)));
986 }
987#endif
988 }
989
990 if (task64)
991 cur_tsnap->ts_ss_flags |= kUser64_p;
992 if (!task->active || task_is_a_corpse(task))
993 cur_tsnap->ts_ss_flags |= kTerminatedSnapshot;
994 if (task->pidsuspended)
995 cur_tsnap->ts_ss_flags |= kPidSuspended;
996 if (task->frozen)
997 cur_tsnap->ts_ss_flags |= kFrozen;
998 if (task->effective_policy.darwinbg == 1)
999 cur_tsnap->ts_ss_flags |= kTaskDarwinBG;
1000 if (task->requested_policy.t_role == TASK_FOREGROUND_APPLICATION)
1001 cur_tsnap->ts_ss_flags |= kTaskIsForeground;
1002 if (task->requested_policy.t_boosted == 1)
1003 cur_tsnap->ts_ss_flags |= kTaskIsBoosted;
1004 if (task->effective_policy.t_sup_active == 1)
1005 cur_tsnap->ts_ss_flags |= kTaskIsSuppressed;
1006
1007#if IMPORTANCE_INHERITANCE
1008 if (task->task_imp_base) {
1009 if (task->task_imp_base->iit_donor)
1010 cur_tsnap->ts_ss_flags |= kTaskIsImpDonor;
1011 if (task->task_imp_base->iit_live_donor)
1012 cur_tsnap->ts_ss_flags |= kTaskIsLiveImpDonor;
1013 }
1014#endif
1015
1016 cur_tsnap->ts_latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
1017 LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);
1018 cur_tsnap->ts_suspend_count = task->suspend_count;
1019 cur_tsnap->ts_p_start_sec = 0;
1020 proc_starttime_kdp(task->bsd_info, &cur_tsnap->ts_p_start_sec, NULL);
1021
1022 cur_tsnap->ts_task_size = have_pmap ? (pmap_resident_count(task->map->pmap) * PAGE_SIZE) : 0;
1023 cur_tsnap->ts_max_resident_size = get_task_resident_max(task);
1024 cur_tsnap->ts_faults = task->faults;
1025 cur_tsnap->ts_pageins = task->pageins;
1026 cur_tsnap->ts_cow_faults = task->cow_faults;
1027 cur_tsnap->ts_user_time_in_terminated_threads = task->total_user_time;
1028 cur_tsnap->ts_system_time_in_terminated_threads = task->total_system_time;
1029 cur_tsnap->ts_was_throttled = (uint32_t) proc_was_throttled_from_task(task);
1030 cur_tsnap->ts_did_throttle = (uint32_t) proc_did_throttle_from_task(task);
1031
1032 /* Check for shared cache information */
1033 do {
1034 uint8_t shared_cache_identifier[16];
1035 uint64_t shared_cache_slide;
1036 uint64_t shared_cache_base_address = 0;
1037 boolean_t found_shared_cache_info = TRUE;
1038
1039 if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region, sizeof(struct vm_shared_region))) {
1040 struct vm_shared_region *sr = task->shared_region;
1041 shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping;
1042 }
1043
1044 if (!shared_cache_base_address ||
1045 !kdp_copyin(task->map->pmap, shared_cache_base_address + offsetof(struct _dyld_cache_header, uuid), shared_cache_identifier, sizeof(shared_cache_identifier))
1046 ) {
1047 found_shared_cache_info = FALSE;
1048 }
1049
1050 if (task->shared_region) {
1051 /*
1052 * No refcounting here, but we are in debugger
1053 * context, so that should be safe.
1054 */
1055 shared_cache_slide = task->shared_region->sr_slide_info.slide;
1056 } else {
1057 shared_cache_slide = 0;
1058 }
1059
1060 if (found_shared_cache_info == FALSE)
1061 break;
1062
1063 if (task_pid == 1) {
1064 /* save launchd's shared cache info as system level */
1065 bcopy(shared_cache_identifier, sys_shared_cache_loadinfo->imageUUID, sizeof(sys_shared_cache_loadinfo->imageUUID));
1066 sys_shared_cache_loadinfo->imageLoadAddress = shared_cache_slide;
1067 break;
1068 } else {
1069 if (shared_cache_slide == sys_shared_cache_loadinfo->imageLoadAddress &&
1070 0 == memcmp(shared_cache_identifier, sys_shared_cache_loadinfo->imageUUID, sizeof(sys_shared_cache_loadinfo->imageUUID))) {
1071 /* skip adding shared cache info; it's the same as the system-level one */
1072 break;
1073 }
1074 }
1075
1076 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64), &out_addr));
1077 struct dyld_uuid_info_64 *shared_cache_data = (struct dyld_uuid_info_64 *)out_addr;
1078 shared_cache_data->imageLoadAddress = shared_cache_slide;
1079 bcopy(shared_cache_identifier, shared_cache_data->imageUUID, sizeof(shared_cache_data->imageUUID));
1080
1081 } while(0);
1082
1083 /* I/O Statistics if any counters are non zero */
1084 assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
1085 if (task->task_io_stats && !memory_iszero(task->task_io_stats, sizeof(struct io_stat_info))) {
1086 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr));
1087 struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr;
1088 _iostat->ss_disk_reads_count = task->task_io_stats->disk_reads.count;
1089 _iostat->ss_disk_reads_size = task->task_io_stats->disk_reads.size;
1090 _iostat->ss_disk_writes_count = (task->task_io_stats->total_io.count - task->task_io_stats->disk_reads.count);
1091 _iostat->ss_disk_writes_size = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1092 _iostat->ss_paging_count = task->task_io_stats->paging.count;
1093 _iostat->ss_paging_size = task->task_io_stats->paging.size;
1094 _iostat->ss_non_paging_count = (task->task_io_stats->total_io.count - task->task_io_stats->paging.count);
1095 _iostat->ss_non_paging_size = (task->task_io_stats->total_io.size - task->task_io_stats->paging.size);
1096 _iostat->ss_metadata_count = task->task_io_stats->metadata.count;
1097 _iostat->ss_metadata_size = task->task_io_stats->metadata.size;
1098 _iostat->ss_data_count = (task->task_io_stats->total_io.count - task->task_io_stats->metadata.count);
1099 _iostat->ss_data_size = (task->task_io_stats->total_io.size - task->task_io_stats->metadata.size);
1100 for(int i = 0; i < IO_NUM_PRIORITIES; i++) {
1101 _iostat->ss_io_priority_count[i] = task->task_io_stats->io_priority[i].count;
1102 _iostat->ss_io_priority_size[i] = task->task_io_stats->io_priority[i].size;
1103 }
1104 }
1105
1106#if IMPORTANCE_INHERITANCE
1107 if (save_donating_pids_p) {
1108 kcd_exit_on_error(((((mach_vm_address_t) kcd_end_address(stackshot_kcdata_p) + (TASK_IMP_WALK_LIMIT * sizeof(int32_t)))
1109 < (mach_vm_address_t) kcd_max_address(stackshot_kcdata_p)) ? KERN_SUCCESS : KERN_RESOURCE_SHORTAGE));
1110 saved_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS, (void *)kcd_end_address(stackshot_kcdata_p), TASK_IMP_WALK_LIMIT);
1111 if (saved_count > 0)
1112 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STASKSHOT_KCTYPE_DONATING_PIDS, sizeof(int32_t), saved_count, &out_addr));
1113 }
1114#endif
1115
1116 /* place load info and libraries now */
1117 uint32_t uuid_info_count = 0;
1118 mach_vm_address_t uuid_info_addr = 0;
1119 if (save_loadinfo_p && have_pmap && task->active && task_pid > 0) {
1120 /* Read the dyld_all_image_infos struct from the task memory to get UUID array count and location */
1121 if (task64) {
1122 struct user64_dyld_all_image_infos task_image_infos;
1123 if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
1124 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1125 uuid_info_addr = task_image_infos.uuidArray;
1126 }
1127 } else {
1128 struct user32_dyld_all_image_infos task_image_infos;
1129 if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
1130 uuid_info_count = task_image_infos.uuidArrayCount;
1131 uuid_info_addr = task_image_infos.uuidArray;
1132 }
1133 }
1134
1135 /*
1136 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
1137 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
1138 * for this task.
1139 */
1140 if (!uuid_info_addr) {
1141 uuid_info_count = 0;
1142 }
1143 }
1144
1145 if (have_pmap && task_pid == 0) {
1146 if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
1147 uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
1148 } else {
1149 uuid_info_count = 1; /* at least include the kernel UUID */
1150 }
1151 }
1152
1153 if (task_pid > 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) {
1154 uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1155 uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
1156
1157 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
1158 (task64 ? KCDATA_TYPE_LIBRARY_LOADINFO64 : KCDATA_TYPE_LIBRARY_LOADINFO),
1159 uuid_info_size,
1160 uuid_info_count,
1161 &out_addr));
1162
1163
1164 /* Copy in the UUID info array
1165 * It may be nonresident, in which case just zero out the space we reserved for it
1166 */
1167 if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, (void *)out_addr, uuid_info_array_size)) {
1168 bzero((void *)out_addr, uuid_info_array_size);
1169 }
1170
1171 } else if (task_pid == 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) {
1172 uintptr_t image_load_address;
1173
1174 do {
1175 if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
1176 /* Kernel UUID not found or inaccessible */
1177 break;
1178 }
1179 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
1180 (sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info))? KCDATA_TYPE_LIBRARY_LOADINFO64: KCDATA_TYPE_LIBRARY_LOADINFO,
1181 sizeof(kernel_uuid_info), uuid_info_count, &out_addr)
1182 );
1183 kernel_uuid_info *uuid_info_array = (kernel_uuid_info *)out_addr;
1184 image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
1185 uuid_info_array[0].imageLoadAddress = image_load_address;
1186 memcpy(&uuid_info_array[0].imageUUID, kernel_uuid, sizeof(uuid_t));
1187
1188 if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
1189 gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
1190 uint32_t kexti;
1191 for (kexti=0 ; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
1192 image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
1193 uuid_info_array[kexti + 1].imageLoadAddress = image_load_address;
1194 memcpy(&uuid_info_array[kexti + 1].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
1195 }
1196 }
1197 } while(0);
1198 }
1199
1200 /* Iterate over task threads */
1201 queue_iterate(&task->threads, thread, thread_t, task_threads){
1202 uint64_t tval;
1203 uint64_t thread_uniqueid = 0;
1204 char cur_thread_name[STACKSHOT_MAX_THREAD_NAME_SIZE];
1205
1206 if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
1207 goto error_exit;
1208
1209 if (!save_userframes_p && thread->kernel_stack == 0)
1210 continue;
1211
1212 thread_uniqueid = thread_tid(thread);
1213
1214 /* add thread marker */
1215 kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid));
1216 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_SNAPSHOT, sizeof(struct thread_snapshot_v2), &out_addr));
1217 struct thread_snapshot_v2 * cur_thread_snap = (struct thread_snapshot_v2 *)out_addr;
1218
1219 /* Populate the thread snapshot header */
1220 cur_thread_snap->ths_thread_id = thread_uniqueid;
1221 cur_thread_snap->ths_state = thread->state;
1222 cur_thread_snap->ths_ss_flags = 0;
1223 cur_thread_snap->ths_base_priority = thread->base_pri;
1224 cur_thread_snap->ths_sched_priority = thread->sched_pri;
1225 cur_thread_snap->ths_sched_flags = thread->sched_flags;
1226 cur_thread_snap->ths_wait_event = VM_KERNEL_UNSLIDE_OR_PERM(thread->wait_event);
1227 cur_thread_snap->ths_continuation = VM_KERNEL_UNSLIDE(thread->continuation);
1228 cur_thread_snap->ths_last_run_time = thread->last_run_time;
1229 cur_thread_snap->ths_last_made_runnable_time = thread->last_made_runnable_time;
1230 cur_thread_snap->ths_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
1231 cur_thread_snap->ths_eqos = thread->effective_policy.thep_qos;
1232 cur_thread_snap->ths_rqos = thread->requested_policy.thrp_qos;
1233 cur_thread_snap->ths_rqos_override = thread->requested_policy.thrp_qos_override;
1234 cur_thread_snap->ths_total_syscalls = thread->syscalls_mach + thread->syscalls_unix;
1235 cur_thread_snap->ths_dqserialnum = 0;
1236
1237 tval = safe_grab_timer_value(&thread->user_timer);
1238 cur_thread_snap->ths_user_time = tval;
1239 tval = safe_grab_timer_value(&thread->system_timer);
1240
1241 if (thread->precise_user_kernel_time) {
1242 cur_thread_snap->ths_sys_time = tval;
1243 } else {
1244 cur_thread_snap->ths_user_time += tval;
1245 cur_thread_snap->ths_sys_time = 0;
1246 }
1247
1248 if (thread->effective_policy.darwinbg)
1249 cur_thread_snap->ths_ss_flags |= kThreadDarwinBG;
1250 if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO))
1251 cur_thread_snap->ths_ss_flags |= kThreadIOPassive;
1252 if (thread->suspend_count > 0)
1253 cur_thread_snap->ths_ss_flags |= kThreadSuspended;
1254
1255 if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
1256 cur_thread_snap->ths_ss_flags |= kGlobalForcedIdle;
1257 }
1258
1259 if (IPC_VOUCHER_NULL != thread->ith_voucher)
1260 cur_thread_snap->ths_voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher);
1261 if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
1262 uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
1263 if (dqkeyaddr != 0) {
1264 uint64_t dqaddr = 0;
1265 if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
1266 uint64_t dqserialnumaddr = dqaddr + proc_dispatchqueue_serialno_offset_from_task(task);
1267 uint64_t dqserialnum = 0;
1268 if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
1269 cur_thread_snap->ths_ss_flags |= kHasDispatchSerial;
1270 cur_thread_snap->ths_dqserialnum = dqserialnum;
1271 }
1272 }
1273 }
1274 }
1275
1276 /* if there is thread name then add to buffer */
1277 cur_thread_name[0] = '\0';
1278 proc_threadname_kdp(thread->uthread, cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE);
1279 if (strnlen(cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE) > 0) {
1280 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_NAME, sizeof(cur_thread_name), &out_addr));
1281 bcopy((void *)cur_thread_name, (void *)out_addr, sizeof(cur_thread_name));
1282 }
1283
1284 /* I/O Statistics */
1285 assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
1286 if (thread->thread_io_stats && !memory_iszero(thread->thread_io_stats, sizeof(struct io_stat_info))) {
1287 kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr));
1288 struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr;
1289 _iostat->ss_disk_reads_count = thread->thread_io_stats->disk_reads.count;
1290 _iostat->ss_disk_reads_size = thread->thread_io_stats->disk_reads.size;
1291 _iostat->ss_disk_writes_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->disk_reads.count);
1292 _iostat->ss_disk_writes_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->disk_reads.size);
1293 _iostat->ss_paging_count = thread->thread_io_stats->paging.count;
1294 _iostat->ss_paging_size = thread->thread_io_stats->paging.size;
1295 _iostat->ss_non_paging_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->paging.count);
1296 _iostat->ss_non_paging_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->paging.size);
1297 _iostat->ss_metadata_count = thread->thread_io_stats->metadata.count;
1298 _iostat->ss_metadata_size = thread->thread_io_stats->metadata.size;
1299 _iostat->ss_data_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->metadata.count);
1300 _iostat->ss_data_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->metadata.size);
1301 for(int i = 0; i < IO_NUM_PRIORITIES; i++) {
1302 _iostat->ss_io_priority_count[i] = thread->thread_io_stats->io_priority[i].count;
1303 _iostat->ss_io_priority_size[i] = thread->thread_io_stats->io_priority[i].size;
1304 }
1305 }
1306
1307 /* Trace user stack, if any */
1308 if (save_userframes_p && task->active && thread->task->map != kernel_map) {
1309 uint32_t thread_snapshot_flags = 0;
1310 /* 64-bit task? */
1311 if (task_has_64BitAddr(thread->task)) {
1312 out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
1313 saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, TRUE, &thread_snapshot_flags);
1314 if (saved_count > 0) {
1315 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
1316 STACKSHOT_KCTYPE_USER_STACKFRAME64,
1317 sizeof(struct stack_snapshot_frame64),
1318 saved_count/sizeof(struct stack_snapshot_frame64),
1319 &out_addr));
1320 cur_thread_snap->ths_ss_flags |= kUser64_p;
1321 }
1322 }
1323 else {
1324 out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
1325 saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, TRUE, &thread_snapshot_flags);
1326 if (saved_count > 0) {
1327 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
1328 STACKSHOT_KCTYPE_USER_STACKFRAME,
1329 sizeof(struct stack_snapshot_frame32),
1330 saved_count/sizeof(struct stack_snapshot_frame32),
1331 &out_addr));
1332 }
1333 }
1334
1335 if (thread_snapshot_flags != 0) {
1336 cur_thread_snap->ths_ss_flags |= thread_snapshot_flags;
1337 }
1338 }
1339
1340 /* Call through to the machine specific trace routines
1341 * Frames are added past the snapshot header.
1342 */
1343 if (thread->kernel_stack != 0) {
1344 uint32_t thread_snapshot_flags = 0;
1345#if defined(__LP64__)
1346 out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
1347 saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, FALSE, &thread_snapshot_flags);
1348 if (saved_count > 0){
1349 cur_thread_snap->ths_ss_flags |= kKernel64_p;
1350 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
1351 STACKSHOT_KCTYPE_KERN_STACKFRAME64,
1352 sizeof(struct stack_snapshot_frame64),
1353 saved_count/sizeof(struct stack_snapshot_frame64),
1354 &out_addr));
1355 }
1356#else
1357 out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
1358 saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, FALSE, &thread_snapshot_flags);
1359 if (saved_count > 0) {
1360 kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
1361 STACKSHOT_KCTYPE_KERN_STACKFRAME,
1362 sizeof(struct stack_snapshot_frame32),
1363 saved_count/sizeof(struct stack_snapshot_frame32),
1364 &out_addr));
1365 }
1366#endif
1367 if (thread_snapshot_flags != 0) {
1368 cur_thread_snap->ths_ss_flags |= thread_snapshot_flags;
1369 }
1370 }
1371 /* mark end of thread snapshot data */
1372 kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid));
1373 }
1374 /* mark end of task snapshot data */
1375 kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK, task_uniqueid));
1376 }
1377 }
1378
1379 /* === END of populating stackshot data === */
1380
1381 *pBytesTraced = (uint32_t) kcdata_memory_get_used_bytes(stackshot_kcdata_p);
1382error_exit:
1383 /* Release stack snapshot wait indicator */
1384 kdp_snapshot_postflight();
1385
1386 return error;
1387}
1388
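/*
 * Legacy (pre-kcdata) stackshot writer: lays fixed task_snapshot /
 * thread_snapshot structures and their associated records directly into the
 * trace buffer. kdp_stackshot_kcdata_format() above is the kcdata-based
 * replacement, used when the STACKSHOT_KCDATA_FORMAT flag is set.
 */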
1389static int
1390kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t *pbytesTraced)
1391{
1392 char *tracepos = (char *) tracebuf;
1393 char *tracebound = tracepos + tracebuf_size;
1394 uint32_t tracebytes = 0;
1395 int error = 0, i;
1396
1397 task_t task = TASK_NULL;
1398 thread_t thread = THREAD_NULL;
1399 unsigned framesize = 2 * sizeof(vm_offset_t);
1400
1401 queue_head_t *task_list = &tasks;
1402 boolean_t is_active_list = TRUE;
1403
1404 boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
1405 boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
1406 boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
1407 boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);
1408 boolean_t save_donating_pids_p = ((trace_flags & STACKSHOT_SAVE_IMP_DONATION_PIDS) != 0);
1409
1410 if(trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
1411 if(tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
1412 error = -1;
1413 goto error_exit;
1414 }
1415 kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
1416 tracepos += sizeof(struct mem_and_io_snapshot);
1417 }
1418
1419
1420walk_list:
1421 queue_iterate(task_list, task, task_t, tasks) {
1422 if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
1423 goto error_exit;
1424
1425 int task_pid = pid_from_task(task);
1426 uint64_t task_uniqueid = proc_uniqueid_from_task(task);
1427 boolean_t task64 = task_has_64BitAddr(task);
1428
1429 if (!task->active || task_is_a_corpse(task)) {
1430 /*
1431 * Not interested in terminated tasks without threads, and
1432 * at the moment, stackshot can't handle a task without a name.
1433 */
1434 if (queue_empty(&task->threads) || task_pid == -1) {
1435 continue;
1436 }
1437 }
1438
1439 /* Trace everything, unless a process was specified */
1440 if ((pid == -1) || (pid == task_pid)) {
1441 task_snapshot_t task_snap;
1442 thread_snapshot_t tsnap = NULL;
1443 uint32_t uuid_info_count = 0;
1444 mach_vm_address_t uuid_info_addr = 0;
1445 boolean_t have_map = (task->map != NULL) &&
1446 (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
1447 boolean_t have_pmap = have_map && (task->map->pmap != NULL) &&
1448 (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
1449 uint64_t shared_cache_base_address = 0;
1450
1451 if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
1452 // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
1453 if (task64) {
1454 struct user64_dyld_all_image_infos task_image_infos;
1455 if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
1456 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
1457 uuid_info_addr = task_image_infos.uuidArray;
1458 }
1459 } else {
1460 struct user32_dyld_all_image_infos task_image_infos;
1461 if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
1462 uuid_info_count = task_image_infos.uuidArrayCount;
1463 uuid_info_addr = task_image_infos.uuidArray;
1464 }
1465 }
1466
1467 // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
1468 // this data structure), we zero the uuid_info_count so that we won't even try to save load info
1469 // for this task.
1470 if (!uuid_info_addr) {
1471 uuid_info_count = 0;
1472 }
1473 }
1474
1475 if (have_pmap && task_pid == 0) {
1476 if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
 1477 uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
 1478 } else {
 1479 uuid_info_count = 1; /* at least include the kernel UUID */
1480 }
1481 }
1482
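 /*
  * Reserve space for the fixed-size task snapshot header, then fill it
  * in from the task structure and its effective/requested policies.
  */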
1483 if (tracepos + sizeof(struct task_snapshot) > tracebound) {
1484 error = -1;
1485 goto error_exit;
1486 }
1487
1488 task_snap = (task_snapshot_t) tracepos;
1489 task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
1490 task_snap->pid = task_pid;
1491 task_snap->uniqueid = task_uniqueid;
1492 task_snap->nloadinfos = uuid_info_count;
1493 task_snap->donating_pid_count = 0;
1494
1495 /* Add the BSD process identifiers */
1496 if (task_pid != -1)
1497 proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
1498 else
1499 task_snap->p_comm[0] = '\0';
1500 task_snap->ss_flags = 0;
1501 if (task64)
1502 task_snap->ss_flags |= kUser64_p;
1503 if (task64 && task_pid == 0)
1504 task_snap->ss_flags |= kKernel64_p;
 1505 if (!task->active || task_is_a_corpse(task))
1506 task_snap->ss_flags |= kTerminatedSnapshot;
1507 if(task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
1508 if(task->frozen) task_snap->ss_flags |= kFrozen;
1509
1510 if (task->effective_policy.darwinbg == 1) {
1511 task_snap->ss_flags |= kTaskDarwinBG;
1512 }
1513
1514 if (task->requested_policy.t_role == TASK_FOREGROUND_APPLICATION) {
1515 task_snap->ss_flags |= kTaskIsForeground;
1516 }
1517
1518 if (task->requested_policy.t_boosted == 1) {
1519 task_snap->ss_flags |= kTaskIsBoosted;
1520 }
1521
1522 if (task->effective_policy.t_sup_active == 1)
1523 task_snap->ss_flags |= kTaskIsSuppressed;
1524#if IMPORTANCE_INHERITANCE
1525 if (task->task_imp_base) {
1526 if (task->task_imp_base->iit_donor) {
1527 task_snap->ss_flags |= kTaskIsImpDonor;
 1528 }
1529
1530 if (task->task_imp_base->iit_live_donor) {
1531 task_snap->ss_flags |= kTaskIsLiveImpDonor;
1532 }
1533 }
1534#endif
1535
1536 task_snap->latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
1537 LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);
1538
1539 task_snap->suspend_count = task->suspend_count;
1540 task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
1541 task_snap->faults = task->faults;
1542 task_snap->pageins = task->pageins;
1543 task_snap->cow_faults = task->cow_faults;
1544
1545 task_snap->user_time_in_terminated_threads = task->total_user_time;
1546 task_snap->system_time_in_terminated_threads = task->total_system_time;
1547 /*
1548 * The throttling counters are maintained as 64-bit counters in the proc
1549 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
1550 * struct to save space and since we do not expect them to overflow 32-bits. If we
1551 * find these values overflowing in the future, the fix would be to simply
1552 * upgrade these counters to 64-bit in the task_snapshot struct
1553 */
1554 task_snap->was_throttled = (uint32_t) proc_was_throttled_from_task(task);
1555 task_snap->did_throttle = (uint32_t) proc_did_throttle_from_task(task);
1556
1557 /* fetch some useful BSD info: */
1558 task_snap->p_start_sec = task_snap->p_start_usec = 0;
1559 proc_starttime_kdp(task->bsd_info, &task_snap->p_start_sec, &task_snap->p_start_usec);
1560 if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region,
1561 sizeof(struct vm_shared_region))) {
1562 struct vm_shared_region *sr = task->shared_region;
1563
1564 shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping;
1565 }
1566 if (!shared_cache_base_address
1567 || !kdp_copyin(task->map->pmap, shared_cache_base_address + offsetof(struct _dyld_cache_header, uuid), task_snap->shared_cache_identifier, sizeof(task_snap->shared_cache_identifier))) {
1568 memset(task_snap->shared_cache_identifier, 0x0, sizeof(task_snap->shared_cache_identifier));
1569 }
1570 if (task->shared_region) {
1571 /*
1572 * No refcounting here, but we are in debugger
1573 * context, so that should be safe.
1574 */
1575 task_snap->shared_cache_slide = task->shared_region->sr_slide_info.slide;
1576 } else {
1577 task_snap->shared_cache_slide = 0;
1578 }
1579
1580 /* I/O Statistics */
1581 assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
1582
1583 if (task->task_io_stats) {
1584 task_snap->disk_reads_count = task->task_io_stats->disk_reads.count;
1585 task_snap->disk_reads_size = task->task_io_stats->disk_reads.size;
1586 task_snap->disk_writes_count = (task->task_io_stats->total_io.count - task->task_io_stats->disk_reads.count);
1587 task_snap->disk_writes_size = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1588 for(i = 0; i < IO_NUM_PRIORITIES; i++) {
1589 task_snap->io_priority_count[i] = task->task_io_stats->io_priority[i].count;
1590 task_snap->io_priority_size[i] = task->task_io_stats->io_priority[i].size;
1591 }
1592 task_snap->paging_count = task->task_io_stats->paging.count;
1593 task_snap->paging_size = task->task_io_stats->paging.size;
1594 task_snap->non_paging_count = (task->task_io_stats->total_io.count - task->task_io_stats->paging.count);
1595 task_snap->non_paging_size = (task->task_io_stats->total_io.size - task->task_io_stats->paging.size);
1596 task_snap->metadata_count = task->task_io_stats->metadata.count;
1597 task_snap->metadata_size = task->task_io_stats->metadata.size;
1598 task_snap->data_count = (task->task_io_stats->total_io.count - task->task_io_stats->metadata.count);
1599 task_snap->data_size = (task->task_io_stats->total_io.size - task->task_io_stats->metadata.size);
1600 } else {
1601 /* zero from disk_reads_count to end of structure */
1602 memset(&task_snap->disk_reads_count, 0, offsetof(struct task_snapshot, metadata_size) - offsetof(struct task_snapshot, disk_reads_count));
1603 }
1604 tracepos += sizeof(struct task_snapshot);
1605
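 /*
  * Append the dyld load-info (UUID) array right after the task snapshot.
  * User tasks have it copied in from their address space; the kernel
  * task (pid 0) gets the kernel UUID plus, when requested, one entry per
  * loaded kext summary.
  */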
1606 if (task_pid > 0 && uuid_info_count > 0) {
1607 uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
1608 uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
1609
1610 if (tracepos + uuid_info_array_size > tracebound) {
1611 error = -1;
1612 goto error_exit;
1613 }
1614
1615 // Copy in the UUID info array
1616 // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
1617 if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
1618 task_snap->nloadinfos = 0;
1619 else
1620 tracepos += uuid_info_array_size;
1621 } else if (task_pid == 0 && uuid_info_count > 0) {
1622 uint32_t uuid_info_size = (uint32_t)sizeof(kernel_uuid_info);
1623 uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
1624 uint32_t uuid_offset = offsetof(kernel_uuid_info, imageUUID);
1625 uintptr_t image_load_address;
1626
1627 if (tracepos + uuid_info_array_size > tracebound) {
1628 error = -1;
1629 goto error_exit;
1630 }
1631
1632 do {
1633
1634 if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
1635 /* Kernel UUID not found or inaccessible */
1636 task_snap->nloadinfos = 0;
1637 break;
1638 }
1639 image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
1640 memcpy(tracepos, &image_load_address, sizeof(uintptr_t));
1641 memcpy((tracepos + uuid_offset), kernel_uuid, sizeof(uuid_t));
1642 tracepos += uuid_info_size;
 1643
 1644 if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
1645 gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
1646 uint32_t kexti;
 1647 for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
1648 image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
1649 memcpy(tracepos, &image_load_address, sizeof(uintptr_t));
1650 memcpy((tracepos + uuid_offset), &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
1651 tracepos += uuid_info_size;
 1652 }
1653 } else {
1654 /* kext summary invalid, but kernel UUID was copied */
1655 task_snap->nloadinfos = 1;
1656 break;
1657 }
1658 } while(0);
1659 }
1660
1661 if (save_donating_pids_p) {
1662 if (tracepos + (TASK_IMP_WALK_LIMIT * sizeof(int32_t)) > tracebound) {
1663 error = -1;
1664 goto error_exit;
1665 }
1666
1667 task_snap->donating_pid_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS, tracepos, TASK_IMP_WALK_LIMIT);
1668 tracepos += sizeof(int) * task_snap->donating_pid_count;
1669 }
1670
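 /*
  * Snapshot every thread of this task: scheduling state, timer values,
  * I/O statistics, an optional dispatch queue serial number, and the
  * kernel (and optionally user) backtrace appended after the header.
  */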
1671 queue_iterate(&task->threads, thread, thread_t, task_threads){
1672 uint64_t tval;
1673
1674 if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
1675 goto error_exit;
1676
1677 if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
1678 error = -1;
1679 goto error_exit;
1680 }
1681 if (!save_userframes_p && thread->kernel_stack == 0)
1682 continue;
1683
1684 /* Populate the thread snapshot header */
1685 tsnap = (thread_snapshot_t) tracepos;
1686 tsnap->thread_id = thread_tid(thread);
1687 tsnap->state = thread->state;
 1688 tsnap->priority = thread->base_pri;
1689 tsnap->sched_pri = thread->sched_pri;
1690 tsnap->sched_flags = thread->sched_flags;
1691 tsnap->wait_event = VM_KERNEL_UNSLIDE_OR_PERM(thread->wait_event);
1692 tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
1693 tval = safe_grab_timer_value(&thread->user_timer);
1694 tsnap->user_time = tval;
1695 tval = safe_grab_timer_value(&thread->system_timer);
1696 if (thread->precise_user_kernel_time) {
1697 tsnap->system_time = tval;
1698 } else {
1699 tsnap->user_time += tval;
1700 tsnap->system_time = 0;
1701 }
1702 tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
1703 bzero(&tsnap->pth_name, STACKSHOT_MAX_THREAD_NAME_SIZE);
1704 proc_threadname_kdp(thread->uthread, &tsnap->pth_name[0], STACKSHOT_MAX_THREAD_NAME_SIZE);
1705 tracepos += sizeof(struct thread_snapshot);
1706 tsnap->ss_flags = 0;
1707 /* I/O Statistics */
1708 assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
1709 if (thread->thread_io_stats) {
1710 tsnap->disk_reads_count = thread->thread_io_stats->disk_reads.count;
1711 tsnap->disk_reads_size = thread->thread_io_stats->disk_reads.size;
1712 tsnap->disk_writes_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->disk_reads.count);
1713 tsnap->disk_writes_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->disk_reads.size);
1714 for(i = 0; i < IO_NUM_PRIORITIES; i++) {
1715 tsnap->io_priority_count[i] = thread->thread_io_stats->io_priority[i].count;
1716 tsnap->io_priority_size[i] = thread->thread_io_stats->io_priority[i].size;
1717 }
1718 tsnap->paging_count = thread->thread_io_stats->paging.count;
1719 tsnap->paging_size = thread->thread_io_stats->paging.size;
1720 tsnap->non_paging_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->paging.count);
1721 tsnap->non_paging_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->paging.size);
1722 tsnap->metadata_count = thread->thread_io_stats->metadata.count;
1723 tsnap->metadata_size = thread->thread_io_stats->metadata.size;
1724 tsnap->data_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->metadata.count);
1725 tsnap->data_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->metadata.size);
1726 } else {
1727 /* zero from disk_reads_count to end of structure */
1728 memset(&tsnap->disk_reads_count, 0,
1729 offsetof(struct thread_snapshot, metadata_size) - offsetof(struct thread_snapshot, disk_reads_count));
1730 }
1731
1732 if (thread->effective_policy.darwinbg) {
1733 tsnap->ss_flags |= kThreadDarwinBG;
1734 }
1735
1736 tsnap->io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
1737 if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
1738 tsnap->ss_flags |= kThreadIOPassive;
1739 }
1740
1741 if (thread->suspend_count > 0) {
1742 tsnap->ss_flags |= kThreadSuspended;
1743 }
1744
1745 if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
1746 tsnap->ss_flags |= kGlobalForcedIdle;
1747 }
1748
1749 if (IPC_VOUCHER_NULL != thread->ith_voucher) {
1750 tsnap->voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher);
1751 }
1752
1753 tsnap->ts_qos = thread->effective_policy.thep_qos;
1754 tsnap->ts_rqos = thread->requested_policy.thrp_qos;
1755 tsnap->ts_rqos_override = thread->requested_policy.thrp_qos_override;
1756 /* zero out unused data. */
1757 tsnap->_reserved[0] = 0;
1758 tsnap->_reserved[1] = 0;
1759 tsnap->_reserved[2] = 0;
1760 tsnap->total_syscalls = thread->syscalls_mach + thread->syscalls_unix;
1761
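 /*
  * Optionally record the thread's libdispatch queue serial number:
  * read the queue pointer at thread_dispatchqaddr(), then the serial
  * number at the proc-provided offset, appending it after the thread
  * snapshot only if both copies succeed.
  */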
1762 if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
1763 uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
1764 if (dqkeyaddr != 0) {
1765 uint64_t dqaddr = 0;
1766 if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
 1767 uint64_t dqserialnumaddr = dqaddr + proc_dispatchqueue_serialno_offset_from_task(task);
1768 uint64_t dqserialnum = 0;
1769 if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
1770 tsnap->ss_flags |= kHasDispatchSerial;
 1771 memcpy(tracepos, &dqserialnum, sizeof(dqserialnum));
1772 tracepos += 8;
1773 }
1774 }
1775 }
1776 }
1777/* Call through to the machine specific trace routines
1778 * Frames are added past the snapshot header.
1779 */
1780 tracebytes = 0;
1781 if (thread->kernel_stack != 0) {
 1782 uint32_t thread_snapshot_flags = 0;
 1783#if defined(__LP64__)
 1784 tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE, &thread_snapshot_flags);
1785 tsnap->ss_flags |= kKernel64_p;
1786 framesize = 16;
1787#else
 1788 tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE, &thread_snapshot_flags);
1789 framesize = 8;
1790#endif
1791 if (thread_snapshot_flags != 0) {
1792 tsnap->ss_flags |= thread_snapshot_flags;
1793 }
1794 }
1795 tsnap->nkern_frames = tracebytes/framesize;
1796 tracepos += tracebytes;
1797 tracebytes = 0;
1798 /* Trace user stack, if any */
1799 if (save_userframes_p && task->active && thread->task->map != kernel_map) {
 1800 uint32_t thread_snapshot_flags = 0;
1801 /* 64-bit task? */
1802 if (task_has_64BitAddr(thread->task)) {
 1803 tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE, &thread_snapshot_flags);
1804 tsnap->ss_flags |= kUser64_p;
1805 framesize = 16;
1806 }
1807 else {
 1808 tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE, &thread_snapshot_flags);
1809 framesize = 8;
1810 }
1811 if (thread_snapshot_flags != 0) {
1812 tsnap->ss_flags |= thread_snapshot_flags;
1813 }
1814 }
1815 tsnap->nuser_frames = tracebytes/framesize;
1816 tracepos += tracebytes;
1817 tracebytes = 0;
1818 }
1819
1820 if (!save_userframes_p && tsnap == NULL) {
1821 /*
 1822 * No thread info was collected because no thread had kernel frames,
 1823 * so roll back the task snapshot as well.
1824 */
1825 tracepos = (char *)task_snap;
1826 }
1827 }
1828 }
1829
1830 if (is_active_list) {
1831 is_active_list = FALSE;
1832 task_list = &terminated_tasks;
1833 goto walk_list;
1834 }
1835
1836error_exit:
1837 /* Release stack snapshot wait indicator */
1838 kdp_snapshot_postflight();
1839
1840 *pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);
1841
1842 return error;
1843}
1844
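/*
 * Helpers that read BSD proc fields for a task, returning a benign
 * default when the task has no bsd_info attached.
 */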
1845static int pid_from_task(task_t task)
1846{
1847 int pid = -1;
1848
 1849 if (task->bsd_info) {
 1850 pid = proc_pid(task->bsd_info);
1851 } else {
1852 pid = task_pid(task);
1853 }
1854
1855 return pid;
1856}
1857
1858static uint64_t
1859proc_uniqueid_from_task(task_t task)
1860{
1861 uint64_t uniqueid = ~(0ULL);
1862
1863 if (task->bsd_info)
1864 uniqueid = proc_uniqueid(task->bsd_info);
1865
1866 return uniqueid;
1867}
1868
1869static uint64_t
1870proc_was_throttled_from_task(task_t task)
1871{
1872 uint64_t was_throttled = 0;
1873
1874 if (task->bsd_info)
1875 was_throttled = proc_was_throttled(task->bsd_info);
1876
1877 return was_throttled;
1878}
1879
1880static uint64_t
1881proc_did_throttle_from_task(task_t task)
1882{
1883 uint64_t did_throttle = 0;
1884
1885 if (task->bsd_info)
1886 did_throttle = proc_did_throttle(task->bsd_info);
1887
1888 return did_throttle;
1889}
1890
1891static uint64_t
1892proc_dispatchqueue_serialno_offset_from_task(task_t task)
1893{
1894 uint64_t dq_serialno_offset = 0;
1895
1896 if (task->bsd_info) {
1897 dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
1898 }
1899
1900 return dq_serialno_offset;
1901}
1902
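/*
 * Fill in the global memory and I/O snapshot: per-CPU compressor
 * statistics are summed across the processor list, page counts come
 * from the VM globals, and the memory-pressure figures are taken from
 * mach_vm_pressure_monitor() when it succeeds.
 */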
1903static void
1904kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
1905{
1906 unsigned int pages_reclaimed;
1907 unsigned int pages_wanted;
1908 kern_return_t kErr;
1909
1910 processor_t processor;
1911 vm_statistics64_t stat;
1912 vm_statistics64_data_t host_vm_stat;
1913
1914 processor = processor_list;
1915 stat = &PROCESSOR_DATA(processor, vm_stat);
1916 host_vm_stat = *stat;
1917
1918 if (processor_count > 1) {
1919 /*
1920 * processor_list may be in the process of changing as we are
1921 * attempting a stackshot. Ordinarily it will be lock protected,
1922 * but it is not safe to lock in the context of the debugger.
1923 * Fortunately we never remove elements from the processor list,
 1924 * and only add to the end of the list, so we SHOULD be able
1925 * to walk it. If we ever want to truly tear down processors,
1926 * this will have to change.
1927 */
1928 while ((processor = processor->processor_list) != NULL) {
1929 stat = &PROCESSOR_DATA(processor, vm_stat);
1930 host_vm_stat.compressions += stat->compressions;
1931 host_vm_stat.decompressions += stat->decompressions;
1932 }
1933 }
1934
1935 memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
1936 memio_snap->free_pages = vm_page_free_count;
1937 memio_snap->active_pages = vm_page_active_count;
1938 memio_snap->inactive_pages = vm_page_inactive_count;
1939 memio_snap->purgeable_pages = vm_page_purgeable_count;
1940 memio_snap->wired_pages = vm_page_wire_count;
1941 memio_snap->speculative_pages = vm_page_speculative_count;
1942 memio_snap->throttled_pages = vm_page_throttled_count;
1943 memio_snap->busy_buffer_count = count_busy_buffers();
1944 memio_snap->filebacked_pages = vm_page_pageable_external_count;
1945 memio_snap->compressions = (uint32_t)host_vm_stat.compressions;
1946 memio_snap->decompressions = (uint32_t)host_vm_stat.decompressions;
1947 memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT;
1948 kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);
1949
1950 if ( ! kErr ) {
1951 memio_snap->pages_wanted = (uint32_t)pages_wanted;
1952 memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
1953 memio_snap->pages_wanted_reclaimed_valid = 1;
1954 } else {
1955 memio_snap->pages_wanted = 0;
1956 memio_snap->pages_reclaimed = 0;
1957 memio_snap->pages_wanted_reclaimed_valid = 0;
1958 }
1959}
1960
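/*
 * Copy 'size' bytes from a user address space into a kernel buffer
 * without taking faults: each page is translated with pmap_find_phys()
 * and copied physically (with a special case for the panic storage
 * area on some configurations), so the copy stops short and FALSE is
 * returned if a page cannot be translated. Typical use, as elsewhere
 * in this file:
 *
 *     kdp_copyin(task->map->pmap, task->all_image_info_addr,
 *                &task_image_infos, sizeof(task_image_infos));
 */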
1961boolean_t
1962kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
1963{
1964 size_t rem = size;
1965 char *kvaddr = dest;
1966
1967#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
1968 /* Identify if destination buffer is in panic storage area */
1969 if ((vm_offset_t)dest >= gPanicBase && (vm_offset_t)dest < gPanicBase + gPanicSize) {
1970 if (((vm_offset_t)dest + size) >= (gPanicBase + gPanicSize)) {
1971 return FALSE;
1972 }
1973 ppnum_t upn = pmap_find_phys(p, uaddr);
1974 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
1975 void *src_va = (void*)phystokv(phys_src);
1976 if (upn && pmap_valid_page(upn)) {
1977 bcopy(src_va, kvaddr, size);
1978 return TRUE;
1979 }
1980 return FALSE;
1981 }
1982#endif
1983
1984 while (rem) {
1985 ppnum_t upn = pmap_find_phys(p, uaddr);
1986 uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
1987 uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
1988 uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
1989 uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
1990 size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
1991 cur_size = MIN(cur_size, rem);
1992
1993 if (upn && pmap_valid_page(upn) && phys_dest) {
1994 bcopy_phys(phys_src, phys_dest, cur_size);
1995 }
1996 else
1997 break;
1998 uaddr += cur_size;
1999 kvaddr += cur_size;
 2000 rem -= cur_size;
2001 }
2002 return (rem == 0);
2003}
2004
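/*
 * Top-level stackshot worker: consumes the parameters staged in the
 * stack_snapshot_* globals and dispatches either to the kcdata-format
 * writer or to the legacy packed-struct writer, depending on whether
 * STACKSHOT_KCDATA_FORMAT was requested.
 */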
2005void
2006do_stackshot()
2007{
2008 if (stack_snapshot_flags & STACKSHOT_KCDATA_FORMAT) {
2009 stack_snapshot_ret = kdp_stackshot_kcdata_format(stack_snapshot_pid,
2010 stack_snapshot_flags,
2011 &stack_snapshot_bytes_traced);
2012 }
2013 else {
2014 stack_snapshot_ret = kdp_stackshot(stack_snapshot_pid,
fe8ab488 2015 stack_snapshot_buf, stack_snapshot_bufsize,
3e170ce0
A
2016 stack_snapshot_flags, &stack_snapshot_bytes_traced);
2017 }
2018}
2019
2020/*
2021 * A fantastical routine that tries to be fast about returning
2022 * translations. Caches the last page we found a translation
2023 * for, so that we can be quick about multiple queries to the
2024 * same page. It turns out this is exactly the workflow
2025 * machine_trace_thread and its relatives tend to throw at us.
2026 *
2027 * Please zero the nasty global this uses after a bulk lookup;
2028 * this isn't safe across a switch of the kdp_pmap or changes
2029 * to a pmap.
2030 *
2031 * This also means that if zero is a valid KVA, we are
2032 * screwed. Sucks to be us. Fortunately, this should never
2033 * happen.
2034 */
2035vm_offset_t
 2036 machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags)
2037{
2038 unsigned cur_wimg_bits;
2039 vm_offset_t cur_target_page;
2040 vm_offset_t cur_phys_addr;
2041 vm_offset_t kern_virt_target_addr;
2042
2043 cur_target_page = atop(cur_target_addr);
2044
2045 if ((cur_target_page != prev_target_page) || validate_next_addr) {
2046 /*
2047 * Alright; it wasn't our previous page. So
2048 * we must validate that there is a page
2049 * table entry for this address under the
2050 * current kdp_pmap, and that it has default
2051 * cache attributes (otherwise it may not be
2052 * safe to access it).
2053 */
2054 cur_phys_addr = kdp_vtophys(kdp_pmap ? kdp_pmap : kernel_pmap, cur_target_addr);
2055
2056 if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
 2057
2058 if (!stack_enable_faulting) {
2059 return 0;
2060 }
 2061
2062 /*
2063 * The pmap doesn't have a valid page so we start at the top level
2064 * vm map and try a lightweight fault.
2065 */
2066 cur_phys_addr = kdp_lightweight_fault(map, (cur_target_addr & ~PAGE_MASK), thread_trace_flags);
2067 cur_phys_addr += (cur_target_addr & PAGE_MASK);
2068
2069 if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr)))
2070 return 0;
2071 } else {
2072 /*
2073 * This check is done in kdp_lightweight_fault for the fault path.
2074 */
2075 cur_wimg_bits = pmap_cache_attributes((ppnum_t) atop(cur_phys_addr));
2076
2077 if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
2078 return 0;
2079 }
2080 }
2081
2082#if __x86_64__
2083 kern_virt_target_addr = (vm_offset_t) PHYSMAP_PTOV(cur_phys_addr);
2084#else
2085#error Oh come on... we should really unify the physical -> kernel virtual interface
2086#endif
2087 prev_target_page = cur_target_page;
2088 prev_target_kva = (kern_virt_target_addr & ~PAGE_MASK);
2089 validate_next_addr = FALSE;
2090 return kern_virt_target_addr;
2091 } else {
 2092 /* Same page as the last lookup; reuse the cached translation */
2093 kern_virt_target_addr = prev_target_kva + (cur_target_addr & PAGE_MASK);
2094 return kern_virt_target_addr;
2095 }
2096}
2097
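/*
 * Invalidate the one-entry translation cache kept by
 * machine_trace_thread_get_kva(); callers should do this after a bulk
 * lookup and whenever kdp_pmap changes.
 */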
2098void
2099machine_trace_thread_clear_validation_cache(void)
2100{
2101 validate_next_addr = TRUE;
2102}
2103