/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <sys/errno.h>
#include <sys/stackshot.h>
#ifdef IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#endif
#include <sys/appleapiopts.h>
#include <kern/debug.h>
#include <uuid/uuid.h>

#include <kdp/kdp_dyld.h>
#include <kdp/kdp_en_debugger.h>

#include <libsa/types.h>
#include <libkern/version.h>

#include <string.h> /* bcopy */

#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/telemetry.h>
#include <kern/clock.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_fault.h>
#include <vm/vm_shared_region.h>
#include <libkern/OSKextLibPrivate.h>

#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
#include <pexpert/pexpert.h> /* For gPanicBase */
#endif
extern unsigned int not_in_kdp;

/*
 * TODO: Even hackier than the other pieces. This should really
 * be moved off of kdp_pmap, and we should probably separate
 * machine_trace_thread out of the kdp code.
 */
extern pmap_t kdp_pmap;
extern addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
static int stack_snapshot_ret = 0;
static uint32_t stack_snapshot_bytes_traced = 0;

static kcdata_descriptor_t stackshot_kcdata_p = NULL;
static void *stack_snapshot_buf;
static uint32_t stack_snapshot_bufsize;
int stack_snapshot_pid;
static uint32_t stack_snapshot_flags;
static unsigned int old_debugger;
static boolean_t stack_enable_faulting;

void *kernel_stackshot_buf = NULL; /* Pointer to buffer for stackshots triggered from the kernel and retrieved later */
int kernel_stackshot_buf_size = 0;

void *stackshot_snapbuf = NULL; /* Used by stack_snapshot2 (to be removed) */
__private_extern__ void stackshot_lock_init( void );
static boolean_t memory_iszero(void *addr, size_t size);
kern_return_t stack_snapshot2(int pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
kern_return_t stack_snapshot_from_kernel_internal(int pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced);
kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
uint32_t get_stackshot_estsize(uint32_t prev_size_hint);
kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config,
                                           size_t stackshot_config_size, boolean_t stackshot_from_user);
void do_stackshot(void);
void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, kcdata_descriptor_t data_p, boolean_t enable_faulting);
void kdp_snapshot_postflight(void);
static int kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t *pbytesTraced);
static int kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced);
int kdp_stack_snapshot_geterror(void);
uint32_t kdp_stack_snapshot_bytes_traced(void);
int kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t *pbytesTraced);
static int pid_from_task(task_t task);
static uint64_t proc_uniqueid_from_task(task_t task);
static void kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap);
static boolean_t kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size);
static uint64_t proc_was_throttled_from_task(task_t task);

extern int proc_pid(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
extern uint64_t proc_did_throttle(void *p);
static uint64_t proc_did_throttle_from_task(task_t task);
extern void proc_name_kdp(task_t task, char *buf, int size);
extern int proc_threadname_kdp(void *uth, char *buf, size_t size);
extern void proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec);
extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
static uint64_t proc_dispatchqueue_serialno_offset_from_task(task_t task);
extern int memorystatus_get_pressure_status_kdp(void);

extern int count_busy_buffers(void); /* must track with declaration in bsd/sys/buf_internal.h */
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
extern int machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p, uint32_t *thread_trace_flags);
extern int machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p, uint32_t *thread_trace_flags);
/* Validates that the given address is both a valid page and has
 * default caching attributes for the current kdp_pmap.  Returns
 * 0 if the address is invalid, and a kernel virtual address for
 * the given address if it is valid.
 */
vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);

/* Clears caching information used by the above validation routine
 * (in case the kdp_pmap has been changed or cleared).
 */
void machine_trace_thread_clear_validation_cache(void);
#define MAX_FRAMES 1000
#define MAX_LOADINFOS 500
#define USECSPERSEC 1000000
#define TASK_IMP_WALK_LIMIT 20

typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
extern kdp_send_t kdp_en_send_pkt;
#endif
/*
 * Globals to support machine_trace_thread_get_kva.
 */
static vm_offset_t prev_target_page = 0;
static vm_offset_t prev_target_kva = 0;
static boolean_t validate_next_addr = TRUE;
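/*
 * Illustrative sketch only (not part of the original source): how a
 * machine-dependent machine_trace_thread_get_kva() might consult the
 * one-entry cache above.  The helper name and body are hypothetical; the
 * real routine is declared earlier and implemented in machine code.
 */
#if 0 /* example, compiled out */
static vm_offset_t
example_cached_kva_lookup(vm_offset_t cur_target_addr)
{
    vm_offset_t cur_page = cur_target_addr & ~((vm_offset_t)PAGE_MASK);

    if (validate_next_addr || cur_page != prev_target_page) {
        /* Page changed (or the cache was cleared): re-validate and
         * re-translate, then remember the result for the next frame. */
        prev_target_page = cur_page;
        prev_target_kva = 0; /* would be filled in from the fresh translation */
        validate_next_addr = FALSE;
    }

    /* Subsequent addresses on the same page reuse the cached mapping. */
    return prev_target_kva + (cur_target_addr - cur_page);
}
#endif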
/*
 * Stackshot locking and other defines.
 */
static lck_grp_t *stackshot_subsys_lck_grp;
static lck_grp_attr_t *stackshot_subsys_lck_grp_attr;
static lck_attr_t *stackshot_subsys_lck_attr;
static lck_mtx_t stackshot_subsys_mutex;

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)

#if defined(__i386__) || defined (__x86_64__)
#define TRAP_DEBUGGER __asm__ volatile("int3")
#else
#error No TRAP_DEBUGGER definition for this architecture
#endif
/* Initialize the mutex governing access to the stack snapshot subsystem */
__private_extern__ void
stackshot_lock_init( void )
{
    stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);

    stackshot_subsys_lck_attr = lck_attr_alloc_init();

    lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
}
#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)
#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)

#define STACKSHOT_SUPP_SIZE (16 * 1024) /* Minimum stackshot size */
#define TASK_UUID_AVG_SIZE (16 * sizeof(uuid_t)) /* Average space consumed by UUIDs/task */
/*
 * Method for grabbing timer values safely, in the sense that no infinite loop will occur.
 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
 * can loop infinitely if called while the timer is in the process of being updated.
 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
 * the timer using this method. This seems insoluble, since stackshot runs in a context
 * where the timer might be half-updated, and has no way of yielding control just long
 * enough to finish the update.
 */
static uint64_t safe_grab_timer_value(struct timer *t)
{
#if defined(__LP64__)
    return t->all_bits;
#else
    uint64_t time = t->high_bits;    /* endian independent grab */
    time = (time << 32) | t->low_bits;
    return time;
#endif
}
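/*
 * Illustrative sketch only (not part of the original source): the hazard the
 * comment above describes.  Reading a 64-bit counter as two 32-bit halves can
 * pair a new high word with a stale low word if an update lands in between;
 * safe_grab_timer_value() accepts that rare inconsistency in exchange for
 * never spinning the way a retrying reader would.
 */
#if 0 /* example, compiled out */
static uint64_t
example_split_read(const volatile uint32_t *high, const volatile uint32_t *low)
{
    uint64_t t = *high;      /* may observe a half-finished update */
    t = (t << 32) | *low;    /* no retry loop, so no chance of spinning in debugger context */
    return t;
}
#endif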
/*
 * Old, inefficient stackshot call. This will be removed in the next release and is being replaced with
 * two syscalls -- stack_snapshot_with_config and stack_microsnapshot.
 */
kern_return_t
stack_snapshot2(int pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval)
{
    boolean_t istate;
    int error = KERN_SUCCESS;
    unsigned bytesTraced = 0;

    if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
        telemetry_global_ctl(1);
        *retval = 0;
        return (0);
    } else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
        telemetry_global_ctl(0);
        *retval = 0;
        return (0);
    }

    if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
        error = telemetry_enable_window();

        if (error != KERN_SUCCESS) {
            /* We are probably out of memory */
            *retval = -1;
            return KERN_RESOURCE_SHORTAGE;
        }

        *retval = 0;
        return (0);
    } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
        telemetry_disable_window();
        *retval = 0;
        return (0);
    }

    /* Serialize tracing */
    STACKSHOT_SUBSYS_LOCK();

    if (tracebuf_size <= 0) {
        error = KERN_INVALID_ARGUMENT;
        goto error_exit;
    }

    if (flags & STACKSHOT_GET_MICROSTACKSHOT) {

        if (tracebuf_size > SANE_TRACEBUF_SIZE) {
            error = KERN_INVALID_ARGUMENT;
            goto error_exit;
        }

        bytesTraced = tracebuf_size;
        error = telemetry_gather(tracebuf, &bytesTraced,
                                 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
        *retval = (int)bytesTraced;
        goto error_exit;
    }

    if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {

        if (tracebuf_size > SANE_TRACEBUF_SIZE) {
            error = KERN_INVALID_ARGUMENT;
            goto error_exit;
        }

        bytesTraced = tracebuf_size;
        error = telemetry_gather_windowed(tracebuf, &bytesTraced);
        *retval = (int)bytesTraced;
        goto error_exit;
    }

    if (flags & STACKSHOT_GET_BOOT_PROFILE) {

        if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
            error = KERN_INVALID_ARGUMENT;
            goto error_exit;
        }

        bytesTraced = tracebuf_size;
        error = bootprofile_gather(tracebuf, &bytesTraced);
        *retval = (int)bytesTraced;
        goto error_exit;
    }

    if (tracebuf_size > SANE_TRACEBUF_SIZE) {
        error = KERN_INVALID_ARGUMENT;
        goto error_exit;
    }

    assert(stackshot_snapbuf == NULL);
    if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
        error = KERN_RESOURCE_SHORTAGE;
        goto error_exit;
    }

    if (panic_active()) {
        error = KERN_RESOURCE_SHORTAGE;
        goto error_exit;
    }

    istate = ml_set_interrupts_enabled(FALSE);
    /* Preload trace parameters */
    kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, NULL, FALSE);

    /* Trap to the debugger to obtain a coherent stack snapshot; this populates
     * the trace buffer
     */
    TRAP_DEBUGGER;

    ml_set_interrupts_enabled(istate);

    bytesTraced = kdp_stack_snapshot_bytes_traced();

    if (bytesTraced > 0) {
        if ((error = copyout(stackshot_snapbuf, tracebuf,
                             ((bytesTraced < tracebuf_size) ?
                              bytesTraced : tracebuf_size))))
            goto error_exit;
        *retval = bytesTraced;
    } else {
        error = KERN_NOT_IN_SET;
        goto error_exit;
    }

    error = kdp_stack_snapshot_geterror();
    if (error == -1) {
        error = KERN_NO_SPACE;
        *retval = -1;
    }

error_exit:
    if (stackshot_snapbuf != NULL)
        kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
    stackshot_snapbuf = NULL;
    STACKSHOT_SUBSYS_UNLOCK();
    return error;
}
kern_return_t
stack_snapshot_from_kernel_internal(int pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytes_traced)
{
    int error = 0;
    boolean_t istate;

    if ((buf == NULL) || (size <= 0) || (bytes_traced == NULL)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* cap an individual stackshot to SANE_TRACEBUF_SIZE */
    if (size > SANE_TRACEBUF_SIZE) {
        size = SANE_TRACEBUF_SIZE;
    }

    /* Serialize tracing */
    STACKSHOT_SUBSYS_LOCK();
    istate = ml_set_interrupts_enabled(FALSE);

    /* Preload trace parameters */
    kdp_snapshot_preflight(pid, buf, size, flags, NULL, FALSE);

    /* Trap to the debugger to obtain a coherent stack snapshot; this populates
     * the trace buffer
     */
    TRAP_DEBUGGER;

    ml_set_interrupts_enabled(istate);

    *bytes_traced = kdp_stack_snapshot_bytes_traced();

    error = kdp_stack_snapshot_geterror();

    STACKSHOT_SUBSYS_UNLOCK();

    return error;
}
#if CONFIG_TELEMETRY
kern_return_t
stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval)
{
    int error = KERN_SUCCESS;
    uint32_t bytes_traced = 0;

    /*
     * Control related operations
     */
    if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
        telemetry_global_ctl(1);
        *retval = 0;
        goto exit;
    } else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
        telemetry_global_ctl(0);
        *retval = 0;
        goto exit;
    }

    if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_ENABLE) {
        error = telemetry_enable_window();

        if (error != KERN_SUCCESS) {
            /*
             * We are probably out of memory
             */
            error = KERN_RESOURCE_SHORTAGE;
            goto exit;
        }

        *retval = 0;
        goto exit;
    } else if (flags & STACKSHOT_WINDOWED_MICROSTACKSHOTS_DISABLE) {
        telemetry_disable_window();
        *retval = 0;
        goto exit;
    }

    /*
     * Data related operations
     */
    if ((((void*)tracebuf) == NULL) || (tracebuf_size == 0)) {
        error = KERN_INVALID_ARGUMENT;
        goto exit;
    }

    STACKSHOT_SUBSYS_LOCK();

    if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
        if (tracebuf_size > SANE_TRACEBUF_SIZE) {
            error = KERN_INVALID_ARGUMENT;
            goto unlock_exit;
        }

        bytes_traced = tracebuf_size;
        error = telemetry_gather(tracebuf, &bytes_traced,
                                 (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
        *retval = (int)bytes_traced;
        goto unlock_exit;
    }

    if (flags & STACKSHOT_GET_WINDOWED_MICROSTACKSHOTS) {

        if (tracebuf_size > SANE_TRACEBUF_SIZE) {
            error = KERN_INVALID_ARGUMENT;
            goto unlock_exit;
        }

        bytes_traced = tracebuf_size;
        error = telemetry_gather_windowed(tracebuf, &bytes_traced);
        *retval = (int)bytes_traced;
        goto unlock_exit;
    }

    if (flags & STACKSHOT_GET_BOOT_PROFILE) {

        if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
            error = KERN_INVALID_ARGUMENT;
            goto unlock_exit;
        }

        bytes_traced = tracebuf_size;
        error = bootprofile_gather(tracebuf, &bytes_traced);
        *retval = (int)bytes_traced;
    }

unlock_exit:
    STACKSHOT_SUBSYS_UNLOCK();
exit:
    return error;
}
#endif /* CONFIG_TELEMETRY */
/*
 * Return the estimated size of a stackshot based on the
 * number of currently running threads and tasks.
 */
uint32_t
get_stackshot_estsize(uint32_t prev_size_hint)
{
    vm_size_t thread_total;
    vm_size_t task_total;
    uint32_t estimated_size;

    thread_total = (threads_count * sizeof(struct thread_snapshot));
    task_total = (tasks_count * (sizeof(struct task_snapshot) + TASK_UUID_AVG_SIZE));

    estimated_size = (uint32_t) VM_MAP_ROUND_PAGE((thread_total + task_total + STACKSHOT_SUPP_SIZE), PAGE_MASK);
    if (estimated_size < prev_size_hint) {
        estimated_size = (uint32_t) VM_MAP_ROUND_PAGE(prev_size_hint, PAGE_MASK);
    }

    return estimated_size;
}
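/*
 * Worked example (not part of the original source), assuming the defaults
 * above: with 1000 live threads and 300 tasks the estimate is roughly
 *
 *   thread_total = 1000 * sizeof(struct thread_snapshot)
 *   task_total   = 300 * (sizeof(struct task_snapshot) + 16 * sizeof(uuid_t))
 *
 * and the sum plus the 16KB STACKSHOT_SUPP_SIZE floor is rounded up to a
 * whole number of pages.  A caller whose previous attempt needed a bigger
 * buffer can feed that size back in as prev_size_hint so the first
 * allocation below is already large enough.
 */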
/*
 * stackshot_remap_buffer:  Utility function to remap bytes_traced bytes starting at stackshotbuf
 *                          into the current task's user space and subsequently copy out the address
 *                          at which the buffer has been mapped in user space to out_buffer_addr.
 *
 * Inputs:                  stackshotbuf - pointer to the original buffer in the kernel's address space
 *                          bytes_traced - length of the buffer to remap starting from stackshotbuf
 *                          out_buffer_addr - pointer to placeholder where newly mapped buffer will be mapped.
 *                          out_size_addr - pointer to be filled in with the size of the buffer
 *
 * Outputs:                 ENOSPC if there is not enough free space in the task's address space to remap the buffer
 *                          EINVAL for all other errors returned by task_remap_buffer/mach_vm_remap
 *                          an error from copyout
 */
static kern_return_t
stackshot_remap_buffer(void *stackshotbuf, uint32_t bytes_traced, uint64_t out_buffer_addr, uint64_t out_size_addr)
{
    int error = 0;
    mach_vm_offset_t stackshotbuf_user_addr = (mach_vm_offset_t)NULL;
    vm_prot_t cur_prot, max_prot;

    error = mach_vm_remap(get_task_map(current_task()), &stackshotbuf_user_addr, bytes_traced, 0,
                          VM_FLAGS_ANYWHERE, kernel_map, (mach_vm_offset_t)stackshotbuf, FALSE, &cur_prot, &max_prot, VM_INHERIT_DEFAULT);
    /*
     * If the call to mach_vm_remap fails, we return the appropriate converted error
     */
    if (error == KERN_SUCCESS) {
        /*
         * If we fail to copy out the address or size of the new buffer, we remove the buffer mapping that
         * we just made in the task's user space.
         */
        error = copyout(CAST_DOWN(void *, &stackshotbuf_user_addr), (user_addr_t)out_buffer_addr, sizeof(stackshotbuf_user_addr));
        if (error != KERN_SUCCESS) {
            mach_vm_deallocate(get_task_map(current_task()), stackshotbuf_user_addr, (mach_vm_size_t)bytes_traced);
            return error;
        }

        error = copyout(&bytes_traced, (user_addr_t)out_size_addr, sizeof(bytes_traced));
        if (error != KERN_SUCCESS) {
            mach_vm_deallocate(get_task_map(current_task()), stackshotbuf_user_addr, (mach_vm_size_t)bytes_traced);
            return error;
        }
    }
    return error;
}
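/*
 * Illustrative sketch only (not part of the original source): how a caller
 * with a populated kernel buffer typically uses stackshot_remap_buffer(),
 * mirroring what kern_stack_snapshot_internal() does below.  This is a
 * fragment, not a complete function.
 */
#if 0 /* example, compiled out */
    error = stackshot_remap_buffer(stackshotbuf, bytes_traced,
                                   out_buffer_addr, out_size_addr);
    if (error != KERN_SUCCESS) {
        /* nothing was published to user space; the kernel copy can be freed */
        kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
    }
#endif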
kern_return_t
kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user)
{
    int error = 0;
    boolean_t prev_interrupt_state;
    uint32_t bytes_traced = 0;
    uint32_t stackshotbuf_size = 0;
    void * stackshotbuf = NULL;
    kcdata_descriptor_t kcdata_p = NULL;

    void * buf_to_free = NULL;
    int size_to_free = 0;

    /* Parsed arguments */
    uint64_t out_buffer_addr;
    uint64_t out_size_addr;
    int pid = -1;
    uint32_t flags;
    uint64_t since_timestamp;
    boolean_t enable_faulting = FALSE;
    uint32_t size_hint = 0;

    if (stackshot_config == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (stackshot_config_version) {
        case STACKSHOT_CONFIG_TYPE:
            if (stackshot_config_size != sizeof(stackshot_config_t)) {
                return KERN_INVALID_ARGUMENT;
            }
            stackshot_config_t *config = (stackshot_config_t *) stackshot_config;
            out_buffer_addr = config->sc_out_buffer_addr;
            out_size_addr = config->sc_out_size_addr;
            pid = config->sc_pid;
            flags = config->sc_flags;
            since_timestamp = config->sc_since_timestamp;
            if (config->sc_size <= SANE_TRACEBUF_SIZE) {
                size_hint = config->sc_size;
            }
            break;
        default:
            return KERN_NOT_SUPPORTED;
    }

    /*
     * Currently saving a kernel buffer is only supported from the internal/KEXT API.
     */
    if (stackshot_from_user) {
        if (flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER) {
            return KERN_NO_ACCESS;
        }
    } else {
        if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
            return KERN_NOT_SUPPORTED;
        }
    }

    if (flags & STACKSHOT_ENABLE_FAULTING) {
        return KERN_NOT_SUPPORTED;
    }

    /*
     * If we're not saving the buffer in the kernel pointer, we need places to copy into.
     */
    if ((!out_buffer_addr || !out_size_addr) && !(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (since_timestamp != 0) {
        return KERN_NOT_SUPPORTED;
    }

    STACKSHOT_SUBSYS_LOCK();

    if (flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER) {
        /*
         * Don't overwrite an existing stackshot
         */
        if (kernel_stackshot_buf != NULL) {
            error = KERN_MEMORY_PRESENT;
            goto error_exit;
        }
    } else if (flags & STACKSHOT_RETRIEVE_EXISTING_BUFFER) {
        if ((kernel_stackshot_buf == NULL) || (kernel_stackshot_buf_size <= 0)) {
            error = KERN_NOT_IN_SET;
            goto error_exit;
        }
        error = stackshot_remap_buffer(kernel_stackshot_buf, kernel_stackshot_buf_size,
                                       out_buffer_addr, out_size_addr);
        /*
         * If we successfully remapped the buffer into the user's address space, we
         * set buf_to_free and size_to_free so the prior kernel mapping will be removed
         * and then clear the kernel stackshot pointer and associated size.
         */
        if (error == KERN_SUCCESS) {
            buf_to_free = kernel_stackshot_buf;
            size_to_free = (int) VM_MAP_ROUND_PAGE(kernel_stackshot_buf_size, PAGE_MASK);
            kernel_stackshot_buf = NULL;
            kernel_stackshot_buf_size = 0;
        }

        goto error_exit;
    }

    stackshotbuf_size = get_stackshot_estsize(size_hint);

    for (; stackshotbuf_size <= SANE_TRACEBUF_SIZE; stackshotbuf_size <<= 1) {
        if (kmem_alloc(kernel_map, (vm_offset_t *)&stackshotbuf, stackshotbuf_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
            error = KERN_RESOURCE_SHORTAGE;
            goto error_exit;
        }

        /*
         * If someone has panicked, don't try and enter the debugger
         */
        if (panic_active()) {
            error = KERN_RESOURCE_SHORTAGE;
            goto error_exit;
        }

        if (flags & STACKSHOT_KCDATA_FORMAT) {
            kcdata_p = kcdata_memory_alloc_init((mach_vm_address_t)stackshotbuf, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshotbuf_size, KCFLAG_USE_MEMCOPY);
        }

        /*
         * Disable interrupts and save the current interrupt state.
         */
        prev_interrupt_state = ml_set_interrupts_enabled(FALSE);

        /*
         * Load stackshot parameters.
         */
        kdp_snapshot_preflight(pid, stackshotbuf, stackshotbuf_size, flags, kcdata_p, enable_faulting);

        /*
         * Trap to the debugger to obtain a stackshot (this will populate the buffer).
         */
        TRAP_DEBUGGER;

        ml_set_interrupts_enabled(prev_interrupt_state);

        /*
         * If we didn't allocate a big enough buffer, deallocate and try again.
         */
        error = kdp_stack_snapshot_geterror();
        if (error == -1) {
            if (kcdata_p != NULL) {
                kcdata_memory_destroy(kcdata_p);
                kcdata_p = NULL;
                stackshot_kcdata_p = NULL;
            }
            kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
            stackshotbuf = NULL;
            continue;
        }

        bytes_traced = kdp_stack_snapshot_bytes_traced();

        if (bytes_traced <= 0) {
            error = KERN_NOT_IN_SET;
            goto error_exit;
        }

        assert(bytes_traced <= stackshotbuf_size);
        if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
            error = stackshot_remap_buffer(stackshotbuf, bytes_traced, out_buffer_addr, out_size_addr);
            goto error_exit;
        }

        /*
         * Save the stackshot in the kernel buffer.
         */
        kernel_stackshot_buf = stackshotbuf;
        kernel_stackshot_buf_size = bytes_traced;
        /*
         * Figure out if we didn't use all the pages in the buffer. If so, we set buf_to_free to the beginning of
         * the next page after the end of the stackshot in the buffer so that the kmem_free clips the buffer and
         * update size_to_free for kmem_free accordingly.
         */
        size_to_free = stackshotbuf_size - (int) VM_MAP_ROUND_PAGE(bytes_traced, PAGE_MASK);

        assert(size_to_free >= 0);

        if (size_to_free != 0) {
            buf_to_free = (void *)((uint64_t)stackshotbuf + stackshotbuf_size - size_to_free);
        }

        stackshotbuf = NULL;
        stackshotbuf_size = 0;
        goto error_exit;
    }

    if (stackshotbuf_size > SANE_TRACEBUF_SIZE) {
        error = KERN_RESOURCE_SHORTAGE;
    }

error_exit:
    if (kcdata_p != NULL) {
        kcdata_memory_destroy(kcdata_p);
        kcdata_p = NULL;
        stackshot_kcdata_p = NULL;
    }

    if (stackshotbuf != NULL) {
        kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
    }
    if (buf_to_free != NULL) {
        kmem_free(kernel_map, (vm_offset_t)buf_to_free, size_to_free);
    }
    STACKSHOT_SUBSYS_UNLOCK();
    return error;
}
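/*
 * Illustrative sketch only (not part of the original source): how a kernel
 * caller might capture a stackshot with the internal API above.  The helper
 * name, the static buffer and the chosen flag are hypothetical.
 */
#if 0 /* example, compiled out */
static void
example_capture_stackshot(void)
{
    static char example_buf[64 * 1024];   /* hypothetical scratch buffer */
    unsigned bytes = 0;
    kern_return_t kr;

    /* pid of -1 asks for every task, as in the traversal code below */
    kr = stack_snapshot_from_kernel_internal(-1, example_buf, sizeof(example_buf),
                                             STACKSHOT_SAVE_LOADINFO, &bytes);
    if (kr == KERN_SUCCESS) {
        /* 'bytes' now holds the amount of snapshot data written into example_buf */
    }
}
#endif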
/* Cache stack snapshot parameters in preparation for a trace */
void
kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags,
                       kcdata_descriptor_t data_p, boolean_t enable_faulting)
{
    stack_snapshot_pid = pid;
    stack_snapshot_buf = tracebuf;
    stack_snapshot_bufsize = tracebuf_size;
    stack_snapshot_flags = flags;
    stack_enable_faulting = enable_faulting;
    if (data_p != NULL) {
        stackshot_kcdata_p = data_p;
    }

    /* Mark this debugger as active, since the polled mode driver that
     * ordinarily does this may not be enabled (yet), or since KDB may be
     * the primary debugger.
     */
    old_debugger = current_debugger;
    if (old_debugger != KDP_CUR_DB) {
        current_debugger = KDP_CUR_DB;
    }
}
void
kdp_snapshot_postflight(void)
{
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
    if ((kdp_en_send_pkt == NULL) || (old_debugger == KDB_CUR_DB))
        current_debugger = old_debugger;
#else
    current_debugger = old_debugger;
#endif
}
int
kdp_stack_snapshot_geterror(void)
{
    return stack_snapshot_ret;
}

uint32_t
kdp_stack_snapshot_bytes_traced(void)
{
    return stack_snapshot_bytes_traced;
}
static boolean_t
memory_iszero(void *addr, size_t size)
{
    char *data = (char *)addr;
    for (size_t i = 0; i < size; i++) {
        if (data[i] != 0)
            return FALSE;
    }
    return TRUE;
}
static int
kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced)
{
    /* convenience macros specific only for this function */
#define kcd_end_address(kcd) ((void *)((uint64_t)((kcd)->kcd_addr_begin) + kcdata_memory_get_used_bytes((kcd))))
#define kcd_max_address(kcd) ((void *)((kcd)->kcd_addr_begin + (kcd)->kcd_length))
#define kcd_exit_on_error(action)                      \
    do {                                               \
        if (KERN_SUCCESS != (error = (action))) {      \
            if (error == KERN_RESOURCE_SHORTAGE) {     \
                error = -1;                            \
            }                                          \
            goto error_exit;                           \
        }                                              \
    } while (0); /* end kcd_exit_on_error */
    int error = 0;
    mach_vm_address_t out_addr = 0;
    uint64_t abs_time;
    struct task_snapshot_v2 *cur_tsnap;
    uint64_t system_state_flags = 0;
    int saved_count = 0;
    task_t task = TASK_NULL;
    thread_t thread = THREAD_NULL;
    mach_timebase_info_data_t timebase = {0, 0};
    uint64_t microsecs = 0, secs = 0;
    uint32_t length_to_copy, tmp32;

    abs_time = mach_absolute_time();
    clock_get_calendar_microtime((clock_sec_t*)&secs, (clock_usec_t*)&microsecs);

    /* process the flags */
    boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
    boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
    boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
    boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);
    boolean_t save_donating_pids_p = ((trace_flags & STACKSHOT_SAVE_IMP_DONATION_PIDS) != 0);

    if (sizeof(void *) == 8)
        system_state_flags |= kKernel64_p;

    if (stackshot_kcdata_p == NULL || pBytesTraced == NULL) {
        error = -1;
        goto error_exit;
    }

    /* begin saving data into the buffer */
    kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, trace_flags, "stackshot_in_flags"));
    kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, (uint32_t)pid, "stackshot_in_pid"));
    kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, system_state_flags, "system_state_flags"));

    tmp32 = PAGE_SIZE;
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_KERN_PAGE_SIZE, sizeof(uint32_t), &out_addr));
    memcpy((void *)out_addr, &tmp32, sizeof(tmp32));

    tmp32 = memorystatus_get_pressure_status_kdp();
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL, sizeof(uint32_t), &out_addr));
    memcpy((void *)out_addr, &tmp32, sizeof(tmp32));

    /* save boot-args and osversion string */
    length_to_copy = MIN((uint32_t)(strlen(version) + 1), OSVERSIZE);
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, &out_addr));
    strlcpy((char*)out_addr, &version[0], length_to_copy);

    length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), OSVERSIZE);
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, &out_addr));
    strlcpy((char*)out_addr, PE_boot_args(), length_to_copy);

    /* setup mach_absolute_time and timebase info */
    clock_timebase_info(&timebase);
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_TIMEBASE, sizeof(timebase), &out_addr));
    memcpy((void *)out_addr, &timebase, sizeof(timebase));

    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &out_addr));
    memcpy((void *)out_addr, &abs_time, sizeof(uint64_t));

    microsecs = microsecs + (secs * USECSPERSEC);
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_USECS_SINCE_EPOCH, sizeof(uint64_t), &out_addr));
    memcpy((void *)out_addr, &microsecs, sizeof(uint64_t));

    /* reserve space for the system level shared cache load info */
    struct dyld_uuid_info_64 *sys_shared_cache_loadinfo;
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(kernel_uuid_info), &out_addr));
    sys_shared_cache_loadinfo = (struct dyld_uuid_info_64 *)out_addr;
    bzero((void *)sys_shared_cache_loadinfo, sizeof(struct dyld_uuid_info_64));
    /* Add requested information first */
    if (trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
        kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_GLOBAL_MEM_STATS, sizeof(struct mem_and_io_snapshot), &out_addr));
        kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)out_addr);
    }

    /* Iterate over tasks */
    queue_head_t *task_list = &tasks;
    queue_iterate(task_list, task, task_t, tasks) {
        int task_pid;

        if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
            continue;

        task_pid = pid_from_task(task);

        /*
         * Not interested in terminated tasks without threads, and
         * at the moment, stackshot can't handle a task without a name.
         */
        if (queue_empty(&task->threads) || task_pid == -1) {
            continue;
        }

        /* Trace everything, unless a process was specified */
        if ((pid == -1) || (pid == task_pid)) {
            uint64_t task_uniqueid = proc_uniqueid_from_task(task);
            boolean_t task64 = task_has_64BitAddr(task);
            boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
            boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));

            /* add task snapshot marker */
            kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_TASK, task_uniqueid));
            /* add task_snapshot_v2 struct data */
            kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_TASK_SNAPSHOT, sizeof(struct task_snapshot_v2), &out_addr));
            cur_tsnap = (struct task_snapshot_v2 *)out_addr;
            bzero(cur_tsnap, sizeof(struct task_snapshot_v2));

            cur_tsnap->ts_pid = task_pid;
            cur_tsnap->ts_unique_pid = task_uniqueid;

            /* Add the BSD process identifiers */
            if (task_pid != -1 && task->bsd_info != NULL)
                proc_name_kdp(task, cur_tsnap->ts_p_comm, sizeof(cur_tsnap->ts_p_comm));
            else {
                cur_tsnap->ts_p_comm[0] = '\0';
#if IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG)
                if (task->task_imp_base != NULL) {
                    strlcpy(cur_tsnap->ts_p_comm, &task->task_imp_base->iit_procname[0],
                            MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm)));
                }
#endif
            }

            if (task64)
                cur_tsnap->ts_ss_flags |= kUser64_p;
            if (!task->active || task_is_a_corpse(task))
                cur_tsnap->ts_ss_flags |= kTerminatedSnapshot;
            if (task->pidsuspended)
                cur_tsnap->ts_ss_flags |= kPidSuspended;
            if (task->frozen)
                cur_tsnap->ts_ss_flags |= kFrozen;
            if (task->effective_policy.darwinbg == 1)
                cur_tsnap->ts_ss_flags |= kTaskDarwinBG;
            if (task->requested_policy.t_role == TASK_FOREGROUND_APPLICATION)
                cur_tsnap->ts_ss_flags |= kTaskIsForeground;
            if (task->requested_policy.t_boosted == 1)
                cur_tsnap->ts_ss_flags |= kTaskIsBoosted;
            if (task->effective_policy.t_sup_active == 1)
                cur_tsnap->ts_ss_flags |= kTaskIsSuppressed;
#if IMPORTANCE_INHERITANCE
            if (task->task_imp_base) {
                if (task->task_imp_base->iit_donor)
                    cur_tsnap->ts_ss_flags |= kTaskIsImpDonor;
                if (task->task_imp_base->iit_live_donor)
                    cur_tsnap->ts_ss_flags |= kTaskIsLiveImpDonor;
            }
#endif

            cur_tsnap->ts_latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
                LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);
            cur_tsnap->ts_suspend_count = task->suspend_count;
            cur_tsnap->ts_p_start_sec = 0;
            proc_starttime_kdp(task->bsd_info, &cur_tsnap->ts_p_start_sec, NULL);

            cur_tsnap->ts_task_size = have_pmap ? (pmap_resident_count(task->map->pmap) * PAGE_SIZE) : 0;
            cur_tsnap->ts_max_resident_size = get_task_resident_max(task);
            cur_tsnap->ts_faults = task->faults;
            cur_tsnap->ts_pageins = task->pageins;
            cur_tsnap->ts_cow_faults = task->cow_faults;
            cur_tsnap->ts_user_time_in_terminated_threads = task->total_user_time;
            cur_tsnap->ts_system_time_in_terminated_threads = task->total_system_time;
            cur_tsnap->ts_was_throttled = (uint32_t) proc_was_throttled_from_task(task);
            cur_tsnap->ts_did_throttle = (uint32_t) proc_did_throttle_from_task(task);
            /* Check for shared cache information */
            uint8_t shared_cache_identifier[16];
            uint64_t shared_cache_slide;
            uint64_t shared_cache_base_address = 0;
            boolean_t found_shared_cache_info = TRUE;

            if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region, sizeof(struct vm_shared_region))) {
                struct vm_shared_region *sr = task->shared_region;
                shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping;
            }

            if (!shared_cache_base_address ||
                !kdp_copyin(task->map->pmap, shared_cache_base_address + offsetof(struct _dyld_cache_header, uuid), shared_cache_identifier, sizeof(shared_cache_identifier))) {
                found_shared_cache_info = FALSE;
            }

            if (task->shared_region) {
                /*
                 * No refcounting here, but we are in debugger
                 * context, so that should be safe.
                 */
                shared_cache_slide = task->shared_region->sr_slide_info.slide;
            } else {
                shared_cache_slide = 0;
            }

            if (found_shared_cache_info == FALSE) {
                /* nothing to record for this task */
            } else if (task_pid == 1) {
                /* save launchd's shared cache info as system level */
                bcopy(shared_cache_identifier, sys_shared_cache_loadinfo->imageUUID, sizeof(sys_shared_cache_loadinfo->imageUUID));
                sys_shared_cache_loadinfo->imageLoadAddress = shared_cache_slide;
            } else if (shared_cache_slide == sys_shared_cache_loadinfo->imageLoadAddress &&
                       0 == memcmp(shared_cache_identifier, sys_shared_cache_loadinfo->imageUUID, sizeof(sys_shared_cache_loadinfo->imageUUID))) {
                /* skip adding shared cache info. its same as system level one */
            } else {
                kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64), &out_addr));
                struct dyld_uuid_info_64 *shared_cache_data = (struct dyld_uuid_info_64 *)out_addr;
                shared_cache_data->imageLoadAddress = shared_cache_slide;
                bcopy(shared_cache_identifier, shared_cache_data->imageUUID, sizeof(shared_cache_data->imageUUID));
            }
            /* I/O Statistics if any counters are non zero */
            assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
            if (task->task_io_stats && !memory_iszero(task->task_io_stats, sizeof(struct io_stat_info))) {
                kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr));
                struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr;
                _iostat->ss_disk_reads_count = task->task_io_stats->disk_reads.count;
                _iostat->ss_disk_reads_size = task->task_io_stats->disk_reads.size;
                _iostat->ss_disk_writes_count = (task->task_io_stats->total_io.count - task->task_io_stats->disk_reads.count);
                _iostat->ss_disk_writes_size = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
                _iostat->ss_paging_count = task->task_io_stats->paging.count;
                _iostat->ss_paging_size = task->task_io_stats->paging.size;
                _iostat->ss_non_paging_count = (task->task_io_stats->total_io.count - task->task_io_stats->paging.count);
                _iostat->ss_non_paging_size = (task->task_io_stats->total_io.size - task->task_io_stats->paging.size);
                _iostat->ss_metadata_count = task->task_io_stats->metadata.count;
                _iostat->ss_metadata_size = task->task_io_stats->metadata.size;
                _iostat->ss_data_count = (task->task_io_stats->total_io.count - task->task_io_stats->metadata.count);
                _iostat->ss_data_size = (task->task_io_stats->total_io.size - task->task_io_stats->metadata.size);
                for (int i = 0; i < IO_NUM_PRIORITIES; i++) {
                    _iostat->ss_io_priority_count[i] = task->task_io_stats->io_priority[i].count;
                    _iostat->ss_io_priority_size[i] = task->task_io_stats->io_priority[i].size;
                }
            }
#if IMPORTANCE_INHERITANCE
            if (save_donating_pids_p) {
                kcd_exit_on_error(((((mach_vm_address_t) kcd_end_address(stackshot_kcdata_p) + (TASK_IMP_WALK_LIMIT * sizeof(int32_t)))
                                    < (mach_vm_address_t) kcd_max_address(stackshot_kcdata_p)) ? KERN_SUCCESS : KERN_RESOURCE_SHORTAGE));
                saved_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS, (void *)kcd_end_address(stackshot_kcdata_p), TASK_IMP_WALK_LIMIT);
                if (saved_count > 0)
                    kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STASKSHOT_KCTYPE_DONATING_PIDS, sizeof(int32_t), saved_count, &out_addr));
            }
#endif
            /* place load info and libraries now */
            uint32_t uuid_info_count = 0;
            mach_vm_address_t uuid_info_addr = 0;
            if (save_loadinfo_p && have_pmap && task->active && task_pid > 0) {
                /* Read the dyld_all_image_infos struct from the task memory to get UUID array count and location */
                if (task64) {
                    struct user64_dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
                        uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                } else {
                    struct user32_dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
                        uuid_info_count = task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                }

                /*
                 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
                 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
                 * for this task.
                 */
                if (!uuid_info_addr) {
                    uuid_info_count = 0;
                }
            }
            if (have_pmap && task_pid == 0) {
                if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
                    uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
                } else {
                    uuid_info_count = 1; /* at least include kernel uuid */
                }
            }

            if (task_pid > 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) {
                uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
                uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;

                kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
                                  (task64 ? KCDATA_TYPE_LIBRARY_LOADINFO64 : KCDATA_TYPE_LIBRARY_LOADINFO),
                                  uuid_info_size, uuid_info_count, &out_addr));

                /* Copy in the UUID info array
                 * It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
                 */
                if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, (void *)out_addr, uuid_info_array_size)) {
                    bzero((void *)out_addr, uuid_info_array_size);
                }

            } else if (task_pid == 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) {
                uintptr_t image_load_address;

                if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
                    /* Kernel UUID not found or inaccessible */
                    goto error_exit;
                }
                kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
                                  (sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info)) ? KCDATA_TYPE_LIBRARY_LOADINFO64 : KCDATA_TYPE_LIBRARY_LOADINFO,
                                  sizeof(kernel_uuid_info), uuid_info_count, &out_addr));
                kernel_uuid_info *uuid_info_array = (kernel_uuid_info *)out_addr;
                image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
                uuid_info_array[0].imageLoadAddress = image_load_address;
                memcpy(&uuid_info_array[0].imageUUID, kernel_uuid, sizeof(uuid_t));

                if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
                                                               gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
                    uint32_t kexti;
                    for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
                        image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
                        uuid_info_array[kexti + 1].imageLoadAddress = image_load_address;
                        memcpy(&uuid_info_array[kexti + 1].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
                    }
                }
            }
            /* Iterate over task threads */
            queue_iterate(&task->threads, thread, thread_t, task_threads){
                uint64_t tval;
                uint64_t thread_uniqueid = 0;
                char cur_thread_name[STACKSHOT_MAX_THREAD_NAME_SIZE];

                if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
                    continue;

                if (!save_userframes_p && thread->kernel_stack == 0)
                    continue;

                thread_uniqueid = thread_tid(thread);

                /* add thread marker */
                kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid));
                kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_SNAPSHOT, sizeof(struct thread_snapshot_v2), &out_addr));
                struct thread_snapshot_v2 * cur_thread_snap = (struct thread_snapshot_v2 *)out_addr;

                /* Populate the thread snapshot header */
                cur_thread_snap->ths_thread_id = thread_uniqueid;
                cur_thread_snap->ths_state = thread->state;
                cur_thread_snap->ths_ss_flags = 0;
                cur_thread_snap->ths_base_priority = thread->base_pri;
                cur_thread_snap->ths_sched_priority = thread->sched_pri;
                cur_thread_snap->ths_sched_flags = thread->sched_flags;
                cur_thread_snap->ths_wait_event = VM_KERNEL_UNSLIDE_OR_PERM(thread->wait_event);
                cur_thread_snap->ths_continuation = VM_KERNEL_UNSLIDE(thread->continuation);
                cur_thread_snap->ths_last_run_time = thread->last_run_time;
                cur_thread_snap->ths_last_made_runnable_time = thread->last_made_runnable_time;
                cur_thread_snap->ths_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
                cur_thread_snap->ths_eqos = thread->effective_policy.thep_qos;
                cur_thread_snap->ths_rqos = thread->requested_policy.thrp_qos;
                cur_thread_snap->ths_rqos_override = thread->requested_policy.thrp_qos_override;
                cur_thread_snap->ths_total_syscalls = thread->syscalls_mach + thread->syscalls_unix;
                cur_thread_snap->ths_dqserialnum = 0;
                tval = safe_grab_timer_value(&thread->user_timer);
                cur_thread_snap->ths_user_time = tval;
                tval = safe_grab_timer_value(&thread->system_timer);

                if (thread->precise_user_kernel_time) {
                    cur_thread_snap->ths_sys_time = tval;
                } else {
                    cur_thread_snap->ths_user_time += tval;
                    cur_thread_snap->ths_sys_time = 0;
                }

                if (thread->effective_policy.darwinbg)
                    cur_thread_snap->ths_ss_flags |= kThreadDarwinBG;
                if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO))
                    cur_thread_snap->ths_ss_flags |= kThreadIOPassive;
                if (thread->suspend_count > 0)
                    cur_thread_snap->ths_ss_flags |= kThreadSuspended;

                if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
                    cur_thread_snap->ths_ss_flags |= kGlobalForcedIdle;
                }

                if (IPC_VOUCHER_NULL != thread->ith_voucher)
                    cur_thread_snap->ths_voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher);
                if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
                    uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
                    if (dqkeyaddr != 0) {
                        uint64_t dqaddr = 0;
                        if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
                            uint64_t dqserialnumaddr = dqaddr + proc_dispatchqueue_serialno_offset_from_task(task);
                            uint64_t dqserialnum = 0;
                            if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
                                cur_thread_snap->ths_ss_flags |= kHasDispatchSerial;
                                cur_thread_snap->ths_dqserialnum = dqserialnum;
                            }
                        }
                    }
                }

                /* if there is thread name then add to buffer */
                cur_thread_name[0] = '\0';
                proc_threadname_kdp(thread->uthread, cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE);
                if (strnlen(cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE) > 0) {
                    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_NAME, sizeof(cur_thread_name), &out_addr));
                    bcopy((void *)cur_thread_name, (void *)out_addr, sizeof(cur_thread_name));
                }
                /* I/O Statistics */
                assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
                if (thread->thread_io_stats && !memory_iszero(thread->thread_io_stats, sizeof(struct io_stat_info))) {
                    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr));
                    struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr;
                    _iostat->ss_disk_reads_count = thread->thread_io_stats->disk_reads.count;
                    _iostat->ss_disk_reads_size = thread->thread_io_stats->disk_reads.size;
                    _iostat->ss_disk_writes_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->disk_reads.count);
                    _iostat->ss_disk_writes_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->disk_reads.size);
                    _iostat->ss_paging_count = thread->thread_io_stats->paging.count;
                    _iostat->ss_paging_size = thread->thread_io_stats->paging.size;
                    _iostat->ss_non_paging_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->paging.count);
                    _iostat->ss_non_paging_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->paging.size);
                    _iostat->ss_metadata_count = thread->thread_io_stats->metadata.count;
                    _iostat->ss_metadata_size = thread->thread_io_stats->metadata.size;
                    _iostat->ss_data_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->metadata.count);
                    _iostat->ss_data_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->metadata.size);
                    for (int i = 0; i < IO_NUM_PRIORITIES; i++) {
                        _iostat->ss_io_priority_count[i] = thread->thread_io_stats->io_priority[i].count;
                        _iostat->ss_io_priority_size[i] = thread->thread_io_stats->io_priority[i].size;
                    }
                }
                /* Trace user stack, if any */
                if (save_userframes_p && task->active && thread->task->map != kernel_map) {
                    uint32_t thread_snapshot_flags = 0;

                    if (task_has_64BitAddr(thread->task)) {
                        out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
                        saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, TRUE, &thread_snapshot_flags);
                        if (saved_count > 0) {
                            kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
                                              STACKSHOT_KCTYPE_USER_STACKFRAME64,
                                              sizeof(struct stack_snapshot_frame64),
                                              saved_count/sizeof(struct stack_snapshot_frame64),
                                              &out_addr));
                            cur_thread_snap->ths_ss_flags |= kUser64_p;
                        }
                    } else {
                        out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
                        saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, TRUE, &thread_snapshot_flags);
                        if (saved_count > 0) {
                            kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
                                              STACKSHOT_KCTYPE_USER_STACKFRAME,
                                              sizeof(struct stack_snapshot_frame32),
                                              saved_count/sizeof(struct stack_snapshot_frame32),
                                              &out_addr));
                        }
                    }

                    if (thread_snapshot_flags != 0) {
                        cur_thread_snap->ths_ss_flags |= thread_snapshot_flags;
                    }
                }
                /* Call through to the machine specific trace routines
                 * Frames are added past the snapshot header.
                 */
                if (thread->kernel_stack != 0) {
                    uint32_t thread_snapshot_flags = 0;
#if defined(__LP64__)
                    out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
                    saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, FALSE, &thread_snapshot_flags);
                    if (saved_count > 0){
                        cur_thread_snap->ths_ss_flags |= kKernel64_p;
                        kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
                                          STACKSHOT_KCTYPE_KERN_STACKFRAME64,
                                          sizeof(struct stack_snapshot_frame64),
                                          saved_count/sizeof(struct stack_snapshot_frame64),
                                          &out_addr));
                    }
#else
                    out_addr = (mach_vm_address_t)kcd_end_address(stackshot_kcdata_p);
                    saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(stackshot_kcdata_p), MAX_FRAMES, FALSE, &thread_snapshot_flags);
                    if (saved_count > 0) {
                        kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p,
                                          STACKSHOT_KCTYPE_KERN_STACKFRAME,
                                          sizeof(struct stack_snapshot_frame32),
                                          saved_count/sizeof(struct stack_snapshot_frame32),
                                          &out_addr));
                    }
#endif
                    if (thread_snapshot_flags != 0) {
                        cur_thread_snap->ths_ss_flags |= thread_snapshot_flags;
                    }
                }

                /* mark end of thread snapshot data */
                kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid));
            }

            /* mark end of task snapshot data */
            kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK, task_uniqueid));
        }
    }
    /* === END of populating stackshot data === */

    *pBytesTraced = (uint32_t) kcdata_memory_get_used_bytes(stackshot_kcdata_p);

error_exit:
    /* Release stack snapshot wait indicator */
    kdp_snapshot_postflight();

    return error;
}
static int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t *pbytesTraced)
{
    char *tracepos = (char *) tracebuf;
    char *tracebound = tracepos + tracebuf_size;
    uint32_t tracebytes = 0;
    int error = 0, i;

    task_t task = TASK_NULL;
    thread_t thread = THREAD_NULL;
    unsigned framesize = 2 * sizeof(vm_offset_t);

    queue_head_t *task_list = &tasks;
    boolean_t is_active_list = TRUE;

    boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
    boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
    boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
    boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);
    boolean_t save_donating_pids_p = ((trace_flags & STACKSHOT_SAVE_IMP_DONATION_PIDS) != 0);
    if (trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
        if (tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
            error = -1;
            goto error_exit;
        }
        kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
        tracepos += sizeof(struct mem_and_io_snapshot);
    }
    queue_iterate(task_list, task, task_t, tasks) {
        if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
            continue;

        int task_pid = pid_from_task(task);
        uint64_t task_uniqueid = proc_uniqueid_from_task(task);
        boolean_t task64 = task_has_64BitAddr(task);

        if (!task->active || task_is_a_corpse(task)) {
            /*
             * Not interested in terminated tasks without threads, and
             * at the moment, stackshot can't handle a task without a name.
             */
            if (queue_empty(&task->threads) || task_pid == -1) {
                continue;
            }
        }

        /* Trace everything, unless a process was specified */
        if ((pid == -1) || (pid == task_pid)) {
            task_snapshot_t task_snap;
            thread_snapshot_t tsnap = NULL;
            uint32_t uuid_info_count = 0;
            mach_vm_address_t uuid_info_addr = 0;
            boolean_t have_map = (task->map != NULL) &&
                (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
            boolean_t have_pmap = have_map && (task->map->pmap != NULL) &&
                (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
            uint64_t shared_cache_base_address = 0;
            if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
                // Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
                if (task64) {
                    struct user64_dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
                        uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                } else {
                    struct user32_dyld_all_image_infos task_image_infos;
                    if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
                        uuid_info_count = task_image_infos.uuidArrayCount;
                        uuid_info_addr = task_image_infos.uuidArray;
                    }
                }

                // If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
                // this data structure), we zero the uuid_info_count so that we won't even try to save load info
                // for this task.
                if (!uuid_info_addr) {
                    uuid_info_count = 0;
                }
            }

            if (have_pmap && task_pid == 0) {
                if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
                    uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
                } else {
                    uuid_info_count = 1; /* at least include kernel uuid */
                }
            }
            if (tracepos + sizeof(struct task_snapshot) > tracebound) {
                error = -1;
                goto error_exit;
            }

            task_snap = (task_snapshot_t) tracepos;
            task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
            task_snap->pid = task_pid;
            task_snap->uniqueid = task_uniqueid;
            task_snap->nloadinfos = uuid_info_count;
            task_snap->donating_pid_count = 0;

            /* Add the BSD process identifiers */
            if (task_pid != -1 && task->bsd_info != NULL)
                proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
            else
                task_snap->p_comm[0] = '\0';
            task_snap->ss_flags = 0;
            if (task64)
                task_snap->ss_flags |= kUser64_p;
            if (task64 && task_pid == 0)
                task_snap->ss_flags |= kKernel64_p;
            if (!task->active || task_is_a_corpse(task))
                task_snap->ss_flags |= kTerminatedSnapshot;
            if (task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
            if (task->frozen) task_snap->ss_flags |= kFrozen;

            if (task->effective_policy.darwinbg == 1) {
                task_snap->ss_flags |= kTaskDarwinBG;
            }

            if (task->requested_policy.t_role == TASK_FOREGROUND_APPLICATION) {
                task_snap->ss_flags |= kTaskIsForeground;
            }

            if (task->requested_policy.t_boosted == 1) {
                task_snap->ss_flags |= kTaskIsBoosted;
            }

            if (task->effective_policy.t_sup_active == 1)
                task_snap->ss_flags |= kTaskIsSuppressed;
#if IMPORTANCE_INHERITANCE
            if (task->task_imp_base) {
                if (task->task_imp_base->iit_donor) {
                    task_snap->ss_flags |= kTaskIsImpDonor;
                }
                if (task->task_imp_base->iit_live_donor) {
                    task_snap->ss_flags |= kTaskIsLiveImpDonor;
                }
            }
#endif

            task_snap->latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
                LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);
->suspend_count
= task
->suspend_count
;
1540 task_snap
->task_size
= have_pmap
? pmap_resident_count(task
->map
->pmap
) : 0;
1541 task_snap
->faults
= task
->faults
;
1542 task_snap
->pageins
= task
->pageins
;
1543 task_snap
->cow_faults
= task
->cow_faults
;
1545 task_snap
->user_time_in_terminated_threads
= task
->total_user_time
;
1546 task_snap
->system_time_in_terminated_threads
= task
->total_system_time
;
1548 * The throttling counters are maintained as 64-bit counters in the proc
1549 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
1550 * struct to save space and since we do not expect them to overflow 32-bits. If we
1551 * find these values overflowing in the future, the fix would be to simply
1552 * upgrade these counters to 64-bit in the task_snapshot struct
1554 task_snap
->was_throttled
= (uint32_t) proc_was_throttled_from_task(task
);
1555 task_snap
->did_throttle
= (uint32_t) proc_did_throttle_from_task(task
);
1557 /* fetch some useful BSD info: */
1558 task_snap
->p_start_sec
= task_snap
->p_start_usec
= 0;
1559 proc_starttime_kdp(task
->bsd_info
, &task_snap
->p_start_sec
, &task_snap
->p_start_usec
);
1560 if (task
->shared_region
&& ml_validate_nofault((vm_offset_t
)task
->shared_region
,
1561 sizeof(struct vm_shared_region
))) {
1562 struct vm_shared_region
*sr
= task
->shared_region
;
1564 shared_cache_base_address
= sr
->sr_base_address
+ sr
->sr_first_mapping
;
1566 if (!shared_cache_base_address
1567 || !kdp_copyin(task
->map
->pmap
, shared_cache_base_address
+ offsetof(struct _dyld_cache_header
, uuid
), task_snap
->shared_cache_identifier
, sizeof(task_snap
->shared_cache_identifier
))) {
1568 memset(task_snap
->shared_cache_identifier
, 0x0, sizeof(task_snap
->shared_cache_identifier
));
1570 if (task
->shared_region
) {
1572 * No refcounting here, but we are in debugger
1573 * context, so that should be safe.
1575 task_snap
->shared_cache_slide
= task
->shared_region
->sr_slide_info
.slide
;
1577 task_snap
->shared_cache_slide
= 0;
        /* I/O Statistics */
        assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
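
        /*
         * Only total, read, paging, and metadata I/O are tracked directly;
         * the write, non-paging, and data figures below are derived by
         * subtracting the tracked bucket from the totals.
         */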
        if (task->task_io_stats) {
            task_snap->disk_reads_count = task->task_io_stats->disk_reads.count;
            task_snap->disk_reads_size = task->task_io_stats->disk_reads.size;
            task_snap->disk_writes_count = (task->task_io_stats->total_io.count - task->task_io_stats->disk_reads.count);
            task_snap->disk_writes_size = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
            for (i = 0; i < IO_NUM_PRIORITIES; i++) {
                task_snap->io_priority_count[i] = task->task_io_stats->io_priority[i].count;
                task_snap->io_priority_size[i] = task->task_io_stats->io_priority[i].size;
            }
            task_snap->paging_count = task->task_io_stats->paging.count;
            task_snap->paging_size = task->task_io_stats->paging.size;
            task_snap->non_paging_count = (task->task_io_stats->total_io.count - task->task_io_stats->paging.count);
            task_snap->non_paging_size = (task->task_io_stats->total_io.size - task->task_io_stats->paging.size);
            task_snap->metadata_count = task->task_io_stats->metadata.count;
            task_snap->metadata_size = task->task_io_stats->metadata.size;
            task_snap->data_count = (task->task_io_stats->total_io.count - task->task_io_stats->metadata.count);
            task_snap->data_size = (task->task_io_stats->total_io.size - task->task_io_stats->metadata.size);
        } else {
            /* zero from disk_reads_count to end of structure */
            memset(&task_snap->disk_reads_count, 0, offsetof(struct task_snapshot, metadata_size) - offsetof(struct task_snapshot, disk_reads_count));
        }
        tracepos += sizeof(struct task_snapshot);
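
        /*
         * The image (UUID) load-info array is laid down immediately after
         * the task snapshot: copied out of the task's address space for
         * user tasks, or synthesized from the kernel UUID and the loaded
         * kext summaries for the kernel task (pid 0).
         */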
        if (task_pid > 0 && uuid_info_count > 0) {
            uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
            uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;

            if (tracepos + uuid_info_array_size > tracebound) {
                error = -1;
                goto error_exit;
            }

            // Copy in the UUID info array
            // It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
            if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
                task_snap->nloadinfos = 0;
            else
                tracepos += uuid_info_array_size;
        } else if (task_pid == 0 && uuid_info_count > 0) {
            uint32_t uuid_info_size = (uint32_t)sizeof(kernel_uuid_info);
            uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
            uint32_t uuid_offset = offsetof(kernel_uuid_info, imageUUID);
            uintptr_t image_load_address;

            if (tracepos + uuid_info_array_size > tracebound) {
                error = -1;
                goto error_exit;
            }

            if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
                /* Kernel UUID not found or inaccessible */
                task_snap->nloadinfos = 0;
            } else {
                image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
                memcpy(tracepos, &image_load_address, sizeof(uintptr_t));
                memcpy((tracepos + uuid_offset), kernel_uuid, sizeof(uuid_t));
                tracepos += uuid_info_size;

                if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
                        gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
                    uint32_t kexti;
                    for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
                        image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
                        memcpy(tracepos, &image_load_address, sizeof(uintptr_t));
                        memcpy((tracepos + uuid_offset), &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
                        tracepos += uuid_info_size;
                    }
                } else {
                    /* kext summary invalid, but kernel UUID was copied */
                    task_snap->nloadinfos = 1;
                }
            }
        }
#if IMPORTANCE_INHERITANCE
        if (save_donating_pids_p) {
            if (tracepos + (TASK_IMP_WALK_LIMIT * sizeof(int32_t)) > tracebound) {
                error = -1;
                goto error_exit;
            }
            task_snap->donating_pid_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS, tracepos, TASK_IMP_WALK_LIMIT);
            tracepos += sizeof(int) * task_snap->donating_pid_count;
        }
#endif
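
        /*
         * Walk each thread in the task, emitting a thread_snapshot followed
         * (optionally) by its dispatch queue serial number and its kernel
         * and user stack frames.
         */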
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            uint64_t tval;

            if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
                goto error_exit;

            if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
                error = -1;
                goto error_exit;
            }
            if (!save_userframes_p && thread->kernel_stack == 0)
                continue;
            /* Populate the thread snapshot header */
            tsnap = (thread_snapshot_t) tracepos;
            tsnap->thread_id = thread_tid(thread);
            tsnap->state = thread->state;
            tsnap->priority = thread->base_pri;
            tsnap->sched_pri = thread->sched_pri;
            tsnap->sched_flags = thread->sched_flags;
            tsnap->wait_event = VM_KERNEL_UNSLIDE_OR_PERM(thread->wait_event);
            tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
            tval = safe_grab_timer_value(&thread->user_timer);
            tsnap->user_time = tval;
            tval = safe_grab_timer_value(&thread->system_timer);
            if (thread->precise_user_kernel_time) {
                tsnap->system_time = tval;
            } else {
                tsnap->user_time += tval;
                tsnap->system_time = 0;
            }
            tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
            bzero(&tsnap->pth_name, STACKSHOT_MAX_THREAD_NAME_SIZE);
            proc_threadname_kdp(thread->uthread, &tsnap->pth_name[0], STACKSHOT_MAX_THREAD_NAME_SIZE);
            tracepos += sizeof(struct thread_snapshot);
            tsnap->ss_flags = 0;
            /* I/O Statistics */
            assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
            if (thread->thread_io_stats) {
                tsnap->disk_reads_count = thread->thread_io_stats->disk_reads.count;
                tsnap->disk_reads_size = thread->thread_io_stats->disk_reads.size;
                tsnap->disk_writes_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->disk_reads.count);
                tsnap->disk_writes_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->disk_reads.size);
                for (i = 0; i < IO_NUM_PRIORITIES; i++) {
                    tsnap->io_priority_count[i] = thread->thread_io_stats->io_priority[i].count;
                    tsnap->io_priority_size[i] = thread->thread_io_stats->io_priority[i].size;
                }
                tsnap->paging_count = thread->thread_io_stats->paging.count;
                tsnap->paging_size = thread->thread_io_stats->paging.size;
                tsnap->non_paging_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->paging.count);
                tsnap->non_paging_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->paging.size);
                tsnap->metadata_count = thread->thread_io_stats->metadata.count;
                tsnap->metadata_size = thread->thread_io_stats->metadata.size;
                tsnap->data_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->metadata.count);
                tsnap->data_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->metadata.size);
            } else {
                /* zero from disk_reads_count to end of structure */
                memset(&tsnap->disk_reads_count, 0,
                    offsetof(struct thread_snapshot, metadata_size) - offsetof(struct thread_snapshot, disk_reads_count));
            }
            if (thread->effective_policy.darwinbg) {
                tsnap->ss_flags |= kThreadDarwinBG;
            }

            tsnap->io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
            if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
                tsnap->ss_flags |= kThreadIOPassive;
            }

            if (thread->suspend_count > 0) {
                tsnap->ss_flags |= kThreadSuspended;
            }

            if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
                tsnap->ss_flags |= kGlobalForcedIdle;
            }

            if (IPC_VOUCHER_NULL != thread->ith_voucher) {
                tsnap->voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher);
            }

            tsnap->ts_qos = thread->effective_policy.thep_qos;
            tsnap->ts_rqos = thread->requested_policy.thrp_qos;
            tsnap->ts_rqos_override = thread->requested_policy.thrp_qos_override;
            /* zero out unused data. */
            tsnap->_reserved[0] = 0;
            tsnap->_reserved[1] = 0;
            tsnap->_reserved[2] = 0;
            tsnap->total_syscalls = thread->syscalls_mach + thread->syscalls_unix;
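
            /*
             * If dispatch info was requested, chase the thread's dispatch
             * queue pointer through user memory and append the queue's
             * serial number after the thread snapshot.
             */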
            if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
                uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
                if (dqkeyaddr != 0) {
                    uint64_t dqaddr = 0;
                    if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
                        uint64_t dqserialnumaddr = dqaddr + proc_dispatchqueue_serialno_offset_from_task(task);
                        uint64_t dqserialnum = 0;
                        if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
                            tsnap->ss_flags |= kHasDispatchSerial;
                            memcpy(tracepos, &dqserialnum, sizeof(dqserialnum));
                            tracepos += 8;
                        }
                    }
                }
            }
            /* Call through to the machine specific trace routines.
             * Frames are added past the snapshot header.
             */
            tracebytes = 0;
            if (thread->kernel_stack != 0) {
                uint32_t thread_snapshot_flags = 0;
#if defined(__LP64__)
                tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE, &thread_snapshot_flags);
                tsnap->ss_flags |= kKernel64_p;
                framesize = 16;
#else
                tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE, &thread_snapshot_flags);
                framesize = 8;
#endif
                if (thread_snapshot_flags != 0) {
                    tsnap->ss_flags |= thread_snapshot_flags;
                }
                tsnap->nkern_frames = tracebytes/framesize;
                tracepos += tracebytes;
            }
            /* Trace user stack, if any */
            if (save_userframes_p && task->active && thread->task->map != kernel_map) {
                uint32_t thread_snapshot_flags = 0;

                if (task_has_64BitAddr(thread->task)) {
                    tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE, &thread_snapshot_flags);
                    tsnap->ss_flags |= kUser64_p;
                    framesize = 16;
                } else {
                    tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE, &thread_snapshot_flags);
                    framesize = 8;
                }
                if (thread_snapshot_flags != 0) {
                    tsnap->ss_flags |= thread_snapshot_flags;
                }
                tsnap->nuser_frames = tracebytes/framesize;
                tracepos += tracebytes;
            }
        }

        if (!save_userframes_p && tsnap == NULL) {
            /*
             * No thread info is collected due to lack of kernel frames.
             * Remove information about this task also.
             */
            tracepos = (char *)task_snap;
        }
    }
    if (is_active_list) {
        is_active_list = FALSE;
        task_list = &terminated_tasks;
        goto walk_list;
    }

error_exit:
    /* Release stack snapshot wait indicator */
    kdp_snapshot_postflight();

    *pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);

    return error;
}
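
/*
 * Helpers that pull BSD-level process data for a task; each one tolerates
 * a task with no bsd_info so it is safe to call from debugger context.
 */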
static int pid_from_task(task_t task)
{
    int pid = -1;

    if (task->bsd_info) {
        pid = proc_pid(task->bsd_info);
    } else {
        pid = task_pid(task);
    }

    return pid;
}
uint64_t
proc_uniqueid_from_task(task_t task)
{
    uint64_t uniqueid = ~(0ULL);

    if (task->bsd_info)
        uniqueid = proc_uniqueid(task->bsd_info);

    return uniqueid;
}
static uint64_t
proc_was_throttled_from_task(task_t task)
{
    uint64_t was_throttled = 0;

    if (task->bsd_info)
        was_throttled = proc_was_throttled(task->bsd_info);

    return was_throttled;
}
static uint64_t
proc_did_throttle_from_task(task_t task)
{
    uint64_t did_throttle = 0;

    if (task->bsd_info)
        did_throttle = proc_did_throttle(task->bsd_info);

    return did_throttle;
}
static uint64_t
proc_dispatchqueue_serialno_offset_from_task(task_t task)
{
    uint64_t dq_serialno_offset = 0;

    if (task->bsd_info) {
        dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
    }

    return dq_serialno_offset;
}
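
/*
 * Capture a system-wide memory and I/O snapshot: VM page counts, the
 * compressor statistics summed across all processors, and the current
 * memory pressure figures.
 */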
void
kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
{
    unsigned int pages_reclaimed;
    unsigned int pages_wanted;
    kern_return_t kErr;

    processor_t processor;
    vm_statistics64_t stat;
    vm_statistics64_data_t host_vm_stat;

    processor = processor_list;
    stat = &PROCESSOR_DATA(processor, vm_stat);
    host_vm_stat = *stat;
    if (processor_count > 1) {
        /*
         * processor_list may be in the process of changing as we are
         * attempting a stackshot. Ordinarily it will be lock protected,
         * but it is not safe to lock in the context of the debugger.
         * Fortunately we never remove elements from the processor list,
         * and only add to the end of the list, so we SHOULD be able
         * to walk it. If we ever want to truly tear down processors,
         * this will have to change.
         */
        while ((processor = processor->processor_list) != NULL) {
            stat = &PROCESSOR_DATA(processor, vm_stat);
            host_vm_stat.compressions += stat->compressions;
            host_vm_stat.decompressions += stat->decompressions;
        }
    }
    memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
    memio_snap->free_pages = vm_page_free_count;
    memio_snap->active_pages = vm_page_active_count;
    memio_snap->inactive_pages = vm_page_inactive_count;
    memio_snap->purgeable_pages = vm_page_purgeable_count;
    memio_snap->wired_pages = vm_page_wire_count;
    memio_snap->speculative_pages = vm_page_speculative_count;
    memio_snap->throttled_pages = vm_page_throttled_count;
    memio_snap->busy_buffer_count = count_busy_buffers();
    memio_snap->filebacked_pages = vm_page_pageable_external_count;
    memio_snap->compressions = (uint32_t)host_vm_stat.compressions;
    memio_snap->decompressions = (uint32_t)host_vm_stat.decompressions;
    memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT;
    kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);

    if (!kErr) {
        memio_snap->pages_wanted = (uint32_t)pages_wanted;
        memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
        memio_snap->pages_wanted_reclaimed_valid = 1;
    } else {
        memio_snap->pages_wanted = 0;
        memio_snap->pages_reclaimed = 0;
        memio_snap->pages_wanted_reclaimed_valid = 0;
    }
}
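
/*
 * Copy bytes from a user address space into a kernel buffer while in
 * debugger context: each page is resolved through the pmap and copied
 * a page-fragment at a time, bailing out on any unmapped page.
 */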
boolean_t
kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
{
    size_t rem = size;
    char *kvaddr = dest;

#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
    /* Identify if destination buffer is in panic storage area */
    if ((vm_offset_t)dest >= gPanicBase && (vm_offset_t)dest < gPanicBase + gPanicSize) {
        if (((vm_offset_t)dest + size) >= (gPanicBase + gPanicSize)) {
            return FALSE;
        }
        ppnum_t upn = pmap_find_phys(p, uaddr);
        uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
        void *src_va = (void*)phystokv(phys_src);
        if (upn && pmap_valid_page(upn)) {
            bcopy(src_va, kvaddr, size);
            return TRUE;
        }
        return FALSE;
    }
#endif
    while (rem) {
        ppnum_t upn = pmap_find_phys(p, uaddr);
        uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
        uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
        uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
        uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
        size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
        cur_size = MIN(cur_size, rem);

        if (upn && pmap_valid_page(upn) && phys_dest) {
            bcopy_phys(phys_src, phys_dest, cur_size);
        } else {
            break;
        }

        uaddr += cur_size;
        kvaddr += cur_size;
        rem -= cur_size;
    }

    return (rem == 0);
}
void
do_stackshot()
{
    if (stack_snapshot_flags & STACKSHOT_KCDATA_FORMAT) {
        stack_snapshot_ret = kdp_stackshot_kcdata_format(stack_snapshot_pid,
            stack_snapshot_flags,
            &stack_snapshot_bytes_traced);
    } else {
        stack_snapshot_ret = kdp_stackshot(stack_snapshot_pid,
            stack_snapshot_buf, stack_snapshot_bufsize,
            stack_snapshot_flags, &stack_snapshot_bytes_traced);
    }
}
/*
 * A fantastical routine that tries to be fast about returning
 * translations. Caches the last page we found a translation
 * for, so that we can be quick about multiple queries to the
 * same page. It turns out this is exactly the workflow
 * machine_trace_thread and its relatives tend to throw at us.
 *
 * Please zero the nasty global this uses after a bulk lookup;
 * this isn't safe across a switch of the kdp_pmap or changes
 * to the pmap being inspected.
 *
 * This also means that if zero is a valid KVA, we are
 * screwed. Sucks to be us. Fortunately, this should never
 * happen.
 */
vm_offset_t
machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags)
{
    unsigned cur_wimg_bits;
    vm_offset_t cur_target_page;
    vm_offset_t cur_phys_addr;
    vm_offset_t kern_virt_target_addr;

    cur_target_page = atop(cur_target_addr);

    if ((cur_target_page != prev_target_page) || validate_next_addr) {
        /*
         * Alright; it wasn't our previous page. So
         * we must validate that there is a page
         * table entry for this address under the
         * current kdp_pmap, and that it has default
         * cache attributes (otherwise it may not be
         * safe to access it).
         */
        cur_phys_addr = kdp_vtophys(kdp_pmap ? kdp_pmap : kernel_pmap, cur_target_addr);

        if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
            if (!stack_enable_faulting) {
                return 0;
            }

            /*
             * The pmap doesn't have a valid page so we start at the top level
             * vm map and try a lightweight fault.
             */
            cur_phys_addr = kdp_lightweight_fault(map, (cur_target_addr & ~PAGE_MASK), thread_trace_flags);
            cur_phys_addr += (cur_target_addr & PAGE_MASK);

            if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr)))
                return 0;
        } else {
            /*
             * This check is done in kdp_lightweight_fault for the fault path.
             */
            cur_wimg_bits = pmap_cache_attributes((ppnum_t) atop(cur_phys_addr));

            if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
                return 0;
            }
        }

#if defined(__x86_64__)
        kern_virt_target_addr = (vm_offset_t) PHYSMAP_PTOV(cur_phys_addr);
#else
#error Oh come on... we should really unify the physical -> kernel virtual interface
#endif
        prev_target_page = cur_target_page;
        prev_target_kva = (kern_virt_target_addr & ~PAGE_MASK);
        validate_next_addr = FALSE;
        return kern_virt_target_addr;
    } else {
        /* We found a translation, so stash this page */
        kern_virt_target_addr = prev_target_kva + (cur_target_addr & PAGE_MASK);
        return kern_virt_target_addr;
    }
}
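
/*
 * Invalidate the translation cache used by machine_trace_thread_get_kva;
 * this should be called between bulk lookups and across any change of
 * kdp_pmap.
 */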
void
machine_trace_thread_clear_validation_cache(void)
{
    validate_next_addr = TRUE;
}