extern char *proc_name_address(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
* compute_averages(). It will notify its client (if one
* exists) when it has enough data to be worth flushing.
*/
int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
int telemetry_buffer_notify_at = 0;
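/*
 * Illustrative only (not necessarily the shipping check): a consumer of
 * the two counters above marks the buffer, accumulates samples, and
 * notifies the client once enough unflushed data has built up, e.g.:
 *
 *	if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
 *		... notify the client to flush ...
 *	}
 */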
-lck_grp_t telemetry_lck_grp;
-lck_mtx_t telemetry_mtx;
-lck_mtx_t telemetry_pmi_mtx;
+LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
+LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
+LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
#define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
#define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
- lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
- lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);
- lck_mtx_init(&telemetry_pmi_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);
-
- if (!PE_parse_boot_argn("telemetry_buffer_size", &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
+ if (!PE_parse_boot_argn("telemetry_buffer_size",
+ &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
- if (!PE_parse_boot_argn("telemetry_notification_leeway", &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
+ if (!PE_parse_boot_argn("telemetry_notification_leeway",
+ &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
- if (!PE_parse_boot_argn("telemetry_sample_rate", &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
+ if (!PE_parse_boot_argn("telemetry_sample_rate",
+ &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
}
/*
* To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
*/
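/*
 * For example (illustrative; requires a machine that honors nvram
 * boot-args):
 *
 *	$ sudo nvram boot-args="telemetry_sample_all_tasks=1"
 */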
- if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
-#if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG)
+ if (!PE_parse_boot_argn("telemetry_sample_all_tasks",
+ &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
+#if !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG)
* buffer with the global telemetry lock held -- so we must do our (possibly faulting)
* copies from userland here, before taking the lock.
*/
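/*
 * Sketch of the ordering this enforces (TELEMETRY_UNLOCK assumed to be
 * the matching unlock macro; it is not shown in this hunk):
 *
 *	copyin(...);		// may fault and block -- lock NOT held
 *	TELEMETRY_LOCK();	// no faulting copies past this point
 *	... append the pre-copied data to the telemetry buffer ...
 *	TELEMETRY_UNLOCK();
 */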
- uintptr_t frames[MAX_CALLSTACK_FRAMES] = {};
- bool user64;
- int backtrace_error = backtrace_user(frames, MAX_CALLSTACK_FRAMES, &btcount, &user64);
- if (backtrace_error) {
+
+ uintptr_t frames[128];
+ bool user64_regs = false;
+ int bterror = 0;
+ btcount = backtrace_user(frames,
+ sizeof(frames) / sizeof(frames[0]), &bterror, &user64_regs, NULL);
+ if (bterror != 0) {
- int shared_cache_uuid_valid = 0;
- uint64_t shared_cache_base_address;
- struct _dyld_cache_header shared_cache_header;
- uint64_t shared_cache_slide;
+ int shared_cache_uuid_valid = 0;
+ uint64_t shared_cache_base_address = 0;
+ struct _dyld_cache_header shared_cache_header = {};
+ uint64_t shared_cache_slide = 0;
(copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid,
sizeof(shared_cache_header.uuid)) == 0)) {
shared_cache_uuid_valid = 1;
- uint32_t uuid_info_count = 0;
- mach_vm_address_t uuid_info_addr = 0;
- if (task_has_64Bit_addr(task)) {
+ uint32_t uuid_info_count = 0;
+ mach_vm_address_t uuid_info_addr = 0;
+ uint32_t uuid_info_size = 0;
+ if (user64_va) {
+ uuid_info_size = sizeof(struct user64_dyld_uuid_info);
struct user64_dyld_all_image_infos task_image_infos;
if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
uuid_info_addr = task_image_infos.uuidArray;
}
} else {
struct user32_dyld_all_image_infos task_image_infos;
if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
uuid_info_count = task_image_infos.uuidArrayCount;
uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
char *uuid_info_array = NULL;
if (uuid_info_count > 0) {
- if ((uuid_info_array = (char *)kalloc(uuid_info_array_size)) == NULL) {
+ uuid_info_array = kheap_alloc(KHEAP_TEMP,
+ uuid_info_array_size, Z_WAITOK);
+ if (uuid_info_array == NULL) {
* It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
*/
if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
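	/*
	 * Hedged sketch of this failure path (the exact shipping cleanup
	 * may differ): drop the temporary buffer; a NULL array later
	 * yields nloadinfos == 0 in the task snapshot.
	 */
	kheap_free(KHEAP_TEMP, uuid_info_array, uuid_info_array_size);
	uuid_info_array = NULL;
}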
(dqaddr != 0) && (dq_serialno_offset != 0)) {
uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
tsnap->latency_qos = task_grab_latency_qos(task);
strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
-vm_offset_t bootprofile_buffer = 0;
-uint32_t bootprofile_buffer_size = 0;
-uint32_t bootprofile_buffer_current_position = 0;
-uint32_t bootprofile_interval_ms = 0;
-uint32_t bootprofile_stackshot_flags = 0;
-uint64_t bootprofile_interval_abs = 0;
-uint64_t bootprofile_next_deadline = 0;
-uint32_t bootprofile_all_procs = 0;
-char bootprofile_proc_name[17];
+vm_offset_t bootprofile_buffer = 0;
+uint32_t bootprofile_buffer_size = 0;
+uint32_t bootprofile_buffer_current_position = 0;
+uint32_t bootprofile_interval_ms = 0;
+uint64_t bootprofile_stackshot_flags = 0;
+uint64_t bootprofile_interval_abs = 0;
+uint64_t bootprofile_next_deadline = 0;
+uint32_t bootprofile_all_procs = 0;
+char bootprofile_proc_name[17];
-lck_grp_t bootprofile_lck_grp;
-lck_mtx_t bootprofile_mtx;
+LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group");
+LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp);
- lck_grp_init(&bootprofile_lck_grp, "bootprofile group", LCK_GRP_ATTR_NULL);
- lck_mtx_init(&bootprofile_mtx, &bootprofile_lck_grp, LCK_ATTR_NULL);
-
- if (!PE_parse_boot_argn("bootprofile_buffer_size", &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
+ if (!PE_parse_boot_argn("bootprofile_buffer_size",
+ &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
- if (!PE_parse_boot_argn("bootprofile_interval_ms", &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
+ if (!PE_parse_boot_argn("bootprofile_interval_ms",
+ &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
- if (!PE_parse_boot_argn("bootprofile_stackshot_flags", &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
+ if (!PE_parse_boot_argn("bootprofile_stackshot_flags",
+ &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
- if (!PE_parse_boot_argn("bootprofile_proc_name", &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
+ if (!PE_parse_boot_argn("bootprofile_proc_name",
+ &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
- kprintf("Boot profile: Sampling %s once per %u ms at %s\n", bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
+ kprintf("Boot profile: Sampling %s once per %u ms at %s\n",
+ bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));
timer_call_setup(&bootprofile_timer_call_entry,
/* initiate a stackshot with whatever portion of the buffer is left */
if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
kern_return_t r = stack_snapshot_from_kernel(
pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
bootprofile_buffer_size - bootprofile_buffer_current_position,
- flags, bootprofile_delta_since_timestamp, &retbytes);
+ flags, bootprofile_delta_since_timestamp, 0, &retbytes);
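		/*
		 * Hedged sketch of the follow-on bookkeeping (the shipping
		 * code may differ): on success, advance the write position
		 * by however many bytes the stackshot produced.
		 */
		if (r == KERN_SUCCESS) {
			bootprofile_buffer_current_position += retbytes;
		}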