/*
- * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define TELEMETRY_DEBUG 0
-extern int proc_pid(void *);
+struct proc;
+extern int proc_pid(struct proc *);
extern char *proc_name_address(void *p);
extern uint64_t proc_uniqueid(void *p);
extern uint64_t proc_was_throttled(void *p);
* compute_averages(). It will notify its client (if one
* exists) when it has enough data to be worth flushing.
*/
-struct micro_snapshot_buffer telemetry_buffer = {0, 0, 0, 0};
+struct micro_snapshot_buffer telemetry_buffer = {
+ .buffer = 0,
+ .size = 0,
+ .current_position = 0,
+ .end_point = 0
+};
int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
int telemetry_buffer_notify_at = 0;
-lck_grp_t telemetry_lck_grp;
-lck_mtx_t telemetry_mtx;
-lck_mtx_t telemetry_pmi_mtx;
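+/*
+ * The telemetry lock group and mutexes are statically declared and
+ * initialized at compile time, so the runtime lck_grp_init()/lck_mtx_init()
+ * calls are removed from telemetry_init() below.
+ */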
+LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group");
+LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp);
+LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp);
#define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0)
#define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
kern_return_t ret;
uint32_t telemetry_notification_leeway;
- lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
- lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);
- lck_mtx_init(&telemetry_pmi_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);
-
- if (!PE_parse_boot_argn("telemetry_buffer_size", &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
+ if (!PE_parse_boot_argn("telemetry_buffer_size",
+ &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
}
}
bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size);
- if (!PE_parse_boot_argn("telemetry_notification_leeway", &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
+ if (!PE_parse_boot_argn("telemetry_notification_leeway",
+ &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
/*
* By default, notify the user to collect the buffer when there is this much space left in the buffer.
*/
}
telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;
- if (!PE_parse_boot_argn("telemetry_sample_rate", &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
+ if (!PE_parse_boot_argn("telemetry_sample_rate",
+ &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
}
/*
* To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
*/
- if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
-#if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG)
+ if (!PE_parse_boot_argn("telemetry_sample_all_tasks",
+ &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
+#if !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG)
telemetry_sample_all_tasks = FALSE;
#else
telemetry_sample_all_tasks = TRUE;
-#endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */
+#endif /* !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG) */
}
kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
* buffer with the global telemetry lock held -- so we must do our (possibly faulting)
* copies from userland here, before taking the lock.
*/
- uintptr_t frames[MAX_CALLSTACK_FRAMES] = {};
- bool user64;
- int backtrace_error = backtrace_user(frames, MAX_CALLSTACK_FRAMES, &btcount, &user64);
- if (backtrace_error) {
+
+ uintptr_t frames[128];
+ bool user64_regs = false;
+ int bterror = 0;
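+ /*
+  * backtrace_user() returns the number of frames captured and reports any
+  * failure through its error out-parameter; the trailing out-parameter is
+  * not needed here, so NULL is passed.
+  */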
+ btcount = backtrace_user(frames,
+ sizeof(frames) / sizeof(frames[0]), &bterror, &user64_regs, NULL);
+ if (bterror != 0) {
return;
}
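+ /*
+  * user64_regs reflects the register width of the sampled thread (reported
+  * by backtrace_user() above); user64_va reflects the task's address-space
+  * width and sizes the userland pointer copyins below.
+  */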
+ bool user64_va = task_has_64Bit_addr(task);
/*
* Find the actual [slid] address of the shared cache's UUID, and copy it in from userland.
*/
- int shared_cache_uuid_valid = 0;
- uint64_t shared_cache_base_address;
- struct _dyld_cache_header shared_cache_header;
- uint64_t shared_cache_slide;
+ int shared_cache_uuid_valid = 0;
+ uint64_t shared_cache_base_address = 0;
+ struct _dyld_cache_header shared_cache_header = {};
+ uint64_t shared_cache_slide = 0;
/*
* Don't copy in the entire shared cache header; we only need the UUID. Calculate the
(copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid,
sizeof(shared_cache_header.uuid)) == 0)) {
shared_cache_uuid_valid = 1;
- shared_cache_slide = vm_shared_region_get_slide(sr);
+ shared_cache_slide = sr->sr_slide;
}
// vm_shared_region_get() gave us a reference on the shared region.
vm_shared_region_deallocate(sr);
*
* XXX - make this common with kdp?
*/
- uint32_t uuid_info_count = 0;
- mach_vm_address_t uuid_info_addr = 0;
- if (task_has_64Bit_addr(task)) {
+ uint32_t uuid_info_count = 0;
+ mach_vm_address_t uuid_info_addr = 0;
+ uint32_t uuid_info_size = 0;
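+ /*
+  * Select the dyld UUID-info record size up front from the task's
+  * address-space width instead of re-checking task_has_64Bit_addr() when
+  * sizing the array below.
+  */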
+ if (user64_va) {
+ uuid_info_size = sizeof(struct user64_dyld_uuid_info);
struct user64_dyld_all_image_infos task_image_infos;
if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
uuid_info_addr = task_image_infos.uuidArray;
}
} else {
+ uuid_info_size = sizeof(struct user32_dyld_uuid_info);
struct user32_dyld_all_image_infos task_image_infos;
if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
uuid_info_count = task_image_infos.uuidArrayCount;
uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
}
- uint32_t uuid_info_size = (uint32_t)(task_has_64Bit_addr(thread->task) ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
char *uuid_info_array = NULL;
if (uuid_info_count > 0) {
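+ /*
+  * Allocate from the temporary kernel heap; Z_WAITOK allows the allocation
+  * to block. The buffer is released with kheap_free() on the copyin
+  * failure path or at the end of the sample.
+  */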
- if ((uuid_info_array = (char *)kalloc(uuid_info_array_size)) == NULL) {
+ uuid_info_array = kheap_alloc(KHEAP_TEMP,
+ uuid_info_array_size, Z_WAITOK);
+ if (uuid_info_array == NULL) {
return;
}
* It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
*/
if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
- kfree(uuid_info_array, uuid_info_array_size);
+ kheap_free(KHEAP_TEMP, uuid_info_array, uuid_info_array_size);
uuid_info_array = NULL;
uuid_info_array_size = 0;
}
if (dqkeyaddr != 0) {
uint64_t dqaddr = 0;
uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
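+ /* The copyin width (8 or 4 bytes) follows the task's address-space width via user64_va. */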
- if ((copyin(dqkeyaddr, (char *)&dqaddr, (task_has_64Bit_addr(task) ? 8 : 4)) == 0) &&
+ if ((copyin(dqkeyaddr, (char *)&dqaddr, (user64_va ? 8 : 4)) == 0) &&
(dqaddr != 0) && (dq_serialno_offset != 0)) {
uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
- if (copyin(dqserialnumaddr, (char *)&dqserialnum, (task_has_64Bit_addr(task) ? 8 : 4)) == 0) {
+ if (copyin(dqserialnumaddr, (char *)&dqserialnum, (user64_va ? 8 : 4)) == 0) {
dqserialnum_valid = 1;
}
}
tsnap->ss_flags |= kTaskIsSuppressed;
}
+
tsnap->latency_qos = task_grab_latency_qos(task);
strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
- if (task_has_64Bit_addr(thread->task)) {
+ if (user64_va) {
tsnap->ss_flags |= kUser64_p;
}
current_buffer->current_position += sizeof(dqserialnum);
}
- if (user64) {
+ if (user64_regs) {
framesize = 8;
thsnap->ss_flags |= kUser64_p;
} else {
}
if (uuid_info_array != NULL) {
- kfree(uuid_info_array, uuid_info_array_size);
+ kheap_free(KHEAP_TEMP, uuid_info_array, uuid_info_array_size);
}
}
#define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */
-vm_offset_t bootprofile_buffer = 0;
-uint32_t bootprofile_buffer_size = 0;
-uint32_t bootprofile_buffer_current_position = 0;
-uint32_t bootprofile_interval_ms = 0;
-uint32_t bootprofile_stackshot_flags = 0;
-uint64_t bootprofile_interval_abs = 0;
-uint64_t bootprofile_next_deadline = 0;
-uint32_t bootprofile_all_procs = 0;
-char bootprofile_proc_name[17];
+vm_offset_t bootprofile_buffer = 0;
+uint32_t bootprofile_buffer_size = 0;
+uint32_t bootprofile_buffer_current_position = 0;
+uint32_t bootprofile_interval_ms = 0;
+uint64_t bootprofile_stackshot_flags = 0;
+uint64_t bootprofile_interval_abs = 0;
+uint64_t bootprofile_next_deadline = 0;
+uint32_t bootprofile_all_procs = 0;
+char bootprofile_proc_name[17];
uint64_t bootprofile_delta_since_timestamp = 0;
-lck_grp_t bootprofile_lck_grp;
-lck_mtx_t bootprofile_mtx;
+LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group");
+LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp);
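+/* As above, the boot-profile lock group and mutex are statically initialized, so the runtime lck_grp_init()/lck_mtx_init() calls below are removed. */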
enum {
kern_return_t ret;
char type[32];
- lck_grp_init(&bootprofile_lck_grp, "bootprofile group", LCK_GRP_ATTR_NULL);
- lck_mtx_init(&bootprofile_mtx, &bootprofile_lck_grp, LCK_ATTR_NULL);
-
- if (!PE_parse_boot_argn("bootprofile_buffer_size", &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
+ if (!PE_parse_boot_argn("bootprofile_buffer_size",
+ &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
bootprofile_buffer_size = 0;
}
bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE;
}
- if (!PE_parse_boot_argn("bootprofile_interval_ms", &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
+ if (!PE_parse_boot_argn("bootprofile_interval_ms",
+ &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
bootprofile_interval_ms = 0;
}
- if (!PE_parse_boot_argn("bootprofile_stackshot_flags", &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
+ if (!PE_parse_boot_argn("bootprofile_stackshot_flags",
+ &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
bootprofile_stackshot_flags = 0;
}
- if (!PE_parse_boot_argn("bootprofile_proc_name", &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
+ if (!PE_parse_boot_argn("bootprofile_proc_name",
+ &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
bootprofile_all_procs = 1;
bootprofile_proc_name[0] = '\0';
}
}
bzero((void *) bootprofile_buffer, bootprofile_buffer_size);
- kprintf("Boot profile: Sampling %s once per %u ms at %s\n", bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
+ kprintf("Boot profile: Sampling %s once per %u ms at %s\n",
+ bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));
timer_call_setup(&bootprofile_timer_call_entry,
/* initiate a stackshot with whatever portion of the buffer is left */
if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
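+ /*
+  * Stackshot flags are 64 bits wide; kext load-info is gathered only when
+  * targeting macOS.
+  */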
- uint32_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
+ uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
| STACKSHOT_GET_GLOBAL_MEM_STATS;
-#if __x86_64__
+#if defined(XNU_TARGET_OS_OSX)
flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
-#endif /* __x86_64__ */
+#endif
/* OR on flags specified in boot-args */
kern_return_t r = stack_snapshot_from_kernel(
pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
bootprofile_buffer_size - bootprofile_buffer_current_position,
- flags, bootprofile_delta_since_timestamp, &retbytes);
+ flags, bootprofile_delta_since_timestamp, 0, &retbytes);
/*
* We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser