+ return copyout(&sinfo32, uaddr, sizeof(sinfo32));
+ }
+}
+
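+/*
+ * Convenience wrapper used when crash info is gathered for a corpse: sample
+ * the process's current resource usage (RUSAGE_INFO_CURRENT) and hand it,
+ * together with the exception codes and any user-supplied data pointers, to
+ * populate_corpse_crashinfo(). The reason argument is passed through as a
+ * void * and consumed there as an os_reason_t. ri_phys_footprint is cleared
+ * here, presumably because the footprint reported in the corpse is captured
+ * from the corpse task's ledgers (TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT) below.
+ */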
+void
+gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task,
+ mach_exception_data_type_t code, mach_exception_data_type_t subcode,
+ uint64_t *udata_buffer, int num_udata, void *reason)
+{
+ struct rusage_superset rup;
+
+ gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT);
+ rup.ri.ri_phys_footprint = 0;
+ populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode,
+ udata_buffer, num_udata, reason);
+}
+
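+/*
+ * Rewrite the corpse exception code/subcode based on the process's exit
+ * reason. Only jetsam per-process-limit kills are special-cased today: the
+ * code is re-encoded as an EXC_RESOURCE of type RESOURCE_TYPE_MEMORY with
+ * flavor FLAVOR_HIGH_WATERMARK, carrying the task's physical footprint
+ * limit in MB, and the subcode is cleared. All other exit reasons leave the
+ * codes unchanged.
+ */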
+static void
+proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode)
+{
+ mach_exception_data_type_t code_update = *code;
+ mach_exception_data_type_t subcode_update = *subcode;
+ if (p->p_exit_reason == OS_REASON_NULL) {
+ return;
+ }
+
+ switch (p->p_exit_reason->osr_namespace) {
+ case OS_REASON_JETSAM:
+ if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) {
+ /* Re-encode the code as an EXC_RESOURCE for the high memory watermark */
+ EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY);
+ EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK);
+ EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(p->task)) >> 20));
+ subcode_update = 0;
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ *code = code_update;
+ *subcode = subcode_update;
+ return;
+}
+
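+/*
+ * Pack the exit reason into a single 64-bit mach exception subcode so a
+ * crash-report consumer can recover it without the os_reason object.
+ * Only the first 32 bits of osr_namespace and osr_code are kept; assuming
+ * the ENCODE_OSR_* macros OR the namespace into the upper half and the
+ * code into the lower half, the layout is:
+ *
+ *   63                             32 31                              0
+ *  +---------------------------------+---------------------------------+
+ *  |      osr_namespace (low 32)     |        osr_code (low 32)        |
+ *  +---------------------------------+---------------------------------+
+ *
+ * Returns 0 if the process has no exit reason.
+ */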
+mach_exception_data_type_t
+proc_encode_exit_exception_code(proc_t p)
+{
+ uint64_t subcode = 0;
+
+ if (p->p_exit_reason == OS_REASON_NULL) {
+ return 0;
+ }
+
+ /* Embed the first 32 bits of osr_namespace and osr_code in the exception code */
+ ENCODE_OSR_NAMESPACE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_namespace);
+ ENCODE_OSR_CODE_TO_MACH_EXCEPTION_CODE(subcode, p->p_exit_reason->osr_code);
+ return (mach_exception_data_type_t)subcode;
+}
+
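+/*
+ * Fill in the corpse's kcdata crash-info region. Every field below follows
+ * the same two-step kcdata pattern: kcdata_get_memory_addr() (or its
+ * _for_array variant) reserves a slot tagged with a TASK_CRASHINFO_* key
+ * and returns its address in uaddr, then kcdata_memcpy() copies the value
+ * in. A failed reservation simply skips that field, so the crash report is
+ * populated best-effort rather than all-or-nothing.
+ */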
+static void
+populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup,
+ mach_exception_data_type_t code, mach_exception_data_type_t subcode,
+ uint64_t *udata_buffer, int num_udata, os_reason_t reason)
+{
+ mach_vm_address_t uaddr = 0;
+ mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX];
+ exc_codes[0] = code;
+ exc_codes[1] = subcode;
+ cpu_type_t cputype;
+ struct proc_uniqidentifierinfo p_uniqidinfo;
+ struct proc_workqueueinfo pwqinfo;
+ int retval = 0;
+ uint64_t crashed_threadid = task_corpse_get_crashed_thread_id(corpse_task);
+ unsigned int pflags = 0;
+ uint64_t max_footprint_mb;
+ uint64_t max_footprint;
+
+ uint64_t ledger_internal;
+ uint64_t ledger_internal_compressed;
+ uint64_t ledger_iokit_mapped;
+ uint64_t ledger_alternate_accounting;
+ uint64_t ledger_alternate_accounting_compressed;
+ uint64_t ledger_purgeable_nonvolatile;
+ uint64_t ledger_purgeable_nonvolatile_compressed;
+ uint64_t ledger_page_table;
+ uint64_t ledger_phys_footprint;
+ uint64_t ledger_phys_footprint_lifetime_max;
+ uint64_t ledger_network_nonvolatile;
+ uint64_t ledger_network_nonvolatile_compressed;
+ uint64_t ledger_wired_mem;
+ uint64_t ledger_tagged_footprint;
+ uint64_t ledger_tagged_footprint_compressed;
+ uint64_t ledger_media_footprint;
+ uint64_t ledger_media_footprint_compressed;
+ uint64_t ledger_graphics_footprint;
+ uint64_t ledger_graphics_footprint_compressed;
+ uint64_t ledger_neural_footprint;
+ uint64_t ledger_neural_footprint_compressed;
+
+ void *crash_info_ptr = task_get_corpseinfo(corpse_task);
+
+#if CONFIG_MEMORYSTATUS
+ int memstat_dirty_flags = 0;
+#endif
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_EXCEPTION_CODES, sizeof(exc_codes), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, exc_codes, sizeof(exc_codes));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PID, sizeof(p->p_pid), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_pid, sizeof(p->p_pid));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PPID, sizeof(p->p_ppid), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_ppid, sizeof(p->p_ppid));
+ }
+
+ /* Don't include the crashed thread ID if there's an exit reason that indicates it's irrelevant */
+ if ((p->p_exit_reason == OS_REASON_NULL) || !(p->p_exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASHED_TID)) {
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CRASHED_THREADID, sizeof(uint64_t), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &crashed_threadid, sizeof(uint64_t));
+ }
+ }
+
+ static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo),
+ "struct proc_uniqidentifierinfo and struct crashinfo_proc_uniqidentifierinfo must be the same size");
+ if (KERN_SUCCESS ==
+ kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_BSDINFOWITHUNIQID, sizeof(struct proc_uniqidentifierinfo), &uaddr)) {
+ proc_piduniqidentifierinfo(p, &p_uniqidinfo);
+ kcdata_memcpy(crash_info_ptr, uaddr, &p_uniqidinfo, sizeof(struct proc_uniqidentifierinfo));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RUSAGE_INFO, sizeof(rusage_info_current), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &rup->ri, sizeof(rusage_info_current));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_CSFLAGS, sizeof(p->p_csflags), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_csflags, sizeof(p->p_csflags));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_NAME, sizeof(p->p_comm), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_comm, sizeof(p->p_comm));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_STARTTIME, sizeof(p->p_start), &uaddr)) {
+ struct timeval64 t64;
+ t64.tv_sec = (int64_t)p->p_start.tv_sec;
+ t64.tv_usec = (int64_t)p->p_start.tv_usec;
+ kcdata_memcpy(crash_info_ptr, uaddr, &t64, sizeof(t64));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_USERSTACK, sizeof(p->user_stack), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->user_stack, sizeof(p->user_stack));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_ARGSLEN, sizeof(p->p_argslen), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argslen, sizeof(p->p_argslen));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_ARGC, sizeof(p->p_argc), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_argc, sizeof(p->p_argc));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) {
+ char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);
+ proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval);
+ kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN);
+ zfree(ZV_NAMEI, buf);
+ }
+
+ pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED);
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_UID, sizeof(p->p_uid), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_uid, sizeof(p->p_uid));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_GID, sizeof(p->p_gid), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_gid, sizeof(p->p_gid));
+ }
+
+ cputype = cpu_type() & ~CPU_ARCH_MASK;
+ if (IS_64BIT_PROCESS(p)) {
+ cputype |= CPU_ARCH_ABI64;
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT, sizeof(max_footprint_mb), &uaddr)) {
+ max_footprint = get_task_phys_footprint_limit(p->task);
+ max_footprint_mb = max_footprint >> 20;
+ kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) {
+ ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(p->task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max));
+ }
+
+ // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) {
+ ledger_internal = get_task_internal(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) {
+ ledger_internal_compressed = get_task_internal_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) {
+ ledger_iokit_mapped = get_task_iokit_mapped(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) {
+ ledger_alternate_accounting = get_task_alternate_accounting(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) {
+ ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) {
+ ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) {
+ ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) {
+ ledger_page_table = get_task_page_table(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) {
+ ledger_phys_footprint = get_task_phys_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) {
+ ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) {
+ ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) {
+ ledger_wired_mem = get_task_wired_mem(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem));
+ }
+
+ bzero(&pwqinfo, sizeof(struct proc_workqueueinfo));
+ retval = fill_procworkqueue(p, &pwqinfo);
+ if (retval == 0) {
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_WORKQUEUEINFO, sizeof(struct proc_workqueueinfo), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &pwqinfo, sizeof(struct proc_workqueueinfo));
+ }
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_RESPONSIBLE_PID, sizeof(p->p_responsible_pid), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_responsible_pid, sizeof(p->p_responsible_pid));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PERSONA_ID, sizeof(uid_t), &uaddr)) {
+ uid_t persona_id = proc_persona_id(p);
+ kcdata_memcpy(crash_info_ptr, uaddr, &persona_id, sizeof(persona_id));
+ }
+
+#if CONFIG_COALITIONS
+ if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_COALITION_ID, sizeof(uint64_t), COALITION_NUM_TYPES, &uaddr)) {
+ uint64_t coalition_ids[COALITION_NUM_TYPES];
+ task_coalition_ids(p->task, coalition_ids);
+ kcdata_memcpy(crash_info_ptr, uaddr, coalition_ids, sizeof(coalition_ids));
+ }
+#endif /* CONFIG_COALITIONS */
+
+#if CONFIG_MEMORYSTATUS
+ memstat_dirty_flags = memorystatus_dirty_get(p, FALSE);
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_DIRTY_FLAGS, sizeof(memstat_dirty_flags), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &memstat_dirty_flags, sizeof(memstat_dirty_flags));
+ }
+#endif
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORY_LIMIT_INCREASE, sizeof(p->p_memlimit_increase), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) {
+ ledger_tagged_footprint = get_task_tagged_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) {
+ ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) {
+ ledger_media_footprint = get_task_media_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) {
+ ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, sizeof(ledger_media_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) {
+ ledger_graphics_footprint = get_task_graphics_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) {
+ ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) {
+ ledger_neural_footprint = get_task_neural_footprint(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) {
+ ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task);
+ kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed));
+ }
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority));
+ }
+
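+ /*
+  * Prefer the process's own exit reason when the caller did not supply one.
+  * Snapshot its namespace/code/flags and, if the reason carries a kcdata
+  * payload, nest that buffer verbatim into the corpse's crash info.
+  */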
+ if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) {
+ reason = p->p_exit_reason;
+ }
+ if (reason != OS_REASON_NULL) {
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, EXIT_REASON_SNAPSHOT, sizeof(struct exit_reason_snapshot), &uaddr)) {
+ struct exit_reason_snapshot ers = {
+ .ers_namespace = reason->osr_namespace,
+ .ers_code = reason->osr_code,
+ .ers_flags = reason->osr_flags
+ };
+
+ kcdata_memcpy(crash_info_ptr, uaddr, &ers, sizeof(ers));
+ }
+
+ if (reason->osr_kcd_buf != 0) {
+ uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor);
+ assert(reason_buf_size != 0);
+
+ if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, reason->osr_kcd_buf, reason_buf_size);
+ }
+ }
+ }
+
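+ /* Append the user-supplied data pointers, if any, as a TASK_CRASHINFO_UDATA_PTRS array. */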
+ if (num_udata > 0) {
+ if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS,
+ sizeof(uint64_t), num_udata, &uaddr)) {
+ kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata);
+ }