/*
 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <kern/assert.h>
#include <kern/zalloc.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/OSAtomic.h>
#include <libsa/types.h>
#include <pexpert/pexpert.h>

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
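
/* Round x up to the next multiple of y (returns x unchanged if already aligned) */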
#define roundup(x, y) ((((x) % (y)) == 0) ? \
	(x) : ((x) + ((y) - ((x) % (y)))))

/*
 * The processor_core_context structure describes the current
 * corefile that's being generated. It also includes a pointer
 * to the core_outvars which is used by the KDP code for context
 * about the specific output mechanism being used.
 *
 * We include *remaining variables to catch inconsistencies / bugs
 * in the co-processor coredump callbacks.
 */
typedef struct {
	struct kdp_core_out_vars *core_outvars; /* Output procedure info (see kdp_core.c) */
	kern_coredump_callback_config *core_config; /* Information about core currently being dumped */
	void *core_refcon; /* Reference constant associated with the coredump helper */
	boolean_t core_is64bit; /* Bitness of CPU */
	uint32_t core_mh_magic; /* Magic for mach header */
	cpu_type_t core_cpu_type; /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype; /* CPU subtype for mach header */
	uint64_t core_file_length; /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed; /* File length after compression */
	uint64_t core_segment_count; /* Number of LC_SEGMENT*s in the core currently being dumped */
	uint64_t core_segments_remaining; /* Number of LC_SEGMENT*s that have not been added to the header */
	uint64_t core_segment_byte_total; /* Sum of all the data from the LC_SEGMENTS in the core */
	uint64_t core_segment_bytes_remaining; /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count; /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining; /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size; /* Size of each LC_THREAD */
	uint64_t core_misc_bytes_count; /* Quantity of LC_NOTE data to be included */
	uint64_t core_misc_bytes_remaining; /* Quantity of LC_NOTE data that has not yet been included */
	uint64_t core_cur_hoffset; /* Current offset in this core's header */
	uint64_t core_cur_foffset; /* Current offset in this core's overall file */
	uint64_t core_header_size; /* Size of this core's header */
	uint64_t core_total_bytes; /* Total amount of data to be included in this core (excluding zero fill) */
} processor_core_context;

/*
 * The kern_coredump_core structure describes a core that has been
 * registered for use by the coredump mechanism.
 */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next; /* Next processor to dump */
	void *kcc_refcon; /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
	boolean_t kcc_is64bit; /* Processor bitness */
	uint32_t kcc_mh_magic; /* Magic for mach header */
	cpu_type_t kcc_cpu_type; /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype; /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb; /* Registered processor callbacks for coredump */
} * kern_coredump_core_list = NULL;

uint32_t coredump_registered_count = 0;

struct kern_coredump_core *kernel_helper = NULL;
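
/*
 * Common registration path for both the xnu (kernel) helper and co-processor
 * helpers: validates the config version and required callbacks, allocates a
 * kern_coredump_core, copies the callbacks and CPU identification into it,
 * and links it in (as kernel_helper for xnu, otherwise onto
 * kern_coredump_core_list).
 */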
static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
    void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit,
    uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
	struct kern_coredump_core *core_helper = NULL;
	kern_coredump_callback_config *core_callbacks = NULL;

	if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
		return NULL;
	}
	if (kc_callbacks == NULL) {
		return NULL;
	}
	if (core_description == NULL) {
		return NULL;
	}
	if (kc_callbacks->kcc_coredump_get_summary == NULL ||
	    kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
	    kc_callbacks->kcc_coredump_save_segment_data == NULL ||
	    kc_callbacks->kcc_coredump_save_thread_state == NULL ||
	    kc_callbacks->kcc_coredump_save_sw_vers == NULL) {
		return NULL;
	}

#if !defined(__LP64__)
	/* We don't support generating 64-bit cores on 32-bit platforms */
	if (is64bit) {
		return NULL;
	}
#endif

	core_helper = zalloc_permanent_type(struct kern_coredump_core);
	core_helper->kcc_next = NULL;
	core_helper->kcc_refcon = refcon;
	if (xnu_callback) {
		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
	} else {
		/* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */
		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description);
	}
	core_helper->kcc_is64bit = is64bit;
	core_helper->kcc_mh_magic = mh_magic;
	core_helper->kcc_cpu_type = cpu_type;
	core_helper->kcc_cpu_subtype = cpu_subtype;
	core_callbacks = &core_helper->kcc_cb;

	core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
	core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
	core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
	core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
	core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
	core_callbacks->kcc_coredump_save_misc_data = kc_callbacks->kcc_coredump_save_misc_data;
	core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;

	if (xnu_callback) {
		assert(kernel_helper == NULL);
		kernel_helper = core_helper;
	} else {
		do {
			core_helper->kcc_next = kern_coredump_core_list;
		} while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
	}

	OSAddAtomic(1, &coredump_registered_count);
	kprintf("Registered coredump handler for %s\n", core_description);

	return core_helper;
}
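
/*
 * Public entry point for co-processor coredump helpers. Fails with
 * KERN_RESOURCE_SHORTAGE once KERN_COREDUMP_MAX_CORES helpers are registered.
 *
 * Illustrative sketch (not part of the original source): a 64-bit co-processor
 * driver could register itself roughly as follows, where the my_coproc_*
 * callbacks and my_coproc_state are hypothetical:
 *
 *    kern_coredump_callback_config coproc_callbacks = {
 *        .kcc_coredump_init = my_coproc_coredump_init,
 *        .kcc_coredump_get_summary = my_coproc_get_summary,
 *        .kcc_coredump_save_segment_descriptions = my_coproc_save_seg_desc,
 *        .kcc_coredump_save_segment_data = my_coproc_save_seg_data,
 *        .kcc_coredump_save_thread_state = my_coproc_save_thread_state,
 *        .kcc_coredump_save_misc_data = NULL,
 *        .kcc_coredump_save_sw_vers = my_coproc_save_sw_vers,
 *    };
 *
 *    kern_register_coredump_helper(KERN_COREDUMP_CONFIG_VERSION, &coproc_callbacks,
 *        my_coproc_state, "my-coproc", TRUE, MH_MAGIC_64,
 *        CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64_ALL);
 */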
kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
    void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
    cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
	if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
		return KERN_RESOURCE_SHORTAGE;
	}

	if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, FALSE,
	    is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
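
/*
 * Registers xnu itself as the kernel corefile producer, using the kernel's
 * own mach header to describe the dump's CPU type and subtype.
 */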
kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
{
#if defined(__LP64__)
	boolean_t is64bit = TRUE;
#else
	boolean_t is64bit = FALSE;
#endif

	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", TRUE, is64bit,
	    _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * Save metadata about the core we're about to write, write out the mach header
 */
kern_return_t
coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
    uint64_t thread_count, uint64_t thread_state_size,
    uint64_t misc_bytes_count, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	uint32_t sizeofcmds = 0, numcmds = 0;
	kern_return_t ret = KERN_SUCCESS;

	if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size
	    || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Initialize core_context */
	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
	core_context->core_thread_state_size = thread_state_size;
	core_context->core_misc_bytes_remaining = core_context->core_misc_bytes_count = misc_bytes_count;

#if defined(__LP64__)
	if (core_context->core_is64bit) {
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
	} else
#endif /* defined(__LP64__) */
	{
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
	}

	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + /* TODO: LC_NOTE */ 0;
	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + /* TODO: LC_NOTE */ 0;
	core_context->core_cur_foffset = round_page(core_context->core_header_size);

	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + /* TODO: LC_NOTE */ 0 +
	    1 /* ident command */);

	/*
	 * Reset the zstream and other output context before writing any data out. We do this here
	 * to update the total file length on the outvars before we start writing out.
	 */
	kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length);

	/* Construct core file header */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct mach_header_64 core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	} else
#endif /* defined(__LP64__) */
	{
		struct mach_header core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	}

	return KERN_SUCCESS;
}

/*
 * Construct a segment command for the specified segment.
 */
kern_return_t
coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
    void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	kern_return_t ret;
	uint64_t size = seg_end - seg_start;

	if (seg_end <= seg_start) {
		kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
		    seg_start, seg_end, context, seg_start, seg_end);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_segments_remaining == 0) {
		kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
		    seg_start, seg_end, context, core_context->core_segment_count);
		return KERN_INVALID_ARGUMENT;
	}

	/* Construct segment command */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct segment_command_64 seg_command = { };

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT_64;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = seg_start;
		seg_command.vmsize = size;
		seg_command.fileoff = core_context->core_cur_foffset;
		seg_command.filesize = size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
			    seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	} else
#endif /* defined(__LP64__) */
	{
		struct segment_command seg_command = { };

		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
			    seg_start, seg_end, context, seg_start, seg_end);
			return KERN_INVALID_ARGUMENT;
		}

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = (uint32_t) seg_start;
		seg_command.vmsize = (uint32_t) size;
		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
		seg_command.filesize = (uint32_t) size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	}

	/* Update coredump context */
	core_context->core_segments_remaining--;
	core_context->core_cur_foffset += size;

	return KERN_SUCCESS;
}

/*
 * Passed thread_state is expected to be a struct thread_command
 */
kern_return_t
coredump_save_thread_state(void *thread_state, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	struct thread_command *tc = (struct thread_command *)thread_state;
	kern_return_t ret;

	if (tc->cmd != LC_THREAD) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : found %d expected LC_THREAD (%d)\n",
		    thread_state, context, tc->cmd, LC_THREAD);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : ran out of space to save threads with %llu of %llu remaining\n",
		    thread_state, context, core_context->core_threads_remaining, core_context->core_thread_count);
		return KERN_NO_SPACE;
	}

	ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : failed to write thread data : kdp_core_output(%p, %llu, %p) returned 0x%x\n",
		    thread_state, context, core_context->core_outvars, core_context->core_thread_state_size, thread_state, ret);
		return ret;
	}

	core_context->core_threads_remaining--;
	core_context->core_cur_hoffset += core_context->core_thread_state_size;

	return KERN_SUCCESS;
}
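
/*
 * Save the software version string as an LC_IDENT load command, zero padded
 * out to KERN_COREDUMP_VERSIONSTRINGMAXSIZE bytes.
 */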
kern_return_t
coredump_save_sw_vers(void *sw_vers, uint64_t length, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	struct ident_command ident = { };
	kern_return_t ret;

	if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : called with invalid length %llu\n",
		    sw_vers, length, context, length);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_cur_hoffset + sizeof(struct ident_command) + length > core_context->core_header_size) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : ran out of space to save data\n",
		    sw_vers, length, context);
		return KERN_NO_SPACE;
	}

	ident.cmd = LC_IDENT;
	ident.cmdsize = (uint32_t)(sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
	ret = kdp_core_output(core_context->core_outvars, sizeof(struct ident_command), (caddr_t)&ident);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write ident command : kdp_core_output(%p, %lu, %p) returned 0x%x\n",
		    sw_vers, length, context, core_context->core_outvars, sizeof(struct ident_command), &ident, ret);
		return ret;
	}

	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)sw_vers);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write version string : kdp_core_output(%p, %llu, %p) returned 0x%x\n",
		    sw_vers, length, context, core_context->core_outvars, length, sw_vers, ret);
		return ret;
	}

	if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
		/* Zero fill to the full command size */
		ret = kdp_core_output(core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), NULL);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
			    sw_vers, length, context, core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), ret);
			return ret;
		}
	}

	core_context->core_cur_hoffset += sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE;

	return KERN_SUCCESS;
}
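
/*
 * Stream a chunk of segment data to the output procedure and account for it
 * against the remaining segment bytes expected for this core.
 */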
kern_return_t
coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	kern_return_t ret;

	if (length > core_context->core_segment_bytes_remaining) {
		kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : called with too much data, %llu written, %llu left\n",
		    seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
		    core_context->core_segment_bytes_remaining);
		return KERN_INVALID_ARGUMENT;
	}

	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : failed to write data (%llu bytes remaining) :%d\n",
		    seg_data, length, context, core_context->core_segment_bytes_remaining, ret);
		return ret;
	}

	core_context->core_segment_bytes_remaining -= length;
	core_context->core_cur_foffset += length;

	return KERN_SUCCESS;
}
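
/*
 * Generate a single corefile for the given helper: run the (optional) init
 * callback, write the mach header and load commands (segments, threads,
 * LC_IDENT), zero fill out to a page boundary, then stream the segment data
 * and flush the output. If core_begin_offset is non-zero, the file header is
 * updated afterwards with this core's offset and compressed length.
 */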
static kern_return_t
kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed)
{
	kern_return_t ret;
	processor_core_context context = { };

	*core_file_length = 0;
	*header_update_failed = FALSE;

	/* Setup the coredump context */
	context.core_outvars = core_outvars;
	context.core_config = &current_core->kcc_cb;
	context.core_refcon = current_core->kcc_refcon;
	context.core_is64bit = current_core->kcc_is64bit;
	context.core_mh_magic = current_core->kcc_mh_magic;
	context.core_cpu_type = current_core->kcc_cpu_type;
	context.core_cpu_subtype = current_core->kcc_cpu_subtype;

	kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);

	if (current_core->kcc_cb.kcc_coredump_init != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
		if (ret == KERN_NODE_DOWN) {
			kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
			return KERN_SUCCESS;
		} else if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(kern_coredump_routine) : coredump_init failed with %d\n", ret);
			return ret;
		}
	}

	/* Populate the context with metadata about the corefile (cmd info, sizes etc) */
	ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : get_summary failed with %d\n", ret);
		return ret;
	}

	if (context.core_header_size == 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : header size not populated after coredump_get_summary\n");
		return KERN_FAILURE;
	}

	/* Save the segment descriptions for the segments to be included */
	ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
	    &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions failed with %d\n", ret);
		return ret;
	}

	if (context.core_segments_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
		    context.core_segments_remaining, context.core_segment_count);
		return KERN_FAILURE;
	}

	/* TODO: Add LC_NOTE command for miscellaneous data if requested */

	/*
	 * Save the thread commands/state
	 *
	 * TODO: Should this buffer be allocated at boot rather than on the stack?
	 */
	if (context.core_thread_state_size) {
		char threadstatebuf[context.core_thread_state_size];
		ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
		    &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state failed with %d\n", ret);
			return ret;
		}
	}

	if (context.core_threads_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
		    context.core_threads_remaining, context.core_thread_count);
		return KERN_FAILURE;
	}

	/* Save the sw version string */
	ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_sw_vers failed with %d\n", ret);
		return ret;
	}

	assert(context.core_cur_hoffset == context.core_header_size);

	/* Zero fill between the end of the header and the beginning of the segment data file offset */
	ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
		    context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
		return ret;
	}

	context.core_cur_foffset = round_page(context.core_header_size);
	ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
		return ret;
	}

	if (context.core_segment_bytes_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
		    context.core_segment_bytes_remaining, context.core_segment_byte_total);
		return KERN_FAILURE;
	}

	/* TODO: Save the miscellaneous data if requested */

	/* Flush the last data out */
	ret = kdp_core_output(context.core_outvars, 0, NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
		    context.core_outvars, ret);
		return ret;
	}

	kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
	    current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
	    (context.core_thread_count * context.core_thread_state_size), context.core_file_length);

	if (core_begin_offset) {
		/* If we're writing to disk (we have a begin offset), we need to update the header */
		ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed);
		if (ret != KERN_SUCCESS) {
			*header_update_failed = TRUE;
			kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
			return ret;
		}
	}

	kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);

	*core_file_length = context.core_file_length_compressed;

	return KERN_SUCCESS;
}
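
/*
 * Top-level coredump driver: dump the kernel first, then walk the list of
 * registered co-processor helpers, aligning each core's starting offset to
 * KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN in the raw file. A failure in one core
 * does not stop the others unless the file header update itself failed.
 */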
kern_return_t
kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset)
{
	struct kern_coredump_core *current_core = NULL;
	uint64_t prev_core_length = 0;
	kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
	boolean_t header_update_failed = FALSE;

	assert(last_file_offset != NULL);

	*last_file_offset = first_file_offset;
	cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed);
	if (cur_ret != KERN_SUCCESS) {
		// As long as we didn't fail while updating the header for the raw file, we should be able to try
		// to capture other corefiles.
		if (header_update_failed) {
			// The header may be in an inconsistent state, so bail now
			return KERN_FAILURE;
		} else {
			prev_core_length = 0;
			ret = KERN_FAILURE;
		}
	}

	*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
	prev_core_length = 0;

	if (kernel_only) {
		return ret;
	}

	current_core = kern_coredump_core_list;
	while (current_core) {
		/* Seek to the beginning of the next file */
		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
		if (cur_ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
			break;
		}

		cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed);
		if (cur_ret != KERN_SUCCESS) {
			// As long as we didn't fail while updating the header for the raw file, we should be able to try
			// to capture other corefiles.
			if (header_update_failed) {
				// The header may be in an inconsistent state, so bail now
				return KERN_FAILURE;
			} else {
				// Try to capture other corefiles even if one failed, update the overall return
				// status to reflect the failure
				prev_core_length = 0;
				ret = KERN_FAILURE;
			}
		}

		/* Calculate the offset of the beginning of the next core in the raw file */
		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		prev_core_length = 0;
		current_core = current_core->kcc_next;
	}

	return ret;
}

#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
    const char *core_description, boolean_t is64bit, uint32_t mh_magic,
    cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
#pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
	return KERN_NOT_SUPPORTED;
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

/*
 * Must be callable with a NULL context
 */
void
kern_coredump_log(void *context, const char *string, ...)
{
#pragma unused(context)
	va_list coredump_log_args;

	va_start(coredump_log_args, string);
	_doprnt(string, &coredump_log_args, consdebug_putc, 16);
	va_end(coredump_log_args);

#if defined(__arm__) || defined(__arm64__)