/*
 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <kern/assert.h>
#include <kern/kalloc.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/OSAtomic.h>
#include <libsa/types.h>
#include <pexpert/pexpert.h>
#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#define roundup(x, y)	((((x) % (y)) == 0) ? \
			(x) : ((x) + ((y) - ((x) % (y)))))
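/*
 * Illustrative examples of the macro above: roundup(0x4001, 0x4000) == 0x8000,
 * while roundup(0x4000, 0x4000) == 0x4000. kern_do_coredump() below relies on
 * this to align each core's starting offset to KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN.
 */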
/*
 * The processor_core_context structure describes the current
 * corefile that's being generated. It also includes a pointer
 * to the core_outvars which is used by the KDP code for context
 * about the specific output mechanism being used.
 *
 * We include *remaining variables to catch inconsistencies / bugs
 * in the co-processor coredump callbacks.
 */
typedef struct {
	struct kdp_core_out_vars *core_outvars; /* Output procedure info (see kdp_core.c) */
	kern_coredump_callback_config *core_config; /* Information about core currently being dumped */
	void *core_refcon; /* Reference constant associated with the coredump helper */
	boolean_t core_is64bit; /* Bitness of CPU */
	uint32_t core_mh_magic; /* Magic for mach header */
	cpu_type_t core_cpu_type; /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype; /* CPU subtype for mach header */
	uint64_t core_file_length; /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed; /* File length after compression */
	uint64_t core_segment_count; /* Number of LC_SEGMENT*s in the core currently being dumped */
	uint64_t core_segments_remaining; /* Number of LC_SEGMENT*s that have not been added to the header */
	uint64_t core_segment_byte_total; /* Sum of all the data from the LC_SEGMENTs in the core */
	uint64_t core_segment_bytes_remaining; /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count; /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining; /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size; /* Size of each LC_THREAD */
	uint64_t core_misc_bytes_count; /* Quantity of LC_NOTE data to be included */
	uint64_t core_misc_bytes_remaining; /* Quantity of LC_NOTE data that has not yet been included */
	uint64_t core_cur_hoffset; /* Current offset in this core's header */
	uint64_t core_cur_foffset; /* Current offset in this core's overall file */
	uint64_t core_header_size; /* Size of this core's header */
	uint64_t core_total_bytes; /* Total amount of data to be included in this core (excluding zero fill) */
} processor_core_context;
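/*
 * Illustrative example of how the *remaining counters are used: if a helper's
 * get_summary callback reports 3 segments but the helper then tries to describe
 * a 4th, core_segments_remaining is already 0 and coredump_save_segment_descriptions()
 * below rejects the extra call with KERN_INVALID_ARGUMENT instead of overrunning
 * the space reserved for load commands in the corefile header.
 */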
/*
 * The kern_coredump_core structure describes a core that has been
 * registered for use by the coredump mechanism.
 */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next; /* Next processor to dump */
	void *kcc_refcon; /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
	boolean_t kcc_is64bit; /* Processor bitness */
	uint32_t kcc_mh_magic; /* Magic for mach header */
	cpu_type_t kcc_cpu_type; /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype; /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb; /* Registered processor callbacks for coredump */
} *kern_coredump_core_list = NULL;

uint32_t coredump_registered_count = 0;

struct kern_coredump_core *kernel_helper = NULL;
static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks,
		void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit,
		uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
	struct kern_coredump_core *core_helper = NULL;
	kern_coredump_callback_config *core_callbacks = NULL;

	if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION)
		return NULL;
	if (kc_callbacks == NULL)
		return NULL;
	if (core_description == NULL)
		return NULL;

	if (kc_callbacks->kcc_coredump_get_summary == NULL ||
			kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
			kc_callbacks->kcc_coredump_save_segment_data == NULL ||
			kc_callbacks->kcc_coredump_save_thread_state == NULL ||
			kc_callbacks->kcc_coredump_save_sw_vers == NULL)
		return NULL;

#if !defined(__LP64__)
	/* We don't support generating 64-bit cores on 32-bit platforms */
	if (is64bit)
		return NULL;
#endif

	core_helper = kalloc(sizeof(*core_helper));
	core_helper->kcc_next = NULL;
	core_helper->kcc_refcon = refcon;
	if (xnu_callback) {
		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
	} else {
		/* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */
		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description);
	}
	core_helper->kcc_is64bit = is64bit;
	core_helper->kcc_mh_magic = mh_magic;
	core_helper->kcc_cpu_type = cpu_type;
	core_helper->kcc_cpu_subtype = cpu_subtype;
	core_callbacks = &core_helper->kcc_cb;

	core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
	core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
	core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
	core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
	core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
	core_callbacks->kcc_coredump_save_misc_data = kc_callbacks->kcc_coredump_save_misc_data;
	core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;

	if (xnu_callback) {
		assert(kernel_helper == NULL);
		kernel_helper = core_helper;
	} else {
		do {
			core_helper->kcc_next = kern_coredump_core_list;
		} while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
	}

	OSAddAtomic(1, &coredump_registered_count);
	kprintf("Registered coredump handler for %s\n", core_description);

	return core_helper;
}
kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks,
		void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
		cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
	if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES)
		return KERN_RESOURCE_SHORTAGE;

	if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, FALSE,
			is64bit, mh_magic, cpu_type, cpu_subtype) == NULL)
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}
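/*
 * Illustrative sketch (not compiled as part of this file) of how a hypothetical
 * co-processor driver might register itself with the mechanism above. The
 * "my_coproc_*" callbacks, the my_refcon argument, and the "myproc" description
 * are placeholders; the authoritative callback prototypes are the typedefs in
 * kdp/processor_core.h.
 */
#if 0
static kern_coredump_callback_config my_coproc_callbacks = {
	.kcc_coredump_init = my_coproc_coredump_init, /* optional, may be NULL */
	.kcc_coredump_get_summary = my_coproc_coredump_get_summary,
	.kcc_coredump_save_segment_descriptions = my_coproc_coredump_save_segment_descriptions,
	.kcc_coredump_save_segment_data = my_coproc_coredump_save_segment_data,
	.kcc_coredump_save_thread_state = my_coproc_coredump_save_thread_state,
	.kcc_coredump_save_misc_data = my_coproc_coredump_save_misc_data,
	.kcc_coredump_save_sw_vers = my_coproc_coredump_save_sw_vers,
};

static void
my_coproc_register_for_coredumps(void *my_refcon)
{
	kern_return_t kr;

	/* The description is truncated to 8 characters and given a "-coproc" suffix above */
	kr = kern_register_coredump_helper(KERN_COREDUMP_CONFIG_VERSION, &my_coproc_callbacks,
			my_refcon, "myproc", TRUE /* is64bit */, MH_MAGIC_64,
			CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64_ALL);
	if (kr != KERN_SUCCESS) {
		kprintf("coredump helper registration failed: %d\n", kr);
	}
}
#endif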
kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
{
#if defined(__LP64__)
	boolean_t is64bit = TRUE;
#else
	boolean_t is64bit = FALSE;
#endif

	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", TRUE, is64bit,
			_mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL)
		return KERN_FAILURE;

	return KERN_SUCCESS;
}
/*
 * Save metadata about the core we're about to write, and write out the mach header
 */
static kern_return_t
coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
		uint64_t thread_count, uint64_t thread_state_size,
		uint64_t misc_bytes_count, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	uint32_t sizeofcmds = 0, numcmds = 0;
	kern_return_t ret = KERN_SUCCESS;

	if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size
			|| (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX))
		return KERN_INVALID_ARGUMENT;

	/* Initialize core_context */
	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
	core_context->core_thread_state_size = thread_state_size;
	core_context->core_misc_bytes_remaining = core_context->core_misc_bytes_count = misc_bytes_count;

#if defined(__LP64__)
	if (core_context->core_is64bit) {
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
				(core_context->core_threads_remaining * core_context->core_thread_state_size) +
				/* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
	} else
#endif /* defined(__LP64__) */
	{
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
				(core_context->core_threads_remaining * core_context->core_thread_state_size) +
				/* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
	}

	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + /* TODO: LC_NOTE */ 0;
	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + /* TODO: LC_NOTE */ 0;
	core_context->core_cur_foffset = round_page(core_context->core_header_size);

	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + /* TODO: LC_NOTE */ 0 +
			1 /* ident command */);

	/*
	 * Reset the zstream and other output context before writing any data out. We do this here
	 * to update the total file length on the outvars before we start writing out.
	 */
	kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length);

	/* Construct core file header */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct mach_header_64 core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
					core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	} else
#endif /* defined(__LP64__) */
	{
		struct mach_header core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
					core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	}

	return KERN_SUCCESS;
}
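/*
 * Illustrative layout of a single core, as computed above (header structures and
 * command sizes vary with bitness):
 *
 *   offset 0                          mach_header / mach_header_64
 *                                     LC_SEGMENT(_64) * core_segment_count
 *                                     LC_THREAD commands (core_thread_count * core_thread_state_size bytes)
 *                                     LC_IDENT + KERN_COREDUMP_VERSIONSTRINGMAXSIZE bytes of version string space
 *                                     zero fill up to round_page(core_header_size)
 *   round_page(core_header_size)      segment data (core_segment_byte_total bytes)
 */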
/*
 * Construct a segment command for the specified segment.
 */
static kern_return_t
coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
		void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	kern_return_t ret;
	uint64_t size = seg_end - seg_start;

	if (seg_end <= seg_start) {
		kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
				seg_start, seg_end, context, seg_start, seg_end);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_segments_remaining == 0) {
		kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called too many times, %llu segment descriptions already recorded\n",
				seg_start, seg_end, context, core_context->core_segment_count);
		return KERN_INVALID_ARGUMENT;
	}

	/* Construct segment command */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct segment_command_64 seg_command = { };

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
					seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT_64;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = seg_start;
		seg_command.vmsize = size;
		seg_command.fileoff = core_context->core_cur_foffset;
		seg_command.filesize = size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
					seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
					core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	} else
#endif /* defined(__LP64__) */
	{
		struct segment_command seg_command = { };

		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
					seg_start, seg_end, context, seg_start, seg_end);
			return KERN_INVALID_ARGUMENT;
		}

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
					seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = (uint32_t) seg_start;
		seg_command.vmsize = (uint32_t) size;
		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
		seg_command.filesize = (uint32_t) size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
					seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
					core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	}

	/* Update coredump context */
	core_context->core_segments_remaining--;
	core_context->core_cur_foffset += size;

	return KERN_SUCCESS;
}
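/*
 * Illustrative example: a 64-bit helper describing the region
 * [0xFFFFFFF007004000, 0xFFFFFFF007008000) emits an LC_SEGMENT_64 with an empty
 * segname, vmaddr = 0xFFFFFFF007004000, vmsize = filesize = 0x4000,
 * fileoff = the core_cur_foffset at the time of the call, and read-only
 * protections; core_cur_foffset then advances by 0x4000 so the next segment's
 * data lands immediately after this one's in the file.
 */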
/*
 * Passed thread_state is expected to be a struct thread_command.
 */
static kern_return_t
coredump_save_thread_state(void *thread_state, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	struct thread_command *tc = (struct thread_command *)thread_state;
	kern_return_t ret;

	if (tc->cmd != LC_THREAD) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : found cmd %d, expected LC_THREAD (%d)\n",
				thread_state, context, tc->cmd, LC_THREAD);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : ran out of space to save threads with %llu of %llu remaining\n",
				thread_state, context, core_context->core_threads_remaining, core_context->core_thread_count);
		return KERN_NO_SPACE;
	}

	ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : failed to write thread data : kdp_core_output(%p, %llu, %p) returned 0x%x\n",
				thread_state, context, core_context->core_outvars, core_context->core_thread_state_size, thread_state, ret);
		return ret;
	}

	core_context->core_threads_remaining--;
	core_context->core_cur_hoffset += core_context->core_thread_state_size;

	return KERN_SUCCESS;
}
static kern_return_t
coredump_save_sw_vers(void *sw_vers, uint64_t length, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	struct ident_command ident = { };
	kern_return_t ret;

	if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : called with invalid length %llu\n",
				sw_vers, length, context, length);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_cur_hoffset + sizeof(struct ident_command) + length > core_context->core_header_size) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : ran out of space to save data\n",
				sw_vers, length, context);
		return KERN_NO_SPACE;
	}

	ident.cmd = LC_IDENT;
	ident.cmdsize = (uint32_t)(sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
	ret = kdp_core_output(core_context->core_outvars, sizeof(struct ident_command), (caddr_t)&ident);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write ident command : kdp_core_output(%p, %lu, %p) returned 0x%x\n",
				sw_vers, length, context, core_context->core_outvars, sizeof(struct ident_command), &ident, ret);
		return ret;
	}

	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)sw_vers);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write version string : kdp_core_output(%p, %llu, %p) returned 0x%x\n",
				sw_vers, length, context, core_context->core_outvars, length, sw_vers, ret);
		return ret;
	}

	if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
		/* Zero fill to the full command size */
		ret = kdp_core_output(core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), NULL);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
					sw_vers, length, context, core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), ret);
			return ret;
		}
	}

	core_context->core_cur_hoffset += sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE;

	return KERN_SUCCESS;
}
static kern_return_t
coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
{
	kern_return_t ret;
	processor_core_context *core_context = (processor_core_context *)context;

	if (length > core_context->core_segment_bytes_remaining) {
		kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : called with too much data, %llu written, %llu left\n",
				seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
				core_context->core_segment_bytes_remaining);
		return KERN_INVALID_ARGUMENT;
	}

	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : failed to write data (%llu bytes remaining) : %d\n",
				seg_data, length, context, core_context->core_segment_bytes_remaining, ret);
		return ret;
	}

	core_context->core_segment_bytes_remaining -= length;
	core_context->core_cur_foffset += length;

	return KERN_SUCCESS;
}
static kern_return_t
kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed)
{
	kern_return_t ret;
	processor_core_context context = { };

	*core_file_length = 0;
	*header_update_failed = FALSE;

	/* Set up the coredump context */
	context.core_outvars = core_outvars;
	context.core_config = &current_core->kcc_cb;
	context.core_refcon = current_core->kcc_refcon;
	context.core_is64bit = current_core->kcc_is64bit;
	context.core_mh_magic = current_core->kcc_mh_magic;
	context.core_cpu_type = current_core->kcc_cpu_type;
	context.core_cpu_subtype = current_core->kcc_cpu_subtype;

	kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);

	if (current_core->kcc_cb.kcc_coredump_init != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
		if (ret == KERN_NODE_DOWN) {
			kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
			return KERN_SUCCESS;
		} else if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(kern_coredump_routine) : coredump_init failed with %d\n", ret);
			return ret;
		}
	}

	/* Populate the context with metadata about the corefile (cmd info, sizes etc) */
	ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : get_summary failed with %d\n", ret);
		return ret;
	}

	if (context.core_header_size == 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : header size not populated after coredump_get_summary\n");
		return KERN_FAILURE;
	}

	/* Save the segment descriptions for the segments to be included */
	ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
			&context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions failed with %d\n", ret);
		return ret;
	}

	if (context.core_segments_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
				context.core_segments_remaining, context.core_segment_count);
		return KERN_FAILURE;
	}

	/* TODO: Add LC_NOTE command for miscellaneous data if requested */

	/*
	 * Save the thread commands/state
	 *
	 * TODO: Should this buffer be allocated at boot rather than on the stack?
	 */
	if (context.core_thread_state_size) {
		char threadstatebuf[context.core_thread_state_size];
		ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
				&context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state failed with %d\n", ret);
			return ret;
		}
	}

	if (context.core_threads_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
				context.core_threads_remaining, context.core_thread_count);
		return KERN_FAILURE;
	}

	/* Save the sw version string */
	ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_sw_vers failed with %d\n", ret);
		return ret;
	}

	assert(context.core_cur_hoffset == context.core_header_size);

	/* Zero fill between the end of the header and the beginning of the segment data file offset */
	ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
				context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
		return ret;
	}

	context.core_cur_foffset = round_page(context.core_header_size);
	ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
		return ret;
	}

	if (context.core_segment_bytes_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
				context.core_segment_bytes_remaining, context.core_segment_byte_total);
		return KERN_FAILURE;
	}

	/* TODO: Save the miscellaneous data if requested */

	/* Flush the last data out */
	ret = kdp_core_output(context.core_outvars, 0, NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
				context.core_outvars, ret);
		return ret;
	}

	kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes), overall uncompressed file length %llu bytes.",
			current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
			(context.core_thread_count * context.core_thread_state_size), context.core_file_length);

	if (core_begin_offset) {
		/* If we're writing to disk (we have a begin offset), we need to update the header */
		ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed);
		if (ret != KERN_SUCCESS) {
			*header_update_failed = TRUE;
			kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
			return ret;
		}
	}

	kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);

	*core_file_length = context.core_file_length_compressed;

	return KERN_SUCCESS;
}
kern_return_t
kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset)
{
	struct kern_coredump_core *current_core = NULL;
	uint64_t prev_core_length = 0;
	kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
	boolean_t header_update_failed = FALSE;

	assert(last_file_offset != NULL);

	*last_file_offset = first_file_offset;
	cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed);
	if (cur_ret != KERN_SUCCESS) {
		// As long as we didn't fail while updating the header for the raw file, we should be able to try
		// to capture other corefiles.
		if (header_update_failed) {
			// The header may be in an inconsistent state, so bail now
			return KERN_FAILURE;
		} else {
			prev_core_length = 0;
			ret = KERN_FAILURE;
		}
	}

	*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
	prev_core_length = 0;

	if (kernel_only) {
		return ret;
	}

	current_core = kern_coredump_core_list;
	while (current_core) {
		/* Seek to the beginning of the next file */
		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
		if (cur_ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
			break;
		}

		cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed);
		if (cur_ret != KERN_SUCCESS) {
			// As long as we didn't fail while updating the header for the raw file, we should be able to try
			// to capture other corefiles.
			if (header_update_failed) {
				// The header may be in an inconsistent state, so bail now
				return KERN_FAILURE;
			} else {
				// Try to capture other corefiles even if one failed, but update the overall
				// return status
				prev_core_length = 0;
				ret = KERN_FAILURE;
			}
		}

		/* Calculate the offset of the beginning of the next core in the raw file */
		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		prev_core_length = 0;
		current_core = current_core->kcc_next;
	}

	return ret;
}
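/*
 * Illustrative layout of the resulting raw corefile region: the kernel's core is
 * written first at first_file_offset, and each registered co-processor core then
 * begins at the next KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN-aligned offset past the
 * previous core's (compressed) length. *last_file_offset is left pointing at the
 * aligned offset following the final core.
 */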
#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks, void *refcon,
		const char *core_description, boolean_t is64bit, uint32_t mh_magic,
		cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
#pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
	return KERN_NOT_SUPPORTED;
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
/*
 * Must be callable with a NULL context
 */
void
kern_coredump_log(void *context, const char *string, ...)
{
#pragma unused(context)
	va_list coredump_log_args;

	va_start(coredump_log_args, string);
	_doprnt(string, &coredump_log_args, consdebug_putc, 0);
	va_end(coredump_log_args);
}