]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kdp/processor_core.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / osfmk / kdp / processor_core.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <kdp/kdp_core.h>
30#include <kdp/processor_core.h>
31#include <kern/assert.h>
32#include <kern/kalloc.h>
33#include <libkern/kernel_mach_header.h>
34#include <libkern/OSAtomic.h>
35#include <libsa/types.h>
36#include <pexpert/pexpert.h>
37
38#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
39
0a7de745
A
/*
 * Round x up to the next multiple of y.
 * NOTE: evaluates both arguments more than once — do not pass expressions
 * with side effects; y must be non-zero.
 */
#define roundup(x, y) ((((x) % (y)) == 0) ? \
	        (x) : ((x) + ((y) - ((x) % (y)))))
5ba3f43e
A
42
/*
 * The processor_core_context structure describes the current
 * corefile that's being generated. It also includes a pointer
 * to the core_outvars which is used by the KDP code for context
 * about the specific output mechanism being used.
 *
 * We include *remaining variables to catch inconsistencies / bugs
 * in the co-processor coredump callbacks.
 */
typedef struct {
	struct kdp_core_out_vars * core_outvars; /* Output procedure info (see kdp_core.c) */
	kern_coredump_callback_config *core_config; /* Information about core currently being dumped */
	void *core_refcon; /* Reference constant associated with the coredump helper */
	boolean_t core_is64bit; /* Bitness of CPU */
	uint32_t core_mh_magic; /* Magic for mach header */
	cpu_type_t core_cpu_type; /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype; /* CPU subtype for mach header */
	uint64_t core_file_length; /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed; /* File length after compression */
	uint64_t core_segment_count; /* Number of LC_SEGMENT*s in the core currently being dumped */
	uint64_t core_segments_remaining; /* Number of LC_SEGMENT*s that have not been added to the header */
	uint64_t core_segment_byte_total; /* Sum of all the data from the LC_SEGMENTs in the core */
	uint64_t core_segment_bytes_remaining; /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count; /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining; /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size; /* Size of each LC_THREAD (all threads use the same fixed-size command) */
	uint64_t core_misc_bytes_count; /* Quantity of LC_NOTE data to be included (currently unused: LC_NOTE is TODO) */
	uint64_t core_misc_bytes_remaining; /* Quantity of LC_NOTE data that has not yet been included */
	uint64_t core_cur_hoffset; /* Current offset in this core's header (Mach-O header + load commands) */
	uint64_t core_cur_foffset; /* Current offset in this core's overall file (segment data region) */
	uint64_t core_header_size; /* Size of this core's header; segment data begins at round_page() of this */
	uint64_t core_total_bytes; /* Total amount of data to be included in this core (excluding zero fill) */
} processor_core_context;
76
/*
 * The kern_coredump_core structure describes a core that has been
 * registered for use by the coredump mechanism.
 */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next; /* Next processor to dump */
	void *kcc_refcon; /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
	boolean_t kcc_is64bit; /* Processor bitness */
	uint32_t kcc_mh_magic; /* Magic for mach header */
	cpu_type_t kcc_cpu_type; /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype; /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb; /* Registered processor callbacks for coredump */
} * kern_coredump_core_list = NULL; /* Head of the lock-free singly-linked list of co-processor helpers */

/* Total helpers registered (kernel + co-processors); registration is capped at KERN_COREDUMP_MAX_CORES */
uint32_t coredump_registered_count = 0;

/* Helper registered for xnu itself; dumped first, kept separate from kern_coredump_core_list */
struct kern_coredump_core *kernel_helper = NULL;
95
96static struct kern_coredump_core *
cb323159 97kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
0a7de745
A
98 void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit,
99 uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
5ba3f43e
A
100{
101 struct kern_coredump_core *core_helper = NULL;
102 kern_coredump_callback_config *core_callbacks = NULL;
103
0a7de745 104 if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
5ba3f43e 105 return NULL;
0a7de745
A
106 }
107 if (kc_callbacks == NULL) {
108 return NULL;
109 }
110 ;
111 if (core_description == NULL) {
5ba3f43e 112 return NULL;
0a7de745 113 }
5ba3f43e
A
114
115 if (kc_callbacks->kcc_coredump_get_summary == NULL ||
0a7de745
A
116 kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
117 kc_callbacks->kcc_coredump_save_segment_data == NULL ||
118 kc_callbacks->kcc_coredump_save_thread_state == NULL ||
119 kc_callbacks->kcc_coredump_save_sw_vers == NULL) {
5ba3f43e 120 return NULL;
0a7de745 121 }
5ba3f43e
A
122
123#if !defined(__LP64__)
124 /* We don't support generating 64-bit cores on 32-bit platforms */
0a7de745 125 if (is64bit) {
5ba3f43e 126 return NULL;
0a7de745 127 }
5ba3f43e
A
128#endif
129
130 core_helper = kalloc(sizeof(*core_helper));
131 core_helper->kcc_next = NULL;
132 core_helper->kcc_refcon = refcon;
133 if (xnu_callback) {
134 snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
135 } else {
136 /* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */
137 snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description);
138 }
139 core_helper->kcc_is64bit = is64bit;
140 core_helper->kcc_mh_magic = mh_magic;
141 core_helper->kcc_cpu_type = cpu_type;
142 core_helper->kcc_cpu_subtype = cpu_subtype;
143 core_callbacks = &core_helper->kcc_cb;
144
145 core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
146 core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
147 core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
148 core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
149 core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
150 core_callbacks->kcc_coredump_save_misc_data = kc_callbacks->kcc_coredump_save_misc_data;
151 core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;
152
153 if (xnu_callback) {
154 assert(kernel_helper == NULL);
155 kernel_helper = core_helper;
156 } else {
157 do {
158 core_helper->kcc_next = kern_coredump_core_list;
159 } while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
160 }
161
162 OSAddAtomic(1, &coredump_registered_count);
163 kprintf("Registered coredump handler for %s\n", core_description);
164
165 return core_helper;
166}
167
168kern_return_t
cb323159 169kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
0a7de745
A
170 void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
171 cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
5ba3f43e 172{
0a7de745 173 if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
5ba3f43e 174 return KERN_RESOURCE_SHORTAGE;
0a7de745 175 }
5ba3f43e
A
176
177 if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, FALSE,
0a7de745 178 is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
5ba3f43e 179 return KERN_INVALID_ARGUMENT;
0a7de745 180 }
5ba3f43e
A
181
182 return KERN_SUCCESS;
183}
184
185kern_return_t
186kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
187{
188#if defined(__LP64__)
189 boolean_t is64bit = TRUE;
190#else
191 boolean_t is64bit = FALSE;
192#endif
193
194 if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", TRUE, is64bit,
0a7de745 195 _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
5ba3f43e 196 return KERN_FAILURE;
0a7de745 197 }
5ba3f43e
A
198
199 return KERN_SUCCESS;
200}
201
/*
 * Save metadata about the core we're about to write, write out the mach header
 *
 * Callback passed to a helper's kcc_coredump_get_summary. The helper reports
 * its segment/thread counts and sizes; we record them in the context, size the
 * Mach-O load commands, compute the overall (page-rounded) file length, reset
 * the output stream, and emit the mach_header / mach_header_64.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT for zero counts or an oversized
 * thread state, or the error from kdp_core_output.
 */
static int
coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
    uint64_t thread_count, uint64_t thread_state_size,
    uint64_t misc_bytes_count, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	uint32_t sizeofcmds = 0, numcmds = 0;
	int ret = 0;

	/* A core must have at least one segment and one thread of bounded state */
	if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size
	    || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Initialize core_context (the *_remaining copies are consistency checks) */
	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
	core_context->core_thread_state_size = thread_state_size;
	core_context->core_misc_bytes_remaining = core_context->core_misc_bytes_count = misc_bytes_count;


	/* Size the load commands: LC_SEGMENTs + LC_THREADs + LC_IDENT (with full version-string payload) */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
	} else
#endif /* defined(__LP64__) */
	{
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
	}

	/* Segment data starts page-aligned after the header; the gap is zero filled later */
	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + /* TODO: LC_NOTE */ 0;
	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + /* TODO: LC_NOTE */ 0;
	core_context->core_cur_foffset = round_page(core_context->core_header_size);

	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + /* TODO: LC_NOTE */ 0 +
	    1 /* ident command */);

	/*
	 * Reset the zstream and other output context before writing any data out. We do this here
	 * to update the total file length on the outvars before we start writing out.
	 */
	kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length);

	/* Construct core file header */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct mach_header_64 core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	} else
#endif /* defined(__LP64__) */
	{
		struct mach_header core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	}

	return KERN_SUCCESS;
}
303
/*
 * Construct a segment command for the specified segment.
 *
 * Callback passed to a helper's kcc_coredump_save_segment_descriptions; called
 * once per [seg_start, seg_end) range. Emits an LC_SEGMENT_64 (64-bit cores) or
 * LC_SEGMENT (32-bit cores; addresses must fit in 32 bits) load command whose
 * fileoff is the current running file offset, then advances the header and file
 * offsets and decrements the remaining-segment count.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT for bad ranges or too many calls,
 * KERN_NO_SPACE if the header region is exhausted, or the kdp_core_output error.
 */
static int
coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
    void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	int ret;
	uint64_t size = seg_end - seg_start;

	if (seg_end <= seg_start) {
		kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
		    seg_start, seg_end, context, seg_start, seg_end);
		return KERN_INVALID_ARGUMENT;
	}

	/* The helper must not describe more segments than it promised in the summary */
	if (core_context->core_segments_remaining == 0) {
		kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
		    seg_start, seg_end, context, core_context->core_segment_count);
		return KERN_INVALID_ARGUMENT;
	}

	/* Construct segment command */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct segment_command_64 seg_command = { };

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT_64;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0; /* unnamed segment */
		seg_command.vmaddr = seg_start;
		seg_command.vmsize = size;
		seg_command.fileoff = core_context->core_cur_foffset;
		seg_command.filesize = size; /* no zero-fill segments: filesize == vmsize */
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
			    seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	} else
#endif /* defined(__LP64__) */
	{
		struct segment_command seg_command = { };

		/* 32-bit load commands cannot represent addresses above 4GB */
		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
			    seg_start, seg_end, context, seg_start, seg_end);
			return KERN_INVALID_ARGUMENT;
		}

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0; /* unnamed segment */
		seg_command.vmaddr = (uint32_t) seg_start;
		seg_command.vmsize = (uint32_t) size;
		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
		seg_command.filesize = (uint32_t) size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	}

	/* Update coredump context: one fewer segment, file offset advances past this segment's data */
	core_context->core_segments_remaining--;
	core_context->core_cur_foffset += size;

	return KERN_SUCCESS;
}
403
/*
 * Save thread state.
 *
 * Passed thread_state is expected to be a struct thread_command
 * (an LC_THREAD load command of exactly core_thread_state_size bytes,
 * as declared by the helper in its summary).
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT if the command is not
 * LC_THREAD, KERN_NO_SPACE if the header region is exhausted, or the
 * error from kdp_core_output.
 */
static int
coredump_save_thread_state(void *thread_state, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	struct thread_command *tc = (struct thread_command *)thread_state;
	int ret;

	if (tc->cmd != LC_THREAD) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : found %d expected LC_THREAD (%d)\n",
		    thread_state, context, tc->cmd, LC_THREAD);
		return KERN_INVALID_ARGUMENT;
	}

	/* LC_THREADs live in the load-command region; don't let them overflow the header */
	if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : ran out of space to save threads with %llu of %llu remaining\n",
		    thread_state, context, core_context->core_threads_remaining, core_context->core_thread_count);
		return KERN_NO_SPACE;
	}

	ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : failed to write thread data : kdp_core_output(%p, %llu, %p) returned 0x%x\n",
		    thread_state, context, core_context->core_outvars, core_context->core_thread_state_size, thread_state, ret);
		return ret;
	}

	core_context->core_threads_remaining--;
	core_context->core_cur_hoffset += core_context->core_thread_state_size;

	return KERN_SUCCESS;
}
440
/*
 * Write the software version string as an LC_IDENT load command.
 *
 * The command always occupies sizeof(struct ident_command) +
 * KERN_COREDUMP_VERSIONSTRINGMAXSIZE bytes in the header (cmdsize reflects
 * the full size); if the supplied string is shorter, the tail is zero filled
 * so subsequent commands stay at the precomputed offsets.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT for zero/oversized length,
 * KERN_NO_SPACE if the header region is exhausted, or the kdp_core_output error.
 */
static int
coredump_save_sw_vers(void *sw_vers, uint64_t length, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	struct ident_command ident = { };
	int ret;

	if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : called with invalid length %llu\n",
		    sw_vers, length, context, length);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_cur_hoffset + sizeof(struct ident_command) + length > core_context->core_header_size) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : ran out of space to save data\n",
		    sw_vers, length, context);
		return KERN_NO_SPACE;
	}

	ident.cmd = LC_IDENT;
	/* cmdsize covers the maximum payload, not just `length` — see zero fill below */
	ident.cmdsize = (uint32_t)(sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE);
	ret = kdp_core_output(core_context->core_outvars, sizeof(struct ident_command), (caddr_t)&ident);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write ident command : kdp_core_output(%p, %lu, %p) returned 0x%x\n",
		    sw_vers, length, context, core_context->core_outvars, sizeof(struct ident_command), &ident, ret);
		return ret;
	}

	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)sw_vers);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write version string : kdp_core_output(%p, %llu, %p) returned 0x%x\n",
		    sw_vers, length, context, core_context->core_outvars, length, sw_vers, ret);
		return ret;
	}

	if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
		/* Zero fill to the full command size */
		ret = kdp_core_output(core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), NULL);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
			    sw_vers, length, context, core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), ret);
			return ret;
		}
	}

	/* Header offset advances by the full (padded) command size regardless of `length` */
	core_context->core_cur_hoffset += sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE;

	return KERN_SUCCESS;
}
490
491static int
492coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
493{
494 int ret;
495 processor_core_context *core_context = (processor_core_context *)context;
496
497 if (length > core_context->core_segment_bytes_remaining) {
d9a64523 498 kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : called with too much data, %llu written, %llu left\n",
0a7de745
A
499 seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
500 core_context->core_segment_bytes_remaining);
5ba3f43e
A
501 return KERN_INVALID_ARGUMENT;
502 }
503
504 ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
505 if (ret != KERN_SUCCESS) {
d9a64523 506 kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : failed to write data (%llu bytes remaining) :%d\n",
0a7de745 507 seg_data, length, context, core_context->core_segment_bytes_remaining, ret);
5ba3f43e
A
508 return ret;
509 }
510
511 core_context->core_segment_bytes_remaining -= length;
512 core_context->core_cur_foffset += length;
513
514 return KERN_SUCCESS;
515}
516
/*
 * Dump a single core (kernel or co-processor) by driving the helper's
 * registered callbacks in order: optional init, summary (sizes the file and
 * writes the Mach-O header), segment descriptions, thread state, sw version
 * (LC_IDENT), zero fill up to the page-aligned data region, segment data,
 * and a final flush. The *_remaining counters in the context are checked
 * after each phase to catch helpers that under-report.
 *
 * core_begin_offset is non-zero when writing to disk, in which case the
 * corefile-collection header is updated afterwards via kern_dump_record_file;
 * *header_update_failed tells the caller whether that update failed (in which
 * case the overall file may be inconsistent and the caller should bail).
 * On success *core_file_length receives the compressed length of this core.
 */
static kern_return_t
kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed)
{
	kern_return_t ret;
	processor_core_context context = { };
	*core_file_length = 0;
	*header_update_failed = FALSE;

	/* Setup the coredump context */
	context.core_outvars = core_outvars;
	context.core_config = &current_core->kcc_cb;
	context.core_refcon = current_core->kcc_refcon;
	context.core_is64bit = current_core->kcc_is64bit;
	context.core_mh_magic = current_core->kcc_mh_magic;
	context.core_cpu_type = current_core->kcc_cpu_type;
	context.core_cpu_subtype = current_core->kcc_cpu_subtype;

	kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);

	if (current_core->kcc_cb.kcc_coredump_init != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
		if (ret == KERN_NODE_DOWN) {
			/* Co-processor is offline; not an error — just skip it */
			kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
			return KERN_SUCCESS;
		} else if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(kern_coredump_routine) : coredump_init failed with %d\n", ret);
			return ret;
		}
	}

	/* Populate the context with metadata about the corefile (cmd info, sizes etc) */
	ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : get_summary failed with %d\n", ret);
		return ret;
	}

	if (context.core_header_size == 0) {
		/* The helper never invoked coredump_save_summary — nothing was sized or written */
		kern_coredump_log(&context, "(kern_coredump_routine) : header size not populated after coredump_get_summary\n");
		return KERN_FAILURE;
	}

	/* Save the segment descriptions for the segments to be included */
	ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
	    &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions failed with %d\n", ret);
		return ret;
	}

	if (context.core_segments_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
		    context.core_segments_remaining, context.core_segment_count);
		return KERN_FAILURE;
	}

	/* TODO: Add LC_NOTE command for miscellaneous data if requested */

	/*
	 * Save the thread commands/state
	 *
	 * TODO: Should this buffer be allocated at boot rather than on the stack?
	 */
	if (context.core_thread_state_size) {
		/* VLA scratch buffer for the helper to build each LC_THREAD into */
		char threadstatebuf[context.core_thread_state_size];
		ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
		    &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state failed with %d\n", ret);
			return ret;
		}
	}

	if (context.core_threads_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
		    context.core_threads_remaining, context.core_thread_count);
		return KERN_FAILURE;
	}

	/* Save the sw version string */
	ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_sw_vers failed with %d\n", ret);
		return ret;
	}

	/* At this point every load command has been written; the header must be exactly full */
	assert(context.core_cur_hoffset == context.core_header_size);

	/* Zero fill between the end of the header and the beginning of the segment data file offset */
	ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
		    context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
		return ret;
	}

	context.core_cur_foffset = round_page(context.core_header_size);
	ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
		return ret;
	}

	if (context.core_segment_bytes_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
		    context.core_segment_bytes_remaining, context.core_segment_byte_total);
		return KERN_FAILURE;
	}

	/* TODO: Save the miscellaneous data if requested */

	/* Flush the last data out */
	ret = kdp_core_output(context.core_outvars, 0, NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
		    context.core_outvars, ret);
		return ret;
	}

	kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
	    current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
	    (context.core_thread_count * context.core_thread_state_size), context.core_file_length);

	if (core_begin_offset) {
		/* If we're writing to disk (we have a begin offset, we need to update the header */
		ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed);
		if (ret != KERN_SUCCESS) {
			*header_update_failed = TRUE;
			kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
			return ret;
		}
	}

	kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);

	*core_file_length = context.core_file_length_compressed;

	return KERN_SUCCESS;
}
656
/*
 * Top-level coredump driver: dump the kernel core first, then (unless
 * kernel_only) each registered co-processor core, packing each corefile
 * into the raw output at offsets aligned to KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN.
 *
 * Error policy: a failure inside one core's dump does not stop the others
 * (overall status becomes KERN_FAILURE), EXCEPT when the corefile-collection
 * header update failed — then the output may be inconsistent and we bail
 * immediately.
 *
 * On return *last_file_offset is the aligned offset just past the last core
 * written (starts at first_file_offset).
 */
kern_return_t
kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset)
{
	struct kern_coredump_core *current_core = NULL;
	uint64_t prev_core_length = 0;
	kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
	boolean_t header_update_failed = FALSE;

	assert(last_file_offset != NULL);

	/* The kernel core always comes first */
	*last_file_offset = first_file_offset;
	cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed);
	if (cur_ret != KERN_SUCCESS) {
		// As long as we didn't fail while updating the header for the raw file, we should be able to try
		// to capture other corefiles.
		if (header_update_failed) {
			// The header may be in an inconsistent state, so bail now
			return KERN_FAILURE;
		} else {
			prev_core_length = 0;
			ret = KERN_FAILURE;
		}
	}

	*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
	prev_core_length = 0;

	if (kernel_only) {
		return ret;
	}

	current_core = kern_coredump_core_list;
	while (current_core) {
		/* Seek to the beginning of the next file */
		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
		if (cur_ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
			return KERN_FAILURE;
		}

		cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed);
		if (cur_ret != KERN_SUCCESS) {
			// As long as we didn't fail while updating the header for the raw file, we should be able to try
			// to capture other corefiles.
			if (header_update_failed) {
				// The header may be in an inconsistent state, so bail now
				return KERN_FAILURE;
			} else {
				// Try to capture other corefiles even if one failed, update the overall return
				// status though
				prev_core_length = 0;
				ret = KERN_FAILURE;
			}
		}

		/* Calculate the offset of the beginning of the next core in the raw file */
		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		prev_core_length = 0;
		current_core = current_core->kcc_next;
	}

	return ret;
}
720#else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
721
/*
 * Stub for builds without CONFIG_KDP_INTERACTIVE_DEBUGGING: coredump
 * helpers cannot be registered, so always report KERN_NOT_SUPPORTED.
 */
kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
    const char *core_description, boolean_t is64bit, uint32_t mh_magic,
    cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
#pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
	return KERN_NOT_SUPPORTED;
}
730#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
731
/*
 * Log a printf-style message during coredump generation.
 *
 * Must be callable with a NULL context — the context parameter is currently
 * unused; output goes to the console/debug channel via _doprnt with
 * consdebug_putc. On embedded configurations the paniclog is flushed after
 * each message so progress is persisted even if the dump later wedges.
 */
void
kern_coredump_log(void *context, const char *string, ...)
{
#pragma unused(context)
	va_list coredump_log_args;

	va_start(coredump_log_args, string);
	_doprnt(string, &coredump_log_args, consdebug_putc, 0);
	va_end(coredump_log_args);

#if CONFIG_EMBEDDED
	paniclog_flush();
#endif
}