/*
 * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

#if CONFIG_EMBEDDED
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
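
/*
 * Worked example for the two rounding macros above (illustrative only;
 * 'b' must be a power of two since these mask rather than divide):
 *   ROUNDUP(4100, 4096)   == 8192
 *   ROUNDDOWN(4100, 4096) == 4096
 */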
#endif /* CONFIG_EMBEDDED */

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
                                          vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          pmap_traverse_callback callback,
                                          void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);

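/*
 * Per-dump output state, threaded through the zlib stream's 'opaque'
 * pointer: outproc is the raw transport (disk, network or shared memory),
 * zoutput is the zlib-facing write function, zipped counts compressed
 * bytes emitted, totalbytes/lastpercent drive the progress log, error
 * latches the first output failure, and outbuf/outlen/outremain implement
 * the packet-sized staging buffer used for network dumps.
 */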
struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;
    z_output_func         zoutput;
    size_t                zipped;
    uint64_t              totalbytes;
    uint64_t              lastpercent;
    IOReturn              error;
    unsigned              outremain;
    unsigned              outlen;
    unsigned              writes;
    Bytef *               outbuf;
};


extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t      kdp_core_zsize;
static size_t      kdp_core_zoffset;
static z_stream    kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;
#if CONFIG_EMBEDDED
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS     2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE  (64 * 1024)

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by Astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

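/*
 * Illustrative sizing arithmetic: 64 KiB of shared memory split across
 * 2 buffers gives 32768 bytes per buffer; trimming that to a multiple of
 * OPTIMAL_ASTRIS_READSIZE leaves 8 * 4064 = 32512 usable bytes, i.e.
 * 8 full-sized Astris reads per buffer.
 */
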
struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t    khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
        STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
        STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* CONFIG_EMBEDDED */

static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

boolean_t kdp_has_polled_corefile(void)
{
    return (NULL != gIOPolledCoreFileVars);
}

kern_return_t kdp_polled_corefile_error(void)
{
    return gIOPolledCoreFileOpenRet;
}
#if CONFIG_EMBEDDED
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset()
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}

/*
 * Tries to move buffers forward through the pipeline. If
 * the hardware debugger is done consuming the current buffer,
 * we hand it the next buffer to flush and return the consumed
 * one to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers()
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kern_coredump_log(NULL, "Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
                    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion(currently_flushing_buf->khsd_buf,
                    currently_flushing_buf->khsd_data_length);
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
                kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
        kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer()
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);
    return ret;
}

/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
        uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;

    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
                currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kern_coredump_log(NULL, "Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);

    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }

    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
                currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
                bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}
#endif /* CONFIG_EMBEDDED */

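/*
 * Output procedure for local corefiles. Maps the generic dump requests
 * onto the IOPolledInterface file below: KDP_WRQ rewinds to offset 0 and
 * opens the pollers, KDP_SEEK flushes pending writes and seeks, KDP_DATA
 * writes (a NULL/0 pair flushes), KDP_FLUSH forces data out to the media,
 * and KDP_EOF flushes and closes the pollers.
 */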
static IOReturn
kern_dump_disk_proc(unsigned int request, __unused char *corename,
        uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request)
    {
        case KDP_WRQ:
            err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
            break;

        case KDP_SEEK:
            noffset = *((uint64_t *) data);
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
            }
            break;

        case KDP_DATA:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n",
                        data, length, err);
                break;
            }
            break;

#if CONFIG_EMBEDDED
        /* Only supported on embedded by the underlying polled mode driver */
        case KDP_FLUSH:
            err = IOPolledFileFlush(gIOPolledCoreFileVars);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
                break;
            }
            break;
#endif

        case KDP_EOF:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
                break;
            }
            break;
    }

    return (err);
}

/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    IOReturn ret;

    vars->zipped += len;

    if (vars->error >= 0)
    {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                    len, buf, ret);
            vars->error = ret;
        }
        if (!buf && !len) kern_coredump_log(NULL, "100..");
    }
    return (len);
}

/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned remain;
    IOReturn ret;
    unsigned chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        else
        {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain -= chunk;

        if (vars->outremain && !flush) break;
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                                    vars->outlen - vars->outremain,
                                    vars->outbuf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                    (vars->outlen - vars->outremain), vars->outbuf, ret);
            vars->error = ret;
        }
        if (flush)
        {
            kern_coredump_log(NULL, "100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}

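/*
 * zlib read callback: feeds up to 'size' bytes of pending input
 * (strm->next_in) into 'buf', substituting zeroes when the input pointer
 * is the stream itself (the zero marker used for untouched regions),
 * maintains the running CRC, and logs a progress percentage roughly
 * every 512 calls.
 */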
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    uint64_t percent, total_in = 0;
    unsigned len;

    len = strm->avail_in;
    if (len > size) len = size;
    if (len == 0) return 0;

    if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
    else bzero(buf, len);
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in  += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++))
    {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10)
        {
            vars->lastpercent = percent;
            kern_coredump_log(NULL, "%lld..\n", percent);
        }
    }

    return (int)len;
}

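/*
 * Pushes one chunk through the deflate stream, or hands it straight to
 * the output function when kdp_corezip_disabled is set. A zero length
 * with NULL data requests a flush (Z_FINISH).
 */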
static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int        zr;
    boolean_t  flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled)
    {
        (*vars->zoutput)(zs, data, length);
    }
    else
    {

        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0)
        {
            if (!zs->avail_in && !flush)
            {
                if (!length) break;
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out)
            {
                zs->next_out = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) break;
            if (zr != Z_OK)
            {
                kern_coredump_log(NULL, "ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) (*vars->zoutput)(zs, NULL, 0);
    }

    return (vars->error);
}

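/*
 * Entry point used by the coredump layer to emit data: splits arbitrarily
 * large lengths into chunks of at most 1GB so that each piece fits the
 * 32-bit sizes zlib operates on.
 */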
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
    IOReturn     err;
    unsigned int chunk;
    enum { kMaxZLibChunk = 1024*1024*1024 };
    struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    do
    {
        if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
        else chunk = kMaxZLibChunk;
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) data = (void *) (((uintptr_t) data) + chunk);
    }
    while (length && (kIOReturnSuccess == err));

    return (err);
}

#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t   c_buffers_size;

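/*
 * Returns the physical page number backing 'vaddr' in the kernel pmap,
 * or 0 for ranges that should not be dumped (compressor buffers, zlib
 * working memory, the RAMDisk, and other excluded regions). *pvincr is
 * set to the page-rounded size of the region to skip; when pvphysaddr is
 * non-NULL, a physmap-based kernel virtual alias is returned through it
 * for pages enclosed by the physmap.
 */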
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers)
    {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    }
    else if (vaddr == kdp_core_zmem)
    {
        /* zlib working memory */
        ppn = 0;
        vincr = kdp_core_zsize;
    }
    else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
    {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    }
    else
#if defined(__arm64__) && defined(CONFIG_XNUPOST)
    if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS)
    {
        /* not readable */
        ppn = 0;
        vincr = _COMM_PAGE_AREA_LENGTH;
    }
    else
#endif /* defined(__arm64__) && defined(CONFIG_XNUPOST) */
#if defined(__arm__) || defined(__arm64__)
    if (vaddr == phystokv(avail_start))
    {
        /* physical memory map */
        ppn = 0;
        vincr = (avail_end - avail_start);
    }
    else
#endif /* defined(__arm__) || defined(__arm64__) */
    ppn = pmap_find_phys(kernel_pmap, vaddr);

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr)
    {
        uint64_t phys = ptoa_64(ppn);
        if (physmap_enclosed(phys)) {
            *pvphysaddr = phystokv(phys);
        } else {
            ppn = 0;
        }
    }

    return (ppn);
}

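/*
 * Walks [start, end) of the kernel pmap and invokes 'callback' once for
 * each maximal run of present, dumpable mappings. On ARM the pagetable
 * pages reachable through the physmap are pulled from pmap_object_store;
 * on x86_64, absent 2MB-aligned regions are skipped one page directory
 * entry at a time.
 */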
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn        ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t        vincr = 0;
    vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
    vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
    vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
    vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

    boolean_t lastvavalid;
#if defined(__arm__) || defined(__arm64__)
    vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
        /* We're at the start of the physmap, so pull out the pagetable pages that
         * are accessed through that region. */
        if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store))
            m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);

        if (m != VM_PAGE_NULL)
        {
            vm_map_offset_t vprev = vcur;
            ppn = (ppnum_t)atop(avail_end);
            while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m))
            {
                /* Ignore pages that come from the static region and have already been dumped. */
                if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start))
                {
                    ppn = VM_PAGE_GET_PHYS_PAGE(m);
                    break;
                }
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
            }
            vincr = PAGE_SIZE_64;
            if (ppn == atop(avail_end))
            {
                vm_object_unlock(&pmap_object_store);
                m = VM_PAGE_NULL;
                // avail_end is not a valid physical address,
                // so phystokv(avail_end) may not produce the expected result.
                vcur = phystokv(avail_start) + (avail_end - avail_start);
            } else {
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
                vcur = phystokv(ptoa(ppn));
            }
            if (vcur != vprev)
            {
                ret = callback(vcurstart, vprev, context);
                lastvavalid = FALSE;
            }
        }
        if (m == VM_PAGE_NULL)
            ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#else /* defined(__arm__) || defined(__arm64__) */
        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
        if (ppn != 0)
        {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
                // include the macOS panic region if it's mapped
                && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
                )
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if ((vcur & PDMASK) == 0) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }

#if KASAN
    if (ret == KERN_SUCCESS) {
        ret = kasan_traverse_mappings(callback, context);
    }
#endif

    return (ret);
}

struct kern_dump_preflight_context
{
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}

struct kern_dump_send_seg_desc_context
{
    core_save_segment_descriptions_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
    uint64_t seg_start = (uint64_t) start;
    uint64_t seg_end = (uint64_t) end;

    return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context
{
    core_save_segment_data_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

    return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

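/*
 * The five save routines below form the kernel's own provider for the
 * kern_do_coredump() machinery used later in this file: a summary pass
 * sizes the dump, then separate passes emit the segment descriptions,
 * per-thread state, the kernel version string, and finally the segment
 * data itself.
 */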
static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    uint64_t thread_state_size = 0, thread_count = 0;
    kern_return_t ret;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_preflight_callback,
            &kdc_preflight);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
        return ret;
    }

    kern_collectth_state_size(&thread_count, &thread_state_size);

    ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
            thread_count, thread_state_size, 0, context);
    return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
    kern_return_t ret;
    struct kern_dump_send_seg_desc_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_send_segdesc_callback,
            &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
    kern_return_t ret;
    uint64_t thread_state_size = 0, thread_count = 0;

    kern_collectth_state_size(&thread_count, &thread_state_size);

    if (thread_state_size > 0) {
        void * iter = NULL;
        do {
            kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

            ret = callback(buf, context);
            if (ret != KERN_SUCCESS) {
                return ret;
            }
        } while (iter);
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
    return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
    kern_return_t ret;
    struct kern_dump_send_segdata_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    /* Re-initialize kdp_outvars */
    outvars->zipped = 0;
    outvars->totalbytes = totalbytes;
    outvars->lastpercent = 0;
    outvars->error = kIOReturnSuccess;
    outvars->outremain = 0;
    outvars->outlen = 0;
    outvars->writes = 0;
    outvars->outbuf = NULL;

    if (outvars->outproc == &kdp_send_crashdump_data) {
        /* KERN_DUMP_NET */
        outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
    }

    kdp_core_total_size = totalbytes;

    /* Re-initialize zstream variables */
    kdp_core_zs.avail_in = 0;
    kdp_core_zs.next_in  = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out  = NULL;
    kdp_core_zs.opaque = outvars;

    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

    return KERN_SUCCESS;
}

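/*
 * Rewrites the mach_core_fileheader at offset 0 of the corefile and
 * flushes it, keeping the on-disk header consistent after each file is
 * recorded.
 */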
static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
    uint64_t foffset;
    int ret;

    /* Write the file header -- first seek to the beginning of the file */
    foffset = 0;
    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                sizeof(kdp_core_header), &kdp_core_header, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if CONFIG_EMBEDDED
    if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif

    return KERN_SUCCESS;
}

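/*
 * Records one finished file (its gzip offset, length and name) in the
 * corefile header and pushes the updated header to disk. The header
 * signature is only set here, which should keep a partially written
 * corefile from being mistaken for a valid one.
 */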
int
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
    int ret = 0;
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
    assert(out_file_length != NULL);
    *out_file_length = 0;

    kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
    kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
    strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
            MACH_CORE_FILEHEADER_NAMELEN);
    kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
    kdp_core_header.num_files++;
    kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

    ret = kern_dump_update_header(outvars);
    if (ret == KERN_SUCCESS) {
        *out_file_length = outvars->zipped;
    }

    return ret;
}

int
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
    int ret;

    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
    }

    return ret;
}

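/*
 * Drives a complete dump for the chosen variant: opens the output (disk
 * variants), seeks past the space reserved for the file header and logs,
 * writes the panic stackshot on x86_64, runs kern_do_coredump() for the
 * kernel (plus co-processors for local dumps), then scrolls back to write
 * the panic/coredump logs and header before closing the output and
 * updating the panic header flags.
 */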
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kdp_core_out_vars outvars = { };

    char *log_start = NULL, *buf = NULL;
    size_t existing_log_size = 0, new_log_len = 0;
    uint64_t foffset = 0;
    int ret = 0;
    boolean_t output_opened = FALSE, dump_succeeded = TRUE;

    /*
     * Record the initial panic log buffer length so we can dump the coredump log
     * and panic log to disk
     */
    log_start = debug_buf_ptr;
#if CONFIG_EMBEDDED
    assert(panic_info->eph_other_log_offset != 0);
    assert(panic_info->eph_panic_log_len != 0);
    /* Include any data from before the panic log as well */
    existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
            panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
    if (panic_info->mph_panic_log_offset != 0) {
        existing_log_size = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
                panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
    }
#endif /* CONFIG_EMBEDDED */

    assert(existing_log_size <= debug_buf_size);

    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        /* Open the file for output */
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }
    output_opened = true;

    /* Initialize gzip, output context */
    bzero(&outvars, sizeof(outvars));
    outvars.outproc = outproc;

    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        outvars.zoutput = kdp_core_zoutput;
        /* Space for file header, panic log, core log */
        foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE +
                KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1);
        kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

        /* Seek to the calculated offset (we'll scroll back later to flush the logs and header) */
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.zoutput = kdp_core_zoutputbuf;
#if CONFIG_EMBEDDED
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.zoutput = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif
    }

#if defined(__arm__) || defined(__arm64__)
    flush_mmu_tlb();
#endif

    kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
            "Transmitting kernel state, please wait:\n");


#if defined(__x86_64__)
    if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
        uint64_t compressed_stackshot_len = 0;

        if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
            dump_succeeded = FALSE;
        } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, %p) returned 0x%x\n",
                    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
            dump_succeeded = FALSE;
        } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outvars, ret);
            dump_succeeded = FALSE;
        } else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
            dump_succeeded = FALSE;
        } else {
            kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
            foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
            if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
                kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
                dump_succeeded = FALSE;
            }
        }
    }
#endif

    if (kd_variant == KERN_DUMP_DISK) {
        /*
         * Dump co-processors as well, foffset will be overwritten with the
         * offset of the next location in the file to be written to.
         */
        if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    } else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
        /* Only the kernel */
        if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    }

    if (kd_variant == KERN_DUMP_DISK) {
        /* Write the debug log -- first seek to the end of the corefile header */
        foffset = KERN_COREDUMP_HEADERSIZE;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        new_log_len = debug_buf_ptr - log_start;
        if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) {
            new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE;
        }

        /* This data is after the panic stackshot, we need to write it separately */
#if CONFIG_EMBEDDED
        existing_log_size -= panic_info->eph_other_log_len;
#else
        if (existing_log_size) {
            existing_log_size -= panic_info->mph_other_log_len;
        }
#endif

        /*
         * Write out the paniclog (from the beginning of the debug
         * buffer until the start of the stackshot)
         */
        buf = debug_buf_base;
        if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                    existing_log_size, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        /*
         * The next part of the log we're interested in is the beginning of the 'other' log.
         * Include any data after the panic stackshot but before we started the coredump log
         * (see above)
         */
#if CONFIG_EMBEDDED
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
        new_log_len += panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
        new_log_len += panic_info->mph_other_log_len;
#endif /* CONFIG_EMBEDDED */

        /* Write the coredump log */
        if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                    new_log_len, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        kdp_core_header.log_length = existing_log_size + new_log_len;
        kern_dump_update_header(&outvars);
    }

exit:
    /* close / last packet */
    if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
        dump_succeeded = FALSE;
    }

    /* If applicable, update the panic header and flush it so we update the CRC */
#if CONFIG_EMBEDDED
    panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
            EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
    paniclog_flush();
#else
    if (panic_info->mph_panic_log_offset != 0) {
        panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
                MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
        paniclog_flush();
    }
#endif

    return (dump_succeeded ? 0 : -1);
}

boolean_t
dumped_kernel_core()
{
    return kern_dump_successful;
}

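/*
 * Top-level dispatch over the dump variants: local disk (optionally
 * stackshot-only), hardware shared memory debugger, or network via KDP.
 * Local dumps are single-shot: a repeated request succeeds immediately
 * once one has completed and fails while one is in progress.
 */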
int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
    int ret = -1;
#if KASAN
    kasan_disable();
#endif
    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        if (dumped_local) return (0);
        if (local_dump_in_progress) return (-1);
        local_dump_in_progress = TRUE;
#if CONFIG_EMBEDDED
        hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
#endif
        ret = do_kern_dump(&kern_dump_disk_proc, kd_variant);
        if (ret == 0) {
            dumped_local = TRUE;
            kern_dump_successful = TRUE;
            local_dump_in_progress = FALSE;
        }

        return ret;
#if CONFIG_EMBEDDED
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
#endif
    } else {
        ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
    }
}

#if CONFIG_EMBEDDED
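/*
 * Parks the panicked kernel in a handshake loop with the hardware
 * debugger: advertises the handshake structure, then spins flushing it
 * and servicing XHSDCI_COREDUMP_BEGIN requests, re-arming for another
 * dump whenever the remote side reports done or error.
 */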
1398 | void |
1399 | panic_spin_shmcon() | |
1400 | { | |
d9a64523 A |
1401 | if (hwsd_info == NULL) { |
1402 | kern_coredump_log(NULL, "handshake structure not initialized\n"); | |
1403 | return; | |
1404 | } | |
1405 | ||
5ba3f43e A |
1406 | kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n"); |
1407 | kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n", | |
1408 | hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info)); | |
1409 | ||
5ba3f43e A |
1410 | hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY; |
1411 | hwsd_info->xhsdci_seq_no = 0; | |
1412 | FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info)); | |
1413 | ||
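    /*
     * Poll the handshake structure forever. Each pass flushes its cache
     * lines so the kernel and the external debugger observe each other's
     * updates; BEGIN starts a shared-memory coredump, while REMOTE_DONE or
     * ERROR re-arms the handshake for another attempt.
     */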
    for (;;) {
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
            kern_dump(KERN_DUMP_HW_SHMEM_DBG);
        }

        if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
            (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
            hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
            hwsd_info->xhsdci_seq_no = 0;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }
    }
}
#endif /* CONFIG_EMBEDDED */

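/*
 * zlib allocation hooks: deflate's working memory is carved out of the
 * preallocated kdp_core_zmem region by a simple bump allocator, so no
 * general-purpose allocator needs to run at panic time. Frees are a
 * no-op; the region is sized up front by kdp_core_init().
 */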
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}

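/*
 * Compression tuning: Z_BEST_SPEED favors dump speed over ratio on all
 * platforms. NETBUF pads the preallocated zlib region with what appears to
 * be room for one network packet payload (1440 bytes) on targets that
 * support netdumps; embedded targets don't need it.
 */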
#if CONFIG_EMBEDDED
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif

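/*
 * One-time coredump setup: preallocate zlib's working memory, register the
 * XNU coredump callbacks, and, on embedded targets with consistent debug
 * enabled, carve out the physically contiguous buffers shared with the
 * hardware debugger.
 */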
void
kdp_core_init(void)
{
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;
#if CONFIG_EMBEDDED
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
    cache_info_t *cpuid_cache_info = NULL;
#endif
    kern_coredump_callback_config core_config = { };

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree = kdp_core_zfree;

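    /* Passing windowBits + 16 asks zlib to wrap the stream in a gzip header/trailer */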
    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
            wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }

    bzero(&kdp_core_header, sizeof(kdp_core_header));

    core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
    core_config.kcc_coredump_get_summary = kern_dump_save_summary;
    core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
    core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
    core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
    core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
    core_config.kcc_coredump_save_misc_data = NULL;

    kr = kern_register_xnu_coredump_helper(&core_config);
    assert(KERN_SUCCESS == kr);

#if CONFIG_EMBEDDED
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since astris isn't capable
     * of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
            0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and adjust
     * the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    cpuid_cache_info = cache_info();
    assert(cpuid_cache_info != NULL);

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    /* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
    kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
    kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
    kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
    /* The buffer size should be a cache-line length multiple */
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

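    /*
     * Buffers cycle between two queues: free_hw_shmem_dbg_bufs holds buffers
     * ready to receive compressed output, hw_shmem_dbg_bufs_to_flush holds
     * filled buffers awaiting consumption by the debugger.
     */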
    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = kalloc(sizeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
            &kdp_hw_shmem_dbg_contact_deadline_interval);

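    /*
     * Publish the handshake structure's physical address and the protocol
     * version through consistent debug records so the external debugger can
     * locate them without translating kernel virtual addresses.
     */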
    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* CONFIG_EMBEDDED */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */