/*
 * Copyright (c) 2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#endif /* defined(__i386__) || defined(__x86_64__) */

#if WITH_CONSISTENT_DBG
#include <pexpert/arm/consistent_debug.h>
#endif /* WITH_CONSISTENT_DBG */

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
                                          vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          pmap_traverse_callback callback,
                                          void *context);


static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context);
static int
kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          void *context);
static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);

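/*
 * Every dump variant funnels its output through a kern_dump_output_proc.
 * As used below, 'request' is one of the KDP crashdump opcodes: KDP_WRQ
 * opens the target, KDP_SEEK repositions it (the new offset is passed via
 * 'panic_data'), KDP_DATA writes 'length' bytes (length == 0 flushes any
 * buffered output), and KDP_EOF closes out the dump.
 */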
struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;     /* output procedure for this dump variant */
    z_output_func         zoutput;     /* zlib output hook (direct or buffered) */
    size_t                zipped;      /* compressed bytes emitted so far */
    uint64_t              totalbytes;  /* uncompressed bytes expected (from preflight) */
    uint64_t              lastpercent; /* last progress percentage printed */
    IOReturn              error;
    unsigned              outremain;   /* space left in outbuf (buffered path only) */
    unsigned              outlen;
    unsigned              writes;      /* input callback invocations, for progress */
    Bytef               * outbuf;      /* staging buffer for the network path */
};

struct kern_dump_preflight_context
{
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

struct kern_dump_send_context
{
    struct kdp_core_out_vars * outvars;
    uint64_t hoffset;
    uint64_t foffset;
    uint64_t header_size;
    uint64_t dumpable_bytes;
    uint32_t region_count;
};

extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t kdp_core_zsize;
static size_t kdp_core_zoffset;
static z_stream kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;
#if WITH_CONSISTENT_DBG
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS    2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE (64 * 1024)

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by Astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t    khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
        STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
        STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
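
/*
 * Handshake protocol, as implemented below: the kernel publishes a buffer's
 * physical address and length in hwsd_info together with an incremented
 * sequence number and XHSDCI_COREDUMP_BUF_READY; the hardware debugger
 * acknowledges by storing seq_no + 1 with XHSDCI_COREDUMP_BUF_EMPTY once it
 * has drained the buffer. A sequence mismatch, a remote
 * XHSDCI_COREDUMP_ERROR, or no contact within
 * KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS aborts the dump.
 */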
#endif /* WITH_CONSISTENT_DBG */

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

#define DEBG kdb_printf

boolean_t kdp_has_polled_corefile(void)
{
    return (NULL != gIOPolledCoreFileVars);
}

#if WITH_CONSISTENT_DBG
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset(void)
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}

/*
 * Tries to move the in-progress buffers forward: once
 * the hardware debugger is done consuming the current
 * buffer, we hand it the next one to flush and return
 * the consumed buffer to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers(void)
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kdb_printf("Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kdb_printf("Detected stale/invalid seq num. Expected: %d, received %d\n",
                    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
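            /*
             * hwsd_info sits at the start of the contiguous allocation and
             * the data buffers are carved out of the same region (see
             * kdp_core_init), so flushing KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE
             * from hwsd_info pushes the buffer payload out to memory as well.
             */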
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
                kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kdb_printf("Kernel timed out waiting for hardware debugger to update handshake structure.");
        kdb_printf(" No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success,
 * or the nonzero error code reported by
 * kern_dump_hw_shmem_dbg_process_buffers().
 */
static int
kern_dump_hw_shmem_dbg_get_buffer(void)
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);
    return ret;
}

/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
                                   uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;

    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
                currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kdb_printf("EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kdb_printf("Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);

    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }

    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
                currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
                bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}
#endif /* WITH_CONSISTENT_DBG */

static IOReturn
kern_dump_disk_proc(unsigned int request, __unused char *corename,
                    uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request)
    {
        case KDP_WRQ:
            err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
            if (kIOReturnSuccess != err) break;
            err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
            break;

        case KDP_SEEK:
            noffset = *((uint64_t *) data);
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) break;
            err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
            break;

        case KDP_DATA:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
            break;

        case KDP_EOF:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) break;
            err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
            break;
    }

    return (err);
}

/*
 * Flushes any data to the output proc immediately.
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    IOReturn ret;

    vars->zipped += len;

    if (vars->error >= 0)
    {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            vars->error = ret;
        }
        if (!buf && !len) DEBG("100..");
    }
    return (len);
}

/*
 * Tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned remain;
    IOReturn ret;
    unsigned chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        else
        {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain -= chunk;

        if (vars->outremain && !flush) break;
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                                    vars->outlen - vars->outremain,
                                    vars->outbuf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            vars->error = ret;
        }
        if (flush)
        {
            DEBG("100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}
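
/*
 * Input callback handed to the zlib stream (via deflateResetWithIO below):
 * it copies pending input from strm->next_in into the deflate buffer, or
 * zero-fills when next_in carries the "zero marker" (the stream pointer
 * itself, set by kdp_core_stream_output_chunk) so page-align padding never
 * has to exist in memory. It also maintains the CRC and prints a progress
 * percentage roughly every 512 calls.
 */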
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    uint64_t percent, total_in = 0;
    unsigned len;

    len = strm->avail_in;
    if (len > size) len = size;
    if (len == 0) return 0;

    if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
    else bzero(buf, len);
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++))
    {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10)
        {
            vars->lastpercent = percent;
            DEBG("%lld..\n", percent);
        }
    }

    return (int)len;
}

static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int zr;
    boolean_t flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled)
    {
        (*vars->zoutput)(zs, data, length);
    }
    else
    {
        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0)
        {
            if (!zs->avail_in && !flush)
            {
                if (!length) break;
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out)
            {
                zs->next_out = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) break;
            if (zr != Z_OK)
            {
                DEBG("ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) (*vars->zoutput)(zs, NULL, 0);
    }

    return (vars->error);
}

static IOReturn
kdp_core_stream_output(struct kdp_core_out_vars * vars, uint64_t length, void * data)
{
    IOReturn err;
    unsigned int chunk;
    enum { kMaxZLibChunk = 1024*1024*1024 };
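
    /*
     * zlib's avail_in is a 32-bit uInt, so feed the stream in chunks of at
     * most 1GB; each chunk is compressed (or passed straight through) by
     * kdp_core_stream_output_chunk before the data pointer advances.
     */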
    do
    {
        if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
        else chunk = kMaxZLibChunk;
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) data = (void *) (((uintptr_t) data) + chunk);
    }
    while (length && (kIOReturnSuccess == err));

    return (err);
}

extern vm_offset_t c_buffers;
extern vm_size_t c_buffers_size;

ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers)
    {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    }
    else if (vaddr == kdp_core_zmem)
    {
        /* zlib working memory */
        ppn = 0;
        vincr = kdp_core_zsize;
    }
    else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
    {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    }
    else
        ppn = pmap_find_phys(kernel_pmap, vaddr);

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr)
    {
        uint64_t phys = ptoa_64(ppn);
        if (physmap_enclosed(phys)) *pvphysaddr = PHYSMAP_PTOV(phys);
        else ppn = 0;
    }

    return (ppn);
}

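/*
 * Walks [start, end) in the kernel pmap, coalescing runs of present pages
 * and invoking 'callback' once per contiguous run. Regions that must not
 * be dumped (compressor data, zlib working memory, a RAMDisk root) are
 * skipped via kernel_pmap_present_mapping() above.
 */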
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t vincr;
    vm_map_offset_t debug_start;
    vm_map_offset_t debug_end;
    boolean_t lastvavalid;

    debug_start = trunc_page((vm_map_offset_t) debug_buf_addr);
    debug_end = round_page((vm_map_offset_t) (debug_buf_addr + debug_buf_size));

#if defined(__i386__) || defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn;

        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
        if (ppn != 0)
        {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !pmap_valid_page(ppn))
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__i386__) || defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__i386__) || defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}

int
kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          void *context)
{
    struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
    IOReturn ret = KERN_SUCCESS;
    kernel_segment_command_t sc;
    vm_size_t size = (vm_size_t)(end - start);

    if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
        return (KERN_NO_SPACE);
    }

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    /*
     * Fill in segment command structure.
     */
    sc.cmd = LC_SEGMENT_KERNEL;
    sc.cmdsize = sizeof(kernel_segment_command_t);
    sc.segname[0] = 0;
    sc.vmaddr = (vm_address_t)start;
    sc.vmsize = size;
    sc.fileoff = (vm_address_t)kdc->foffset;
    sc.filesize = size;
    sc.maxprot = VM_PROT_READ;
    sc.initprot = VM_PROT_READ;
    sc.nsects = 0;
    sc.flags = 0;

    if ((ret = kdp_core_stream_output(kdc->outvars, sizeof(kernel_segment_command_t), (caddr_t) &sc)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }

    kdc->hoffset += sizeof(kernel_segment_command_t);
    kdc->foffset += size;

out:
    return (ret);
}

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
    int ret = KERN_SUCCESS;
    vm_size_t size = (vm_size_t)(end - start);

    kdc->region_count++;
    kdc->dumpable_bytes += size;
    if ((ret = kdp_core_stream_output(kdc->outvars, size, (caddr_t)(uintptr_t)start)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }
    kdc->foffset += size;

out:
    return (ret);
}

static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    struct kern_dump_send_context kdc_sendseg = { };
    struct kern_dump_send_context kdc_send = { };
    struct kdp_core_out_vars outvars = { };
    struct mach_core_fileheader hdr = { };
    struct ident_command ident = { };
    kernel_mach_header_t mh = { };

    uint32_t segment_count = 0, tstate_count = 0;
    size_t command_size = 0, header_size = 0, tstate_size = 0;
    uint64_t hoffset = 0, foffset = 0;
    int ret = 0;
    char * log_start;
    char * buf;
    size_t log_size;
    uint64_t new_logs = 0;
    boolean_t opened;

    opened = false;
    log_start = debug_buf_ptr;
    log_size = debug_buf_ptr - debug_buf_addr;
    assert(log_size <= debug_buf_size);
    if (debug_buf_stackshot_start)
    {
        assert(debug_buf_stackshot_end >= debug_buf_stackshot_start);
        log_size -= (debug_buf_stackshot_end - debug_buf_stackshot_start);
    }

    if (kd_variant == KERN_DUMP_DISK)
    {
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, &hoffset)) != kIOReturnSuccess) {
            DEBG("KDP_WRQ(0x%x)\n", ret);
            goto out;
        }
    }
    opened = true;

    // init gzip
    bzero(&outvars, sizeof(outvars));
    bzero(&hdr, sizeof(hdr));
    outvars.outproc = outproc;

    /*
     * Initialize zstream variables that point to input and output
     * buffer info.
     */
    kdp_core_zs.avail_in = 0;
    kdp_core_zs.next_in = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out = NULL;
    kdp_core_zs.opaque = &outvars;
    kdc_sendseg.outvars = &outvars;
    kdc_send.outvars = &outvars;

    enum { kHdrOffset = 4096, kMaxCoreLog = 16384 };
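
    /*
     * On-disk layout for the local core path: the mach_core_fileheader is
     * written at offset 0, the panic/debug log lands at kHdrOffset, and the
     * gzip stream starts at the next 4KB boundary past the log plus a
     * kMaxCoreLog reserve for messages logged during the dump itself.
     */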
    if (kd_variant == KERN_DUMP_DISK) {
        outvars.outbuf = NULL;
        outvars.outlen = 0;
        outvars.outremain = 0;
        outvars.zoutput = kdp_core_zoutput;
        // space for file header, panic log, core log
        foffset = (kHdrOffset + log_size + kMaxCoreLog + 4095) & ~4095ULL;
        hdr.log_offset = kHdrOffset;
        hdr.gzip_offset = foffset;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            DEBG("KDP_SEEK(0x%x)\n", ret);
            goto out;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        outvars.outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.outlen = kdp_crashdump_pkt_size;
        outvars.outremain = outvars.outlen;
        outvars.zoutput = kdp_core_zoutputbuf;
#if WITH_CONSISTENT_DBG
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.outbuf = NULL;
        outvars.outlen = 0;
        outvars.outremain = 0;
        outvars.zoutput = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif
    }
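
    /*
     * deflateResetWithIO() below is the zlib extension carried in xnu's
     * libkern zlib: it resets the stream and installs kdp_core_zinput and
     * the variant's zoutput as I/O callbacks, letting deflate() pull input
     * and push compressed output directly instead of the caller juggling
     * avail_in/avail_out buffers.
     */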
    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars.zoutput);

    kdc_preflight.region_count = 0;
    kdc_preflight.dumpable_bytes = 0;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_preflight_callback,
            &kdc_preflight);
    if (ret)
    {
        DEBG("pmap traversal failed: %d\n", ret);
        return (ret);
    }

    outvars.totalbytes = kdc_preflight.dumpable_bytes;
    assert(outvars.totalbytes);
    segment_count = kdc_preflight.region_count;

    kdp_core_total_size = outvars.totalbytes;
    kdp_core_total_size_sent_uncomp = 0;

    kern_collectth_state_size(&tstate_count, &tstate_size);

    command_size = segment_count * sizeof(kernel_segment_command_t)
            + tstate_count * tstate_size
            + sizeof(struct ident_command) + sizeof(kdp_kernelversion_string);

    header_size = command_size + sizeof(kernel_mach_header_t);

    /*
     * Set up Mach-O header for currently executing kernel.
     */
    mh.magic = _mh_execute_header.magic;
    mh.cputype = _mh_execute_header.cputype;
    mh.cpusubtype = _mh_execute_header.cpusubtype;
    mh.filetype = MH_CORE;
    mh.ncmds = segment_count + tstate_count + 1; /* + 1 for the LC_IDENT command */
    mh.sizeofcmds = (uint32_t)command_size;
    mh.flags = 0;
#if defined(__LP64__)
    mh.reserved = 0;
#endif

    hoffset = 0;                                  /* offset into header */
    foffset = (uint64_t) round_page(header_size); /* offset into file */

    /*
     * Transmit the Mach-O MH_CORE header, and segment and thread commands.
     */
    if ((ret = kdp_core_stream_output(&outvars, sizeof(kernel_mach_header_t), (caddr_t) &mh)) != kIOReturnSuccess)
    {
        DEBG("KDP_DATA(0x%x)\n", ret);
        goto out;
    }

    hoffset += sizeof(kernel_mach_header_t);

    DEBG("%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local kernel core..." :
               "Transmitting kernel state, please wait:\n");

    kdc_sendseg.region_count = 0;
    kdc_sendseg.dumpable_bytes = 0;
    kdc_sendseg.hoffset = hoffset;
    kdc_sendseg.foffset = foffset;
    kdc_sendseg.header_size = header_size;

    if ((ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_send_seg_callback,
            &kdc_sendseg)) != kIOReturnSuccess)
    {
        DEBG("pmap_traverse_present_mappings(0x%x)\n", ret);
        goto out;
    }

    hoffset = kdc_sendseg.hoffset;

    /*
     * Now send out the LC_THREAD load command, with the thread information
     * for the current activation.
     */
    if (tstate_size > 0)
    {
        void * iter;
        char tstate[tstate_size];
        iter = NULL;
        do {
            kern_collectth_state(current_thread(), tstate, tstate_size, &iter);

            if ((ret = kdp_core_stream_output(&outvars, tstate_size, tstate)) != kIOReturnSuccess) {
                DEBG("kdp_core_stream_output(0x%x)\n", ret);
                goto out;
            }
        }
        while (iter);
    }

    ident.cmd = LC_IDENT;
    ident.cmdsize = (uint32_t) (sizeof(struct ident_command) + sizeof(kdp_kernelversion_string));
    if ((ret = kdp_core_stream_output(&outvars, sizeof(ident), &ident)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }
    if ((ret = kdp_core_stream_output(&outvars, sizeof(kdp_kernelversion_string), &kdp_kernelversion_string[0])) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }

    kdc_send.region_count = 0;
    kdc_send.dumpable_bytes = 0;
    foffset = (uint64_t) round_page(header_size); /* offset into file */
    kdc_send.foffset = foffset;
    kdc_send.hoffset = 0;
    foffset = round_page_64(header_size) - header_size;
    if (foffset)
    {
        // zero fill to page align
        if ((ret = kdp_core_stream_output(&outvars, foffset, NULL)) != kIOReturnSuccess) {
            DEBG("kdp_core_stream_output(0x%x)\n", ret);
            goto out;
        }
    }

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_send_segdata_callback,
            &kdc_send);
    if (ret) {
        DEBG("pmap_traverse_present_mappings(0x%x)\n", ret);
        goto out;
    }

    if ((ret = kdp_core_stream_output(&outvars, 0, NULL)) != kIOReturnSuccess) {
        DEBG("kdp_core_stream_output(0x%x)\n", ret);
        goto out;
    }

out:
    if (kIOReturnSuccess == ret) DEBG("success\n");
    else outvars.zipped = 0;

    DEBG("Mach-O header: %lu\n", header_size);
    DEBG("Region counts: [%u, %u, %u]\n", kdc_preflight.region_count,
            kdc_sendseg.region_count,
            kdc_send.region_count);
    DEBG("Byte counts : [%llu, %llu, %llu, %lu, %lu]\n", kdc_preflight.dumpable_bytes,
            kdc_sendseg.dumpable_bytes,
            kdc_send.dumpable_bytes,
            outvars.zipped,
            (long) (debug_buf_ptr - debug_buf_addr));
    if ((kd_variant == KERN_DUMP_DISK) && opened)
    {
        // write debug log
        foffset = kHdrOffset;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            DEBG("KDP_SEEK(0x%x)\n", ret);
            goto exit;
        }

        new_logs = debug_buf_ptr - log_start;
        if (new_logs > kMaxCoreLog) new_logs = kMaxCoreLog;
        buf = debug_buf_addr;
        if (debug_buf_stackshot_start)
        {
            if ((ret = (*outproc)(KDP_DATA, NULL, (debug_buf_stackshot_start - debug_buf_addr), debug_buf_addr)) != kIOReturnSuccess)
            {
                DEBG("KDP_DATA(0x%x)\n", ret);
                goto exit;
            }
            buf = debug_buf_stackshot_end;
        }
        if ((ret = (*outproc)(KDP_DATA, NULL, (log_start + new_logs - buf), buf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            goto exit;
        }

        // write header
        foffset = 0;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            DEBG("KDP_SEEK(0x%x)\n", ret);
            goto exit;
        }

        hdr.signature = MACH_CORE_FILEHEADER_SIGNATURE;
        hdr.log_length = new_logs + log_size;
        hdr.gzip_length = outvars.zipped;

        if ((ret = (*outproc)(KDP_DATA, NULL, sizeof(hdr), &hdr)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            goto exit;
        }
    }

exit:
    /* close / last packet */
    if (opened && (ret = (*outproc)(KDP_EOF, NULL, 0, NULL)) != kIOReturnSuccess)
    {
        DEBG("KDP_EOF(0x%x)\n", ret);
    }

    return (ret);
}

int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t dumped_local;
    if (kd_variant == KERN_DUMP_DISK) {
        if (dumped_local) return (0);
        dumped_local = TRUE;
        return (do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK));
#if WITH_CONSISTENT_DBG
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        return (do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG));
#endif
    }
#if CONFIG_KDP_INTERACTIVE_DEBUGGING
    return (do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET));
#else
    return (-1);
#endif
}

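/*
 * zalloc/zfree hooks handed to zlib: a simple bump allocator over the
 * preallocated kdp_core_zmem arena. Allocations are rounded up to 32-byte
 * alignment and never freed; kdp_core_init() sizes the arena via
 * zlib_deflate_memory_size(), so it cannot run out (the assert enforces
 * this).
 */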
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size)); // round up to 32 bytes for the vectorized crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}


#define LEVEL  Z_BEST_SPEED
#define NETBUF 1440
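
/*
 * Z_BEST_SPEED trades compression ratio for dump time, which matters since
 * the dump runs single-threaded at panic time. NETBUF reserves room in the
 * zlib arena for the network output buffer; 1440 bytes is presumably sized
 * to fit one Ethernet-framed KDP packet payload.
 */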

void
kdp_core_init(void)
{
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;
#if WITH_CONSISTENT_DBG
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
#endif

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree = kdp_core_zfree;

    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
                     wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }

#if WITH_CONSISTENT_DBG
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since Astris isn't
     * capable of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
                           0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and
     * adjust the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    kdp_hw_shmem_dbg_bufsize -= sizeof(*hwsd_info);
    kdp_hw_shmem_dbg_bufsize = (kdp_hw_shmem_dbg_bufsize / KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS);
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % OPTIMAL_ASTRIS_READSIZE);
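
    /*
     * Worked example, assuming (hypothetically) a 64-byte info structure:
     * (65536 - 64) / 2 = 32736 bytes per buffer, trimmed down to the nearest
     * multiple of OPTIMAL_ASTRIS_READSIZE: 32736 - (32736 % 4064) = 32512,
     * i.e. exactly 8 Astris reads of 4064 bytes per buffer.
     */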

    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = kalloc(sizeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
                                &kdp_hw_shmem_dbg_contact_deadline_interval);

    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* WITH_CONSISTENT_DBG */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */