]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kdp/kdp_core.c
xnu-3248.60.10.tar.gz
[apple/xnu.git] / osfmk / kdp / kdp_core.c
CommitLineData
3e170ce0
A
1/*
2 * Copyright (c) 2015 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
30
31#include <mach/mach_types.h>
32#include <mach/vm_attributes.h>
33#include <mach/vm_param.h>
34#include <mach/vm_map.h>
35#include <vm/vm_protos.h>
36#include <vm/vm_kern.h>
37#include <vm/vm_map.h>
38#include <libsa/types.h>
39#include <libkern/kernel_mach_header.h>
40#include <libkern/zlib.h>
41#include <kdp/kdp_internal.h>
42#include <kdp/kdp_core.h>
43#include <IOKit/IOPolledInterface.h>
44#include <IOKit/IOBSD.h>
45#include <sys/errno.h>
46#include <sys/msgbuf.h>
47
48#if defined(__i386__) || defined(__x86_64__)
49#include <i386/pmap_internal.h>
50#include <kdp/ml/i386/kdp_x86_common.h>
51#endif /* defined(__i386__) || defined(__x86_64__) */
52
53
54
55typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
56 vm_map_offset_t end,
57 void *context);
58
59extern int pmap_traverse_present_mappings(pmap_t pmap,
60 vm_map_offset_t start,
61 vm_map_offset_t end,
62 pmap_traverse_callback callback,
63 void *context);
64
65
66static int
67kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
68 vm_map_offset_t end,
69 void *context);
70static int
71kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start,
72 vm_map_offset_t end,
73 void *context);
74static int
75kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
76 vm_map_offset_t end,
77 void *context);
78
struct kdp_core_out_vars;

/*
 * Writer callback used for both dump targets: the local polled-mode disk
 * file (kern_dump_disk_proc) and the KDP network transmitter.  `request`
 * is one of KDP_WRQ / KDP_SEEK / KDP_DATA / KDP_EOF.
 */
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);

/* State shared between the zlib input/output callbacks and do_kern_dump(). */
struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;  /* destination writer (disk or network) */
    z_output_func         zoutput;  /* zlib output hook: kdp_core_zoutput or kdp_core_zoutputbuf */
    size_t                zipped;   /* total compressed bytes emitted so far */
    uint64_t          totalbytes;   /* total uncompressed bytes expected (progress reporting) */
    uint64_t          lastpercent;  /* last progress percentage printed */
    IOReturn              error;    /* first error seen; < 0 aborts further output */
    unsigned              outremain; /* free bytes left in outbuf (network path) */
    unsigned              outlen;   /* capacity of outbuf (network path) */
    unsigned              writes;   /* input-chunk counter, throttles progress printing */
    Bytef *               outbuf;   /* packet staging buffer; NULL on the disk path */
};

/* Accumulator for the first (counting) pmap traversal pass. */
struct kern_dump_preflight_context
{
    uint32_t region_count;          /* number of contiguous present regions */
    uint64_t dumpable_bytes;        /* total bytes across those regions */
};

/* Accumulator for the segment-command and segment-data traversal passes. */
struct kern_dump_send_context
{
    struct kdp_core_out_vars * outvars; /* shared compression/output state */
    uint64_t hoffset;               /* current offset into the Mach-O header area */
    uint64_t foffset;               /* current offset into the core file payload */
    uint64_t header_size;           /* total Mach-O header size (bounds check) */
    uint64_t dumpable_bytes;        /* bytes described/sent so far */
    uint32_t region_count;          /* regions described/sent so far */
};

extern uint32_t kdp_crashdump_pkt_size;

/* zlib working memory: one kmem allocation carved up by kdp_core_zalloc(). */
static vm_offset_t kdp_core_zmem;   /* base of the allocation */
static size_t kdp_core_zsize;       /* size of the allocation */
static size_t kdp_core_zoffset;     /* bump-allocator watermark within it */
static z_stream kdp_core_zs;        /* the single deflate stream, reused per dump */


#define DEBG kdb_printf
122
123boolean_t kdp_has_polled_corefile(void)
124{
125 return (NULL != gIOPolledCoreFileVars);
126}
127
128static IOReturn
129kern_dump_disk_proc(unsigned int request, __unused char *corename,
130 uint64_t length, void * data)
131{
132 uint64_t noffset;
133 uint32_t err = kIOReturnSuccess;
134
135 switch (request)
136 {
137 case KDP_WRQ:
138 err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
139 if (kIOReturnSuccess != err) break;
140 err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
141 break;
142
143 case KDP_SEEK:
144 noffset = *((uint64_t *) data);
145 err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
146 if (kIOReturnSuccess != err) break;
147 err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
148 break;
149
150 case KDP_DATA:
151 err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
152 if (kIOReturnSuccess != err) break;
153 break;
154
155 case KDP_EOF:
156 err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
157 if (kIOReturnSuccess != err) break;
158 err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
159 if (kIOReturnSuccess != err) break;
160 break;
161 }
162
163 return (err);
164}
165
166static int
167kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
168{
169 struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
170 IOReturn ret;
171
172 vars->zipped += len;
173
174 if (vars->error >= 0)
175 {
176 if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
177 {
178 DEBG("KDP_DATA(0x%x)\n", ret);
179 vars->error = ret;
180 }
181 if (!buf && !len) DEBG("100..");
182 }
183 return (len);
184}
185
/*
 * zlib output callback for the network path.  Compressed bytes are staged
 * into vars->outbuf and transmitted in fixed packet-sized writes
 * (vars->outlen == kdp_crashdump_pkt_size); a NULL/0 call flushes the
 * final partial packet.  Always reports `inlen` consumed.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned remain;
    IOReturn ret;
    unsigned chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    /* NULL buffer with zero length means "flush whatever is staged" */
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        /* copy as much input as still fits into the staging buffer */
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
        vars->outremain -= chunk;
        remain -= chunk;
        inbuf += chunk;

        /* buffer not full yet and not flushing: wait for more input */
        if (vars->outremain && !flush) break;
        /* send the (full or final partial) packet */
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                                    vars->outlen - vars->outremain,
                                    vars->outbuf)) != kIOReturnSuccess)
        {
            DEBG("KDP_DATA(0x%x)\n", ret);
            vars->error = ret;
        }
        if (flush)
        {
            /* final packet written; leave the loop after resetting state */
            DEBG("100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}
225
226static int
227kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
228{
229 struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
230 uint64_t percent;
231 unsigned len;
232
233 len = strm->avail_in;
234 if (len > size) len = size;
235 if (len == 0) return 0;
236
237 if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
238 else bzero(buf, len);
239 strm->adler = z_crc32(strm->adler, buf, len);
240
241 strm->avail_in -= len;
242 strm->next_in += len;
243 strm->total_in += len;
244
245 if (0 == (511 & vars->writes++))
246 {
247 percent = (strm->total_in * 100) / vars->totalbytes;
248 if ((percent - vars->lastpercent) >= 10)
249 {
250 vars->lastpercent = percent;
251 DEBG("%lld..", percent);
252 }
253 }
254
255 return (int)len;
256}
257
/*
 * Push `length` bytes at `data` through the gzip deflate stream.
 *   data != NULL          : compress those bytes.
 *   data == NULL, length  : emit `length` zero bytes (zero-fill, see
 *                           kdp_core_zinput's marker convention).
 *   data == NULL, len == 0: finish the stream (Z_FINISH) and flush the
 *                           output callback.
 * Returns vars->error (negative once anything has failed).
 */
static IOReturn
kdp_core_stream_output(struct kdp_core_out_vars * vars, uint64_t length, void * data)
{
    z_stream * zs;
    int zr;
    boolean_t flush;

    flush = (!length && !data);
    zr = Z_OK;

    zs = &kdp_core_zs;
    /* previous input must have been fully consumed by deflate */
    assert(!zs->avail_in);

    while (vars->error >= 0)
    {
        if (!zs->avail_in && !flush)
        {
            if (!length) break;
            /* pointing next_in at the stream itself marks "zero fill" */
            zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
            zs->avail_in = (uInt)length;
            length = 0;
        }
        if (!zs->avail_out)
        {
            /* output goes through the zoutput callback; next_out is a dummy */
            zs->next_out = (Bytef *) zs;
            zs->avail_out = UINT32_MAX;
        }
        zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
        if (Z_STREAM_END == zr) break;
        if (zr != Z_OK)
        {
            DEBG("ZERR %d\n", zr);
            vars->error = zr;
        }
    }

    /* end of stream: give the output callback its final NULL/0 flush call */
    if (flush) (*vars->zoutput)(zs, NULL, 0);

    return (vars->error);
}
298
299extern vm_offset_t c_buffers;
300extern vm_size_t c_buffers_size;
301
302ppnum_t
303kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr)
304{
305 ppnum_t ppn;
306 uint64_t vincr;
307 vincr = PAGE_SIZE_64;
308
309 assert(!(vaddr & PAGE_MASK_64));
310
311 /* VA ranges to exclude */
312 if (vaddr == c_buffers)
313 {
314 /* compressor data */
315 ppn = 0;
316 vincr = c_buffers_size;
317 }
318 else if (vaddr == kdp_core_zmem)
319 {
320 /* zlib working memory */
321 ppn = 0;
322 vincr = kdp_core_zsize;
323 }
324 else
325 ppn = pmap_find_phys(kernel_pmap, vaddr);
326
327 *pvincr = vincr;
328 return (ppn);
329}
330
/*
 * Walk kernel VA space [start, end), coalescing runs of present, dumpable
 * pages into contiguous regions and invoking `callback` once per region.
 * Pages outside the debug buffer that fail pmap_valid_page() are treated
 * as absent.  On x86, absent 2MB-aligned holes are skipped a whole page
 * directory entry at a time.  Returns KERN_SUCCESS or the first callback
 * error.
 */
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t vincr;
    vm_map_offset_t debug_start;
    vm_map_offset_t debug_end;
    boolean_t lastvavalid;

    debug_start = trunc_page((vm_map_offset_t) debug_buf_addr);
    debug_end   = round_page((vm_map_offset_t) (debug_buf_addr + debug_buf_size));

#if defined(__i386__) || defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;                 /* TRUE while inside a present run */
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn;

        ppn = kernel_pmap_present_mapping(vcur, &vincr);
        if (ppn != 0)
        {
            /* debug buffer pages are always kept; everything else must be
             * a managed, valid page */
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !pmap_valid_page(ppn))
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__i386__) || defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__i386__) || defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}
408
409int
410kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
411 vm_map_offset_t end,
412 void *context)
413{
414 struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
415 IOReturn ret = KERN_SUCCESS;
416
417 kdc->region_count++;
418 kdc->dumpable_bytes += (end - start);
419
420 return (ret);
421}
422
423int
424kern_dump_pmap_traverse_send_seg_callback(vm_map_offset_t start,
425 vm_map_offset_t end,
426 void *context)
427{
428 struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
429 IOReturn ret = KERN_SUCCESS;
430 kernel_segment_command_t sc;
431 vm_size_t size = (vm_size_t)(end - start);
432
433 if (kdc->hoffset + sizeof(sc) > kdc->header_size) {
434 return (KERN_NO_SPACE);
435 }
436
437 kdc->region_count++;
438 kdc->dumpable_bytes += (end - start);
439
440 /*
441 * Fill in segment command structure.
442 */
443
444 sc.cmd = LC_SEGMENT_KERNEL;
445 sc.cmdsize = sizeof(kernel_segment_command_t);
446 sc.segname[0] = 0;
447 sc.vmaddr = (vm_address_t)start;
448 sc.vmsize = size;
449 sc.fileoff = (vm_address_t)kdc->foffset;
450 sc.filesize = size;
451 sc.maxprot = VM_PROT_READ;
452 sc.initprot = VM_PROT_READ;
453 sc.nsects = 0;
454 sc.flags = 0;
455
456 if ((ret = kdp_core_stream_output(kdc->outvars, sizeof(kernel_segment_command_t), (caddr_t) &sc)) != kIOReturnSuccess) {
457 DEBG("kdp_core_stream_output(0x%x)\n", ret);
458 goto out;
459 }
460
461 kdc->hoffset += sizeof(kernel_segment_command_t);
462 kdc->foffset += size;
463
464out:
465 return (ret);
466}
467
468
469int
470kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
471 vm_map_offset_t end,
472 void *context)
473{
474 struct kern_dump_send_context *kdc = (struct kern_dump_send_context *)context;
475 int ret = KERN_SUCCESS;
476 vm_size_t size = (vm_size_t)(end - start);
477
478 kdc->region_count++;
479 kdc->dumpable_bytes += size;
480 if ((ret = kdp_core_stream_output(kdc->outvars, (unsigned int)size, (caddr_t)(uintptr_t)start)) != kIOReturnSuccess) {
481 DEBG("kdp_core_stream_output(0x%x)\n", ret);
482 goto out;
483 }
484 kdc->foffset += size;
485
486out:
487 return (ret);
488}
489
490static int
491do_kern_dump(kern_dump_output_proc outproc, bool local)
492{
493 struct kern_dump_preflight_context kdc_preflight;
494 struct kern_dump_send_context kdc_sendseg;
495 struct kern_dump_send_context kdc_send;
496 struct kdp_core_out_vars outvars;
497 struct mach_core_fileheader hdr;
498 kernel_mach_header_t mh;
499 uint32_t segment_count, tstate_count;
500 size_t command_size = 0, header_size = 0, tstate_size = 0;
501 uint64_t hoffset, foffset;
502 int ret;
503 char * log_start;
504 uint64_t log_length;
505 uint64_t new_logs;
506 boolean_t opened;
507
508 opened = false;
509 log_start = debug_buf_ptr;
510 log_length = 0;
511 if (log_start >= debug_buf_addr)
512 {
513 log_length = log_start - debug_buf_addr;
514 if (log_length <= debug_buf_size) log_length = debug_buf_size - log_length;
515 else log_length = 0;
516 }
517
518 if (local)
519 {
520 if ((ret = (*outproc)(KDP_WRQ, NULL, 0, &hoffset)) != kIOReturnSuccess) {
521 DEBG("KDP_WRQ(0x%x)\n", ret);
522 goto out;
523 }
524 }
525 opened = true;
526
527 // init gzip
528 bzero(&outvars, sizeof(outvars));
529 bzero(&hdr, sizeof(hdr));
530 outvars.outproc = outproc;
531 kdp_core_zs.avail_in = 0;
532 kdp_core_zs.next_in = NULL;
533 kdp_core_zs.avail_out = 0;
534 kdp_core_zs.next_out = NULL;
535 kdp_core_zs.opaque = &outvars;
536 kdc_sendseg.outvars = &outvars;
537 kdc_send.outvars = &outvars;
538
539 if (local)
540 {
541 outvars.outbuf = NULL;
542 outvars.outlen = 0;
543 outvars.outremain = 0;
544 outvars.zoutput = kdp_core_zoutput;
545 // space for file header & log
546 foffset = (4096 + log_length + 4095) & ~4095ULL;
547 hdr.log_offset = 4096;
548 hdr.gzip_offset = foffset;
549 if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
550 DEBG("KDP_SEEK(0x%x)\n", ret);
551 goto out;
552 }
553 }
554 else
555 {
556 outvars.outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
557 assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
558 outvars.outlen = kdp_crashdump_pkt_size;
559 outvars.outremain = outvars.outlen;
560 outvars.zoutput = kdp_core_zoutputbuf;
561 }
562
563 deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars.zoutput);
564
565
566 kdc_preflight.region_count = 0;
567 kdc_preflight.dumpable_bytes = 0;
568
569 ret = pmap_traverse_present_mappings(kernel_pmap,
570 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
571 VM_MAX_KERNEL_ADDRESS,
572 kern_dump_pmap_traverse_preflight_callback,
573 &kdc_preflight);
574 if (ret)
575 {
576 DEBG("pmap traversal failed: %d\n", ret);
577 return (ret);
578 }
579
580 outvars.totalbytes = kdc_preflight.dumpable_bytes;
581 assert(outvars.totalbytes);
582 segment_count = kdc_preflight.region_count;
583
584 kern_collectth_state_size(&tstate_count, &tstate_size);
585
586 command_size = segment_count * sizeof(kernel_segment_command_t) + tstate_count * tstate_size;
587
588 header_size = command_size + sizeof(kernel_mach_header_t);
589
590 /*
591 * Set up Mach-O header for currently executing kernel.
592 */
593
594 mh.magic = _mh_execute_header.magic;
595 mh.cputype = _mh_execute_header.cputype;;
596 mh.cpusubtype = _mh_execute_header.cpusubtype;
597 mh.filetype = MH_CORE;
598 mh.ncmds = segment_count + tstate_count;
599 mh.sizeofcmds = (uint32_t)command_size;
600 mh.flags = 0;
601#if defined(__LP64__)
602 mh.reserved = 0;
603#endif
604
605 hoffset = 0; /* offset into header */
606 foffset = (uint64_t) round_page(header_size); /* offset into file */
607
608 /* Transmit the Mach-O MH_CORE header, and segment and thread commands
609 */
610 if ((ret = kdp_core_stream_output(&outvars, sizeof(kernel_mach_header_t), (caddr_t) &mh) != kIOReturnSuccess))
611 {
612 DEBG("KDP_DATA(0x%x)\n", ret);
613 goto out;
614 }
615
616 hoffset += sizeof(kernel_mach_header_t);
617
618 DEBG("%s", local ? "Writing local kernel core..." :
619 "Transmitting kernel state, please wait:\n");
620
621 kdc_sendseg.region_count = 0;
622 kdc_sendseg.dumpable_bytes = 0;
623 kdc_sendseg.hoffset = hoffset;
624 kdc_sendseg.foffset = foffset;
625 kdc_sendseg.header_size = header_size;
626
627 if ((ret = pmap_traverse_present_mappings(kernel_pmap,
628 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
629 VM_MAX_KERNEL_ADDRESS,
630 kern_dump_pmap_traverse_send_seg_callback,
631 &kdc_sendseg)) != kIOReturnSuccess)
632 {
633 DEBG("pmap_traverse_present_mappings(0x%x)\n", ret);
634 goto out;
635 }
636
637 hoffset = kdc_sendseg.hoffset;
638 /*
639 * Now send out the LC_THREAD load command, with the thread information
640 * for the current activation.
641 */
642
643 if (tstate_size > 0)
644 {
645 void * iter;
646 char tstate[tstate_size];
647 iter = NULL;
648 do {
649 /*
650 * Now send out the LC_THREAD load command, with the thread information
651 */
652 kern_collectth_state (current_thread(), tstate, tstate_size, &iter);
653
654 if ((ret = kdp_core_stream_output(&outvars, tstate_size, tstate)) != kIOReturnSuccess) {
655 DEBG("kdp_core_stream_output(0x%x)\n", ret);
656 goto out;
657 }
658 }
659 while (iter);
660 }
661
662 kdc_send.region_count = 0;
663 kdc_send.dumpable_bytes = 0;
664 foffset = (uint64_t) round_page(header_size); /* offset into file */
665 kdc_send.foffset = foffset;
666 kdc_send.hoffset = 0;
667 foffset = round_page_64(header_size) - header_size;
668 if (foffset)
669 {
670 // zero fill to page align
671 if ((ret = kdp_core_stream_output(&outvars, foffset, NULL)) != kIOReturnSuccess) {
672 DEBG("kdp_core_stream_output(0x%x)\n", ret);
673 goto out;
674 }
675 }
676
677 ret = pmap_traverse_present_mappings(kernel_pmap,
678 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
679 VM_MAX_KERNEL_ADDRESS,
680 kern_dump_pmap_traverse_send_segdata_callback,
681 &kdc_send);
682 if (ret) {
683 DEBG("pmap_traverse_present_mappings(0x%x)\n", ret);
684 goto out;
685 }
686
687 if ((ret = kdp_core_stream_output(&outvars, 0, NULL) != kIOReturnSuccess)) {
688 DEBG("kdp_core_stream_output(0x%x)\n", ret);
689 goto out;
690 }
691
692out:
693 if (kIOReturnSuccess == ret) DEBG("success\n");
694 else outvars.zipped = 0;
695
696 DEBG("Mach-o header: %lu\n", header_size);
697 DEBG("Region counts: [%u, %u, %u]\n", kdc_preflight.region_count,
698 kdc_sendseg.region_count,
699 kdc_send.region_count);
700 DEBG("Byte counts : [%llu, %llu, %llu, %lu, %llu]\n", kdc_preflight.dumpable_bytes,
701 kdc_sendseg.dumpable_bytes,
702 kdc_send.dumpable_bytes,
703 outvars.zipped, log_length);
704 if (local && opened)
705 {
706 // write debug log
707 foffset = 4096;
708 if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
709 DEBG("KDP_SEEK(0x%x)\n", ret);
710 goto exit;
711 }
712
713 new_logs = debug_buf_ptr - log_start;
714 if (new_logs > log_length) new_logs = log_length;
715
716 if ((ret = (*outproc)(KDP_DATA, NULL, new_logs, log_start)) != kIOReturnSuccess)
717 {
718 DEBG("KDP_DATA(0x%x)\n", ret);
719 goto exit;
720 }
721
722 // write header
723
724 foffset = 0;
725 if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
726 DEBG("KDP_SEEK(0x%x)\n", ret);
727 goto exit;
728 }
729
730 hdr.signature = MACH_CORE_FILEHEADER_SIGNATURE;
731 hdr.log_length = new_logs;
732 hdr.gzip_length = outvars.zipped;
733
734 if ((ret = (*outproc)(KDP_DATA, NULL, sizeof(hdr), &hdr)) != kIOReturnSuccess)
735 {
736 DEBG("KDP_DATA(0x%x)\n", ret);
737 goto exit;
738 }
739 }
740
741exit:
742 /* close / last packet */
743 if ((ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess)
744 {
745 DEBG("KDP_EOF(0x%x)\n", ret);
746 }
747
748
749 return (ret);
750}
751
752int
753kern_dump(boolean_t local)
754{
755 static boolean_t dumped_local;
756 if (local) {
757 if (dumped_local) return (0);
758 dumped_local = TRUE;
759 return (do_kern_dump(&kern_dump_disk_proc, true));
760 }
761#if CONFIG_KDP_INTERACTIVE_DEBUGGING
762 return (do_kern_dump(&kdp_send_crashdump_data, false));
763#else
764 return (-1);
765#endif
766}
767
/*
 * zlib allocator: bump-allocate out of the pre-reserved kdp_core_zmem
 * region (no heap use on the panic path).  Each allocation is rounded up
 * to a 32-byte multiple so the vectorized CRC code sees aligned buffers.
 * Memory is never freed (see kdp_core_zfree).
 */
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}
779
/* zlib deallocator: intentionally a no-op — kdp_core_zalloc memory is a
 * one-shot arena reused across dumps, never individually freed. */
static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}
782
783
#define LEVEL Z_BEST_SPEED   /* favor speed: dumps run on the panic path */
#define NETBUF 1440          /* staging space for one network packet payload */

/*
 * One-time setup of the core-dump compressor: reserve a wired region large
 * enough for one network packet buffer plus zlib's deflate state, and
 * initialize the gzip-mode deflate stream using the arena allocator above.
 * Safe to call more than once; subsequent calls return immediately.
 */
void
kdp_core_init(void)
{
    /* small window/memory settings keep zlib's footprint panic-safe */
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert (KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree = kdp_core_zfree;

    /* wbits + 16 selects gzip framing (header + CRC) instead of raw zlib */
    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
                     wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY))
    {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }
}
812
813#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */