+ }
+
+ kfree(mapptr, mapsize);
+ } else {
+ ret = EINVAL;
+ }
+
+ return ret;
+}
+
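+ /*
+ * Write a RAW_VERSION1 header to the vnode, optionally followed by the
+ * thread map, and embed a cpumap in the padding that page aligns the
+ * event data which follows.
+ */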
+static int
+kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx)
+{
+ int ret = 0;
+ RAW_header header;
+ clock_sec_t secs;
+ clock_usec_t usecs;
+ char *pad_buf;
+ uint32_t pad_size;
+ uint32_t extra_thread_count = 0;
+ uint32_t cpumap_size;
+ size_t map_size = 0;
+ uint32_t map_count = 0;
+
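+ /*
+ * Validate the thread map size up front: the entry count must fit in
+ * 32 bits and the byte size must fit in the int passed to vn_rdwr().
+ */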
+ if (write_thread_map) {
+ assert(kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);
+ if (kd_mapcount > UINT32_MAX) {
+ return ERANGE;
+ }
+ map_count = (uint32_t)kd_mapcount;
+ if (os_mul_overflow(map_count, sizeof(kd_threadmap), &map_size)) {
+ return ERANGE;
+ }
+ if (map_size >= INT_MAX) {
+ return ERANGE;
+ }
+ }
+
+ /*
+ * Without the buffers initialized, we cannot construct a CPU map or a
+ * thread map, and cannot write a header.
+ */
+ if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) {
+ return EINVAL;
+ }
+
+ /*
+ * To write a RAW_VERSION1+ file, we must embed a cpumap in the
+ * "padding" used to page align the events that follow the threadmap. If
+ * the threadmap does not leave enough padding on its own, we artificially
+ * inflate its footprint until the padding is large enough to hold the
+ * cpumap.
+ */
+
+ assert(vp);
+ assert(ctx);
+
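+ /*
+ * Compute the padding needed to align the event data to a 16K page
+ * boundary, and the space required by the cpumap that must fit within
+ * that padding.
+ */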
+ pad_size = PAGE_16KB - ((sizeof(RAW_header) + map_size) & PAGE_MASK);
+ cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
+
+ if (cpumap_size > pad_size) {
+ /* If the cpu map doesn't fit in the currently available pad_size,
+ * we increase the pad_size by 16K. We do this so that the event
+ * data is always available on a page-aligned boundary for both
+ * 4k and 16k systems. We enforce this alignment for the event
+ * data so that we can take advantage of optimized file/disk writes.
+ */
+ pad_size += PAGE_16KB;
+ }
+
+ /* The cpumap is silently embedded in the "padding" by artificially
+ * increasing the number of thread entries. However, we also need to ensure
+ * that the cpumap lands in the last 4K page before the event data. This way
+ * the tools can read the event data starting at the next page boundary on
+ * both 4K and 16K systems, preserving compatibility with older versions of
+ * the tools.
+ */
+ if (pad_size > PAGE_4KB) {
+ pad_size -= PAGE_4KB;
+ extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
+ }
+
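+ /*
+ * Fill in the RAW_VERSION1 header. The reported thread count includes
+ * the artificial entries that reserve padding for the embedded cpumap.
+ */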
+ memset(&header, 0, sizeof(header));
+ header.version_no = RAW_VERSION1;
+ header.thread_count = map_count + extra_thread_count;
+
+ clock_get_calendar_microtime(&secs, &usecs);
+ header.TOD_secs = secs;
+ header.TOD_usecs = usecs;
+
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, (int)sizeof(RAW_header), RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ if (ret) {
+ goto write_error;
+ }
+ RAW_file_offset += sizeof(RAW_header);
+ RAW_file_written += sizeof(RAW_header);
+
+ if (write_thread_map) {
+ assert(map_size < INT_MAX);
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, (int)map_size, RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ if (ret) {
+ goto write_error;
+ }
+
+ RAW_file_offset += map_size;
+ RAW_file_written += map_size;
+ }
+
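+ /*
+ * Write the artificial thread map entries as zero-filled padding so
+ * that the cpumap lands in the last page before the event data.
+ */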
+ if (extra_thread_count) {
+ pad_size = extra_thread_count * sizeof(kd_threadmap);
+ pad_buf = kheap_alloc(KHEAP_TEMP, pad_size, Z_WAITOK | Z_ZERO);
+ if (!pad_buf) {
+ ret = ENOMEM;
+ goto write_error;
+ }
+
+ assert(pad_size < INT_MAX);
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, (int)pad_size, RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ kheap_free(KHEAP_TEMP, pad_buf, pad_size);
+ if (ret) {
+ goto write_error;
+ }
+
+ RAW_file_offset += pad_size;
+ RAW_file_written += pad_size;
+ }
+
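+ /*
+ * Pad out to the next page boundary so the event data that follows is
+ * page aligned, and carry the cpumap in these final padding bytes.
+ */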
+ pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK);
+ if (pad_size) {
+ pad_buf = (char *)kheap_alloc(KHEAP_TEMP, pad_size, Z_WAITOK | Z_ZERO);
+ if (!pad_buf) {
+ ret = ENOMEM;
+ goto write_error;
+ }
+
+ /*
+ * Embed a cpumap in the padding bytes. Older code will skip this;
+ * newer code will know how to read it.
+ */
+ uint32_t temp = pad_size;
+ if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
+ memset(pad_buf, 0, pad_size);
+ }
+
+ assert(pad_size < INT_MAX);
+ ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, (int)pad_size, RAW_file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
+ kheap_free(KHEAP_TEMP, pad_buf, pad_size);
+ if (ret) {
+ goto write_error;
+ }
+
+ RAW_file_offset += pad_size;
+ RAW_file_written += pad_size;
+ }
+
+write_error:
+ return ret;
+}
+
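+ /*
+ * Free the current thread map, if one is initialized, and clear the
+ * KDBG_MAPINIT flag.
+ */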
+static void
+kdbg_clear_thread_map(void)
+{
+ ktrace_assert_lock_held();
+
+ if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) {
+ assert(kd_mapptr != NULL);
+ kfree(kd_mapptr, kd_mapsize);
+ kd_mapptr = NULL;
+ kd_mapsize = 0;
+ kd_mapcount = 0;
+ kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
+ }
+}
+
+/*
+ * Write out a version 1 header and the thread map, if it is initialized, to a
+ * vnode. Used by KDWRITEMAP and kdbg_dump_trace_to_file.
+ *
+ * Returns write errors from vn_rdwr if a write fails. Returns ENODATA if the
+ * thread map has not been initialized, but the header will still be written.
+ * Returns ENOMEM if padding could not be allocated. Returns 0 otherwise.
+ */
+static int
+kdbg_write_thread_map(vnode_t vp, vfs_context_t ctx)
+{
+ int ret = 0;
+ bool map_initialized;
+
+ ktrace_assert_lock_held();
+ assert(ctx != NULL);
+
+ map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT);