/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/kern_cdata.h>
#include <kern/kalloc.h>
#include <mach/mach_vm.h>

static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
static size_t kcdata_get_memory_size_for_data(uint32_t size);
static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);

/*
 * zlib will need space to store its metadata; this value is independent of
 * the window bits and other zlib internals
 */
#define ZLIB_METADATA_SIZE 1440

/* #define kcdata_debug_printf printf */
#define kcdata_debug_printf(...) ;

#pragma pack(push, 4)

/* Internal structs for convenience */
struct _uint64_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint64_t data;
};

struct _uint32_with_description_data {
	char desc[KCDATA_DESC_MAXLEN];
	uint32_t data;
};

#pragma pack(pop)

/*
 * Estimates how large a buffer should be allocated to hold num_items items
 * of known types with an overall payload length of payload_size.
 *
 * NOTE: This function will not give an accurate estimate for buffers that will
 * contain unknown types (those with string descriptions).
 */
uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
{
	/*
	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
	 */
	uint32_t max_padding_bytes = 0;
	uint32_t max_padding_with_item_description_bytes = 0;
	uint32_t estimated_required_buffer_size = 0;
	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);

	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	return estimated_required_buffer_size;
}
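
/*
 * Sizing sketch (illustrative; the item count and payload below are made up):
 *
 *	uint32_t payload = 3 * sizeof(uint64_t);
 *	uint32_t bufsize = kcdata_estimate_required_buffer_size(3, payload);
 *
 * The estimate covers the item headers, worst-case per-item padding, and the
 * BEGIN/END markers for the requested payload.
 */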

kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
	kcdata_descriptor_t data = NULL;
	mach_vm_address_t user_addr = 0;
	uint16_t clamped_flags = (uint16_t) flags;

	data = kalloc_flags(sizeof(struct kcdata_descriptor), Z_WAITOK | Z_ZERO);
	if (data == NULL) {
		return NULL;
	}
	data->kcd_addr_begin = buffer_addr_p;
	data->kcd_addr_end = buffer_addr_p;
	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
	data->kcd_length = size;

	/* Initialize the BEGIN header */
	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
		kcdata_memory_destroy(data);
		return NULL;
	}

	return data;
}
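
/*
 * Setup sketch (illustrative; the buffer is a hypothetical caller-owned
 * allocation and KCDATA_BUFFER_BEGIN_STACKSHOT is just one possible begin
 * marker type):
 *
 *	kcdata_descriptor_t kcd = kcdata_memory_alloc_init(
 *	    (mach_vm_address_t) buf, KCDATA_BUFFER_BEGIN_STACKSHOT, bufsize, 0);
 *	...
 *	kcdata_memory_destroy(kcd);	// frees the descriptor, not buf
 */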

kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
	mach_vm_address_t user_addr = 0;
	uint16_t clamped_flags = (uint16_t) flags;

	if (data == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	bzero(data, sizeof(struct kcdata_descriptor));
	data->kcd_addr_begin = buffer_addr_p;
	data->kcd_addr_end = buffer_addr_p;
	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
	data->kcd_length = size;

	/* Initialize the BEGIN header */
	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
}

void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
{
	if (data == NULL) {
		return NULL;
	}

	return (void *)data->kcd_addr_begin;
}

uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
{
	assert(kcd != NULL);
	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
}

uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
{
	kern_return_t kr;

	assert(kcd != NULL);
	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
		uint64_t totalout, totalin;

		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
		if (kr == KERN_SUCCESS) {
			return totalin;
		} else {
			return 0;
		}
	} else {
		/* If compression wasn't used, get the number of bytes used */
		return kcdata_memory_get_used_bytes(kcd);
	}
}

/*
 * Free up the memory associated with kcdata
 */
kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * data->kcd_addr_begin points to memory that is not tracked by the
	 * kcdata lib, so it is not cleared here.
	 */
	kfree(data, sizeof(struct kcdata_descriptor));
	return KERN_SUCCESS;
}

/* Used by zlib to allocate space in its metadata section */
static void *
kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
{
	void *result;
	struct kcdata_compress_descriptor *cd = opaque;
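	/* round the request up to a 32-byte multiple so successive bump
	 * allocations from the scratch area stay aligned */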
	int alloc_size = ~31L & (31 + (items * size));

	result = (void *)(cd->kcd_cd_base + cd->kcd_cd_offset);
	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
		result = Z_NULL;
	} else {
		cd->kcd_cd_offset += alloc_size;
	}

	kcdata_debug_printf("%s: %d * %d = %d => %p\n", __func__, items, size, items * size, result);

	return result;
}

/* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	(void) opaque;
	(void) ptr;

	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * Since the buffers we are using are temporary, we don't worry about
	 * freeing memory for now. Besides, testing has shown that zlib only calls
	 * this at the end, near deflateEnd() or a Z_FINISH deflate() call.
	 */
}

/* Used to initialize the selected compression algorithm's internal state (if any) */
static kern_return_t
kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
{
	kern_return_t ret = KERN_SUCCESS;
	size_t size;
	int wbits = 12, memlevel = 3;
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	cd->kcd_cd_memcpy_f = memcpy_f;
	cd->kcd_cd_compression_type = type;
	cd->kcd_cd_totalout_addr = totalout_addr;
	cd->kcd_cd_totalin_addr = totalin_addr;

	switch (type) {
	case KCDCT_ZLIB:
		/* allocate space for the metadata used by zlib */
		size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
		kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
		kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);

		if (4 * size > data->kcd_length) {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}

		cd->kcd_cd_zs.avail_in = 0;
		cd->kcd_cd_zs.next_in = NULL;
		cd->kcd_cd_zs.avail_out = 0;
		cd->kcd_cd_zs.next_out = NULL;
		cd->kcd_cd_zs.opaque = cd;
		cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
		cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
		cd->kcd_cd_base = (void *) data->kcd_addr_begin + data->kcd_length - size;
		data->kcd_length -= size;
		cd->kcd_cd_offset = 0;
		cd->kcd_cd_maxoffset = size;
		cd->kcd_cd_flags = 0;

		kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);

		if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
			kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
			ret = KERN_INVALID_ARGUMENT;
		}
		break;
	default:
		panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
	}

	return ret;
}

/*
 * Turn on the compression logic for kcdata
 */
kern_return_t
kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
{
	kern_return_t kr;
	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
	struct _uint64_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);

	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);

	/* reset the compression descriptor */
	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));

	/* add the header information */
	kcdata_add_uint64_with_description(data, type, "kcd_c_type");

	/* reserve space to write total out */
	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	memcpy((void *)totalout_addr, &save_data, size_req);

	/* space for total in */
	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	memcpy((void *)totalin_addr, &save_data, size_req);

	/* add the inner buffer */
	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);

	/* save the flag */
	data->kcd_flags |= KCFLAG_USE_COMPRESSION;

	/* initialize algorithm specific state */
	kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
	if (kr != KERN_SUCCESS) {
		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
		return kr;
	}

	return KERN_SUCCESS;
}
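
/*
 * Call sketch (illustrative; the inner-buffer tag and the plain memcpy
 * helper are hypothetical stand-ins for whatever the caller uses):
 *
 *	static void
 *	my_memcpy(void *dst, const void *src, size_t len)
 *	{
 *		memcpy(dst, src, len);
 *	}
 *
 *	kr = kcdata_init_compress(kcd, KCDATA_BUFFER_BEGIN_STACKSHOT,
 *	    my_memcpy, KCDCT_ZLIB);
 */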

static inline
int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
	case KCDCF_FINISH: return Z_FINISH;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
	}
}

static inline
int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: /* fall through */
	case KCDCF_SYNC_FLUSH: return Z_OK;
	case KCDCF_FINISH: return Z_STREAM_END;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
	}
}

/* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
    size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
    enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;
	int expected_ret, ret;

	zs->next_out = outbuffer;
	zs->avail_out = (unsigned int) outsize;
	zs->next_in = inbuffer;
	zs->avail_in = (unsigned int) insize;
	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
	if (zs->avail_in != 0 || zs->avail_out <= 0) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
	if (ret != expected_ret) {
		/*
		 * Should only fail with catastrophic, unrecoverable cases (i.e.,
		 * corrupted z_stream, or incorrect configuration)
		 */
		panic("zlib kcdata compression ret = %d\n", ret);
	}

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
	if (wrote) {
		*wrote = outsize - zs->avail_out;
	}
	return KERN_SUCCESS;
}

/*
 * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
 * @outbuffer (of size @outsize). Flush based on the @flush parameter.
 *
 * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
 * @outsize isn't sufficient. Also writes the number of bytes written to
 * @outbuffer into @wrote.
 */
static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
    void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush);

	/* don't compress if we are in a window */
	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
		assert(cd->kcd_cd_memcpy_f);
		if (outsize >= insize) {
			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
			if (wrote) {
				*wrote = insize;
			}
			return KERN_SUCCESS;
		} else {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
	default:
		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
	}
}

static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	return (size_t) deflateBound(zs, (unsigned long) size);
}

/*
 * returns the worst-case, maximum length of the compressed data when
 * compressing a buffer of size @size using the configured algorithm.
 */
static size_t
kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
{
	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_compression_bound_zlib(data, size);
	case KCDCT_NONE:
		return size;
	default:
		panic("%s: unknown compression method", __func__);
	}
}

/*
 * kcdata_compress_chunk_with_flags:
 * Compress the buffer found at @input_data (length @input_size) into the
 * kcdata buffer described by @data. This method will construct the
 * kcdata_item_t required by parsers using the type information @type and
 * flags @flags.
 *
 * Returns KERN_SUCCESS when successful. Currently, asserts on failure.
 */
kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
{
	assert(data);
	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
	assert(input_data);
	struct kcdata_item info;
	char padding_data[16] = {0};
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	size_t wrote = 0;
	kern_return_t kr;

	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
	    __func__, type, input_data, input_size, kcdata_flags);

	/*
	 * First, get memory space. The uncompressed size must fit in the
	 * remainder of the kcdata buffer, in case the compression algorithm
	 * doesn't actually compress the data at all.
	 */
	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
	if (total_uncompressed_size > data->kcd_length ||
	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}
	uint32_t padding = kcdata_calc_padding(input_size);
	assert(padding < sizeof(padding_data));

	void *space_start = (void *) data->kcd_addr_end;
	void *space_ptr = space_start;

	/* create the output stream */
	size_t total_uncompressed_space_remaining = total_uncompressed_size;

	/* create the info data */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = input_size + padding;
	info.flags = kcdata_flags;

	/*
	 * The next (up to three) compression calls are needed separately because
	 * of the scatter-gather nature of this operation. The kcdata item header
	 * (info) and padding are on the stack, while the actual data is somewhere
	 * else.
	 */

	/* create the input stream for info & compress */
	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
	    KCDCF_SYNC_FLUSH;
	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr += wrote;
	total_uncompressed_space_remaining -= wrote;

	/* If there is input provided, compress that here */
	if (input_size) {
		flush = padding ? KCDCF_NO_FLUSH :
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
		    KCDCF_SYNC_FLUSH;
		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
		space_ptr += wrote;
		total_uncompressed_space_remaining -= wrote;
	}

	/* If the item and its data require padding to maintain alignment,
	 * "compress" that into the output buffer. */
	if (padding) {
		/* write the padding */
		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
		if (wrote == 0) {
			return KERN_FAILURE;
		}
		space_ptr += wrote;
		total_uncompressed_space_remaining -= wrote;
	}

	assert((size_t)(space_ptr - space_start) <= total_uncompressed_size);

	/* move the end marker forward */
	data->kcd_addr_end = (mach_vm_address_t) (space_start + (total_uncompressed_size - total_uncompressed_space_remaining));

	return KERN_SUCCESS;
}

/*
 * kcdata_compress_chunk:
 * Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata
 * flags, i.e. it marks the struct as padded and records the number of padding
 * bytes.
 *
 * Return values are the same as for kcdata_compress_chunk_with_flags().
 */
kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
{
	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
}

kern_return_t
kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
{
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		return kcdata_compress_chunk(data, type, input_data, size);
	} else {
		kern_return_t ret;
		mach_vm_address_t uaddr = 0;
		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
		if (ret != KERN_SUCCESS) {
			return ret;
		}

		kcdata_memcpy(data, uaddr, input_data, size);
		return KERN_SUCCESS;
	}
}

kern_return_t
kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
{
	uint64_t flags = type_of_element;
	flags = (flags << 32) | count;
	uint32_t total_size = count * size_of_element;
	uint32_t pad = kcdata_calc_padding(total_size);

	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
	} else {
		kern_return_t ret;
		mach_vm_address_t uaddr = 0;
		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
		if (ret != KERN_SUCCESS) {
			return ret;
		}

		kcdata_memcpy(data, uaddr, input_data, total_size);
		return KERN_SUCCESS;
	}
}

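/*
 * Push sketch (illustrative; MY_KCTYPE_SAMPLE is a hypothetical element type
 * ID and the values are made up). Both helpers pick the compressed or plain
 * path based on KCFLAG_USE_COMPRESSION:
 *
 *	uint64_t vals[4] = { 1, 2, 3, 4 };
 *	kr = kcdata_push_array(kcd, MY_KCTYPE_SAMPLE, sizeof(uint64_t), 4, vals);
 */
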
/* A few words on how window compression works:
 *
 * This is how the buffer looks when the window is opened:
 *
 * X---------------------------------------------------------------------X
 * |                                  |                                  |
 * |    Filled with stackshot data    |            Zero bytes            |
 * |                                  |                                  |
 * X---------------------------------------------------------------------X
 *                                    ^
 *                                    \ - kcd_addr_end
 *
 * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
 *
 * Any kcdata_* operation will then push data to the buffer like normal. (If
 * you call any compressing functions they will pass-through, i.e. no
 * compression will be done) Once the window is closed, the following takes
 * place:
 *
 * X---------------------------------------------------------------------X
 * |               |               |                  |                  |
 * | Existing data |   New data    |  Scratch buffer  |                  |
 * |               |               |                  |                  |
 * X---------------------------------------------------------------------X
 *                 ^               ^                  ^
 *                 |               |                  |
 *                 \ -kcd_cd_mark_begin               |
 *                                 |                  |
 *                                 \ - kcd_addr_end   |
 *                                                    |
 *                kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
 *
 * (1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
 *     compression algorithm to compress to the scratch buffer.
 * (2) The scratch buffer's contents are copied into the area denoted "New
 *     data" above, effectively overwriting the uncompressed data with the
 *     compressed one.
 * (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
 */

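/*
 * Window sketch (illustrative; the items added inside the window are
 * whatever the caller needs to write uncompressed before compacting):
 *
 *	kcdata_compression_window_open(kcd);
 *	kr = kcdata_push_data(kcd, type, size, payload);  // pass-through copy
 *	...
 *	kr = kcdata_compression_window_close(kcd);        // compress in place
 */
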
/* Record the current position; compression will restart from here later */
void
kcdata_compression_window_open(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);

	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
		cd->kcd_cd_mark_begin = data->kcd_addr_end;
	}
}

/* Compress the region between the mark and the current end */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * First, get memory space. The uncompressed size must fit in the
	 * remainder of the kcdata buffer, in case the compression algorithm
	 * doesn't actually compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	if (wrote == 0) {
		return KERN_FAILURE;
	}
	space_ptr += wrote;
	total_uncompressed_space_remaining -= wrote;

	assert((size_t)(space_ptr - space_start) <= max_size);

	/* copy to the original location */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* rewind the end marker */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}

static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);

	*totalout = (uint64_t) zs->total_out;
	*totalin = (uint64_t) zs->total_in;

	return KERN_SUCCESS;
}

static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
{
	kern_return_t kr;

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
		break;
	case KCDCT_NONE:
		kr = KERN_SUCCESS;
		break;
	default:
		panic("invalid compression flag 0x%llx in kcdata_get_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
	}

	return kr;
}

kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)
{
	kern_return_t kr;
	uint64_t totalout, totalin;

	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;

	return kr;
}

static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	/*
	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
	 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
	 *
	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
	 * is done on the same buffer for the before and after calculation so there's nothing functionally
	 * broken. The same buffer cleanup is done here for completeness' sake.
	 * From rdar://problem/64381661
	 */

	void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
	bzero(stackshot_end, zero_fill_size);

	if (deflateEnd(zs) == Z_OK) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)
{
	kcdata_write_compression_stats(data);

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		data->kcd_length += data->kcd_comp_d.kcd_cd_maxoffset;
		return kcdata_finish_compression_zlib(data);
	case KCDCT_NONE:
		return KERN_SUCCESS;
	default:
		panic("invalid compression type 0x%llx in kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
	}
}

void
kcd_finalize_compression(kcdata_descriptor_t data)
{
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
	}
}

/*
 * Routine: kcdata_get_memory_addr
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark memory if it has failed populating the
 *       data in the middle of the operation.
 * params:  data - pointer describing the crash info allocation
 *	    type - type of data to be put. See corpse.h for defined types
 *	    size - size requested. The header describes this size
 * returns: mach_vm_address_t address in user memory for copyout().
 */
kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
{
	/* record number of padding bytes as lower 4 bits of flags */
	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
}
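
/*
 * Reservation sketch (illustrative; TASK_CRASHINFO_PID is one defined type
 * and the value written is made up):
 *
 *	mach_vm_address_t addr = 0;
 *	if (kcdata_get_memory_addr(kcd, TASK_CRASHINFO_PID,
 *	    sizeof(pid_t), &addr) == KERN_SUCCESS) {
 *		pid_t pid = 42;
 *		kcdata_memcpy(kcd, addr, &pid, sizeof(pid));
 *	}
 */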

/*
 * Routine: kcdata_write_buffer_end
 *
 * Desc: Write buffer end marker. This does not advance the end pointer in the
 * kcdata_descriptor_t, so it may be used conservatively before additional data
 * is added, as long as it is at least called after the last time data is added.
 *
 * params: data - pointer describing the crash info allocation
 */

kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)
{
	struct kcdata_item info;
	bzero(&info, sizeof(info));
	info.type = KCDATA_TYPE_BUFFER_END;
	info.size = 0;
	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
}

/*
 * Routine: kcdata_get_memory_addr_with_flavor
 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
 */

static kern_return_t
kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size = size + sizeof(info);

	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));

	bzero(&info, sizeof(info));
	info.type = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr) {
		return kr;
	}

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr) {
			return kr;
		}
	}

	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}

/* Routine: kcdata_get_memory_size_for_data
 * Desc: returns the amount of memory that is required to store the information
 *       in kcdata
 */
static size_t
kcdata_get_memory_size_for_data(uint32_t size)
{
	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
}

/*
 * Routine: kcdata_get_memory_addr_for_array
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark memory if it has failed populating the
 *       data in the middle of the operation.
 * params:  data - pointer describing the crash info allocation
 *	    type_of_element - type of data to be put. See kern_cdata.h for defined types
 *	    size_of_element - size of element. The header describes this size
 *	    count - num of elements in array.
 * returns: mach_vm_address_t address in user memory for copyout().
 */

kern_return_t
kcdata_get_memory_addr_for_array(
	kcdata_descriptor_t data,
	uint32_t type_of_element,
	uint32_t size_of_element,
	uint32_t count,
	mach_vm_address_t *user_addr)
{
	/* for arrays we record the number of padding bytes as the low-order 4 bits
	 * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
	uint64_t flags = type_of_element;
	flags = (flags << 32) | count;
	uint32_t total_size = count * size_of_element;
	uint32_t pad = kcdata_calc_padding(total_size);

	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
}

/*
 * Routine: kcdata_add_container_marker
 * Desc: Add a container marker in the buffer for type and identifier.
 * params:  data - pointer describing the crash info allocation
 *	    header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN, KCDATA_TYPE_CONTAINER_END)
 *	    container_type - type of data to be put. See kern_cdata.h for defined types
 *	    identifier - unique identifier. This is required to match nested containers.
 * returns: return value of kcdata_get_memory_addr()
 */

kern_return_t
kcdata_add_container_marker(
	kcdata_descriptor_t data,
	uint32_t header_type,
	uint32_t container_type,
	uint64_t identifier)
{
	mach_vm_address_t user_addr;
	kern_return_t kr;
	uint32_t data_size;

	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);

	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN) ? sizeof(uint32_t) : 0;

	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (data_size) {
			kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
		}
	} else {
		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
	}

	return kr;
}

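/*
 * Nesting sketch (illustrative; the container type and identifier are
 * hypothetical; the same identifier must be used for the matching END):
 *
 *	uint64_t cid = 1;
 *	kr = kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_BEGIN,
 *	    MY_KCCONTAINER_SAMPLE, cid);
 *	... add items that belong to the container ...
 *	kr = kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_END,
 *	    MY_KCCONTAINER_SAMPLE, cid);
 */
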
/*
 * Routine: kcdata_undo_add_container_begin
 * Desc: call this after adding a container begin but before adding anything else to revert.
 */
kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)
{
	/*
	 * the payload of a container begin is a single uint32_t (the container
	 * type). It is padded out to 16 bytes.
	 */
	const mach_vm_address_t padded_payload_size = 16;
	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}

/*
 * Routine: kcdata_memcpy
 * Desc: a common function to copy data out based on either the copyout or memcpy flags
 * params:  data - pointer describing the kcdata buffer
 *	    dst_addr - destination address
 *	    src_addr - source address
 *	    size - size in bytes to copy.
 * returns: KERN_NO_ACCESS if copyout fails.
 */

kern_return_t
kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
{
	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(src_addr, dst_addr, size)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)dst_addr, src_addr, size);
	}
	return KERN_SUCCESS;
}

/*
 * Routine: kcdata_bzero
 * Desc: zero out a portion of a kcdata buffer.
 */
kern_return_t
kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
{
	kern_return_t kr = KERN_SUCCESS;
	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
		uint8_t zeros[16] = {};
		while (size) {
			uint32_t block_size = MIN(size, 16);
			kr = copyout(&zeros, dst_addr, block_size);
			if (kr) {
				return KERN_NO_ACCESS;
			}
			/* advance past the block just zeroed */
			dst_addr += block_size;
			size -= block_size;
		}
		return KERN_SUCCESS;
	} else {
		bzero((void*)dst_addr, size);
		return KERN_SUCCESS;
	}
}

/*
 * Routine: kcdata_add_type_definition
 * Desc: add type definition to kcdata buffer.
 *       see feature description in documentation above.
 * params:  data - pointer describing the kcdata buffer
 *	    type_id - unique type identifier for this data
 *	    type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
 *	    elements_array_addr - address of descriptors for each field in the struct
 *	    elements_count - count of how many fields are in the struct.
 * returns: return code from kcdata_get_memory_addr in case of failure.
 */

kern_return_t
kcdata_add_type_definition(
	kcdata_descriptor_t data,
	uint32_t type_id,
	char *type_name,
	struct kcdata_subtype_descriptor *elements_array_addr,
	uint32_t elements_count)
{
	kern_return_t kr = KERN_SUCCESS;
	struct kcdata_type_definition kc_type_definition;
	mach_vm_address_t user_addr;
	uint32_t total_size = sizeof(struct kcdata_type_definition);
	bzero(&kc_type_definition, sizeof(kc_type_definition));

	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
	kc_type_definition.kct_num_elements = elements_count;
	kc_type_definition.kct_type_identifier = type_id;

	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
	/* record number of padding bytes as lower 4 bits of flags */
	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
	    kcdata_calc_padding(total_size), &user_addr))) {
		return kr;
	}
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
		return kr;
	}
	user_addr += sizeof(struct kcdata_type_definition);
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
		return kr;
	}
	return kr;
}
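
/*
 * Definition sketch (illustrative; the struct, type ID, and field layout are
 * hypothetical, and the subtype-descriptor field names are assumed from
 * kcdata.h):
 *
 *	struct my_sample { uint64_t a; uint32_t b; };
 *	struct kcdata_subtype_descriptor fields[2] = {
 *		{ .kcs_elem_type = KC_ST_UINT64, .kcs_elem_offset = 0,
 *		  .kcs_elem_size = sizeof(uint64_t), .kcs_name = "a" },
 *		{ .kcs_elem_type = KC_ST_UINT32, .kcs_elem_offset = 8,
 *		  .kcs_elem_size = sizeof(uint32_t), .kcs_name = "b" },
 *	};
 *	kr = kcdata_add_type_definition(kcd, 0x1234, "my_sample", fields, 2);
 */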

kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
{
	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}

	kern_return_t kr = 0;
	mach_vm_address_t user_addr;
	struct _uint64_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);
	bzero(&save_data, size_req);

	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
	save_data.data = data;

	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
		/* allocate space for the output */
		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
	}

	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(&save_data, user_addr, size_req)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)user_addr, &save_data, size_req);
	}
	return KERN_SUCCESS;
}

kern_return_t
kcdata_add_uint32_with_description(
	kcdata_descriptor_t data_desc,
	uint32_t data,
	const char *description)
{
	assert(strlen(description) < KCDATA_DESC_MAXLEN);
	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	kern_return_t kr = 0;
	mach_vm_address_t user_addr;
	struct _uint32_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);

	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
	save_data.data = data;

	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
		/* allocate space for the output */
		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
	}

	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(&save_data, user_addr, size_req)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)user_addr, &save_data, size_req);
	}

	return KERN_SUCCESS;
}


/* end buffer management api */