/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/kern_cdata.h>
#include <kern/kalloc.h>
#include <mach/mach_vm.h>
#include <libkern/zlib.h>
#include <os/overflow.h>
static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
static size_t kcdata_get_memory_size_for_data(uint32_t size);
static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
/*
 * zlib will need to store its metadata; this value is independent of the
 * window bits and other zlib internals.
 */
#define ZLIB_METADATA_SIZE 1440

/* #define kcdata_debug_printf printf */
#define kcdata_debug_printf(...) ;
/* Internal structs for convenience */
struct _uint64_with_description_data {
	char     desc[KCDATA_DESC_MAXLEN];
	uint64_t data;
};

struct _uint32_with_description_data {
	char     desc[KCDATA_DESC_MAXLEN];
	uint32_t data;
};
/*
 * Estimates how large a buffer should be allocated for a kcdata buffer that
 * will contain num_items items of known types with overall length payload_size.
 *
 * NOTE: This function will not give an accurate estimate for buffers that will
 * contain unknown types (those with string descriptions).
 */
uint32_t
kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
{
	/*
	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
	 */
	uint32_t max_padding_bytes = 0;
	uint32_t max_padding_with_item_description_bytes = 0;
	uint32_t estimated_required_buffer_size = 0;
	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);

	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
		panic("%s: Overflow in required buffer size estimate", __func__);
	}

	return estimated_required_buffer_size;
}
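/*
 * Worked example of the estimate above (illustrative only: it assumes
 * KCDATA_ALIGNMENT_SIZE == 16 and sizeof(struct kcdata_item) == 16, which may
 * not hold on every configuration). For num_items = 4, payload_size = 256:
 *
 *	4 * 15 (worst-case padding)     =  60
 *	4 * 16 (item headers)           =  64
 *	2 * 16 (begin/end markers)      =  32
 *	256    (payload)                = 256
 *	                         total  = 412 bytes
 */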
kcdata_descriptor_t
kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
	kcdata_descriptor_t data = NULL;
	mach_vm_address_t user_addr = 0;
	uint16_t clamped_flags = (uint16_t) flags;

	data = kalloc_flags(sizeof(struct kcdata_descriptor), Z_WAITOK | Z_ZERO);
	if (data == NULL) {
		return NULL;
	}

	data->kcd_addr_begin = buffer_addr_p;
	data->kcd_addr_end = buffer_addr_p;
	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
	data->kcd_length = size;

	/* Initialize the BEGIN header */
	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
		kcdata_memory_destroy(data);
		return NULL;
	}

	return data;
}
kern_return_t
kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
	mach_vm_address_t user_addr = 0;
	uint16_t clamped_flags = (uint16_t) flags;

	if (data == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	bzero(data, sizeof(struct kcdata_descriptor));
	data->kcd_addr_begin = buffer_addr_p;
	data->kcd_addr_end = buffer_addr_p;
	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
	data->kcd_length = size;

	/* Initialize the BEGIN header */
	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
}
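/*
 * A minimal usage sketch (the buffer, its size, and the begin tag are
 * hypothetical; this is not a prescribed calling sequence):
 *
 *	struct kcdata_descriptor kcd;
 *	kern_return_t kr;
 *
 *	kr = kcdata_memory_static_init(&kcd, (mach_vm_address_t) buf,
 *	    MY_KCDATA_BUFFER_BEGIN_TAG, buf_size, KCFLAG_USE_MEMCOPY);
 */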
void *
kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
{
	if (data == NULL) {
		return NULL;
	}

	return (void *)data->kcd_addr_begin;
}

uint64_t
kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
{
	assert(kcd != NULL);
	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
}
uint64_t
kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
{
	kern_return_t kr;

	assert(kcd != NULL);
	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
		uint64_t totalout, totalin;

		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
		if (kr == KERN_SUCCESS) {
			return totalin;
		} else {
			return 0;
		}
	} else {
		/* If compression wasn't used, get the number of bytes used */
		return kcdata_memory_get_used_bytes(kcd);
	}
}
/*
 * Free up the memory associated with kcdata
 */
kern_return_t
kcdata_memory_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * data->kcd_addr_begin points to memory that is not tracked by the
	 * kcdata lib, so it is not cleared here.
	 */
	kfree(data, sizeof(struct kcdata_descriptor));
	return KERN_SUCCESS;
}
/* Used by zlib to allocate space in its metadata section */
static void *
kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
{
	void *result;
	struct kcdata_compress_descriptor *cd = opaque;
	int alloc_size = ~31L & (31 + (items * size));

	result = (void *)(cd->kcd_cd_base + cd->kcd_cd_offset);
	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
		result = Z_NULL;
	} else {
		cd->kcd_cd_offset += alloc_size;
	}

	kcdata_debug_printf("%s: %d * %d = %d => %p\n", __func__, items, size, items * size, result);

	return result;
}
/* Used by zlib to free previously allocated space in its metadata section */
static void
kcdata_compress_zfree(void *opaque, void *ptr)
{
	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);

	/*
	 * Since the buffers we are using are temporary, we don't worry about
	 * freeing memory for now. Besides, testing has shown that zlib only calls
	 * this at the end, near deflateEnd() or a Z_FINISH deflate() call.
	 */
}
243 kcdata_init_compress_state(kcdata_descriptor_t data
, void (*memcpy_f
)(void *, const void *, size_t), uint64_t type
, mach_vm_address_t totalout_addr
, mach_vm_address_t totalin_addr
)
245 kern_return_t ret
= KERN_SUCCESS
;
247 int wbits
= 12, memlevel
= 3;
248 struct kcdata_compress_descriptor
*cd
= &data
->kcd_comp_d
;
250 cd
->kcd_cd_memcpy_f
= memcpy_f
;
251 cd
->kcd_cd_compression_type
= type
;
252 cd
->kcd_cd_totalout_addr
= totalout_addr
;
253 cd
->kcd_cd_totalin_addr
= totalin_addr
;
257 /* allocate space for the metadata used by zlib */
258 size
= round_page(ZLIB_METADATA_SIZE
+ zlib_deflate_memory_size(wbits
, memlevel
));
259 kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__
, size
, data
->kcd_length
);
260 kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__
, (void *) data
->kcd_addr_begin
, (void *) data
->kcd_addr_begin
+ data
->kcd_length
);
262 if (4 * size
> data
->kcd_length
) {
263 return KERN_INSUFFICIENT_BUFFER_SIZE
;
266 cd
->kcd_cd_zs
.avail_in
= 0;
267 cd
->kcd_cd_zs
.next_in
= NULL
;
268 cd
->kcd_cd_zs
.avail_out
= 0;
269 cd
->kcd_cd_zs
.next_out
= NULL
;
270 cd
->kcd_cd_zs
.opaque
= cd
;
271 cd
->kcd_cd_zs
.zalloc
= kcdata_compress_zalloc
;
272 cd
->kcd_cd_zs
.zfree
= kcdata_compress_zfree
;
273 cd
->kcd_cd_base
= (void *) data
->kcd_addr_begin
+ data
->kcd_length
- size
;
274 data
->kcd_length
-= size
;
275 cd
->kcd_cd_offset
= 0;
276 cd
->kcd_cd_maxoffset
= size
;
277 cd
->kcd_cd_flags
= 0;
279 kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__
, cd
->kcd_cd_base
, cd
->kcd_cd_base
+ size
);
281 if (deflateInit2(&cd
->kcd_cd_zs
, Z_BEST_SPEED
, Z_DEFLATED
, wbits
, memlevel
, Z_DEFAULT_STRATEGY
) != Z_OK
) {
282 kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
283 ret
= KERN_INVALID_ARGUMENT
;
287 panic("kcdata_init_compress_state: invalid compression type: %d", (int) type
);
/*
 * Turn on the compression logic for kcdata
 */
kern_return_t
kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
{
	kern_return_t kr;
	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
	struct _uint64_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);

	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);

	/* reset the compression descriptor */
	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));

	/* add the header information */
	kcdata_add_uint64_with_description(data, type, "kcd_c_type");

	/* reserve space to write total out */
	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	memcpy((void *)totalout_addr, &save_data, size_req);

	/* space for total in */
	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	memcpy((void *)totalin_addr, &save_data, size_req);

	/* add the inner buffer */
	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);

	/* mark the descriptor as compressed */
	data->kcd_flags |= KCFLAG_USE_COMPRESSION;

	/* initialize algorithm specific state */
	kr = kcdata_init_compress_state(data, memcpy_f, type,
	    totalout_addr + offsetof(struct _uint64_with_description_data, data),
	    totalin_addr + offsetof(struct _uint64_with_description_data, data));
	if (kr != KERN_SUCCESS) {
		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
		return kr;
	}

	return KERN_SUCCESS;
}
static inline int
kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
	case KCDCF_FINISH: return Z_FINISH;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
	}
}

static inline int
kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
{
	switch (flush) {
	case KCDCF_NO_FLUSH: /* fall through */
	case KCDCF_SYNC_FLUSH: return Z_OK;
	case KCDCF_FINISH: return Z_STREAM_END;
	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
	}
}
/* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
static kern_return_t
kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
    size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
    enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;
	int expected_ret, ret;

	zs->next_out = outbuffer;
	zs->avail_out = (unsigned int) outsize;
	zs->next_in = inbuffer;
	zs->avail_in = (unsigned int) insize;
	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
	if (zs->avail_in != 0 || zs->avail_out <= 0) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
	if (ret != expected_ret) {
		/*
		 * Should only fail with catastrophic, unrecoverable cases (i.e.,
		 * corrupted z_stream, or incorrect configuration)
		 */
		panic("zlib kcdata compression ret = %d\n", ret);
	}

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
	*wrote = outsize - zs->avail_out;
	return KERN_SUCCESS;
}
/*
 * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
 * @outbuffer (of size @outsize). Flush based on the @flush parameter.
 *
 * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
 * @outsize isn't sufficient. Also, writes the number of bytes written in the
 * @outbuffer to @wrote.
 */
static kern_return_t
kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
    void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;

	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);

	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
	    __func__, outbuffer, outsize, inbuffer, insize, flush);

	/* don't compress if we are in a window */
	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
		assert(cd->kcd_cd_memcpy_f);
		if (outsize >= insize) {
			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
			*wrote = insize;
			return KERN_SUCCESS;
		} else {
			return KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
	default:
		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
	}
}
static size_t
kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	return (size_t) deflateBound(zs, (unsigned long) size);
}

/*
 * returns the worst-case, maximum length of the compressed data when
 * compressing a buffer of size @size using the configured algorithm.
 */
static size_t
kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
{
	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		return kcdata_compression_bound_zlib(data, size);
	case KCDCT_NONE:
		return size;
	default:
		panic("%s: unknown compression method", __func__);
	}
}
/*
 * kcdata_compress_chunk_with_flags:
 * Compress buffer found at @input_data (length @input_size) to the kcdata
 * buffer described by @data. This method will construct the kcdata_item_t
 * required by parsers using the type information @type and flags @flags.
 *
 * Returns KERN_SUCCESS when successful. Currently, asserts on failure.
 */
kern_return_t
kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
{
	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));

	struct kcdata_item info;
	char padding_data[16] = {0};
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	size_t wrote = 0;
	kern_return_t kr;

	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
	    __func__, type, input_data, input_size, kcdata_flags);

	/*
	 * first, get memory space. The uncompressed size must fit in the remainder
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
	if (total_uncompressed_size > data->kcd_length ||
	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}
	uint32_t padding = kcdata_calc_padding(input_size);
	assert(padding < sizeof(padding_data));

	void *space_start = (void *) data->kcd_addr_end;
	void *space_ptr = space_start;

	/* create the output stream */
	size_t total_uncompressed_space_remaining = total_uncompressed_size;

	/* create the info data */
	bzero(&info, sizeof(info));
	info.type = type;
	info.size = input_size + padding;
	info.flags = kcdata_flags;

	/*
	 * The next (up to) three compress calls are needed separately because of
	 * the scatter-gather nature of this operation. The kcdata item header
	 * (info) and padding are on the stack, while the actual data is somewhere
	 * else.
	 */

	/* create the input stream for info & compress */
	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
	    KCDCF_SYNC_FLUSH;
	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr += wrote;
	total_uncompressed_space_remaining -= wrote;

	/* If there is input provided, compress that here */
	if (input_size) {
		flush = padding ? KCDCF_NO_FLUSH :
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
		    KCDCF_SYNC_FLUSH;
		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
		space_ptr += wrote;
		total_uncompressed_space_remaining -= wrote;
	}

	/*
	 * If the item and its data require padding to maintain alignment,
	 * "compress" that into the output buffer.
	 */
	if (padding) {
		/* write the padding */
		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
		space_ptr += wrote;
		total_uncompressed_space_remaining -= wrote;
	}

	assert((size_t)(space_ptr - space_start) <= total_uncompressed_size);

	/* move the end marker forward */
	data->kcd_addr_end = (mach_vm_address_t) (space_start + (total_uncompressed_size - total_uncompressed_space_remaining));

	return KERN_SUCCESS;
}
/*
 * kcdata_compress_chunk:
 * Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags,
 * i.e. padding and also saves the amount of padding bytes.
 *
 * Returns are the same as in kcdata_compress_chunk_with_flags()
 */
static kern_return_t
kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
{
	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
}
kern_return_t
kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
{
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		return kcdata_compress_chunk(data, type, input_data, size);
	} else {
		kern_return_t ret;
		mach_vm_address_t uaddr = 0;
		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
		if (ret != KERN_SUCCESS) {
			return ret;
		}

		kcdata_memcpy(data, uaddr, input_data, size);
		return KERN_SUCCESS;
	}
}
kern_return_t
kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
{
	uint64_t flags = type_of_element;
	flags = (flags << 32) | count;
	uint32_t total_size = count * size_of_element;
	uint32_t pad = kcdata_calc_padding(total_size);

	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
	} else {
		kern_return_t ret;
		mach_vm_address_t uaddr = 0;
		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
		if (ret != KERN_SUCCESS) {
			return ret;
		}

		kcdata_memcpy(data, uaddr, input_data, total_size);
		return KERN_SUCCESS;
	}
}
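/*
 * Usage sketch for the two push APIs (MY_KCTYPE_SAMPLE and the variables are
 * hypothetical). Both calls transparently take the compression path when
 * KCFLAG_USE_COMPRESSION is set on the descriptor:
 *
 *	kr = kcdata_push_data(kcd, MY_KCTYPE_SAMPLE, sizeof(sample), &sample);
 *	kr = kcdata_push_array(kcd, MY_KCTYPE_SAMPLE, sizeof(samples[0]),
 *	    nsamples, samples);
 */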
/* A few words on how window compression works:
 *
 * This is how the buffer looks when the window is opened:
 *
 * X---------------------------------------------------------------------X
 * |                                                                     |
 * |      Filled with stackshot data        |        Zero bytes          |
 * |                                                                     |
 * X---------------------------------------------------------------------X
 *                                          ^
 *                                    kcd_addr_end
 *
 * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
 *
 * Any kcdata_* operation will then push data to the buffer like normal. (If
 * you call any compressing functions they will pass-through, i.e. no
 * compression will be done.) Once the window is closed, the following takes
 * place:
 *
 * X---------------------------------------------------------------------X
 * |                                                                     |
 * |  Existing data  |   New data   |  Scratch buffer  |                 |
 * |                                                                     |
 * X---------------------------------------------------------------------X
 *                   ^              ^                  ^
 *                   |              |                  |
 *     kcd_cd_mark_begin            |                  |
 *                                  |                  |
 *               (old) kcd_addr_end                    |
 *                                                     |
 *        kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin)
 *
 * (1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
 *     compression algorithm to compress to the scratch buffer.
 * (2) The scratch buffer's contents are copied into the area denoted "New
 *     data" above, effectively overwriting the uncompressed data with the
 *     compressed version.
 * (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
 */
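/*
 * A sketch of how a caller is expected to drive the window API (the type and
 * buffer names are hypothetical); compressing functions called inside the
 * window pass through uncompressed until the window is closed:
 *
 *	kcdata_compression_window_open(kcd);
 *	kr = kcdata_push_data(kcd, MY_KCTYPE_SAMPLE, size, buf);
 *	if (kr == KERN_SUCCESS) {
 *		kr = kcdata_compression_window_close(kcd);
 *	}
 */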
/* Record the state, and restart compression from this later */
void
kcdata_compression_window_open(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);

	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
		cd->kcd_cd_mark_begin = data->kcd_addr_end;
	}
}
/* Compress the region between the mark and the current end */
kern_return_t
kcdata_compression_window_close(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	uint64_t total_size, max_size;
	void *space_start, *space_ptr;
	size_t total_uncompressed_space_remaining, wrote = 0;
	kern_return_t kr;

	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);

	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
		/* clear the window marker and return, this is a no-op */
		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
		return KERN_SUCCESS;
	}

	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);

	/*
	 * first, get memory space. The uncompressed size must fit in the remainder
	 * of the kcdata buffer, in case the compression algorithm doesn't actually
	 * compress the data at all.
	 */
	if (max_size > data->kcd_length ||
	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* clear the window marker */
	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;

	space_start = (void *) data->kcd_addr_end;
	space_ptr = space_start;
	total_uncompressed_space_remaining = (unsigned int) max_size;
	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
	space_ptr += wrote;
	total_uncompressed_space_remaining -= wrote;

	assert((size_t)(space_ptr - space_start) <= max_size);

	/* copy to the original location */
	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));

	/* rewind the end marker */
	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);

	return KERN_SUCCESS;
}
static kern_return_t
kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);

	*totalout = (uint64_t) zs->total_out;
	*totalin = (uint64_t) zs->total_in;

	return KERN_SUCCESS;
}
static kern_return_t
kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
{
	kern_return_t kr;

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
		break;
	default:
		panic("invalid compression type 0x%llx in kcdata_get_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
	}

	return kr;
}
static kern_return_t
kcdata_write_compression_stats(kcdata_descriptor_t data)
{
	kern_return_t kr;
	uint64_t totalout, totalin;

	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;

	return kr;
}
static kern_return_t
kcdata_finish_compression_zlib(kcdata_descriptor_t data)
{
	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
	z_stream *zs = &cd->kcd_cd_zs;

	/*
	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
	 * in the panic buffer which interfere with CRC checks. The CRC is calculated here over the full
	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
	 *
	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
	 * is done on the same buffer for the before and after calculation so there's nothing functionally
	 * broken. The same buffer cleanup is done here for completeness' sake.
	 * From rdar://problem/64381661
	 */
	void *stackshot_end = (char *)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
	bzero(stackshot_end, zero_fill_size);

	if (deflateEnd(zs) == Z_OK) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
kern_return_t
kcdata_finish_compression(kcdata_descriptor_t data)
{
	kcdata_write_compression_stats(data);

	switch (data->kcd_comp_d.kcd_cd_compression_type) {
	case KCDCT_ZLIB:
		/* restore the buffer length that was carved out for zlib metadata */
		data->kcd_length += data->kcd_comp_d.kcd_cd_maxoffset;
		return kcdata_finish_compression_zlib(data);
	case KCDCT_NONE:
		return KERN_SUCCESS;
	default:
		panic("invalid compression type 0x%llx in kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
	}
}
void
kcd_finalize_compression(kcdata_descriptor_t data)
{
	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
	}
}
/*
 * Routine: kcdata_get_memory_addr
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark memory if it has failed populating the
 *       data in the middle of an operation.
 * params:  data - pointer describing the crash info allocation
 *          type - type of data to be put. See corpse.h for defined types
 *          size - size requested. The header describes this size
 * returns: mach_vm_address_t address in user memory for copyout().
 */
kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t *user_addr)
{
	/* record number of padding bytes as lower 4 bits of flags */
	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
}
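/*
 * Typical two-step pattern built on this routine (sketch; MY_KCTYPE_SAMPLE
 * and payload are hypothetical): reserve space first, then fill it with
 * kcdata_memcpy() so the copyout/memcpy flavor of the descriptor is honored:
 *
 *	mach_vm_address_t addr = 0;
 *	kr = kcdata_get_memory_addr(kcd, MY_KCTYPE_SAMPLE, sizeof(payload), &addr);
 *	if (kr == KERN_SUCCESS) {
 *		kr = kcdata_memcpy(kcd, addr, &payload, sizeof(payload));
 *	}
 */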
/*
 * Routine: kcdata_write_buffer_end
 *
 * Desc: Write buffer end marker. This does not advance the end pointer in the
 * kcdata_descriptor_t, so it may be used conservatively before additional data
 * is added, as long as it is at least called after the last time data is added.
 *
 * params: data - pointer describing the crash info allocation
 */
kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)
{
	struct kcdata_item info;
	bzero(&info, sizeof(info));
	info.type = KCDATA_TYPE_BUFFER_END;
	info.size = 0;
	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
}
/*
 * Routine: kcdata_get_memory_addr_with_flavor
 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
 */
static kern_return_t
kcdata_get_memory_addr_with_flavor(
	kcdata_descriptor_t data,
	uint32_t type,
	uint32_t size,
	uint64_t flags,
	mach_vm_address_t *user_addr)
{
	kern_return_t kr;
	struct kcdata_item info;

	uint32_t orig_size = size;
	/* make sure 16 byte aligned */
	uint32_t padding = kcdata_calc_padding(size);
	size += padding;
	uint32_t total_size = size + sizeof(info);

	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));

	bzero(&info, sizeof(info));
	info.type = type;
	info.size = size;
	info.flags = flags;

	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
	if (total_size + sizeof(info) > data->kcd_length ||
	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
	if (kr) {
		return kr;
	}

	data->kcd_addr_end += sizeof(info);

	if (padding) {
		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
		if (kr) {
			return kr;
		}
	}

	*user_addr = data->kcd_addr_end;
	data->kcd_addr_end += size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}
/*
 * Routine: kcdata_get_memory_size_for_data
 * Desc: returns the amount of memory that is required to store the information
 *       in kcdata
 */
static size_t
kcdata_get_memory_size_for_data(uint32_t size)
{
	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
}
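/*
 * Example (assuming 16-byte alignment and a 16-byte struct kcdata_item, which
 * may differ between configurations): a 20-byte payload requires
 * 20 + 12 (padding) + 16 (item header) = 48 bytes.
 */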
/*
 * Routine: kcdata_get_memory_addr_for_array
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark memory if it has failed populating the
 *       data in the middle of an operation.
 * params:  data - pointer describing the crash info allocation
 *          type_of_element - type of data to be put. See kern_cdata.h for defined types
 *          size_of_element - size of element. The header describes this size
 *          count - num of elements in array.
 * returns: mach_vm_address_t address in user memory for copyout().
 */
kern_return_t
kcdata_get_memory_addr_for_array(
	kcdata_descriptor_t data,
	uint32_t type_of_element,
	uint32_t size_of_element,
	uint32_t count,
	mach_vm_address_t *user_addr)
{
	/* for arrays we record the number of padding bytes as the low-order 4 bits
	 * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
	uint64_t flags = type_of_element;
	flags = (flags << 32) | count;
	uint32_t total_size = count * size_of_element;
	uint32_t pad = kcdata_calc_padding(total_size);

	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
}
/*
 * Routine: kcdata_add_container_marker
 * Desc: Add a container marker in the buffer for type and identifier.
 * params:  data - pointer describing the crash info allocation
 *          header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN, KCDATA_TYPE_CONTAINER_END)
 *          container_type - type of data to be put. See kern_cdata.h for defined types
 *          identifier - unique identifier. This is required to match nested containers.
 * returns: return value of kcdata_get_memory_addr()
 */
kern_return_t
kcdata_add_container_marker(
	kcdata_descriptor_t data,
	uint32_t header_type,
	uint32_t container_type,
	uint64_t identifier)
{
	mach_vm_address_t user_addr;
	kern_return_t kr;
	uint32_t data_size;

	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);

	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN) ? sizeof(uint32_t) : 0;

	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
	} else {
		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
	}

	return kr;
}
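/*
 * Containers nest, and a BEGIN marker must be paired with an END marker
 * carrying the same identifier. A sketch (the container type and identifier
 * are illustrative):
 *
 *	kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_BEGIN,
 *	    STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
 *	... push per-task items ...
 *	kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_END,
 *	    STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
 */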
/*
 * Routine: kcdata_undo_add_container_begin
 * Desc: call this after adding a container begin but before adding anything else to revert.
 */
kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)
{
	/*
	 * the payload of a container begin is a single uint64_t. It is padded out
	 * to 16 bytes.
	 */
	const mach_vm_address_t padded_payload_size = 16;
	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;

	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
		/* setup the end header as well */
		return kcdata_write_buffer_end(data);
	} else {
		return KERN_SUCCESS;
	}
}
/*
 * Routine: kcdata_memcpy
 * Desc: a common function to copy data out based on either copyout or memcopy flags
 * params:  data - pointer describing the kcdata buffer
 *          dst_addr - destination address
 *          src_addr - source address
 *          size - size in bytes to copy.
 * returns: KERN_NO_ACCESS if copyout fails.
 */
kern_return_t
kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
{
	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(src_addr, dst_addr, size)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)dst_addr, src_addr, size);
	}
	return KERN_SUCCESS;
}
/*
 * Routine: kcdata_bzero
 * Desc: zero out a portion of a kcdata buffer.
 */
kern_return_t
kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
{
	kern_return_t kr = KERN_SUCCESS;
	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
		uint8_t zeros[16] = {};
		/* copy out zeros in 16-byte blocks, advancing through the range */
		while (size) {
			uint32_t block_size = MIN(size, 16);
			kr = copyout(&zeros, dst_addr, block_size);
			if (kr) {
				return KERN_NO_ACCESS;
			}
			dst_addr += block_size;
			size -= block_size;
		}
		return KERN_SUCCESS;
	} else {
		bzero((void *)dst_addr, size);
		return KERN_SUCCESS;
	}
}
/*
 * Routine: kcdata_add_type_definition
 * Desc: add type definition to kcdata buffer.
 *       see feature description in documentation above.
 * params:  data - pointer describing the kcdata buffer
 *          type_id - unique type identifier for this data
 *          type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
 *          elements_array - address to descriptors for each field in struct
 *          elements_count - count of how many fields are there in struct.
 * returns: return code from kcdata_get_memory_addr in case of failure.
 */
kern_return_t
kcdata_add_type_definition(
	kcdata_descriptor_t data,
	uint32_t type_id,
	char *type_name,
	struct kcdata_subtype_descriptor *elements_array_addr,
	uint32_t elements_count)
{
	kern_return_t kr = KERN_SUCCESS;
	struct kcdata_type_definition kc_type_definition;
	mach_vm_address_t user_addr;
	uint32_t total_size = sizeof(struct kcdata_type_definition);
	bzero(&kc_type_definition, sizeof(kc_type_definition));

	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
	kc_type_definition.kct_num_elements = elements_count;
	kc_type_definition.kct_type_identifier = type_id;

	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
	/* record number of padding bytes as lower 4 bits of flags */
	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
	    kcdata_calc_padding(total_size), &user_addr))) {
		return kr;
	}
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
		return kr;
	}
	user_addr += sizeof(struct kcdata_type_definition);
	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
		return kr;
	}
	return kr;
}
kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char *description)
{
	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}

	kern_return_t kr = 0;
	mach_vm_address_t user_addr;
	struct _uint64_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);
	bzero(&save_data, size_req);

	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
	save_data.data = data;

	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
		/* allocate space for the output */
		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
	}

	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(&save_data, user_addr, size_req)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)user_addr, &save_data, size_req);
	}
	return KERN_SUCCESS;
}
kern_return_t
kcdata_add_uint32_with_description(
	kcdata_descriptor_t data_desc,
	uint32_t data,
	const char *description)
{
	assert(strlen(description) < KCDATA_DESC_MAXLEN);
	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
		return KERN_INVALID_ARGUMENT;
	}
	kern_return_t kr = 0;
	mach_vm_address_t user_addr;
	struct _uint32_with_description_data save_data;
	const uint64_t size_req = sizeof(save_data);

	bzero(&save_data, size_req);
	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
	save_data.data = data;

	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
		/* allocate space for the output */
		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
	}

	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
		if (copyout(&save_data, user_addr, size_req)) {
			return KERN_NO_ACCESS;
		}
	} else {
		memcpy((void *)user_addr, &save_data, size_req);
	}

	return KERN_SUCCESS;
}
/* end buffer management api */