/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * THE KCDATA MANIFESTO
 *
 * Kcdata is a self-describing data serialization format. It is meant to get
 * nested data structures out of xnu with minimum fuss, but also for that data
 * to be easy to parse. It is also meant to allow us to add new fields and
 * evolve the data format without breaking old parsers.
 *
 * Kcdata is a permanent data format suitable for long-term storage, including
 * in files. It is very important that we continue to be able to parse old
 * versions of kcdata-based formats. To this end, there are several
 * invariants you MUST MAINTAIN if you alter this file.
 *
 *  * None of the magic numbers should ever be a byteswap of themselves or
 *    of any of the other magic numbers.
 *
 *  * Never remove any type.
 *
 *  * All kcdata structs must be packed, and must exclusively use fixed-size
 *    types.
 *
 *  * Never change the definition of any type, except to add new fields to
 *    the end.
 *
 *  * If you do add new fields to the end of a type, do not actually change
 *    the definition of the old structure. Instead, define a new structure
 *    with the new fields. See thread_snapshot_v3 as an example. This
 *    provides source compatibility for old readers, and also documents where
 *    the potential size cutoffs are.
 *
 *  * If you change libkdd or kcdata.py, run the unit tests under libkdd.
 *
 *  * If you add a type or extend an existing one, add a sample test to
 *    libkdd/tests so future changes to libkdd will always parse your struct
 *    correctly.
 *
 * For example, to add a field to this:
 *
 *    struct foobar {
 *        uint32_t baz;
 *        uint32_t quux;
 *    } __attribute__ ((packed));
 *
 * Make it look like this:
 *
 *    struct foobar {
 *        uint32_t baz;
 *        uint32_t quux;
 *        ///////// end version 1 of foobar. sizeof(struct foobar) was 8 ////////
 *        uint32_t newfield;
 *    } __attribute__ ((packed));
 *
 * If you are parsing kcdata formats, you MUST
 *
 *  * Check the length field of each struct, including array elements. If the
 *    struct is longer than you expect, you must ignore the extra data.
 *
 *  * Ignore any data types you do not understand.
 *
 * Additionally, we want to be as forward compatible as we can. Meaning old
 * tools should still be able to use new data whenever possible. To this end:
 *
 *  * Try not to add new versions of types that supplant old ones. Instead
 *    extend the length of existing types or add supplemental types.
 *
 *  * Try not to remove information from existing kcdata formats, unless
 *    removal was explicitly asked for. For example it is fine to add a
 *    stackshot flag to remove unwanted information, but you should not
 *    remove it from the default stackshot if the new flag is absent.
 *
 *  * (TBD) If you do break old readers by removing information or
 *    supplanting old structs, then increase the major version number.
 *
 * The following is a description of the kcdata format.
 *
 * The data is set up in a generic format as follows.
 *
 * Layout of data structure:
 *
 *   |          8 - bytes        |
 *   |  type = MAGIC |  LENGTH   |
 *   |            0              |
 *   |      type     |   size    |
 *   |          flags            |
 *   |___________data____________|
 *   |      type     |   size    |
 *   |          flags            |
 *   |___________data____________|
 *   |  type = END   |  size=0   |
 *   |            0              |
 *
 * The type field describes what kind of data is passed. For example type = TASK_CRASHINFO_UUID means the following data is a uuid.
 * These types need to be defined in task_corpses.h for easy consumption by userspace inspection tools.
 *
 * Some range of types is reserved for special types like ints, longs etc. A cool new functionality made possible with this
 * extensible data format is that the kernel can decide to put more information as required without requiring user space tools to
 * re-compile to be compatible. The case of rusage struct versions could be introduced without breaking existing tools.
 *
 * Feature description: Generic data with description
 * ---------------------------------------------------
 * Furthermore, generic data with a description is very much possible now. For example
 *
 * - kcdata_add_uint64_with_description(cdatainfo, 0x700, "NUM MACH PORTS");
 * - and more functions that allow adding a description.
 * The userspace tools can then look at the description and print the data even if they are not compiled with knowledge of the field a priori.
 *
 * For example, the following buffer encodes a PID and a PARENT PID, each with a description:
 *
 * 0000  57 f1 ad de 00 00 00 00 00 00 00 00 00 00 00 00  W...............
 * 0010  01 00 00 00 00 00 00 00 30 00 00 00 00 00 00 00  ........0.......
 * 0020  50 49 44 00 00 00 00 00 00 00 00 00 00 00 00 00  PID.............
 * 0030  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
 * 0040  9c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
 * 0050  01 00 00 00 00 00 00 00 30 00 00 00 00 00 00 00  ........0.......
 * 0060  50 41 52 45 4e 54 20 50 49 44 00 00 00 00 00 00  PARENT PID......
 * 0070  00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
 * 0080  01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
 *
 * Feature description: Container markers for compound data
 * ----------------------------------------------------------
 * If a given kernel data type is complex and requires adding multiple optional fields inside a container
 * object for a consumer to understand arbitrary data, we package it using container markers.
 *
 * For example, the stackshot code gathers information and describes the state of a given task with respect
 * to many subsystems. It includes data such as io stats, vm counters, process names/flags and syscall counts.
 *
 * kcdata_add_container_marker(kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
 * // add multiple data, or add_<type>_with_description()s here
 * kcdata_add_container_marker(kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
 *
 * Feature description: Custom data formats on demand
 * ----------------------------------------------------
 * With the self-describing nature of the format, the kernel provider can describe a data type (uniquely identified by a number) and use
 * it in the buffer for sending data. The consumer can parse the type information and have knowledge of describing incoming data.
 * The following is an example of how we can describe a kernel-specific struct sample_disk_io_stats in the buffer.
 *
 * struct sample_disk_io_stats {
 *     uint64_t disk_reads_count;
 *     uint64_t disk_reads_size;
 *     uint64_t io_priority_count[4];
 *     uint64_t io_priority_size;
 * } __attribute__ ((packed));
 *
 * struct kcdata_subtype_descriptor disk_io_stats_def[] = {
 *     {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"},
 *     {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"},
 *     {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"},
 *     {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"},
 * };
 *
 * Now you can add this custom type definition into the buffer as
 * kcdata_add_type_definition(kcdata_p, KCTYPE_SAMPLE_DISK_IO_STATS, "sample_disk_io_stats",
 *          &disk_io_stats_def[0], sizeof(disk_io_stats_def)/sizeof(struct kcdata_subtype_descriptor));
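 *
 * As a minimal sketch (assuming KCTYPE_SAMPLE_DISK_IO_STATS was registered as
 * above and kcdata_p is a valid descriptor), the provider could then emit one
 * instance of the custom type with the push API described in the compression
 * section below:
 *
 *     struct sample_disk_io_stats stats = { .disk_reads_count = 1 };
 *     kcdata_push_data(kcdata_p, KCTYPE_SAMPLE_DISK_IO_STATS, sizeof(stats), &stats);
 *
 * Consumers that saw the type definition earlier in the buffer can then decode
 * the payload field by field without recompiling.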
 *
 * Feature description: Compression
 * --------------------
 * In order to avoid keeping a large amount of memory reserved for a panic stackshot, kcdata has support
 * for compressing the buffer in a streaming fashion. New data pushed to the kcdata buffer will be
 * automatically compressed using an algorithm selected by the API user (currently, we only support
 * pass-through and zlib; in the future we plan to add WKDM support, see: 57913859).
 *
 * To start using compression, call:
 *     kcdata_init_compress(kcdata_p, hdr_tag, memcpy_f, comp_type);
 *
 * `kcdata_p` is the kcdata buffer that will be used
 * `hdr_tag` is the usual header tag denoting what type of kcdata buffer this will be
 * `memcpy_f` is a memcpy(3)-like function used to copy into the buffer; optional.
 * `comp_type` is the compression type, see KCDCT_ZLIB for an example.
 *
 * Once compression is initialized:
 *  (1) all self-describing APIs will automatically compress
 *  (2) you can now use the following APIs to compress data into the buffer:
 *      (None of the following will compress unless kcdata_init_compress() has been called)
 *
 *  - kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
 *      Pushes the buffer of kctype @type at [@input_data, @input_data + @size]
 *      into the kcdata buffer @data, compressing if needed.
 *
 *  - kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element,
 *          uint32_t size_of_element, uint32_t count, const void *input_data)
 *      Pushes the array found at @input_data, with element type @type_of_element, where
 *      each element is of size @size_of_element and there are @count elements, into the kcdata buffer @data.
 *
 *  - kcdata_compression_window_open/close(kcdata_descriptor_t data)
 *      In case the data you are trying to push to the kcdata buffer @data is difficult to predict,
 *      you can open a "compression window". Between an open and a close, no compression will be done.
 *      Once you close the window, the underlying compression algorithm will compress the data into the buffer
 *      and automatically rewind the current end marker of the kcdata buffer.
 *      There is ASCII art in kern_cdata.c to aid the reader in understanding this.
 *
 *  - kcdata_finish_compression(kcdata_descriptor_t data)
 *      Must be called at the end to flush any underlying buffers used by the compression algorithms.
 *      This function will also add some statistics about the compression to the buffer, which helps with
 *      decompressing later.
 *
 * Once you are done with the kcdata buffer, call kcdata_deinit_compress to
 * free any buffers that may have been allocated internal to the compression
 * algorithm.
 */
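
/*
 * A minimal usage sketch of the compression flow described above (not taken
 * verbatim from any caller; `kcdata_p`, `hdr_tag`, `memcpy_f` and `abs_time`
 * are placeholders supplied by the caller, and KCDCT_ZLIB is the zlib
 * selector mentioned above):
 *
 *     kcdata_init_compress(kcdata_p, hdr_tag, memcpy_f, KCDCT_ZLIB);
 *
 *     kcdata_push_data(kcdata_p, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(abs_time), &abs_time);
 *
 *     kcdata_compression_window_open(kcdata_p);
 *     // ... emit data whose final size is hard to predict ...
 *     kcdata_compression_window_close(kcdata_p);
 *
 *     kcdata_finish_compression(kcdata_p);
 *     // ... hand the buffer off ...
 *     kcdata_deinit_compress(kcdata_p);
 */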

#include <uuid/uuid.h>

#define KCDATA_DESC_MAXLEN 32 /* including NULL byte at end */

#define KCDATA_FLAGS_STRUCT_PADDING_MASK 0xf
#define KCDATA_FLAGS_STRUCT_HAS_PADDING 0x80

/*
 * kcdata aligns elements to 16 byte boundaries.
 */
#define KCDATA_ALIGNMENT_SIZE 0x10

struct kcdata_item {
	uint32_t type;
	uint32_t size; /* len(data) */
	/* flags:
	 *
	 * For structures:
	 *    padding     = flags & 0xf
	 *    has_padding = (flags & 0x80) >> 7
	 *
	 *    has_padding is needed to disambiguate cases such as
	 *    thread_snapshot_v2 and thread_snapshot_v3. Their
	 *    respective sizes are 0x68 and 0x70, and thread_snapshot_v2
	 *    was emitted by old kernels *before* we started recording
	 *    padding. Since legacy thread_snapshot_v2 and modern
	 *    thread_snapshot_v3 will both record 0 for the padding
	 *    flags, we need some other bit which will be nonzero in the
	 *    flags to disambiguate.
	 *
	 *    This is why we hardcode a special case for
	 *    STACKSHOT_KCTYPE_THREAD_SNAPSHOT into the iterator
	 *    functions below. There is only a finite number of such
	 *    hardcodings which will ever be needed. They can occur
	 *    when:
	 *
	 *    * We have a legacy structure that predates padding flags
	 *
	 *    * which we want to extend without changing the kcdata type
	 *
	 *    * by only so many bytes as would fit in the space that
	 *      was previously unused padding.
	 *
	 * For containers (KCDATA_TYPE_CONTAINER_BEGIN):
	 *    container_id = flags
	 *
	 * For arrays (KCDATA_TYPE_ARRAY_PAD*):
	 *    element_count = flags & UINT32_MAX
	 *    element_type  = (flags >> 32) & UINT32_MAX
	 */
	uint64_t flags;
	char data[]; /* must be at the end */
};

typedef struct kcdata_item * kcdata_item_t;
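
/*
 * A worked example of the flags encoding above (illustrative values, not from
 * any particular buffer): a padded struct item whose payload was rounded up by
 * 8 bytes would carry flags = 0x88, so
 *
 *     padding     = 0x88 & 0xf         = 8
 *     has_padding = (0x88 & 0x80) >> 7 = 1
 *
 * and readers should subtract those 8 bytes from item->size to recover the
 * struct's true length.
 */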

enum KCDATA_SUBTYPE_TYPES {
	KC_ST_CHAR = 1,
	KC_ST_INT8,
	KC_ST_UINT8,
	KC_ST_INT16,
	KC_ST_UINT16,
	KC_ST_INT32,
	KC_ST_UINT32,
	KC_ST_INT64,
	KC_ST_UINT64
};
typedef enum KCDATA_SUBTYPE_TYPES kctype_subtype_t;

/*
 * A subtype description structure that defines
 * how compound data is laid out in memory. This
 * provides on-the-fly definition of types and their
 * consumption by parsers.
 */
struct kcdata_subtype_descriptor {
	uint8_t kcs_flags;
#define KCS_SUBTYPE_FLAGS_NONE 0x0
#define KCS_SUBTYPE_FLAGS_ARRAY 0x1
/* Force struct type even if only one element.
 *
 * Normally a kcdata_type_definition is treated as a structure if it has
 * more than one subtype descriptor. Otherwise it is treated as a simple
 * type. For example libkdd will represent a simple integer 42 as simply
 * 42, but it will represent a structure containing an integer 42 as
 * {"field_name": 42}.
 *
 * If a kcdata_type_definition has only a single subtype, then it will be
 * treated as a structure iff KCS_SUBTYPE_FLAGS_STRUCT is set. If it has
 * multiple subtypes, it will always be treated as a structure.
 *
 * KCS_SUBTYPE_FLAGS_MERGE has the opposite effect. If this flag is used then
 * even if there are multiple elements, they will all be treated as individual
 * properties of the parent dictionary.
 */
#define KCS_SUBTYPE_FLAGS_STRUCT 0x2 /* force struct type even if only one element */
#define KCS_SUBTYPE_FLAGS_MERGE 0x4  /* treat as multiple elements of parents instead of struct */
	uint8_t  kcs_elem_type;                /* restricted to kctype_subtype_t */
	uint16_t kcs_elem_offset;              /* offset in struct where data is found */
	uint32_t kcs_elem_size;                /* size of element (or) packed state for array type */
	char     kcs_name[KCDATA_DESC_MAXLEN]; /* max 31 bytes for name of field */
};

typedef struct kcdata_subtype_descriptor * kcdata_subtype_descriptor_t;

/*
 * In case of an array of basic C types in kctype_subtype_t,
 * size is packed in the lower 16 bits and
 * count is packed in the upper 16 bits of the kcs_elem_size field.
 */
#define KCS_SUBTYPE_PACK_SIZE(e_count, e_size) (((e_count) & 0xffffu) << 16 | ((e_size) & 0xffffu))

static inline uint32_t
kcs_get_elem_size(kcdata_subtype_descriptor_t d)
{
	if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) {
		/* size is composed as ((count & 0xffff) << 16 | (elem_size & 0xffff)) */
		return (uint32_t)((d->kcs_elem_size & 0xffff) * ((d->kcs_elem_size & 0xffff0000) >> 16));
	}
	return d->kcs_elem_size;
}

static inline uint32_t
kcs_get_elem_count(kcdata_subtype_descriptor_t d)
{
	if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) {
		return (d->kcs_elem_size >> 16) & 0xffff;
	}
	return 1;
}

static inline int
kcs_set_elem_size(kcdata_subtype_descriptor_t d, uint32_t size, uint32_t count)
{
	if (count > 1) {
		/* means we are setting up an array */
		if (size > 0xffff || count > 0xffff) {
			return -1; /* invalid argument */
		}
		d->kcs_elem_size = ((count & 0xffff) << 16 | (size & 0xffff));
	} else {
		d->kcs_elem_size = size;
	}
	return 0;
}
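
/*
 * Worked example (values follow directly from the helpers above): a descriptor
 * whose kcs_elem_size was filled with KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t))
 * stores count = 4 in the upper 16 bits and size = 8 in the lower 16 bits, so
 * kcs_get_elem_count() returns 4 and kcs_get_elem_size() returns 4 * 8 = 32,
 * the total number of payload bytes occupied by the array field.
 */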

struct kcdata_type_definition {
	uint32_t kct_type_identifier;
	uint32_t kct_num_elements;
	char kct_name[KCDATA_DESC_MAXLEN];
	struct kcdata_subtype_descriptor kct_elements[];
};

/* chunk type definitions. 0 - 0x7ff are reserved and defined here
 * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes
 * in STACKSHOT_KCTYPE_* types.
 */

/*
 * Types with description value.
 * These will have a KCDATA_DESC_MAXLEN-1 length string description
 * and the remaining kcdata_iter_size() - KCDATA_DESC_MAXLEN bytes as data.
 */
#define KCDATA_TYPE_INVALID 0x0u
#define KCDATA_TYPE_STRING_DESC 0x1u
#define KCDATA_TYPE_UINT32_DESC 0x2u
#define KCDATA_TYPE_UINT64_DESC 0x3u
#define KCDATA_TYPE_INT32_DESC 0x4u
#define KCDATA_TYPE_INT64_DESC 0x5u
#define KCDATA_TYPE_BINDATA_DESC 0x6u

/*
 * Compound type definitions
 */
#define KCDATA_TYPE_ARRAY 0x11u         /* Array of data OBSOLETE DONT USE THIS */
#define KCDATA_TYPE_TYPEDEFINTION 0x12u /* Meta type that describes a type on the fly. */
#define KCDATA_TYPE_CONTAINER_BEGIN \
	0x13u /* Container type which has corresponding CONTAINER_END header. \
	       * KCDATA_TYPE_CONTAINER_BEGIN has type in the data segment.    \
	       * Both headers have (uint64_t) ID for matching up nested data. \
	       */
#define KCDATA_TYPE_CONTAINER_END 0x14u

#define KCDATA_TYPE_ARRAY_PAD0 0x20u /* Array of data with 0 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD1 0x21u /* Array of data with 1 byte of padding */
#define KCDATA_TYPE_ARRAY_PAD2 0x22u /* Array of data with 2 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD3 0x23u /* Array of data with 3 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD4 0x24u /* Array of data with 4 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD5 0x25u /* Array of data with 5 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD6 0x26u /* Array of data with 6 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD7 0x27u /* Array of data with 7 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD8 0x28u /* Array of data with 8 bytes of padding */
#define KCDATA_TYPE_ARRAY_PAD9 0x29u /* Array of data with 9 bytes of padding */
#define KCDATA_TYPE_ARRAY_PADa 0x2au /* Array of data with 0xa bytes of padding */
#define KCDATA_TYPE_ARRAY_PADb 0x2bu /* Array of data with 0xb bytes of padding */
#define KCDATA_TYPE_ARRAY_PADc 0x2cu /* Array of data with 0xc bytes of padding */
#define KCDATA_TYPE_ARRAY_PADd 0x2du /* Array of data with 0xd bytes of padding */
#define KCDATA_TYPE_ARRAY_PADe 0x2eu /* Array of data with 0xe bytes of padding */
#define KCDATA_TYPE_ARRAY_PADf 0x2fu /* Array of data with 0xf bytes of padding */

/*
 * Generic data types that are most commonly used
 */
#define KCDATA_TYPE_LIBRARY_LOADINFO 0x30u   /* struct dyld_uuid_info_32 */
#define KCDATA_TYPE_LIBRARY_LOADINFO64 0x31u /* struct dyld_uuid_info_64 */
#define KCDATA_TYPE_TIMEBASE 0x32u           /* struct mach_timebase_info */
#define KCDATA_TYPE_MACH_ABSOLUTE_TIME 0x33u /* uint64_t */
#define KCDATA_TYPE_TIMEVAL 0x34u            /* struct timeval64 */
#define KCDATA_TYPE_USECS_SINCE_EPOCH 0x35u  /* time in usecs uint64_t */
#define KCDATA_TYPE_PID 0x36u                /* int32_t */
#define KCDATA_TYPE_PROCNAME 0x37u           /* char * */
#define KCDATA_TYPE_NESTED_KCDATA 0x38u      /* nested kcdata buffer */
#define KCDATA_TYPE_LIBRARY_AOTINFO 0x39u    /* struct user64_dyld_aot_info */

#define KCDATA_TYPE_BUFFER_END 0xF19158EDu

/* MAGIC numbers defined for each class of chunked data
 *
 * To future-proof against big-endian arches, make sure none of these magic
 * numbers are byteswaps of each other
 */

#define KCDATA_BUFFER_BEGIN_CRASHINFO 0xDEADF157u       /* owner: corpses/task_corpse.h */
                                                        /* type-range: 0x800 - 0x8ff */
#define KCDATA_BUFFER_BEGIN_STACKSHOT 0x59a25807u       /* owner: sys/stackshot.h */
                                                        /* type-range: 0x900 - 0x93f */
#define KCDATA_BUFFER_BEGIN_COMPRESSED 0x434f4d50u      /* owner: sys/stackshot.h */
                                                        /* type-range: 0x900 - 0x93f */
#define KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT 0xDE17A59Au /* owner: sys/stackshot.h */
                                                        /* type-range: 0x940 - 0x9ff */
#define KCDATA_BUFFER_BEGIN_OS_REASON 0x53A20900u       /* owner: sys/reason.h */
                                                        /* type-range: 0x1000-0x103f */
#define KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG 0x1e21c09fu  /* owner: osfmk/tests/kernel_tests.c */
                                                        /* type-range: 0x1040-0x105f */

/* next type range number available 0x1060 */
/**************** definitions for XNUPOST *********************/
#define XNUPOST_KCTYPE_TESTCONFIG 0x1040

/**************** definitions for stackshot *********************/

/* This value must always match IO_NUM_PRIORITIES defined in thread_info.h */
#define STACKSHOT_IO_NUM_PRIORITIES 4
/* This value must always match MAXTHREADNAMESIZE used in bsd */
#define STACKSHOT_MAX_THREAD_NAME_SIZE 64

/*
 * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes
 * in STACKSHOT_KCTYPE_* types.
 */
#define STACKSHOT_KCTYPE_IOSTATS 0x901u               /* io_stats_snapshot */
#define STACKSHOT_KCTYPE_GLOBAL_MEM_STATS 0x902u      /* struct mem_and_io_snapshot */
#define STACKSHOT_KCCONTAINER_TASK 0x903u
#define STACKSHOT_KCCONTAINER_THREAD 0x904u
#define STACKSHOT_KCTYPE_TASK_SNAPSHOT 0x905u         /* task_snapshot_v2 */
#define STACKSHOT_KCTYPE_THREAD_SNAPSHOT 0x906u       /* thread_snapshot_v2, thread_snapshot_v3 */
#define STACKSHOT_KCTYPE_DONATING_PIDS 0x907u         /* int[] */
#define STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO 0x908u  /* dyld_shared_cache_loadinfo */
#define STACKSHOT_KCTYPE_THREAD_NAME 0x909u           /* char[] */
#define STACKSHOT_KCTYPE_KERN_STACKFRAME 0x90Au       /* struct stack_snapshot_frame32 */
#define STACKSHOT_KCTYPE_KERN_STACKFRAME64 0x90Bu     /* struct stack_snapshot_frame64 */
#define STACKSHOT_KCTYPE_USER_STACKFRAME 0x90Cu       /* struct stack_snapshot_frame32 */
#define STACKSHOT_KCTYPE_USER_STACKFRAME64 0x90Du     /* struct stack_snapshot_frame64 */
#define STACKSHOT_KCTYPE_BOOTARGS 0x90Eu              /* boot args string */
#define STACKSHOT_KCTYPE_OSVERSION 0x90Fu             /* os version string */
#define STACKSHOT_KCTYPE_KERN_PAGE_SIZE 0x910u        /* kernel page size in uint32_t */
#define STACKSHOT_KCTYPE_JETSAM_LEVEL 0x911u          /* jetsam level in uint32_t */
#define STACKSHOT_KCTYPE_DELTA_SINCE_TIMESTAMP 0x912u /* timestamp used for the delta stackshot */
#define STACKSHOT_KCTYPE_KERN_STACKLR 0x913u          /* uint32_t */
#define STACKSHOT_KCTYPE_KERN_STACKLR64 0x914u        /* uint64_t */
#define STACKSHOT_KCTYPE_USER_STACKLR 0x915u          /* uint32_t */
#define STACKSHOT_KCTYPE_USER_STACKLR64 0x916u        /* uint64_t */
#define STACKSHOT_KCTYPE_NONRUNNABLE_TIDS 0x917u      /* uint64_t */
#define STACKSHOT_KCTYPE_NONRUNNABLE_TASKS 0x918u     /* uint64_t */
#define STACKSHOT_KCTYPE_CPU_TIMES 0x919u             /* struct stackshot_cpu_times or stackshot_cpu_times_v2 */
#define STACKSHOT_KCTYPE_STACKSHOT_DURATION 0x91au    /* struct stackshot_duration */
#define STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS 0x91bu /* struct stackshot_fault_stats */
#define STACKSHOT_KCTYPE_KERNELCACHE_LOADINFO 0x91cu  /* kernelcache UUID -- same as KCDATA_TYPE_LIBRARY_LOADINFO64 */
#define STACKSHOT_KCTYPE_THREAD_WAITINFO 0x91du       /* struct stackshot_thread_waitinfo */
#define STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT 0x91eu /* struct thread_group_snapshot or thread_group_snapshot_v2 */
#define STACKSHOT_KCTYPE_THREAD_GROUP 0x91fu          /* uint64_t */
#define STACKSHOT_KCTYPE_JETSAM_COALITION_SNAPSHOT 0x920u /* struct jetsam_coalition_snapshot */
#define STACKSHOT_KCTYPE_JETSAM_COALITION 0x921u      /* uint64_t */
#define STACKSHOT_KCTYPE_THREAD_POLICY_VERSION 0x922u /* THREAD_POLICY_INTERNAL_STRUCT_VERSION in uint32 */
#define STACKSHOT_KCTYPE_INSTRS_CYCLES 0x923u         /* struct instrs_cycles_snapshot */
#define STACKSHOT_KCTYPE_USER_STACKTOP 0x924u         /* struct stack_snapshot_stacktop */
#define STACKSHOT_KCTYPE_ASID 0x925u                  /* uint32_t */
#define STACKSHOT_KCTYPE_PAGE_TABLES 0x926u           /* uint64_t */
#define STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT 0x927u /* same as KCDATA_TYPE_LIBRARY_LOADINFO64 */
#define STACKSHOT_KCTYPE_THREAD_DISPATCH_QUEUE_LABEL 0x928u /* dispatch queue label */
#define STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO 0x929u  /* struct stackshot_thread_turnstileinfo */
#define STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE 0x92au /* struct stackshot_cpu_architecture */
#define STACKSHOT_KCTYPE_LATENCY_INFO 0x92bu          /* struct stackshot_latency_collection */
#define STACKSHOT_KCTYPE_LATENCY_INFO_TASK 0x92cu     /* struct stackshot_latency_task */
#define STACKSHOT_KCTYPE_LATENCY_INFO_THREAD 0x92du   /* struct stackshot_latency_thread */
#define STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC 0x92eu  /* TEXT_EXEC load info -- same as KCDATA_TYPE_LIBRARY_LOADINFO64 */
#define STACKSHOT_KCTYPE_AOTCACHE_LOADINFO 0x92fu     /* struct dyld_aot_cache_uuid_info */

#define STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT 0x940u   /* task_delta_snapshot_v2 */
#define STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT 0x941u /* thread_delta_snapshot_v* */

struct stack_snapshot_frame32 {
	uint32_t lr;
	uint32_t sp;
};

struct stack_snapshot_frame64 {
	uint64_t lr;
	uint64_t sp;
};

struct dyld_uuid_info_32 {
	uint32_t imageLoadAddress; /* base address image is mapped at */
	uuid_t   imageUUID;
};

struct dyld_uuid_info_64 {
	uint64_t imageLoadAddress; /* XXX image slide */
	uuid_t   imageUUID;
};

/*
 * N.B.: Newer kernels output dyld_shared_cache_loadinfo structures
 * instead of this, since the field names match their contents better.
 */
struct dyld_uuid_info_64_v2 {
	uint64_t imageLoadAddress; /* XXX image slide */
	uuid_t   imageUUID;
	/* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */
	uint64_t imageSlidBaseAddress; /* slid base address or slid first mapping of image */
};

/*
 * This is the renamed version of dyld_uuid_info_64 with more accurate
 * field names, for STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO. Any users
 * must be aware of the dyld_uuid_info_64* version history and ensure
 * the fields they are accessing are within the actual bounds.
 *
 * OLD_FIELD              NEW_FIELD
 * imageLoadAddress       sharedCacheSlide
 * imageUUID              sharedCacheUUID
 * imageSlidBaseAddress   sharedCacheUnreliableSlidBaseAddress
 * -                      sharedCacheSlidFirstMapping
 */
struct dyld_shared_cache_loadinfo {
	uint64_t sharedCacheSlide; /* image slide value */
	uuid_t   sharedCacheUUID;
	/* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */
	uint64_t sharedCacheUnreliableSlidBaseAddress; /* for backwards-compatibility; use sharedCacheSlidFirstMapping if available */
	/* end of version 2 of dyld_uuid_info_64. sizeof v2 was 32 */
	uint64_t sharedCacheSlidFirstMapping; /* slid base address of first mapping */
};

struct dyld_aot_cache_uuid_info {
	uint64_t x86SlidBaseAddress; /* slid first mapping address of x86 shared cache */
	uuid_t   x86UUID;            /* UUID of x86 shared cache */
	uint64_t aotSlidBaseAddress; /* slid first mapping address of aot cache */
	uuid_t   aotUUID;            /* UUID of aot shared cache */
};

struct user32_dyld_uuid_info {
	uint32_t imageLoadAddress; /* base address image is mapped into */
	uuid_t   imageUUID;        /* UUID of image */
};

struct user64_dyld_uuid_info {
	uint64_t imageLoadAddress; /* base address image is mapped into */
	uuid_t   imageUUID;        /* UUID of image */
};

#define DYLD_AOT_IMAGE_KEY_SIZE 32

struct user64_dyld_aot_info {
	uint64_t x86LoadAddress;
	uint64_t aotLoadAddress;
	uint64_t aotImageSize;
	uint8_t  aotImageKey[DYLD_AOT_IMAGE_KEY_SIZE];
};

enum task_snapshot_flags {
	/* k{User,Kernel}64_p (values 0x1 and 0x2) are defined in generic_snapshot_flags */
	kTaskRsrcFlagged                      = 0x4, // In the EXC_RESOURCE danger zone?
	kTerminatedSnapshot                   = 0x8,
	kPidSuspended                         = 0x10, // true for suspended task
	kFrozen                               = 0x20, // true for hibernated task (along with pidsuspended)
	kTaskDarwinBG                         = 0x40,
	kTaskExtDarwinBG                      = 0x80,
	kTaskVisVisible                       = 0x100,
	kTaskVisNonvisible                    = 0x200,
	kTaskIsForeground                     = 0x400,
	kTaskIsBoosted                        = 0x800,
	kTaskIsSuppressed                     = 0x1000,
	kTaskIsTimerThrottled                 = 0x2000, /* deprecated */
	kTaskIsImpDonor                       = 0x4000,
	kTaskIsLiveImpDonor                   = 0x8000,
	kTaskIsDirty                          = 0x10000,
	kTaskWqExceededConstrainedThreadLimit = 0x20000,
	kTaskWqExceededTotalThreadLimit       = 0x40000,
	kTaskWqFlagsAvailable                 = 0x80000,
	kTaskUUIDInfoFaultedIn                = 0x100000, /* successfully faulted in some UUID info */
	kTaskUUIDInfoMissing                  = 0x200000, /* some UUID info was paged out */
	kTaskUUIDInfoTriedFault               = 0x400000, /* tried to fault in UUID info */
	kTaskSharedRegionInfoUnavailable      = 0x800000, /* shared region info unavailable */
	kTaskTALEngaged                       = 0x1000000,
	/* 0x2000000 unused */
	kTaskIsDirtyTracked                   = 0x4000000,
	kTaskAllowIdleExit                    = 0x8000000,
	kTaskIsTranslated                     = 0x10000000,
	kTaskSharedRegionNone                 = 0x20000000, /* task doesn't have a shared region */
	kTaskSharedRegionSystem               = 0x40000000, /* task is attached to system shared region */
	kTaskSharedRegionOther                = 0x80000000, /* task is attached to a different shared region */
};

enum thread_snapshot_flags {
	/* k{User,Kernel}64_p (values 0x1 and 0x2) are defined in generic_snapshot_flags */
	kHasDispatchSerial  = 0x4,
	kStacksPCOnly       = 0x8,    /* Stack traces have no frame pointers. */
	kThreadDarwinBG     = 0x10,   /* Thread is darwinbg */
	kThreadIOPassive    = 0x20,   /* Thread uses passive IO */
	kThreadSuspended    = 0x40,   /* Thread is suspended */
	kThreadTruncatedBT  = 0x80,   /* Unmapped pages caused truncated backtrace */
	kGlobalForcedIdle   = 0x100,  /* Thread performs global forced idle */
	kThreadFaultedBT    = 0x200,  /* Some thread stack pages were faulted in as part of BT */
	kThreadTriedFaultBT = 0x400,  /* We tried to fault in thread stack pages as part of BT */
	kThreadOnCore       = 0x800,  /* Thread was on-core when we entered debugger context */
	kThreadIdleWorker   = 0x1000, /* Thread is an idle libpthread worker thread */
	kThreadMain         = 0x2000, /* Thread is the main thread */
};

struct mem_and_io_snapshot {
	uint32_t snapshot_magic;
	uint32_t free_pages;
	uint32_t active_pages;
	uint32_t inactive_pages;
	uint32_t purgeable_pages;
	uint32_t wired_pages;
	uint32_t speculative_pages;
	uint32_t throttled_pages;
	uint32_t filebacked_pages;
	uint32_t compressions;
	uint32_t decompressions;
	uint32_t compressor_size;
	int32_t  busy_buffer_count;
	uint32_t pages_wanted;
	uint32_t pages_reclaimed;
	uint8_t  pages_wanted_reclaimed_valid; // did mach_vm_pressure_monitor succeed?
} __attribute__((packed));

/* SS_TH_* macros are for ths_state */
#define SS_TH_WAIT 0x01       /* queued for waiting */
#define SS_TH_SUSP 0x02       /* stopped or requested to stop */
#define SS_TH_RUN 0x04        /* running or on runq */
#define SS_TH_UNINT 0x08      /* waiting uninterruptibly */
#define SS_TH_TERMINATE 0x10  /* halted at termination */
#define SS_TH_TERMINATE2 0x20 /* added to termination queue */
#define SS_TH_IDLE 0x80       /* idling processor */

struct thread_snapshot_v2 {
	uint64_t ths_thread_id;
	uint64_t ths_wait_event;
	uint64_t ths_continuation;
	uint64_t ths_total_syscalls;
	uint64_t ths_voucher_identifier;
	uint64_t ths_dqserialnum;
	uint64_t ths_user_time;
	uint64_t ths_sys_time;
	uint64_t ths_ss_flags;
	uint64_t ths_last_run_time;
	uint64_t ths_last_made_runnable_time;
	uint32_t ths_state;
	uint32_t ths_sched_flags;
	int16_t  ths_base_priority;
	int16_t  ths_sched_priority;
	uint8_t  ths_eqos;
	uint8_t  ths_rqos;
	uint8_t  ths_rqos_override;
	uint8_t  ths_io_tier;
} __attribute__((packed));

struct thread_snapshot_v3 {
	uint64_t ths_thread_id;
	uint64_t ths_wait_event;
	uint64_t ths_continuation;
	uint64_t ths_total_syscalls;
	uint64_t ths_voucher_identifier;
	uint64_t ths_dqserialnum;
	uint64_t ths_user_time;
	uint64_t ths_sys_time;
	uint64_t ths_ss_flags;
	uint64_t ths_last_run_time;
	uint64_t ths_last_made_runnable_time;
	uint32_t ths_state;
	uint32_t ths_sched_flags;
	int16_t  ths_base_priority;
	int16_t  ths_sched_priority;
	uint8_t  ths_eqos;
	uint8_t  ths_rqos;
	uint8_t  ths_rqos_override;
	uint8_t  ths_io_tier;
	uint64_t ths_thread_t;
} __attribute__((packed));

struct thread_snapshot_v4 {
	uint64_t ths_thread_id;
	uint64_t ths_wait_event;
	uint64_t ths_continuation;
	uint64_t ths_total_syscalls;
	uint64_t ths_voucher_identifier;
	uint64_t ths_dqserialnum;
	uint64_t ths_user_time;
	uint64_t ths_sys_time;
	uint64_t ths_ss_flags;
	uint64_t ths_last_run_time;
	uint64_t ths_last_made_runnable_time;
	uint32_t ths_state;
	uint32_t ths_sched_flags;
	int16_t  ths_base_priority;
	int16_t  ths_sched_priority;
	uint8_t  ths_eqos;
	uint8_t  ths_rqos;
	uint8_t  ths_rqos_override;
	uint8_t  ths_io_tier;
	uint64_t ths_thread_t;
	uint64_t ths_requested_policy;
	uint64_t ths_effective_policy;
} __attribute__((packed));

struct thread_group_snapshot {
	uint64_t tgs_id;
	char     tgs_name[16];
} __attribute__((packed));

enum thread_group_flags {
	kThreadGroupEfficient = 0x1,
	kThreadGroupUIApp     = 0x2
};

struct thread_group_snapshot_v2 {
	uint64_t tgs_id;
	char     tgs_name[16];
	uint64_t tgs_flags;
} __attribute__((packed));

enum coalition_flags {
	kCoalitionTermRequested = 0x1,
	kCoalitionTerminated    = 0x2,
	kCoalitionReaped        = 0x4,
	kCoalitionPrivileged    = 0x8,
};

struct jetsam_coalition_snapshot {
	uint64_t jcs_id;
	uint64_t jcs_flags;
	uint64_t jcs_thread_group;
	uint64_t jcs_leader_task_uniqueid;
} __attribute__((packed));

struct instrs_cycles_snapshot {
	uint64_t ics_instructions;
	uint64_t ics_cycles;
} __attribute__((packed));

struct thread_delta_snapshot_v2 {
	uint64_t tds_thread_id;
	uint64_t tds_voucher_identifier;
	uint64_t tds_ss_flags;
	uint64_t tds_last_made_runnable_time;
	uint32_t tds_state;
	uint32_t tds_sched_flags;
	int16_t  tds_base_priority;
	int16_t  tds_sched_priority;
	uint8_t  tds_eqos;
	uint8_t  tds_rqos;
	uint8_t  tds_rqos_override;
	uint8_t  tds_io_tier;
} __attribute__ ((packed));

struct thread_delta_snapshot_v3 {
	uint64_t tds_thread_id;
	uint64_t tds_voucher_identifier;
	uint64_t tds_ss_flags;
	uint64_t tds_last_made_runnable_time;
	uint32_t tds_state;
	uint32_t tds_sched_flags;
	int16_t  tds_base_priority;
	int16_t  tds_sched_priority;
	uint8_t  tds_eqos;
	uint8_t  tds_rqos;
	uint8_t  tds_rqos_override;
	uint8_t  tds_io_tier;
	uint64_t tds_requested_policy;
	uint64_t tds_effective_policy;
} __attribute__ ((packed));

struct io_stats_snapshot {
	/*
	 * XXX: These fields must be together.
	 */
	uint64_t ss_disk_reads_count;
	uint64_t ss_disk_reads_size;
	uint64_t ss_disk_writes_count;
	uint64_t ss_disk_writes_size;
	uint64_t ss_io_priority_count[STACKSHOT_IO_NUM_PRIORITIES];
	uint64_t ss_io_priority_size[STACKSHOT_IO_NUM_PRIORITIES];
	uint64_t ss_paging_count;
	uint64_t ss_paging_size;
	uint64_t ss_non_paging_count;
	uint64_t ss_non_paging_size;
	uint64_t ss_data_count;
	uint64_t ss_data_size;
	uint64_t ss_metadata_count;
	uint64_t ss_metadata_size;
	/* XXX: I/O Statistics end */
} __attribute__ ((packed));

struct task_snapshot_v2 {
	uint64_t ts_unique_pid;
	uint64_t ts_ss_flags;
	uint64_t ts_user_time_in_terminated_threads;
	uint64_t ts_system_time_in_terminated_threads;
	uint64_t ts_p_start_sec;
	uint64_t ts_task_size;
	uint64_t ts_max_resident_size;
	uint32_t ts_suspend_count;
	uint32_t ts_faults;
	uint32_t ts_pageins;
	uint32_t ts_cow_faults;
	uint32_t ts_was_throttled;
	uint32_t ts_did_throttle;
	uint32_t ts_latency_qos;
	int32_t  ts_pid;
	char     ts_p_comm[32];
} __attribute__ ((packed));

struct task_delta_snapshot_v2 {
	uint64_t tds_unique_pid;
	uint64_t tds_ss_flags;
	uint64_t tds_user_time_in_terminated_threads;
	uint64_t tds_system_time_in_terminated_threads;
	uint64_t tds_task_size;
	uint64_t tds_max_resident_size;
	uint32_t tds_suspend_count;
	uint32_t tds_faults;
	uint32_t tds_pageins;
	uint32_t tds_cow_faults;
	uint32_t tds_was_throttled;
	uint32_t tds_did_throttle;
	uint32_t tds_latency_qos;
} __attribute__ ((packed));

struct stackshot_cpu_times {
	uint64_t user_usec;
	uint64_t system_usec;
} __attribute__((packed));

struct stackshot_cpu_times_v2 {
	uint64_t user_usec;
	uint64_t system_usec;
	uint64_t runnable_usec;
} __attribute__((packed));

struct stackshot_duration {
	uint64_t stackshot_duration;
	uint64_t stackshot_duration_outer;
} __attribute__((packed));

struct stackshot_duration_v2 {
	uint64_t stackshot_duration;
	uint64_t stackshot_duration_outer;
	uint64_t stackshot_duration_prior;
} __attribute__((packed));

struct stackshot_fault_stats {
	uint32_t sfs_pages_faulted_in;      /* number of pages faulted in using KDP fault path */
	uint64_t sfs_time_spent_faulting;   /* MATUs spent faulting */
	uint64_t sfs_system_max_fault_time; /* MATUs fault time limit per stackshot */
	uint8_t  sfs_stopped_faulting;      /* we stopped decompressing because we hit the limit */
} __attribute__((packed));

typedef struct stackshot_thread_waitinfo {
	uint64_t owner;     /* The thread that owns the object */
	uint64_t waiter;    /* The thread that's waiting on the object */
	uint64_t context;   /* A context uniquely identifying the object */
	uint8_t  wait_type; /* The type of object that the thread is waiting on */
} __attribute__((packed)) thread_waitinfo_t;

typedef struct stackshot_thread_turnstileinfo {
	uint64_t waiter;            /* The thread that's waiting on the object */
	uint64_t turnstile_context; /* Associated data (either thread id, or workq addr) */
	uint8_t  turnstile_priority;
	uint8_t  number_of_hops;
#define STACKSHOT_TURNSTILE_STATUS_UNKNOWN         0x01 /* The final inheritor is unknown (bug?) */
#define STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ    0x02 /* A waitq was found to be locked */
#define STACKSHOT_TURNSTILE_STATUS_WORKQUEUE       0x04 /* The final inheritor is a workqueue */
#define STACKSHOT_TURNSTILE_STATUS_THREAD          0x08 /* The final inheritor is a thread */
#define STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK 0x10 /* blocked on task, didn't find thread */
#define STACKSHOT_TURNSTILE_STATUS_HELD_IPLOCK     0x20 /* the ip_lock was held */
	uint64_t turnstile_flags;
} __attribute__((packed)) thread_turnstileinfo_t;

#define STACKSHOT_WAITOWNER_KERNEL      (UINT64_MAX - 1)
#define STACKSHOT_WAITOWNER_PORT_LOCKED (UINT64_MAX - 2)
#define STACKSHOT_WAITOWNER_PSET_LOCKED (UINT64_MAX - 3)
#define STACKSHOT_WAITOWNER_INTRANSIT   (UINT64_MAX - 4)
#define STACKSHOT_WAITOWNER_MTXSPIN     (UINT64_MAX - 5)
#define STACKSHOT_WAITOWNER_THREQUESTED (UINT64_MAX - 6) /* workloop waiting for a new worker thread */
#define STACKSHOT_WAITOWNER_SUSPENDED   (UINT64_MAX - 7) /* workloop is suspended */

struct stackshot_cpu_architecture {
	int32_t cputype;
	int32_t cpusubtype;
} __attribute__((packed));

struct stack_snapshot_stacktop {
	uint64_t sp;
	uint8_t  stack_contents[8];
};

/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
struct stackshot_latency_collection {
	uint64_t latency_version;
	uint64_t setup_latency;
	uint64_t total_task_iteration_latency;
	uint64_t total_terminated_task_iteration_latency;
} __attribute__((packed));

/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
struct stackshot_latency_task {
	uint64_t task_uniqueid;
	uint64_t setup_latency;
	uint64_t task_thread_count_loop_latency;
	uint64_t task_thread_data_loop_latency;
	uint64_t cur_tsnap_latency;
	uint64_t pmap_latency;
	uint64_t bsd_proc_ids_latency;
	uint64_t misc_latency;
	uint64_t misc2_latency;
	uint64_t end_latency;
} __attribute__((packed));

/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */
struct stackshot_latency_thread {
	uint64_t cur_thsnap1_latency;
	uint64_t dispatch_serial_latency;
	uint64_t dispatch_label_latency;
	uint64_t cur_thsnap2_latency;
	uint64_t thread_name_latency;
	uint64_t sur_times_latency;
	uint64_t user_stack_latency;
	uint64_t kernel_stack_latency;
	uint64_t misc_latency;
} __attribute__((packed));

/**************** definitions for crashinfo *********************/

/*
 * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes
 * in TASK_CRASHINFO_* types.
 */

/* FIXME some of these types aren't clean (fixed width, packed, and defined *here*) */

struct crashinfo_proc_uniqidentifierinfo {
	uint8_t  p_uuid[16];  /* UUID of the main executable */
	uint64_t p_uniqueid;  /* 64 bit unique identifier for process */
	uint64_t p_puniqueid; /* unique identifier for process's parent */
	uint64_t p_reserve2;  /* reserved for future use */
	uint64_t p_reserve3;  /* reserved for future use */
	uint64_t p_reserve4;  /* reserved for future use */
} __attribute__((packed));

#define TASK_CRASHINFO_BEGIN       KCDATA_BUFFER_BEGIN_CRASHINFO
#define TASK_CRASHINFO_STRING_DESC KCDATA_TYPE_STRING_DESC
#define TASK_CRASHINFO_UINT32_DESC KCDATA_TYPE_UINT32_DESC
#define TASK_CRASHINFO_UINT64_DESC KCDATA_TYPE_UINT64_DESC

#define TASK_CRASHINFO_EXTMODINFO        0x801
#define TASK_CRASHINFO_BSDINFOWITHUNIQID 0x802 /* struct crashinfo_proc_uniqidentifierinfo */
#define TASK_CRASHINFO_TASKDYLD_INFO     0x803
#define TASK_CRASHINFO_UUID              0x804
#define TASK_CRASHINFO_PID               0x805
#define TASK_CRASHINFO_PPID              0x806
#define TASK_CRASHINFO_RUSAGE            0x807 /* struct rusage DEPRECATED do not use.
                                                * This struct has longs in it */
#define TASK_CRASHINFO_RUSAGE_INFO       0x808 /* struct rusage_info_v3 from resource.h */
#define TASK_CRASHINFO_PROC_NAME         0x809 /* char * */
#define TASK_CRASHINFO_PROC_STARTTIME    0x80B /* struct timeval64 */
#define TASK_CRASHINFO_USERSTACK         0x80C /* uint64_t */
#define TASK_CRASHINFO_ARGSLEN           0x80D
#define TASK_CRASHINFO_EXCEPTION_CODES   0x80E /* mach_exception_data_t */
#define TASK_CRASHINFO_PROC_PATH         0x80F /* string of len MAXPATHLEN */
#define TASK_CRASHINFO_PROC_CSFLAGS      0x810 /* uint32_t */
#define TASK_CRASHINFO_PROC_STATUS       0x811 /* char */
#define TASK_CRASHINFO_UID               0x812 /* uid_t */
#define TASK_CRASHINFO_GID               0x813 /* gid_t */
#define TASK_CRASHINFO_PROC_ARGC         0x814 /* int */
#define TASK_CRASHINFO_PROC_FLAGS        0x815 /* unsigned int */
#define TASK_CRASHINFO_CPUTYPE           0x816 /* cpu_type_t */
#define TASK_CRASHINFO_WORKQUEUEINFO     0x817 /* struct proc_workqueueinfo */
#define TASK_CRASHINFO_RESPONSIBLE_PID   0x818 /* pid_t */
#define TASK_CRASHINFO_DIRTY_FLAGS       0x819 /* int */
#define TASK_CRASHINFO_CRASHED_THREADID  0x81A /* uint64_t */
#define TASK_CRASHINFO_COALITION_ID      0x81B /* uint64_t */
#define TASK_CRASHINFO_UDATA_PTRS        0x81C /* uint64_t */
#define TASK_CRASHINFO_MEMORY_LIMIT      0x81D /* uint64_t */

#define TASK_CRASHINFO_LEDGER_INTERNAL                          0x81E /* uint64_t */
#define TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED               0x81F /* uint64_t */
#define TASK_CRASHINFO_LEDGER_IOKIT_MAPPED                      0x820 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING              0x821 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED   0x822 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE             0x823 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED  0x824 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PAGE_TABLE                        0x825 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT                    0x826 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX       0x827 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE               0x828 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED    0x829 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_WIRED_MEM                         0x82A /* uint64_t */
#define TASK_CRASHINFO_PROC_PERSONA_ID                          0x82B /* uid_t */
#define TASK_CRASHINFO_MEMORY_LIMIT_INCREASE                    0x82C /* uint32_t */
#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT                  0x82D /* uint64_t */
#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED       0x82E /* uint64_t */
#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT                   0x82F /* uint64_t */
#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED        0x830 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT                0x831 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED     0x832 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT                  0x833 /* uint64_t */
#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED       0x834 /* uint64_t */
#define TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY          0x835 /* int32_t */

#define TASK_CRASHINFO_END KCDATA_TYPE_BUFFER_END

/**************** definitions for os reasons *********************/

#define EXIT_REASON_SNAPSHOT          0x1001
#define EXIT_REASON_USER_DESC         0x1002 /* string description of reason */
#define EXIT_REASON_USER_PAYLOAD      0x1003 /* user payload data */
#define EXIT_REASON_CODESIGNING_INFO  0x1004
#define EXIT_REASON_WORKLOOP_ID       0x1005
#define EXIT_REASON_DISPATCH_QUEUE_NO 0x1006

struct exit_reason_snapshot {
	uint32_t ers_namespace;
	uint64_t ers_code;
	/* end of version 1 of exit_reason_snapshot. sizeof v1 was 12 */
	uint64_t ers_flags;
} __attribute__((packed));

#define EXIT_REASON_CODESIG_PATH_MAX 1024

struct codesigning_exit_reason_info {
	uint64_t ceri_virt_addr;
	uint64_t ceri_file_offset;
	char     ceri_pathname[EXIT_REASON_CODESIG_PATH_MAX];
	char     ceri_filename[EXIT_REASON_CODESIG_PATH_MAX];
	uint64_t ceri_codesig_modtime_secs;
	uint64_t ceri_codesig_modtime_nsecs;
	uint64_t ceri_page_modtime_secs;
	uint64_t ceri_page_modtime_nsecs;
	uint8_t  ceri_path_truncated;
	uint8_t  ceri_object_codesigned;
	uint8_t  ceri_page_codesig_validated;
	uint8_t  ceri_page_codesig_tainted;
	uint8_t  ceri_page_codesig_nx;
	uint8_t  ceri_page_wpmapped;
	uint8_t  ceri_page_slid;
	uint8_t  ceri_page_dirty;
	uint32_t ceri_page_shadow_depth;
} __attribute__((packed));

#define EXIT_REASON_USER_DESC_MAX_LEN 1024
#define EXIT_REASON_PAYLOAD_MAX_LEN   2048

/**************** safe iterators *********************/

typedef struct kcdata_iter {
	kcdata_item_t item;
	void *end;
} kcdata_iter_t;

static inline kcdata_iter_t
kcdata_iter(void *buffer, unsigned long size)
{
	kcdata_iter_t iter;
	iter.item = (kcdata_item_t) buffer;
	iter.end = (void*) (((uintptr_t)buffer) + size);
	return iter;
}

static inline kcdata_iter_t
kcdata_iter_unsafe(void *buffer) __attribute__((deprecated));

static inline kcdata_iter_t
kcdata_iter_unsafe(void *buffer)
{
	kcdata_iter_t iter;
	iter.item = (kcdata_item_t) buffer;
	iter.end = (void*) (uintptr_t) ~0;
	return iter;
}

static const kcdata_iter_t kcdata_invalid_iter = { .item = NULL, .end = NULL };

static inline int
kcdata_iter_valid(kcdata_iter_t iter)
{
	return
	    ((uintptr_t)iter.item + sizeof(struct kcdata_item) <= (uintptr_t)iter.end) &&
	    ((uintptr_t)iter.item + sizeof(struct kcdata_item) + iter.item->size <= (uintptr_t)iter.end);
}

static inline kcdata_iter_t
kcdata_iter_next(kcdata_iter_t iter)
{
	iter.item = (kcdata_item_t) (((uintptr_t)iter.item) + sizeof(struct kcdata_item) + (iter.item->size));
	return iter;
}

static inline uint32_t
kcdata_iter_type(kcdata_iter_t iter)
{
	if ((iter.item->type & ~0xfu) == KCDATA_TYPE_ARRAY_PAD0) {
		return KCDATA_TYPE_ARRAY;
	}
	return iter.item->type;
}

static inline uint32_t
kcdata_calc_padding(uint32_t size)
{
	/* calculate number of bytes to add to size to get something divisible by 16 */
	return (-size) & 0xf;
}

static inline uint32_t
kcdata_flags_get_padding(uint64_t flags)
{
	return (uint32_t)(flags & KCDATA_FLAGS_STRUCT_PADDING_MASK);
}

/* see comment above about has_padding */
static inline int
kcdata_iter_is_legacy_item(kcdata_iter_t iter, uint32_t legacy_size)
{
	uint32_t legacy_size_padded = legacy_size + kcdata_calc_padding(legacy_size);
	return iter.item->size == legacy_size_padded &&
	       (iter.item->flags & (KCDATA_FLAGS_STRUCT_PADDING_MASK | KCDATA_FLAGS_STRUCT_HAS_PADDING)) == 0;
}

static inline uint32_t
kcdata_iter_size(kcdata_iter_t iter)
{
	uint32_t legacy_size = 0;

	switch (kcdata_iter_type(iter)) {
	case KCDATA_TYPE_ARRAY:
	case KCDATA_TYPE_CONTAINER_BEGIN:
		return iter.item->size;
	case STACKSHOT_KCTYPE_THREAD_SNAPSHOT: {
		legacy_size = sizeof(struct thread_snapshot_v2);
		if (kcdata_iter_is_legacy_item(iter, legacy_size)) {
			return legacy_size;
		}
		goto not_legacy;
	}
	case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: {
		legacy_size = sizeof(struct dyld_uuid_info_64);
		if (kcdata_iter_is_legacy_item(iter, legacy_size)) {
			return legacy_size;
		}
		goto not_legacy;
	}
not_legacy:
	default:
		if (iter.item->size < kcdata_flags_get_padding(iter.item->flags)) {
			return 0;
		}
		return iter.item->size - kcdata_flags_get_padding(iter.item->flags);
	}
}

static inline uint64_t
kcdata_iter_flags(kcdata_iter_t iter)
{
	return iter.item->flags;
}

static inline void *
kcdata_iter_payload(kcdata_iter_t iter)
{
	return &iter.item->data;
}

static inline uint32_t
kcdata_iter_array_elem_type(kcdata_iter_t iter)
{
	return (uint32_t)((iter.item->flags >> 32) & UINT32_MAX);
}

static inline uint32_t
kcdata_iter_array_elem_count(kcdata_iter_t iter)
{
	return (uint32_t)((iter.item->flags) & UINT32_MAX);
}

/* KCDATA_TYPE_ARRAY is ambiguous about the size of the array elements. Size is
 * calculated as total_size / elements_count, but total size got padded out to a
 * 16 byte alignment. New kernels will generate KCDATA_TYPE_ARRAY_PAD* instead
 * to explicitly tell us how much padding was used. Here we have a fixed, never
 * to be altered list of the sizes of array elements that were used before I
 * discovered this issue. If you find a KCDATA_TYPE_ARRAY that is not one of
 * these types, treat it as invalid data. */

static inline uint32_t
kcdata_iter_array_size_switch(kcdata_iter_t iter)
{
	switch (kcdata_iter_array_elem_type(iter)) {
	case KCDATA_TYPE_LIBRARY_LOADINFO:
		return sizeof(struct dyld_uuid_info_32);
	case KCDATA_TYPE_LIBRARY_LOADINFO64:
		return sizeof(struct dyld_uuid_info_64);
	case STACKSHOT_KCTYPE_KERN_STACKFRAME:
	case STACKSHOT_KCTYPE_USER_STACKFRAME:
		return sizeof(struct stack_snapshot_frame32);
	case STACKSHOT_KCTYPE_KERN_STACKFRAME64:
	case STACKSHOT_KCTYPE_USER_STACKFRAME64:
		return sizeof(struct stack_snapshot_frame64);
	case STACKSHOT_KCTYPE_DONATING_PIDS:
		return sizeof(int32_t);
	case STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT:
		return sizeof(struct thread_delta_snapshot_v2);
	// This one is only here to make some unit tests work. It should be OK to
	// change if needed.
	case TASK_CRASHINFO_CRASHED_THREADID:
		return sizeof(uint64_t);
	default:
		return 0;
	}
}

static inline int
kcdata_iter_array_valid(kcdata_iter_t iter)
{
	if (!kcdata_iter_valid(iter)) {
		return 0;
	}
	if (kcdata_iter_type(iter) != KCDATA_TYPE_ARRAY) {
		return 0;
	}
	if (kcdata_iter_array_elem_count(iter) == 0) {
		return iter.item->size == 0;
	}
	if (iter.item->type == KCDATA_TYPE_ARRAY) {
		uint32_t elem_size = kcdata_iter_array_size_switch(iter);
		if (elem_size == 0) {
			return 0;
		}
		/* sizes get aligned to the nearest 16. */
		return
		    kcdata_iter_array_elem_count(iter) <= iter.item->size / elem_size &&
		    iter.item->size % kcdata_iter_array_elem_count(iter) < 16;
	} else {
		return
		    (iter.item->type & 0xf) <= iter.item->size &&
		    kcdata_iter_array_elem_count(iter) <= iter.item->size - (iter.item->type & 0xf) &&
		    (iter.item->size - (iter.item->type & 0xf)) % kcdata_iter_array_elem_count(iter) == 0;
	}
}

static inline uint32_t
kcdata_iter_array_elem_size(kcdata_iter_t iter)
{
	if (iter.item->type == KCDATA_TYPE_ARRAY) {
		return kcdata_iter_array_size_switch(iter);
	}
	if (kcdata_iter_array_elem_count(iter) == 0) {
		return 0;
	}
	return (iter.item->size - (iter.item->type & 0xf)) / kcdata_iter_array_elem_count(iter);
}

static inline int
kcdata_iter_container_valid(kcdata_iter_t iter)
{
	return
	    kcdata_iter_valid(iter) &&
	    kcdata_iter_type(iter) == KCDATA_TYPE_CONTAINER_BEGIN &&
	    iter.item->size >= sizeof(uint32_t);
}

static inline uint32_t
kcdata_iter_container_type(kcdata_iter_t iter)
{
	return *(uint32_t *) kcdata_iter_payload(iter);
}

static inline uint64_t
kcdata_iter_container_id(kcdata_iter_t iter)
{
	return iter.item->flags;
}

#define KCDATA_ITER_FOREACH(iter) for (; kcdata_iter_valid(iter) && iter.item->type != KCDATA_TYPE_BUFFER_END; iter = kcdata_iter_next(iter))
#define KCDATA_ITER_FOREACH_FAILED(iter) (!kcdata_iter_valid(iter) || (iter).item->type != KCDATA_TYPE_BUFFER_END)
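
/*
 * A minimal parsing sketch using the iterators above (illustrative only; the
 * buffer, its size, and handle_item() are placeholders supplied by the
 * caller):
 *
 *     kcdata_iter_t iter = kcdata_iter(buffer, size);
 *     if (!kcdata_iter_valid(iter) ||
 *         kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_STACKSHOT) {
 *         return;                      // not a (valid) stackshot buffer
 *     }
 *     iter = kcdata_iter_next(iter);   // step past the begin marker
 *     KCDATA_ITER_FOREACH(iter) {
 *         handle_item(kcdata_iter_type(iter),
 *                     kcdata_iter_payload(iter),
 *                     kcdata_iter_size(iter));
 *     }
 *     if (KCDATA_ITER_FOREACH_FAILED(iter)) {
 *         // the buffer was truncated or corrupt
 *     }
 */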

static inline kcdata_iter_t
kcdata_iter_find_type(kcdata_iter_t iter, uint32_t type)
{
	KCDATA_ITER_FOREACH(iter)
	{
		if (kcdata_iter_type(iter) == type) {
			return iter;
		}
	}
	return kcdata_invalid_iter;
}

static inline int
kcdata_iter_data_with_desc_valid(kcdata_iter_t iter, uint32_t minsize)
{
	return
	    kcdata_iter_valid(iter) &&
	    kcdata_iter_size(iter) >= KCDATA_DESC_MAXLEN + minsize &&
	    ((char*)kcdata_iter_payload(iter))[KCDATA_DESC_MAXLEN - 1] == 0;
}

static inline char *
kcdata_iter_string(kcdata_iter_t iter, uint32_t offset)
{
	if (offset > kcdata_iter_size(iter)) {
		return NULL;
	}
	uint32_t maxlen = kcdata_iter_size(iter) - offset;
	char *s = ((char*)kcdata_iter_payload(iter)) + offset;
	if (strnlen(s, maxlen) < maxlen) {
		return s;
	} else {
		return NULL;
	}
}

static inline void
kcdata_iter_get_data_with_desc(kcdata_iter_t iter, char **desc_ptr, void **data_ptr, uint32_t *size_ptr)
{
	if (desc_ptr) {
		*desc_ptr = (char *)kcdata_iter_payload(iter);
	}
	if (data_ptr) {
		*data_ptr = (void *)((uintptr_t)kcdata_iter_payload(iter) + KCDATA_DESC_MAXLEN);
	}
	if (size_ptr) {
		*size_ptr = kcdata_iter_size(iter) - KCDATA_DESC_MAXLEN;
	}
}