/*
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/kern_cdata.h>
#include <kern/kalloc.h>
#include <mach/mach_vm.h>

static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);

/*
 * Estimates how large a buffer should be allocated to hold num_items items of
 * known types with an overall payload length of payload_size.
 *
 * NOTE: This function will not give an accurate estimate for buffers that will
 * contain unknown types (those with string descriptions).
 */
uint32_t kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
{
    /*
     * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) bytes of padding.
     */
    uint32_t max_padding_bytes = num_items * (KCDATA_ALIGNMENT_SIZE - 1);
    uint32_t item_description_bytes = num_items * sizeof(struct kcdata_item);
    uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);

    return max_padding_bytes + item_description_bytes + begin_and_end_marker_bytes + payload_size;
}
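
/*
 * Illustrative usage sketch (not part of the original source). The item count
 * and payload size below are assumed example values:
 *
 *     uint32_t payload_size = 2 * sizeof(uint64_t);   // two fixed-size payloads
 *     uint32_t buf_size = kcdata_estimate_required_buffer_size(2, payload_size);
 *     // buf_size covers the per-item headers, worst-case alignment padding and
 *     // the BEGIN/END markers in addition to payload_size.
 */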

kcdata_descriptor_t kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
    kcdata_descriptor_t data = NULL;
    mach_vm_address_t user_addr = 0;

    data = kalloc(sizeof(struct kcdata_descriptor));
    if (data == NULL) {
        return NULL;
    }
    bzero(data, sizeof(struct kcdata_descriptor));
    data->kcd_addr_begin = buffer_addr_p;
    data->kcd_addr_end = buffer_addr_p;
    data->kcd_flags = (flags & KCFLAG_USE_COPYOUT) ? KCFLAG_USE_COPYOUT : KCFLAG_USE_MEMCOPY;
    data->kcd_length = size;

    /* Initialize the BEGIN header */
    if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
        kcdata_memory_destroy(data);
        return NULL;
    }

    return data;
}
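
/*
 * Illustrative usage sketch (not part of the original source): wrapping a
 * kernel-owned buffer in a descriptor and tearing it down. The buffer begin
 * type KCDATA_BUFFER_BEGIN_CRASHINFO is assumed here for illustration only.
 *
 *     uint32_t size = kcdata_estimate_required_buffer_size(2, 2 * sizeof(uint64_t));
 *     void *buf = kalloc(size);
 *     kcdata_descriptor_t kcd = kcdata_memory_alloc_init((mach_vm_address_t)buf,
 *         KCDATA_BUFFER_BEGIN_CRASHINFO, size, KCFLAG_USE_MEMCOPY);
 *     // ... add items ...
 *     kcdata_memory_destroy(kcd);   // frees only the descriptor, not buf
 *     kfree(buf, size);
 */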

kern_return_t kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
{
    mach_vm_address_t user_addr = 0;

    if (data == NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    bzero(data, sizeof(struct kcdata_descriptor));
    data->kcd_addr_begin = buffer_addr_p;
    data->kcd_addr_end = buffer_addr_p;
    data->kcd_flags = (flags & KCFLAG_USE_COPYOUT) ? KCFLAG_USE_COPYOUT : KCFLAG_USE_MEMCOPY;
    data->kcd_length = size;

    /* Initialize the BEGIN header */
    return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
}

void *kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
{
    if (data == NULL) {
        return NULL;
    }

    return (void *)data->kcd_addr_begin;
}

uint64_t kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
{
    assert(kcd != NULL);
    return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
}

/*
 * Free up the memory associated with kcdata
 */
kern_return_t kcdata_memory_destroy(kcdata_descriptor_t data)
{
    if (!data) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * data->kcd_addr_begin points to memory that is not tracked by the
     * kcdata lib, so it is not freed here.
     */
    kfree(data, sizeof(struct kcdata_descriptor));
    return KERN_SUCCESS;
}


/*
 * Routine: kcdata_get_memory_addr
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark the memory if it fails to populate the
 *       data in the middle of the operation.
 * params: data - pointer describing the crash info allocation
 *         type - type of data to be put. See corpse.h for defined types
 *         size - size requested. The header describes this size
 * returns: mach_vm_address_t address in user memory for copyout().
 */
kern_return_t
kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
{
    /* record the number of padding bytes in the lower 4 bits of the flags */
    uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
    return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
}
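
/*
 * Illustrative usage sketch (not part of the original source): reserve space
 * for one item, then copy the payload in. kcd is an initialized descriptor;
 * MY_INFO_TYPE and struct my_info are hypothetical names used only here.
 *
 *     mach_vm_address_t user_addr = 0;
 *     struct my_info info = { 0 };
 *     kern_return_t kr = kcdata_get_memory_addr(kcd, MY_INFO_TYPE, sizeof(info), &user_addr);
 *     if (kr == KERN_SUCCESS)
 *         kr = kcdata_memcpy(kcd, user_addr, &info, sizeof(info));
 */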

/*
 * Routine: kcdata_write_buffer_end
 *
 * Desc: Write the buffer end marker. This does not advance the end pointer in
 *       the kcdata_descriptor_t, so it may be used conservatively before
 *       additional data is added, as long as it is also called after the last
 *       time data is added.
 *
 * params: data - pointer describing the crash info allocation
 */
kern_return_t
kcdata_write_buffer_end(kcdata_descriptor_t data)
{
    struct kcdata_item info;
    bzero(&info, sizeof(info));
    info.type = KCDATA_TYPE_BUFFER_END;
    info.size = 0;
    return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
}

/*
 * Routine: kcdata_get_memory_addr_with_flavor
 * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
 */
static kern_return_t kcdata_get_memory_addr_with_flavor(
    kcdata_descriptor_t data,
    uint32_t type,
    uint32_t size,
    uint64_t flags,
    mach_vm_address_t *user_addr)
{
    struct kcdata_item info;
    uint32_t total_size;

    if (user_addr == NULL || data == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    /* make sure 16 byte aligned */
    size += kcdata_calc_padding(size);

    bzero(&info, sizeof(info));
    info.type = type;
    info.size = size;
    info.flags = flags;
    total_size = size + sizeof(info);

    /* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
    if (data->kcd_length < ((data->kcd_addr_end - data->kcd_addr_begin) + total_size + sizeof(info))) {
        return KERN_RESOURCE_SHORTAGE;
    }

    if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
        if (copyout(&info, data->kcd_addr_end, sizeof(info)))
            return KERN_NO_ACCESS;
    } else {
        memcpy((void *)data->kcd_addr_end, &info, sizeof(info));
    }

    data->kcd_addr_end += sizeof(info);
    *user_addr = data->kcd_addr_end;
    data->kcd_addr_end += size;

    if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
        /* setup the end header as well */
        return kcdata_write_buffer_end(data);
    } else {
        return KERN_SUCCESS;
    }
}

/*
 * Routine: kcdata_get_memory_addr_for_array
 * Desc: get memory address in the userspace memory for corpse info
 *       NOTE: The caller is responsible for zeroing the resulting memory or
 *       using other means to mark the memory if it fails to populate the
 *       data in the middle of the operation.
 * params: data - pointer describing the crash info allocation
 *         type_of_element - type of data to be put. See kern_cdata.h for defined types
 *         size_of_element - size of element. The header describes this size
 *         count - number of elements in the array.
 * returns: mach_vm_address_t address in user memory for copyout().
 */
kern_return_t kcdata_get_memory_addr_for_array(
    kcdata_descriptor_t data,
    uint32_t type_of_element,
    uint32_t size_of_element,
    uint32_t count,
    mach_vm_address_t *user_addr)
{
    /* for arrays we record the number of padding bytes as the low-order 4 bits
     * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
    uint64_t flags = type_of_element;
    flags = (flags << 32) | count;
    uint32_t total_size = count * size_of_element;
    uint32_t pad = kcdata_calc_padding(total_size);

    return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
}
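
/*
 * Illustrative usage sketch (not part of the original source): reserve one
 * array item for three elements and copy them in. MY_ELEMENT_TYPE is a
 * hypothetical type constant used only for this example.
 *
 *     mach_vm_address_t user_addr = 0;
 *     uint64_t elems[3] = { 0 };
 *     if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(kcd, MY_ELEMENT_TYPE,
 *             sizeof(uint64_t), 3, &user_addr)) {
 *         kcdata_memcpy(kcd, user_addr, &elems[0], 3 * sizeof(uint64_t));
 *     }
 */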

/*
 * Routine: kcdata_add_container_marker
 * Desc: Add a container marker in the buffer for type and identifier.
 * params: data - pointer describing the crash info allocation
 *         header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN, KCDATA_TYPE_CONTAINER_END)
 *         container_type - type of data to be put. See kern_cdata.h for defined types
 *         identifier - unique identifier. This is required to match nested containers.
 * returns: return value of kcdata_get_memory_addr()
 */
kern_return_t kcdata_add_container_marker(
    kcdata_descriptor_t data,
    uint32_t header_type,
    uint32_t container_type,
    uint64_t identifier)
{
    mach_vm_address_t user_addr;
    kern_return_t kr;
    assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
    uint32_t data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN) ? sizeof(uint32_t) : 0;
    kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
    if (kr != KERN_SUCCESS)
        return kr;

    if (data_size)
        kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
    return kr;
}
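
/*
 * Illustrative usage sketch (not part of the original source): group related
 * items in a container. MY_CONTAINER_TYPE is a hypothetical container type; the
 * same identifier must be passed to the BEGIN and END markers so consumers can
 * match them, and kcdata_undo_add_container_begin() (below) can revert the
 * BEGIN marker if populating the container fails immediately afterwards.
 *
 *     uint64_t container_id = 1;
 *     kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_BEGIN, MY_CONTAINER_TYPE, container_id);
 *     // ... add the items that belong to this container ...
 *     kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_END, MY_CONTAINER_TYPE, container_id);
 */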

/*
 * Routine: kcdata_undo_add_container_begin
 * Desc: call this after adding a container begin but before adding anything else to revert.
 */
kern_return_t
kcdata_undo_add_container_begin(kcdata_descriptor_t data)
{
    /*
     * the payload of a container begin is a single uint64_t. It is padded out
     * to 16 bytes.
     */
    const mach_vm_address_t padded_payload_size = 16;
    data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;

    if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
        /* setup the end header as well */
        return kcdata_write_buffer_end(data);
    } else {
        return KERN_SUCCESS;
    }
}

/*
 * Routine: kcdata_memcpy
 * Desc: a common function to copy data out based on either the copyout or memcpy flag
 * params: data - pointer describing the kcdata buffer
 *         dst_addr - destination address
 *         src_addr - source address
 *         size - size in bytes to copy.
 * returns: KERN_NO_ACCESS if copyout fails.
 */
kern_return_t kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, void *src_addr, uint32_t size)
{
    if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
        if (copyout(src_addr, dst_addr, size))
            return KERN_NO_ACCESS;
    } else {
        memcpy((void *)dst_addr, src_addr, size);
    }
    return KERN_SUCCESS;
}

/*
 * Routine: kcdata_add_type_definition
 * Desc: add a type definition to the kcdata buffer.
 *       see feature description in documentation above.
 * params: data - pointer describing the kcdata buffer
 *         type_id - unique type identifier for this data
 *         type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
 *         elements_array_addr - address of descriptors for each field in the struct
 *         elements_count - count of fields in the struct.
 * returns: return code from kcdata_get_memory_addr in case of failure.
 */
kern_return_t kcdata_add_type_definition(
    kcdata_descriptor_t data,
    uint32_t type_id,
    char *type_name,
    struct kcdata_subtype_descriptor *elements_array_addr,
    uint32_t elements_count)
{
    kern_return_t kr = KERN_SUCCESS;
    struct kcdata_type_definition kc_type_definition;
    mach_vm_address_t user_addr;
    uint32_t total_size = sizeof(struct kcdata_type_definition);
    bzero(&kc_type_definition, sizeof(kc_type_definition));

    if (strlen(type_name) >= KCDATA_DESC_MAXLEN)
        return KERN_INVALID_ARGUMENT;
    strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
    kc_type_definition.kct_num_elements = elements_count;
    kc_type_definition.kct_type_identifier = type_id;

    total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
    /* record number of padding bytes as lower 4 bits of flags */
    if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
                                                                 kcdata_calc_padding(total_size), &user_addr)))
        return kr;
    if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition))))
        return kr;
    user_addr += sizeof(struct kcdata_type_definition);
    if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor))))
        return kr;
    return kr;
}
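
/*
 * Illustrative usage sketch (not part of the original source): describe a
 * custom struct so userspace tools can decode items of that type. The type id,
 * name and field descriptors are assumed example values.
 *
 *     struct kcdata_subtype_descriptor fields[2];
 *     // ... fill in each field's name, element type, offset and size ...
 *     kcdata_add_type_definition(kcd, MY_CUSTOM_TYPE, "my_custom_type", &fields[0], 2);
 */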

#pragma pack(4)

/* Internal structs for convenience */
struct _uint64_with_description_data {
    char desc[KCDATA_DESC_MAXLEN];
    uint64_t data;
};

struct _uint32_with_description_data {
    char desc[KCDATA_DESC_MAXLEN];
    uint32_t data;
};

#pragma pack()

kern_return_t
kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
{
    if (strlen(description) >= KCDATA_DESC_MAXLEN)
        return KERN_INVALID_ARGUMENT;

    kern_return_t kr = 0;
    mach_vm_address_t user_addr;
    struct _uint64_with_description_data save_data;
    const uint64_t size_req = sizeof(save_data);
    bzero(&save_data, size_req);

    strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
    save_data.data = data;

    kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
    if (kr != KERN_SUCCESS)
        return kr;

    if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
        if (copyout(&save_data, user_addr, size_req))
            return KERN_NO_ACCESS;
    } else {
        memcpy((void *)user_addr, &save_data, size_req);
    }
    return KERN_SUCCESS;
}
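
/*
 * Illustrative usage sketch (not part of the original source): record a single
 * labeled value; a uint32_t variant follows below. The description string here
 * is an assumed example.
 *
 *     kcdata_add_uint64_with_description(kcd, mach_absolute_time(), "snapshot_time");
 */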

kern_return_t kcdata_add_uint32_with_description(
    kcdata_descriptor_t data_desc,
    uint32_t data,
    const char *description)
{
    assert(strlen(description) < KCDATA_DESC_MAXLEN);
    if (strlen(description) >= KCDATA_DESC_MAXLEN)
        return KERN_INVALID_ARGUMENT;
    kern_return_t kr = 0;
    mach_vm_address_t user_addr;
    struct _uint32_with_description_data save_data;
    const uint64_t size_req = sizeof(save_data);

    bzero(&save_data, size_req);
    strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
    save_data.data = data;

    kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
    if (kr != KERN_SUCCESS)
        return kr;
    if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
        if (copyout(&save_data, user_addr, size_req))
            return KERN_NO_ACCESS;
    } else {
        memcpy((void *)user_addr, &save_data, size_req);
    }
    return KERN_SUCCESS;
}


/* end buffer management api */