osfmk/kern/telemetry.c (from the xnu-4570.31.3 source release)
1 /*
2 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/host_priv.h>
29 #include <mach/host_special_ports.h>
30 #include <mach/mach_types.h>
31 #include <mach/telemetry_notification_server.h>
32
33 #include <kern/assert.h>
34 #include <kern/clock.h>
35 #include <kern/debug.h>
36 #include <kern/host.h>
37 #include <kern/kalloc.h>
38 #include <kern/kern_types.h>
39 #include <kern/locks.h>
40 #include <kern/misc_protos.h>
41 #include <kern/sched.h>
42 #include <kern/sched_prim.h>
43 #include <kern/telemetry.h>
44 #include <kern/timer_call.h>
45 #include <kern/policy_internal.h>
46 #include <kern/kcdata.h>
47
48 #include <pexpert/pexpert.h>
49
50 #include <vm/vm_kern.h>
51 #include <vm/vm_shared_region.h>
52
53 #include <kperf/callstack.h>
54 #include <kern/backtrace.h>
55
56 #include <sys/kdebug.h>
57 #include <uuid/uuid.h>
58 #include <kdp/kdp_dyld.h>
59
60 #define TELEMETRY_DEBUG 0
61
62 extern int proc_pid(void *);
63 extern char *proc_name_address(void *p);
64 extern uint64_t proc_uniqueid(void *p);
65 extern uint64_t proc_was_throttled(void *p);
66 extern uint64_t proc_did_throttle(void *p);
67 extern int proc_selfpid(void);
68 extern boolean_t task_did_exec(task_t task);
69 extern boolean_t task_is_exec_copy(task_t task);
70
71 struct micro_snapshot_buffer {
72 vm_offset_t buffer;
73 uint32_t size;
74 uint32_t current_position;
75 uint32_t end_point;
76 };
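/*
 * Field usage, as inferred from the code below (descriptive only):
 *   buffer           - kernel virtual address of the backing storage
 *   size             - total capacity of the buffer, in bytes
 *   current_position - byte offset at which the next record is written
 *   end_point        - offset marking the end of valid data; maintained
 *                      so a reader knows where the tail of the ring ends
 *                      after the write cursor has wrapped to offset 0
 */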
77
78 void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer);
79 int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark, struct micro_snapshot_buffer * current_buffer);
80
81 #define TELEMETRY_DEFAULT_SAMPLE_RATE (1) /* 1 sample every 1 second */
82 #define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024)
83 #define TELEMETRY_MAX_BUFFER_SIZE (64*1024)
84
85 #define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification
86 #define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication
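/*
 * Worked example with the default values above: a 16 KB buffer and a
 * 4 KB notification leeway mean telemetry_init() computes
 * telemetry_buffer_notify_at = 16384 - 4096 = 12288 bytes, i.e. userland
 * is notified once roughly 12 KB of samples have accumulated since the
 * last mark.
 */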
87
88 uint32_t telemetry_sample_rate = 0;
89 volatile boolean_t telemetry_needs_record = FALSE;
90 volatile boolean_t telemetry_needs_timer_arming_record = FALSE;
91
92 /*
93 * If TRUE, record micro-stackshot samples for all tasks.
94 * If FALSE, only sample tasks which are marked for telemetry.
95 */
96 boolean_t telemetry_sample_all_tasks = FALSE;
97 uint32_t telemetry_active_tasks = 0; // Number of tasks opted into telemetry
98
99 uint32_t telemetry_timestamp = 0;
100
101 /*
102  * The telemetry_buffer holds the timer samples and interrupt
103  * samples that are driven by compute_averages(). It will notify
104  * its client (if one exists) when it holds enough data to be
105  * worth flushing.
106  */
107 struct micro_snapshot_buffer telemetry_buffer = {0, 0, 0, 0};
108
109 int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
110 int telemetry_buffer_notify_at = 0;
111
112 lck_grp_t telemetry_lck_grp;
113 lck_mtx_t telemetry_mtx;
114
115 #define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while(0)
116 #define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx)
117 #define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while(0)
118
119 void telemetry_init(void)
120 {
121 kern_return_t ret;
122 uint32_t telemetry_notification_leeway;
123
124 lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
125 lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);
126
127 if (!PE_parse_boot_argn("telemetry_buffer_size", &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
128 telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
129 }
130
131 if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE)
132 telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;
133
134 ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size, VM_KERN_MEMORY_DIAG);
135 if (ret != KERN_SUCCESS) {
136 kprintf("Telemetry: Allocation failed: %d\n", ret);
137 return;
138 }
139 bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size);
140
141 if (!PE_parse_boot_argn("telemetry_notification_leeway", &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
142 /*
143 * By default, notify the user to collect the buffer when there is this much space left in the buffer.
144 */
145 telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
146 }
147 if (telemetry_notification_leeway >= telemetry_buffer.size) {
148 printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
149 telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
150 telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
151 }
152 telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;
153
154 if (!PE_parse_boot_argn("telemetry_sample_rate", &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
155 telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
156 }
157
158 /*
159 * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
160 */
161 if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
162
163 #if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG)
164 telemetry_sample_all_tasks = FALSE;
165 #else
166 telemetry_sample_all_tasks = TRUE;
167 #endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */
168
169 }
170
171 kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
172 (telemetry_sample_all_tasks) ? "all " : "",
173 telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
174 }
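/*
 * Illustrative boot-args line exercising the knobs parsed above (values
 * here are examples, not defaults or recommendations):
 *
 *   telemetry_sample_all_tasks=1 telemetry_buffer_size=32768
 *   telemetry_sample_rate=2 telemetry_notification_leeway=8192
 *
 * This would sample every task once per 2 seconds into a 32 KB buffer
 * and notify userland once roughly 24 KB have accumulated since the
 * last mark.
 */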
175
176 /*
177  * Enable or disable global microstackshots (i.e. telemetry_sample_all_tasks).
178 *
179 * enable_disable == 1: turn it on
180 * enable_disable == 0: turn it off
181 */
182 void
183 telemetry_global_ctl(int enable_disable)
184 {
185 if (enable_disable == 1) {
186 telemetry_sample_all_tasks = TRUE;
187 } else {
188 telemetry_sample_all_tasks = FALSE;
189 }
190 }
191
192 /*
193 * Opt the given task into or out of the telemetry stream.
194 *
195 * Supported reasons (callers may use any or all of):
196 * TF_CPUMON_WARNING
197 * TF_WAKEMON_WARNING
198 *
199 * enable_disable == 1: turn it on
200 * enable_disable == 0: turn it off
201 */
202 void
203 telemetry_task_ctl(task_t task, uint32_t reasons, int enable_disable)
204 {
205 task_lock(task);
206 telemetry_task_ctl_locked(task, reasons, enable_disable);
207 task_unlock(task);
208 }
209
210 void
211 telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable)
212 {
213 uint32_t origflags;
214
215 assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY));
216
217 task_lock_assert_owned(task);
218
219 origflags = task->t_flags;
220
221 if (enable_disable == 1) {
222 task->t_flags |= reasons;
223 if ((origflags & TF_TELEMETRY) == 0) {
224 OSIncrementAtomic(&telemetry_active_tasks);
225 #if TELEMETRY_DEBUG
226 printf("%s: telemetry OFF -> ON (%d active)\n", proc_name_address(task->bsd_info), telemetry_active_tasks);
227 #endif
228 }
229 } else {
230 task->t_flags &= ~reasons;
231 if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) {
232 /*
233 * If this task went from having at least one telemetry bit to having none,
234 * the net change was to disable telemetry for the task.
235 */
236 OSDecrementAtomic(&telemetry_active_tasks);
237 #if TELEMETRY_DEBUG
238 printf("%s: telemetry ON -> OFF (%d active)\n", proc_name_address(task->bsd_info), telemetry_active_tasks);
239 #endif
240 }
241 }
242 }
243
244 /*
245 * Determine if the current thread is eligible for telemetry:
246 *
247 * telemetry_sample_all_tasks: All threads are eligible. This takes precedence.
248 * telemetry_active_tasks: Count of tasks opted in.
249 * task->t_flags & TF_TELEMETRY: This task is opted in.
250 */
251 static boolean_t
252 telemetry_is_active(thread_t thread)
253 {
254 task_t task = thread->task;
255
256 if (task == kernel_task) {
257 /* Kernel threads never return to an AST boundary, and are ineligible */
258 return FALSE;
259 }
260
261 if (telemetry_sample_all_tasks == TRUE) {
262 return (TRUE);
263 }
264
265 if ((telemetry_active_tasks > 0) && ((thread->task->t_flags & TF_TELEMETRY) != 0)) {
266 return (TRUE);
267 }
268
269 return (FALSE);
270 }
271
272 /*
273 * Userland is arming a timer. If we are eligible for such a record,
274 * sample now. No need to do this one at the AST because we're already at
275 * a safe place in this system call.
276 */
277 int telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway)
278 {
279 if (telemetry_needs_timer_arming_record == TRUE) {
280 telemetry_needs_timer_arming_record = FALSE;
281 telemetry_take_sample(current_thread(), kTimerArmingRecord | kUserMode, &telemetry_buffer);
282 }
283
284 return (0);
285 }
286
287 /*
288 * Mark the current thread for an interrupt-based
289 * telemetry record, to be sampled at the next AST boundary.
290 */
291 void telemetry_mark_curthread(boolean_t interrupted_userspace)
292 {
293 uint32_t ast_bits = 0;
294 thread_t thread = current_thread();
295
296 /*
297 * If telemetry isn't active for this thread, return and try
298 * again next time.
299 */
300 if (telemetry_is_active(thread) == FALSE) {
301 return;
302 }
303
304 ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL);
305
306 telemetry_needs_record = FALSE;
307 thread_ast_set(thread, ast_bits);
308 ast_propagate(thread);
309 }
310
311 void compute_telemetry(void *arg __unused)
312 {
313 if (telemetry_sample_all_tasks || (telemetry_active_tasks > 0)) {
314 if ((++telemetry_timestamp) % telemetry_sample_rate == 0) {
315 telemetry_needs_record = TRUE;
316 telemetry_needs_timer_arming_record = TRUE;
317 }
318 }
319 }
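/*
 * compute_telemetry() is expected to be driven by the scheduler's
 * periodic averaging pass (compute_averages(), per the comment above
 * telemetry_buffer). With the default telemetry_sample_rate of 1, every
 * invocation requests a record; with a rate of N, only every Nth
 * invocation does.
 */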
320
321 /*
322 * If userland has registered a port for telemetry notifications, send one now.
323 */
324 static void
325 telemetry_notify_user(void)
326 {
327 mach_port_t user_port;
328 uint32_t flags = 0;
329 int error;
330
331 error = host_get_telemetry_port(host_priv_self(), &user_port);
332 if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
333 return;
334 }
335
336 telemetry_notification(user_port, flags);
337 ipc_port_release_send(user_port);
338 }
339
340 void telemetry_ast(thread_t thread, ast_t reasons)
341 {
342 assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
343
344 boolean_t io_telemetry = (reasons & AST_TELEMETRY_IO) ? TRUE : FALSE;
345 boolean_t interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
346
347 uint8_t microsnapshot_flags = kInterruptRecord;
348
349 if (io_telemetry == TRUE)
350 microsnapshot_flags = kIORecord;
351
352 if (interrupted_userspace)
353 microsnapshot_flags |= kUserMode;
354
355 telemetry_take_sample(thread, microsnapshot_flags, &telemetry_buffer);
356 }
357
358 void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer)
359 {
360 task_t task;
361 void *p;
362 uint32_t btcount = 0, bti;
363 struct micro_snapshot *msnap;
364 struct task_snapshot *tsnap;
365 struct thread_snapshot *thsnap;
366 clock_sec_t secs;
367 clock_usec_t usecs;
368 vm_size_t framesize;
369 uint32_t current_record_start;
370 uint32_t tmp = 0;
371 boolean_t notify = FALSE;
372
373 if (thread == THREAD_NULL)
374 return;
375
376 task = thread->task;
377 if ((task == TASK_NULL) || (task == kernel_task) || task_did_exec(task) || task_is_exec_copy(task))
378 return;
379
380         /*
381          * To avoid overloading the system with telemetry requests, make
382          * sure we don't add more requests while existing ones are
383          * in-flight. Attempt this by checking whether we can grab the lock.
384          *
385          * This is a little fragile: it only works as intended if the work
386          * done while the telemetry lock is held is the expensive part of
387          * telemetry. That work includes populating the buffer and the
388          * client gathering it, but excludes the copyin overhead, which
389          * happens before the lock is taken.
390          */
391 if (!TELEMETRY_TRY_SPIN_LOCK())
392 return;
393
394 TELEMETRY_UNLOCK();
395
396 /* telemetry_XXX accessed outside of lock for instrumentation only */
397 /* TODO */
398 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START, microsnapshot_flags, telemetry_bytes_since_last_mark, 0, 0, (&telemetry_buffer != current_buffer));
399
400 p = get_bsdtask_info(task);
401
402 /*
403 * Gather up the data we'll need for this sample. The sample is written into the kernel
404 * buffer with the global telemetry lock held -- so we must do our (possibly faulting)
405 * copies from userland here, before taking the lock.
406 */
407 uintptr_t frames[MAX_CALLSTACK_FRAMES] = {};
408 bool user64;
409 int backtrace_error = backtrace_user(frames, MAX_CALLSTACK_FRAMES, &btcount, &user64);
410 if (backtrace_error) {
411 return;
412 }
413
414 /*
415 * Find the actual [slid] address of the shared cache's UUID, and copy it in from userland.
416 */
417 int shared_cache_uuid_valid = 0;
418 uint64_t shared_cache_base_address;
419 struct _dyld_cache_header shared_cache_header;
420 uint64_t shared_cache_slide;
421
422 /*
423 * Don't copy in the entire shared cache header; we only need the UUID. Calculate the
424 * offset of that one field.
425 */
426 int sc_header_uuid_offset = (char *)&shared_cache_header.uuid - (char *)&shared_cache_header;
427 vm_shared_region_t sr = vm_shared_region_get(task);
428 if (sr != NULL) {
429 if ((vm_shared_region_start_address(sr, &shared_cache_base_address) == KERN_SUCCESS) &&
430 (copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid,
431 sizeof (shared_cache_header.uuid)) == 0)) {
432 shared_cache_uuid_valid = 1;
433 shared_cache_slide = vm_shared_region_get_slide(sr);
434 }
435 // vm_shared_region_get() gave us a reference on the shared region.
436 vm_shared_region_deallocate(sr);
437 }
438
439 /*
440          * Retrieve the array of UUIDs for the binaries used by this task.
441 * We reach down into DYLD's data structures to find the array.
442 *
443 * XXX - make this common with kdp?
444 */
445 uint32_t uuid_info_count = 0;
446 mach_vm_address_t uuid_info_addr = 0;
447 if (task_has_64BitAddr(task)) {
448 struct user64_dyld_all_image_infos task_image_infos;
449 if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
450 uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
451 uuid_info_addr = task_image_infos.uuidArray;
452 }
453 } else {
454 struct user32_dyld_all_image_infos task_image_infos;
455 if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) {
456 uuid_info_count = task_image_infos.uuidArrayCount;
457 uuid_info_addr = task_image_infos.uuidArray;
458 }
459 }
460
461 /*
462 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
463 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
464 * for this task.
465 */
466 if (!uuid_info_addr) {
467 uuid_info_count = 0;
468 }
469
470 /*
471 * Don't copy in an unbounded amount of memory. The main binary and interesting
472 * non-shared-cache libraries should be in the first few images.
473 */
474 if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) {
475 uuid_info_count = TELEMETRY_MAX_UUID_COUNT;
476 }
477
478 uint32_t uuid_info_size = (uint32_t)(task_has_64BitAddr(thread->task) ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
479 uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
480 char *uuid_info_array = NULL;
481
482 if (uuid_info_count > 0) {
483 if ((uuid_info_array = (char *)kalloc(uuid_info_array_size)) == NULL) {
484 return;
485 }
486
487 /*
488 * Copy in the UUID info array.
489 * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot.
490 */
491 if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) {
492 kfree(uuid_info_array, uuid_info_array_size);
493 uuid_info_array = NULL;
494 uuid_info_array_size = 0;
495 }
496 }
497
498 /*
499 * Look for a dispatch queue serial number, and copy it in from userland if present.
500 */
501 uint64_t dqserialnum = 0;
502 int dqserialnum_valid = 0;
503
504 uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
505 if (dqkeyaddr != 0) {
506 uint64_t dqaddr = 0;
507 uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task);
508 if ((copyin(dqkeyaddr, (char *)&dqaddr, (task_has_64BitAddr(task) ? 8 : 4)) == 0) &&
509 (dqaddr != 0) && (dq_serialno_offset != 0)) {
510 uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset;
511 if (copyin(dqserialnumaddr, (char *)&dqserialnum, (task_has_64BitAddr(task) ? 8 : 4)) == 0) {
512 dqserialnum_valid = 1;
513 }
514 }
515 }
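	/*
	 * Sketch of the chain of copyins above: thread_dispatchqaddr() gives
	 * the user address of the thread's dispatch queue pointer; we copy in
	 * that pointer-sized value to get the queue address, then copy in the
	 * serial number stored at dq_serialno_offset within the queue
	 * structure. If either copyin fails, dqserialnum_valid simply stays 0.
	 */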
516
517 clock_get_calendar_microtime(&secs, &usecs);
518
519 TELEMETRY_LOCK();
520
521         /*
522          * If our buffer is not backed by any storage, we cannot take the
523          * sample. This check allows the buffer to be deallocated when the
524          * facility is disabled.
525          */
526 if (!current_buffer->buffer)
527 goto cancel_sample;
528
529 /*
530          * We do the bulk of the operation under the telemetry lock, on the assumption that
531 * any page faults during execution will not cause another AST_TELEMETRY_ALL
532 * to deadlock; they will just block until we finish. This makes it easier
533 * to copy into the buffer directly. As soon as we unlock, userspace can copy
534 * out of our buffer.
535 */
536
537 copytobuffer:
538
539 current_record_start = current_buffer->current_position;
540
541 if ((current_buffer->size - current_buffer->current_position) < sizeof(struct micro_snapshot)) {
542 /*
543 * We can't fit a record in the space available, so wrap around to the beginning.
544 * Save the current position as the known end point of valid data.
545 */
546 current_buffer->end_point = current_record_start;
547 current_buffer->current_position = 0;
548 if (current_record_start == 0) {
549 /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
550 goto cancel_sample;
551 }
552 goto copytobuffer;
553 }
554
555 msnap = (struct micro_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
556 msnap->snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC;
557 msnap->ms_flags = microsnapshot_flags;
558 msnap->ms_opaque_flags = 0; /* namespace managed by userspace */
559 msnap->ms_cpu = 0; /* XXX - does this field make sense for a micro-stackshot? */
560 msnap->ms_time = secs;
561 msnap->ms_time_microsecs = usecs;
562
563 current_buffer->current_position += sizeof(struct micro_snapshot);
564
565 if ((current_buffer->size - current_buffer->current_position) < sizeof(struct task_snapshot)) {
566 current_buffer->end_point = current_record_start;
567 current_buffer->current_position = 0;
568 if (current_record_start == 0) {
569 /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
570 goto cancel_sample;
571 }
572 goto copytobuffer;
573 }
574
575 tsnap = (struct task_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
576 bzero(tsnap, sizeof(*tsnap));
577 tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
578 tsnap->pid = proc_pid(p);
579 tsnap->uniqueid = proc_uniqueid(p);
580 tsnap->user_time_in_terminated_threads = task->total_user_time;
581 tsnap->system_time_in_terminated_threads = task->total_system_time;
582 tsnap->suspend_count = task->suspend_count;
583 tsnap->task_size = pmap_resident_count(task->map->pmap);
584 tsnap->faults = task->faults;
585 tsnap->pageins = task->pageins;
586 tsnap->cow_faults = task->cow_faults;
587         /*
588          * The throttling counters are maintained as 64-bit counters in the proc
589          * structure. However, we reserve 32 bits (each) for them in the task_snapshot
590          * struct to save space, since we do not expect them to overflow 32 bits. If we
591          * find these values overflowing in the future, the fix would be to simply
592          * upgrade these counters to 64-bit in the task_snapshot struct.
593          */
594 tsnap->was_throttled = (uint32_t) proc_was_throttled(p);
595 tsnap->did_throttle = (uint32_t) proc_did_throttle(p);
596
597 if (task->t_flags & TF_TELEMETRY) {
598 tsnap->ss_flags |= kTaskRsrcFlagged;
599 }
600
601 if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) {
602 tsnap->ss_flags |= kTaskDarwinBG;
603 }
604
605 proc_get_darwinbgstate(task, &tmp);
606
607 if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) {
608 tsnap->ss_flags |= kTaskIsForeground;
609 }
610
611 if (tmp & PROC_FLAG_ADAPTIVE_IMPORTANT) {
612 tsnap->ss_flags |= kTaskIsBoosted;
613 }
614
615 if (tmp & PROC_FLAG_SUPPRESSED) {
616 tsnap->ss_flags |= kTaskIsSuppressed;
617 }
618
619 tsnap->latency_qos = task_grab_latency_qos(task);
620
621 strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm));
622 if (task_has_64BitAddr(thread->task)) {
623 tsnap->ss_flags |= kUser64_p;
624 }
625
626 if (shared_cache_uuid_valid) {
627 tsnap->shared_cache_slide = shared_cache_slide;
628 bcopy(shared_cache_header.uuid, tsnap->shared_cache_identifier, sizeof (shared_cache_header.uuid));
629 }
630
631 current_buffer->current_position += sizeof(struct task_snapshot);
632
633 /*
634          * Directly after the task snapshot, place the array of UUIDs corresponding to the binaries
635 * used by this task.
636 */
637 if ((current_buffer->size - current_buffer->current_position) < uuid_info_array_size) {
638 current_buffer->end_point = current_record_start;
639 current_buffer->current_position = 0;
640 if (current_record_start == 0) {
641 /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
642 goto cancel_sample;
643 }
644 goto copytobuffer;
645 }
646
647 /*
648 * Copy the UUID info array into our sample.
649 */
650 if (uuid_info_array_size > 0) {
651 bcopy(uuid_info_array, (char *)(current_buffer->buffer + current_buffer->current_position), uuid_info_array_size);
652 tsnap->nloadinfos = uuid_info_count;
653 }
654
655 current_buffer->current_position += uuid_info_array_size;
656
657 /*
658 * After the task snapshot & list of binary UUIDs, we place a thread snapshot.
659 */
660
661 if ((current_buffer->size - current_buffer->current_position) < sizeof(struct thread_snapshot)) {
662 /* wrap and overwrite */
663 current_buffer->end_point = current_record_start;
664 current_buffer->current_position = 0;
665 if (current_record_start == 0) {
666 /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
667 goto cancel_sample;
668 }
669 goto copytobuffer;
670 }
671
672 thsnap = (struct thread_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position);
673 bzero(thsnap, sizeof(*thsnap));
674
675 thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
676 thsnap->thread_id = thread_tid(thread);
677 thsnap->state = thread->state;
678 thsnap->priority = thread->base_pri;
679 thsnap->sched_pri = thread->sched_pri;
680 thsnap->sched_flags = thread->sched_flags;
681 thsnap->ss_flags |= kStacksPCOnly;
682 thsnap->ts_qos = thread->effective_policy.thep_qos;
683 thsnap->ts_rqos = thread->requested_policy.thrp_qos;
684 thsnap->ts_rqos_override = thread->requested_policy.thrp_qos_override;
685
686 if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) {
687 thsnap->ss_flags |= kThreadDarwinBG;
688 }
689
690 thsnap->user_time = timer_grab(&thread->user_timer);
691
692 uint64_t tval = timer_grab(&thread->system_timer);
693
694 if (thread->precise_user_kernel_time) {
695 thsnap->system_time = tval;
696 } else {
697 thsnap->user_time += tval;
698 thsnap->system_time = 0;
699 }
700
701 current_buffer->current_position += sizeof(struct thread_snapshot);
702
703 /*
704 * If this thread has a dispatch queue serial number, include it here.
705 */
706 if (dqserialnum_valid) {
707 if ((current_buffer->size - current_buffer->current_position) < sizeof(dqserialnum)) {
708 /* wrap and overwrite */
709 current_buffer->end_point = current_record_start;
710 current_buffer->current_position = 0;
711 if (current_record_start == 0) {
712 /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
713 goto cancel_sample;
714 }
715 goto copytobuffer;
716 }
717
718 thsnap->ss_flags |= kHasDispatchSerial;
719 bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof (dqserialnum));
720 current_buffer->current_position += sizeof (dqserialnum);
721 }
722
723 if (task_has_64BitAddr(task)) {
724 framesize = 8;
725 thsnap->ss_flags |= kUser64_p;
726 } else {
727 framesize = 4;
728 }
729
730 /*
731 * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning,
732 * and start again there so that we always store a full record.
733 */
734 if ((current_buffer->size - current_buffer->current_position)/framesize < btcount) {
735 current_buffer->end_point = current_record_start;
736 current_buffer->current_position = 0;
737 if (current_record_start == 0) {
738 /* This sample is too large to fit in the buffer even when we started at 0, so skip it */
739 goto cancel_sample;
740 }
741 goto copytobuffer;
742 }
743
744 for (bti=0; bti < btcount; bti++, current_buffer->current_position += framesize) {
745 if (framesize == 8) {
746 *(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti];
747 } else {
748 *(uint32_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = (uint32_t)frames[bti];
749 }
750 }
751
752 if (current_buffer->end_point < current_buffer->current_position) {
753 /*
754 * Each time the cursor wraps around to the beginning, we leave a
755 * differing amount of unused space at the end of the buffer. Make
756 * sure the cursor pushes the end point in case we're making use of
757 * more of the buffer than we did the last time we wrapped.
758 */
759 current_buffer->end_point = current_buffer->current_position;
760 }
761
762 thsnap->nuser_frames = btcount;
763
764 /*
765 * Now THIS is a hack.
766 */
767 if (current_buffer == &telemetry_buffer) {
768 telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start);
769 if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) {
770 notify = TRUE;
771 }
772 }
773
774 cancel_sample:
775
776 TELEMETRY_UNLOCK();
777
778 /* TODO */
779 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END, notify, telemetry_bytes_since_last_mark, current_buffer->current_position, current_buffer->end_point, (&telemetry_buffer != current_buffer));
780
781 if (notify) {
782 telemetry_notify_user();
783 }
784
785 if (uuid_info_array != NULL) {
786 kfree(uuid_info_array, uuid_info_array_size);
787 }
788 }
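/*
 * For reference, each record written by telemetry_take_sample() above is
 * laid out contiguously in the ring buffer roughly as follows (sizes and
 * optional pieces depend on the task and thread):
 *
 *   struct micro_snapshot             (record header: magic, flags, time)
 *   struct task_snapshot              (pid, p_comm, counters, ss_flags, ...)
 *   dyld uuid_info array              (tsnap->nloadinfos entries, 32/64-bit)
 *   struct thread_snapshot            (tid, priorities, QoS, times, ...)
 *   [uint64_t dispatch serial number  (only if kHasDispatchSerial is set)]
 *   user backtrace                    (thsnap->nuser_frames PCs, 4 or 8
 *                                      bytes each depending on kUser64_p)
 */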
789
790 #if TELEMETRY_DEBUG
791 static void
792 log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz)
793 {
794 struct micro_snapshot *p;
795 uint32_t offset;
796
797 printf("Copying out %d bytes of telemetry at offset %d\n", sz, pos);
798
799 buf += pos;
800
801 /*
802 * Find and log each timestamp in this chunk of buffer.
803 */
804 for (offset = 0; offset < sz; offset++) {
805 p = (struct micro_snapshot *)(buf + offset);
806 if (p->snapshot_magic == STACKSHOT_MICRO_SNAPSHOT_MAGIC) {
807 printf("telemetry timestamp: %lld\n", p->ms_time);
808 }
809 }
810 }
811 #endif
812
813 int telemetry_gather(user_addr_t buffer, uint32_t *length, boolean_t mark)
814 {
815 return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer);
816 }
817
818 int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark, struct micro_snapshot_buffer * current_buffer)
819 {
820 int result = 0;
821 uint32_t oldest_record_offset;
822
823 /* TODO */
824 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START, mark, telemetry_bytes_since_last_mark, 0, 0, (&telemetry_buffer != current_buffer));
825
826 TELEMETRY_LOCK();
827
828 if (current_buffer->buffer == 0) {
829 *length = 0;
830 goto out;
831 }
832
833 if (*length < current_buffer->size) {
834 result = KERN_NO_SPACE;
835 goto out;
836 }
837
838 /*
839 * Copy the ring buffer out to userland in order sorted by time: least recent to most recent.
840 * First, we need to search forward from the cursor to find the oldest record in our buffer.
841 */
842 oldest_record_offset = current_buffer->current_position;
843 do {
844 if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) ||
845 ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) {
846
847 if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) {
848 /*
849 * There is no magic number at the start of the buffer, which means
850 * it's empty; nothing to see here yet.
851 */
852 *length = 0;
853 goto out;
854 }
855 /*
856 * We've looked through the end of the active buffer without finding a valid
857 * record; that means all valid records are in a single chunk, beginning at
858 * the very start of the buffer.
859 */
860
861 oldest_record_offset = 0;
862 assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC);
863 break;
864 }
865
866 if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC)
867 break;
868
869 /*
870 * There are no alignment guarantees for micro-stackshot records, so we must search at each
871 * byte offset.
872 */
873 oldest_record_offset++;
874 } while (oldest_record_offset != current_buffer->current_position);
875
876 /*
877 * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then
878 * from the beginning of the buffer up to the current position.
879 */
880 if (oldest_record_offset != 0) {
881 #if TELEMETRY_DEBUG
882 log_telemetry_output(current_buffer->buffer, oldest_record_offset,
883 current_buffer->end_point - oldest_record_offset);
884 #endif
885 if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer,
886 current_buffer->end_point - oldest_record_offset)) != 0) {
887 *length = 0;
888 goto out;
889 }
890 *length = current_buffer->end_point - oldest_record_offset;
891 } else {
892 *length = 0;
893 }
894
895 #if TELEMETRY_DEBUG
896 log_telemetry_output(current_buffer->buffer, 0, current_buffer->current_position);
897 #endif
898 if ((result = copyout((void *)current_buffer->buffer, buffer + *length,
899 current_buffer->current_position)) != 0) {
900 *length = 0;
901 goto out;
902 }
903 *length += (uint32_t)current_buffer->current_position;
904
905 out:
906
907 if (mark && (*length > 0)) {
908 telemetry_bytes_since_last_mark = 0;
909 }
910
911 TELEMETRY_UNLOCK();
912
913 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END, current_buffer->current_position, *length, current_buffer->end_point, 0, (&telemetry_buffer != current_buffer));
914
915 return (result);
916 }
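/*
 * Worked example of the copyout ordering above, with hypothetical offsets:
 * suppose size = 16384, current_position = 5000, end_point = 15000, and
 * the oldest record's magic is found at offset 9000. The gather copies
 * bytes [9000, 15000) followed by bytes [0, 5000), returning
 * *length = 11000, so records reach userland oldest-to-newest despite
 * the wrap.
 */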
917
918 /************************/
919 /* BOOT PROFILE SUPPORT */
920 /************************/
921 /*
922 * Boot Profiling
923 *
924 * The boot-profiling support is a mechanism to sample activity happening on the
925 * system during boot. This mechanism sets up a periodic timer and on every timer fire,
926 * captures a full backtrace into the boot profiling buffer. This buffer can be pulled
927 * out and analyzed from user-space. It is turned on using the following boot-args:
928 * "bootprofile_buffer_size" specifies the size of the boot profile buffer
929 * "bootprofile_interval_ms" specifies the interval for the profiling timer
930 *
931 * Process Specific Boot Profiling
932 *
933 * The boot-arg "bootprofile_proc_name" can be used to specify a certain
934  * process that needs to be profiled during boot. Setting this boot-arg changes
935 * the way stackshots are captured. At every timer fire, the code looks at the
936 * currently running process and takes a stackshot only if the requested process
937 * is on-core (which makes it unsuitable for MP systems).
938 *
939 * Trigger Events
940 *
941 * The boot-arg "bootprofile_type=boot" starts the timer during early boot. Using
942 * "wake" starts the timer at AP wake from suspend-to-RAM.
943 */
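/*
 * Illustrative boot-args (values are examples only): a 4 MB buffer sampled
 * every 10 ms starting at early boot would be requested with
 *
 *   bootprofile_buffer_size=4194304 bootprofile_interval_ms=10
 *   bootprofile_type=boot
 *
 * Adding e.g. bootprofile_proc_name=launchd (a hypothetical target) would
 * restrict samples to timer fires that catch that process on-core.
 */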
944
945 #define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */
946
947 vm_offset_t bootprofile_buffer = 0;
948 uint32_t bootprofile_buffer_size = 0;
949 uint32_t bootprofile_buffer_current_position = 0;
950 uint32_t bootprofile_interval_ms = 0;
951 uint32_t bootprofile_stackshot_flags = 0;
952 uint64_t bootprofile_interval_abs = 0;
953 uint64_t bootprofile_next_deadline = 0;
954 uint32_t bootprofile_all_procs = 0;
955 char bootprofile_proc_name[17];
956 uint64_t bootprofile_delta_since_timestamp = 0;
957 lck_grp_t bootprofile_lck_grp;
958 lck_mtx_t bootprofile_mtx;
959
960
961 enum {
962 kBootProfileDisabled = 0,
963 kBootProfileStartTimerAtBoot,
964 kBootProfileStartTimerAtWake
965 } bootprofile_type = kBootProfileDisabled;
966
967
968 static timer_call_data_t bootprofile_timer_call_entry;
969
970 #define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0)
971 #define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx)
972 #define BOOTPROFILE_UNLOCK() do { lck_mtx_unlock(&bootprofile_mtx); } while(0)
973
974 static void bootprofile_timer_call(
975 timer_call_param_t param0,
976 timer_call_param_t param1);
977
978 void bootprofile_init(void)
979 {
980 kern_return_t ret;
981 char type[32];
982
983 lck_grp_init(&bootprofile_lck_grp, "bootprofile group", LCK_GRP_ATTR_NULL);
984 lck_mtx_init(&bootprofile_mtx, &bootprofile_lck_grp, LCK_ATTR_NULL);
985
986 if (!PE_parse_boot_argn("bootprofile_buffer_size", &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) {
987 bootprofile_buffer_size = 0;
988 }
989
990 if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE)
991 bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE;
992
993 if (!PE_parse_boot_argn("bootprofile_interval_ms", &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) {
994 bootprofile_interval_ms = 0;
995 }
996
997 if (!PE_parse_boot_argn("bootprofile_stackshot_flags", &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) {
998 bootprofile_stackshot_flags = 0;
999 }
1000
1001 if (!PE_parse_boot_argn("bootprofile_proc_name", &bootprofile_proc_name, sizeof(bootprofile_proc_name))) {
1002 bootprofile_all_procs = 1;
1003 bootprofile_proc_name[0] = '\0';
1004 }
1005
1006 if (PE_parse_boot_argn("bootprofile_type", type, sizeof(type))) {
1007 if (0 == strcmp(type, "boot")) {
1008 bootprofile_type = kBootProfileStartTimerAtBoot;
1009 } else if (0 == strcmp(type, "wake")) {
1010 bootprofile_type = kBootProfileStartTimerAtWake;
1011 } else {
1012 bootprofile_type = kBootProfileDisabled;
1013 }
1014 } else {
1015 bootprofile_type = kBootProfileDisabled;
1016 }
1017
1018 clock_interval_to_absolutetime_interval(bootprofile_interval_ms, NSEC_PER_MSEC, &bootprofile_interval_abs);
1019
1020 	/* The type, buffer size, and interval boot-args must all be set to enable profiling */
1021 if ((bootprofile_type == kBootProfileDisabled) || (bootprofile_buffer_size == 0) || (bootprofile_interval_abs == 0)) {
1022 return;
1023 }
1024
1025 ret = kmem_alloc(kernel_map, &bootprofile_buffer, bootprofile_buffer_size, VM_KERN_MEMORY_DIAG);
1026 if (ret != KERN_SUCCESS) {
1027 kprintf("Boot profile: Allocation failed: %d\n", ret);
1028 return;
1029 }
1030 bzero((void *) bootprofile_buffer, bootprofile_buffer_size);
1031
1032 kprintf("Boot profile: Sampling %s once per %u ms at %s\n", bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms,
1033 bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown"));
1034
1035 timer_call_setup(&bootprofile_timer_call_entry,
1036 bootprofile_timer_call,
1037 NULL);
1038
1039 if (bootprofile_type == kBootProfileStartTimerAtBoot) {
1040 bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
1041 timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
1042 NULL,
1043 bootprofile_next_deadline,
1044 0,
1045 TIMER_CALL_SYS_NORMAL,
1046 FALSE);
1047 }
1048 }
1049
1050 void
1051 bootprofile_wake_from_sleep(void)
1052 {
1053 if (bootprofile_type == kBootProfileStartTimerAtWake) {
1054 bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs;
1055 timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
1056 NULL,
1057 bootprofile_next_deadline,
1058 0,
1059 TIMER_CALL_SYS_NORMAL,
1060 FALSE);
1061 }
1062 }
1063
1064
1065 static void
1066 bootprofile_timer_call(
1067 timer_call_param_t param0 __unused,
1068 timer_call_param_t param1 __unused)
1069 {
1070 unsigned retbytes = 0;
1071 int pid_to_profile = -1;
1072
1073 if (!BOOTPROFILE_TRY_SPIN_LOCK()) {
1074 goto reprogram;
1075 }
1076
1077 /* Check if process-specific boot profiling is turned on */
1078 if (!bootprofile_all_procs) {
1079 /*
1080 * Since boot profiling initializes really early in boot, it is
1081 * possible that at this point, the task/proc is not initialized.
1082 * Nothing to do in that case.
1083 */
1084
1085 if ((current_task() != NULL) && (current_task()->bsd_info != NULL) &&
1086 (0 == strncmp(bootprofile_proc_name, proc_name_address(current_task()->bsd_info), 17))) {
1087 pid_to_profile = proc_selfpid();
1088 }
1089 else {
1090 /*
1091 * Process-specific boot profiling requested but the on-core process is
1092 * something else. Nothing to do here.
1093 */
1094 BOOTPROFILE_UNLOCK();
1095 goto reprogram;
1096 }
1097 }
1098
1099 /* initiate a stackshot with whatever portion of the buffer is left */
1100 if (bootprofile_buffer_current_position < bootprofile_buffer_size) {
1101
1102 uint32_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO
1103 | STACKSHOT_GET_GLOBAL_MEM_STATS;
1104 #if __x86_64__
1105 flags |= STACKSHOT_SAVE_KEXT_LOADINFO;
1106 #endif /* __x86_64__ */
1107
1108
1109 /* OR on flags specified in boot-args */
1110 flags |= bootprofile_stackshot_flags;
1111 if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) {
1112 /* Can't take deltas until the first one */
1113 flags &= ~ STACKSHOT_COLLECT_DELTA_SNAPSHOT;
1114 }
1115
1116 uint64_t timestamp = 0;
1117 if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) {
1118 timestamp = mach_absolute_time();
1119 }
1120
1121 kern_return_t r = stack_snapshot_from_kernel(
1122 pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position),
1123 bootprofile_buffer_size - bootprofile_buffer_current_position,
1124 flags, bootprofile_delta_since_timestamp, &retbytes);
1125
1126 /*
1127 * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser
1128 * than the bootprofile lock. If someone else has the lock we'll just
1129 * try again later.
1130 */
1131
1132 if (r == KERN_LOCK_OWNED) {
1133 BOOTPROFILE_UNLOCK();
1134 goto reprogram;
1135 }
1136
1137 if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT &&
1138 r == KERN_SUCCESS) {
1139 bootprofile_delta_since_timestamp = timestamp;
1140 }
1141
1142 bootprofile_buffer_current_position += retbytes;
1143 }
1144
1145 BOOTPROFILE_UNLOCK();
1146
1147 /* If we didn't get any data or have run out of buffer space, stop profiling */
1148 if ((retbytes == 0) || (bootprofile_buffer_current_position == bootprofile_buffer_size)) {
1149 return;
1150 }
1151
1152
1153 reprogram:
1154 /* If the user gathered the buffer, no need to keep profiling */
1155 if (bootprofile_interval_abs == 0) {
1156 return;
1157 }
1158
1159 clock_deadline_for_periodic_event(bootprofile_interval_abs,
1160 mach_absolute_time(),
1161 &bootprofile_next_deadline);
1162 timer_call_enter_with_leeway(&bootprofile_timer_call_entry,
1163 NULL,
1164 bootprofile_next_deadline,
1165 0,
1166 TIMER_CALL_SYS_NORMAL,
1167 FALSE);
1168 }
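/*
 * Note on termination, as implemented above: the timer is not re-armed
 * once a completed stackshot returns zero bytes or the buffer has filled,
 * and bootprofile_gather() below stops future re-arming by zeroing
 * bootprofile_interval_abs after a successful copyout.
 */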
1169
1170 void bootprofile_get(void **buffer, uint32_t *length)
1171 {
1172 BOOTPROFILE_LOCK();
1173 *buffer = (void*) bootprofile_buffer;
1174 *length = bootprofile_buffer_current_position;
1175 BOOTPROFILE_UNLOCK();
1176 }
1177
1178 int bootprofile_gather(user_addr_t buffer, uint32_t *length)
1179 {
1180 int result = 0;
1181
1182 BOOTPROFILE_LOCK();
1183
1184 if (bootprofile_buffer == 0) {
1185 *length = 0;
1186 goto out;
1187 }
1188
1189 if (*length < bootprofile_buffer_current_position) {
1190 result = KERN_NO_SPACE;
1191 goto out;
1192 }
1193
1194 if ((result = copyout((void *)bootprofile_buffer, buffer,
1195 bootprofile_buffer_current_position)) != 0) {
1196 *length = 0;
1197 goto out;
1198 }
1199 *length = bootprofile_buffer_current_position;
1200
1201 /* cancel future timers */
1202 bootprofile_interval_abs = 0;
1203
1204 out:
1205
1206 BOOTPROFILE_UNLOCK();
1207
1208 return (result);
1209 }