-/*
- * Enable the window_buffer, and do any associated setup.
- */
-kern_return_t
-telemetry_enable_window(void)
-{
-	kern_return_t ret = KERN_SUCCESS;
-	vm_offset_t kern_buffer = 0;
-	vm_size_t kern_buffer_size = TELEMETRY_DEFAULT_WINDOW_BUFFER_SIZE;
-
-	/*
-	 * We have no guarantee we won't allocate the buffer, take
-	 * the lock, and then discover someone beat us to the punch,
-	 * but we would prefer to avoid blocking while holding the
-	 * lock.
-	 */
-	ret = kmem_alloc(kernel_map, &kern_buffer, kern_buffer_size, VM_KERN_MEMORY_DIAG);
-
-	TELEMETRY_LOCK();
-
-	if (!window_buffer.buffer) {
-		if (ret == KERN_SUCCESS) {
-			/* No existing buffer was found, so... */
-			window_buffer.end_point = 0;
-			window_buffer.current_position = 0;
-
-			/* Hand off the buffer, and... */
-			window_buffer.size = (uint32_t) kern_buffer_size;
-			window_buffer.buffer = kern_buffer;
-			kern_buffer = 0;
-			kern_buffer_size = 0;
-			bzero((void *) window_buffer.buffer, window_buffer.size);
-
-			/* Let the scheduler know it should drive windowed samples */
-			telemetry_window_enabled = TRUE;
-		}
-	} else {
-		/* We already have a buffer, so we have "succeeded" */
-		ret = KERN_SUCCESS;
-	}
-
-	TELEMETRY_UNLOCK();
-
-	if (kern_buffer)
-		kmem_free(kernel_map, kern_buffer, kern_buffer_size);
-
-	return ret;
-}
-
-/*
- * Disable the window_buffer, and do any associated teardown.
- */
-void
-telemetry_disable_window(void)
-{
-	vm_offset_t kern_buffer = 0;
-	vm_size_t kern_buffer_size = 0;
-
-	TELEMETRY_LOCK();
-
-	if (window_buffer.buffer) {
-		/* We have a window buffer, so tear it down */
-		telemetry_window_enabled = FALSE;
-		kern_buffer = window_buffer.buffer;
-		kern_buffer_size = window_buffer.size;
-		window_buffer.buffer = 0;
-		window_buffer.size = 0;
-		window_buffer.current_position = 0;
-		window_buffer.end_point = 0;
-	}
-
-	TELEMETRY_UNLOCK();
-
-	if (kern_buffer)
-		kmem_free(kernel_map, kern_buffer, kern_buffer_size);
-}
-
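The enable path above follows a common lock-ordering pattern: allocate the buffer before taking the lock (so the allocation never blocks while the lock is held), publish it under the lock only if no other thread got there first, and free the losing allocation after dropping the lock. The following is a minimal userspace sketch of that same pattern, not the XNU code; it uses pthreads and calloc, and the names shared_lock, shared_buf, shared_size, and publish_buffer are made up for illustration, standing in for TELEMETRY_LOCK and window_buffer.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-ins for TELEMETRY_LOCK and window_buffer. */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_buf = NULL;
static size_t shared_size = 0;

/* Returns true if a buffer is published on return, whether ours or someone else's. */
static bool
publish_buffer(size_t size)
{
	/* Allocate before taking the lock so we never block while holding it. */
	void *candidate = calloc(1, size);

	pthread_mutex_lock(&shared_lock);
	if (shared_buf == NULL && candidate != NULL) {
		/* We won the race: hand the allocation off to the shared state. */
		shared_buf = candidate;
		shared_size = size;
		candidate = NULL;
	}
	bool published = (shared_buf != NULL);
	pthread_mutex_unlock(&shared_lock);

	/* If someone beat us to the punch, release our unused allocation outside the lock. */
	free(candidate);
	return published;
}

The trade-off is the same one the original comment describes: the losing thread may allocate and immediately free a buffer, but neither allocation nor free ever happens while the lock is held.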