/* osfmk/kern/kpc_thread.c (from xnu-7195.101.1) */
/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <sys/errno.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/sample.h>
#include <kperf/action.h>
#include <kperf/kperf_kpc.h>
#include <kern/kpc.h>

#if defined (__arm64__) || defined (__arm__)
#include <arm/cpu_data_internal.h>
#endif

/* global for whether to read PMCs on context switch */
int kpc_threads_counting = 0;

/* whether to call into KPC when a thread goes off CPU */
boolean_t kpc_off_cpu_active = FALSE;

/* current config and number of counters in that config */
static uint32_t kpc_thread_classes = 0;
static uint32_t kpc_thread_classes_count = 0;

static LCK_GRP_DECLARE(kpc_thread_lckgrp, "kpc thread");
static LCK_MTX_DECLARE(kpc_thread_lock, &kpc_thread_lckgrp);

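/* return the counter classes being counted per-thread, or 0 if thread counting is off */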
uint32_t
kpc_get_thread_counting(void)
{
	uint32_t kpc_thread_classes_tmp;
	int kpc_threads_counting_tmp;

	/* Make sure we get a consistent snapshot of these values */
	lck_mtx_lock(&kpc_thread_lock);

	kpc_thread_classes_tmp = kpc_thread_classes;
	kpc_threads_counting_tmp = kpc_threads_counting;

	lck_mtx_unlock(&kpc_thread_lock);

	if (kpc_threads_counting_tmp) {
		return kpc_thread_classes_tmp;
	} else {
		return 0;
	}
}

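/* configure which counter classes are counted per-thread; 0 classes shuts thread counting down */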
int
kpc_set_thread_counting(uint32_t classes)
{
	uint32_t count;

	lck_mtx_lock(&kpc_thread_lock);

	count = kpc_get_counter_count(classes);

	if ((classes == 0) || (count == 0)) {
		/* shut down */
		kpc_threads_counting = FALSE;
	} else {
		/* stash the config */
		kpc_thread_classes = classes;

		/* work out the size */
		kpc_thread_classes_count = count;
		assert(kpc_thread_classes_count <= KPC_MAX_COUNTERS);

		/* enable switch */
		kpc_threads_counting = TRUE;

		/* and schedule an AST for this thread... */
		if (!current_thread()->kpc_buf) {
			current_thread()->kperf_ast |= T_KPC_ALLOC;
			act_set_kperf(current_thread());
		}
	}

	kpc_off_cpu_update();
	lck_mtx_unlock(&kpc_thread_lock);

	return 0;
}

/* snapshot current PMCs and update counters in the current thread */
static void
kpc_update_thread_counters( thread_t thread )
{
	uint32_t i;
	uint64_t *tmp = NULL;
	cpu_data_t *cpu = NULL;

	cpu = current_cpu_datap();

	/* 1. stash current PMCs into latest CPU block */
	kpc_get_cpu_counters( FALSE, kpc_thread_classes,
	    NULL, cpu->cpu_kpc_buf[1] );

	/* 2. apply delta to old thread */
	if (thread->kpc_buf) {
		for (i = 0; i < kpc_thread_classes_count; i++) {
			thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i];
		}
	}

	/* schedule any necessary allocations */
	if (!current_thread()->kpc_buf) {
		current_thread()->kperf_ast |= T_KPC_ALLOC;
		act_set_kperf(current_thread());
	}

	/* 3. switch the PMC block pointers */
	tmp = cpu->cpu_kpc_buf[1];
	cpu->cpu_kpc_buf[1] = cpu->cpu_kpc_buf[0];
	cpu->cpu_kpc_buf[0] = tmp;
}

/* get counter values for the current thread */
int
kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf)
{
	thread_t thread = current_thread();
	boolean_t enabled;

	/* buffer too small :( */
	if (*inoutcount < kpc_thread_classes_count) {
		return EINVAL;
	}

	/* nothing to copy without an allocated counter buffer */
	if (!thread->kpc_buf) {
		return EINVAL;
	}

	enabled = ml_set_interrupts_enabled(FALSE);

	/* snap latest version of counters for this thread */
	kpc_update_thread_counters(current_thread());

	/* copy out the values and report the actual size */
	memcpy(buf, thread->kpc_buf,
	    kpc_thread_classes_count * sizeof(*buf));
	*inoutcount = kpc_thread_classes_count;

	ml_set_interrupts_enabled(enabled);

	return 0;
}

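/* update whether KPC should be called when a thread goes off CPU */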
void
kpc_off_cpu_update(void)
{
	kpc_off_cpu_active = kpc_threads_counting;
}

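/* called as a thread goes off CPU: fold its counter deltas into the thread */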
void
kpc_off_cpu_internal(thread_t thread)
{
	if (kpc_threads_counting) {
		kpc_update_thread_counters(thread);
	}
}

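/* allocate a counter buffer for a newly-created thread while thread counting is active */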
void
kpc_thread_create(thread_t thread)
{
	/* nothing to do if we're not counting */
	if (!kpc_threads_counting) {
		return;
	}

	/* give the new thread a counterbuf */
	thread->kpc_buf = kpc_counterbuf_alloc();
}

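/* release a thread's counter buffer when the thread is torn down */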
void
kpc_thread_destroy(thread_t thread)
{
	uint64_t *buf = NULL;

	/* usual case: no kpc buf, just return */
	if (!thread->kpc_buf) {
		return;
	}

	/* otherwise, don't leak */
	buf = thread->kpc_buf;
	thread->kpc_buf = NULL;
	kpc_counterbuf_free(buf);
}

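/* AST handler: allocate the thread's counter buffer if one was requested */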
void
kpc_thread_ast_handler(thread_t thread)
{
	if (thread->kperf_ast & T_KPC_ALLOC) {
		thread->kpc_buf = kpc_counterbuf_alloc();
	}
}