/* apple/xnu: osfmk/kern/kpc_thread.c (blob aa8f261d4397eb6d04b31e33e1cfee8be5e496f2) */

/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <sys/errno.h>

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/sample.h>
#include <kperf/action.h>
#include <kperf/kperf_kpc.h>
#include <kern/kpc.h>

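/*
 * Per-thread PMC accumulation for kpc.  While thread counting is enabled,
 * each CPU keeps two counter snapshot buffers; whenever a thread goes off
 * CPU, the delta between the previous and current snapshots is folded into
 * that thread's kpc_buf, yielding per-thread counter totals.
 */
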
/* global for whether to read PMCs on context switch */
int kpc_threads_counting = 0;

/* whether to call into KPC when a thread goes off CPU */
boolean_t kpc_off_cpu_active = FALSE;

/* current config and number of counters in that config */
static uint32_t kpc_thread_classes = 0;
static uint32_t kpc_thread_classes_count = 0;

static lck_grp_attr_t *kpc_thread_lckgrp_attr = NULL;
static lck_grp_t *kpc_thread_lckgrp = NULL;
static lck_mtx_t kpc_thread_lock;

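/* set up the lock group and mutex that guard the thread counting configuration */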
void
kpc_thread_init(void)
{
        kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init();
        kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr);
        lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL);
}

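/* return the classes currently counted per-thread, or 0 if thread counting is off */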
uint32_t
kpc_get_thread_counting(void)
{
        uint32_t kpc_thread_classes_tmp;
        int kpc_threads_counting_tmp;

        /* Make sure we get a consistent snapshot of these values */
        lck_mtx_lock(&kpc_thread_lock);

        kpc_thread_classes_tmp = kpc_thread_classes;
        kpc_threads_counting_tmp = kpc_threads_counting;

        lck_mtx_unlock(&kpc_thread_lock);

        if( kpc_threads_counting_tmp )
                return kpc_thread_classes_tmp;
        else
                return 0;
}

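/*
 * Enable per-thread counting for the given counter classes, or shut it down
 * when the classes select no counters.  Also makes sure the calling thread
 * gets a counter buffer allocated from its AST handler.
 */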
int
kpc_set_thread_counting(uint32_t classes)
{
        uint32_t count;

        lck_mtx_lock(&kpc_thread_lock);

        count = kpc_get_counter_count(classes);

        if( (classes == 0)
            || (count == 0) )
        {
                /* shut down */
                kpc_threads_counting = FALSE;
        }
        else
        {
                /* stash the config */
                kpc_thread_classes = classes;

                /* work out the size */
                kpc_thread_classes_count = count;
                assert(kpc_thread_classes_count <= KPC_MAX_COUNTERS);

                /* enable switch */
                kpc_threads_counting = TRUE;

                /* and schedule an AST for this thread... */
                if( !current_thread()->kpc_buf )
                {
                        current_thread()->kperf_flags |= T_KPC_ALLOC;
                        act_set_kperf(current_thread());
                }
        }

        kpc_off_cpu_update();
        lck_mtx_unlock(&kpc_thread_lock);

        return 0;
}

/* snapshot current PMCs and fold the delta into the given thread's counters */
static void
kpc_update_thread_counters( thread_t thread )
{
        uint32_t i;
        uint64_t *tmp = NULL;
        cpu_data_t *cpu = NULL;

        cpu = current_cpu_datap();

        /* 1. stash current PMCs into latest CPU block */
        kpc_get_cpu_counters( FALSE, kpc_thread_classes,
                              NULL, cpu->cpu_kpc_buf[1] );

        /* 2. apply delta to old thread */
        if( thread->kpc_buf )
                for( i = 0; i < kpc_thread_classes_count; i++ )
                        thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i];

        /* schedule any necessary allocations */
        if( !current_thread()->kpc_buf )
        {
                current_thread()->kperf_flags |= T_KPC_ALLOC;
                act_set_kperf(current_thread());
        }

        /* 3. switch the PMC block pointers */
        tmp = cpu->cpu_kpc_buf[1];
        cpu->cpu_kpc_buf[1] = cpu->cpu_kpc_buf[0];
        cpu->cpu_kpc_buf[0] = tmp;
}

/* get counter values for the current thread */
int
kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf)
{
        thread_t thread = current_thread();
        boolean_t enabled;

        /* buffer too small :( */
        if( *inoutcount < kpc_thread_classes_count )
                return EINVAL;

        /* copy data and actual size */
        if( !thread->kpc_buf )
                return EINVAL;

        enabled = ml_set_interrupts_enabled(FALSE);

        /* snap latest version of counters for this thread */
        kpc_update_thread_counters( current_thread() );

        /* copy out */
        memcpy( buf, thread->kpc_buf,
                kpc_thread_classes_count * sizeof(*buf) );
        *inoutcount = kpc_thread_classes_count;

        ml_set_interrupts_enabled(enabled);

        return 0;
}

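/* recompute whether the off-CPU hook needs to call into kpc */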
void
kpc_off_cpu_update(void)
{
        kpc_off_cpu_active = kpc_threads_counting;
}

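/* called when a thread goes off CPU: fold the PMC delta into that thread */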
void
kpc_off_cpu_internal(thread_t thread)
{
        if (kpc_threads_counting) {
                kpc_update_thread_counters(thread);
        }
}

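/* give threads created while counting is active a counter buffer of their own */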
void
kpc_thread_create(thread_t thread)
{
        /* nothing to do if we're not counting */
        if(!kpc_threads_counting)
                return;

        /* give the new thread a counterbuf */
        thread->kpc_buf = kpc_counterbuf_alloc();
}

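/* free a thread's counter buffer, if it has one */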
void
kpc_thread_destroy(thread_t thread)
{
        uint64_t *buf = NULL;

        /* usual case: no kpc buf, just return */
        if( !thread->kpc_buf )
                return;

        /* otherwise, don't leak */
        buf = thread->kpc_buf;
        thread->kpc_buf = NULL;
        kpc_counterbuf_free(buf);
}

/* ast callback on a thread */
void
kpc_thread_ast_handler( thread_t thread )
{
        /* see if we want an alloc */
        if( thread->kperf_flags & T_KPC_ALLOC )
                thread->kpc_buf = kpc_counterbuf_alloc();
}
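
/*
 * Illustrative sketch only (not part of xnu): one way a kernel-side caller
 * might drive the interfaces above to read the current thread's accumulated
 * counters.  KPC_MAX_COUNTERS and KPC_CLASS_CONFIGURABLE_MASK are assumed to
 * come from <kern/kpc.h>; the real consumer in xnu is the kpc sysctl path.
 */
#if 0
static int
kpc_read_curthread_counters_example(void)
{
        uint64_t buf[KPC_MAX_COUNTERS];
        uint32_t count = KPC_MAX_COUNTERS;
        int ret;

        /* turn on per-thread counting for the configurable counters */
        ret = kpc_set_thread_counting(KPC_CLASS_CONFIGURABLE_MASK);
        if (ret != 0)
                return ret;

        /* ... let the thread do some work ... */

        /* snapshot this thread's accumulated deltas into buf */
        ret = kpc_get_curthread_counters(&count, buf);
        if (ret != 0)
                return ret;

        /* count now holds the number of valid entries in buf */
        return 0;
}
#endif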