/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/hw_perfmon.h>
#include <ppc/hw_perfmon_mmcr.h>

decl_simple_lock_data(,hw_perfmon_lock)
static task_t hw_perfmon_owner = TASK_NULL;
static int hw_perfmon_thread_count = 0;

/* Notes:
 * - supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *   (we can only count user events anyway)
 * - marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * - a virtual counter PMI is passed up as a breakpoint exception
 */
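
/*
 * A note on the virtual-counter PMI: when a thread's 64-bit virtual counter
 * is about to overflow and the thread has opted in via
 * PPC_PERFMON_ENABLE_PMI_BRKPT, perfmon_handle_pmi() below surfaces the
 * event to the task as a Mach exception rather than a hardware interrupt,
 * roughly:
 *
 *     doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, srr0);
 *
 * so a user-level tool that wants PMI notification registers an
 * EXC_BREAKPOINT handler and checks for the EXC_PPC_PERFMON code.
 */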

int perfmon_init(void)
{
	simple_lock_init(&hw_perfmon_lock, FALSE);
	return KERN_SUCCESS;
}

/* PMC Facility Owner:
 * TASK_NULL   - no one owns it
 * kernel_task - owned by hw_perfmon
 * other task  - owned by another task
 */
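
/*
 * Ownership transitions implemented by perfmon_acquire_facility() and
 * perfmon_release_facility(), in brief:
 *
 *     TASK_NULL   -> task        always succeeds
 *     kernel_task -> task        succeeds only if hw_perfmon_thread_count is 0
 *     other task  -> task        denied (KERN_RESOURCE_SHORTAGE)
 *     owner       -> TASK_NULL   on release by the owner (for kernel_task,
 *                                only once hw_perfmon_thread_count is 0)
 */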

int perfmon_acquire_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;

	simple_lock(&hw_perfmon_lock);

	if(hw_perfmon_owner==task) {
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
#endif
		retval = KERN_SUCCESS;
		/* already own it */
	} else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
		hw_perfmon_owner = task;
		hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
#endif
		retval = KERN_SUCCESS;
	} else { /* someone already owns it */
		if(hw_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
				hw_perfmon_owner = task;
				hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
#endif
				retval = KERN_SUCCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
#endif
				retval = KERN_RESOURCE_SHORTAGE;
			}
		} else { /* non-kernel owner */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
#endif
			retval = KERN_RESOURCE_SHORTAGE;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}

int perfmon_release_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;
	task_t old_perfmon_owner;

	simple_lock(&hw_perfmon_lock);
	old_perfmon_owner = hw_perfmon_owner;	/* snapshot the owner under the lock */

	if(task!=hw_perfmon_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if(old_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
				retval = KERN_NO_ACCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
				hw_perfmon_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
			hw_perfmon_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}

int perfmon_enable(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;
	int curPMC;

	if(thr_act->mact.specFlags & perfMonitor) {
		return KERN_SUCCESS; /* already enabled */
	} else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE; /* facility is in use */
	} else { /* kernel_task owns the facility and this thread has not yet been counted */
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;
		simple_unlock(&hw_perfmon_lock);
	}

	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	if(retval==KERN_SUCCESS) {
		for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
			sv->save_pmc[curPMC] = 0;
			thr_act->mact.pmcovfl[curPMC] = 0;
		}
		thr_act->mact.perfmonFlags = 0;
		thr_act->mact.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
		if(thr_act==current_act()) {
			per_proc_info[cpu_number()].spcFlags |= perfMonitor; /* update per_proc */
		}
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
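
/*
 * Typical lifecycle, as a sketch: perfmon_enable() puts the thread's
 * virtualized MMCRs and PMCs into a known state with counting frozen
 * (disable_counters_always), after which the caller programs events,
 * thresholds, and timebase selection, then calls perfmon_start_counters().
 * The matching perfmon_disable() below undoes all of it and drops the
 * thread's reference on the facility.
 */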

int perfmon_disable(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

	if(!(thr_act->mact.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	} else {
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count--;
		simple_unlock(&hw_perfmon_lock);
		perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
	}

	thr_act->mact.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thr_act==current_act()) {
		per_proc_info[cpu_number()].spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	thr_act->mact.perfmonFlags = 0;
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thr_act->mact.pmcovfl[curPMC] = 0;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return KERN_SUCCESS;
}

int perfmon_clear_counters(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
#endif

	/* clear thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thr_act->mact.pmcovfl[curPMC] = 0;
	}

	return KERN_SUCCESS;
}

int perfmon_write_counters(thread_act_t thr_act, uint64_t *pmcs)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	/* update thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
		thr_act->mact.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
	}

	return KERN_SUCCESS;
}
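
/*
 * The split above, and the recombination in perfmon_read_counters() below,
 * treat each counter as 31 hardware bits plus a software overflow word:
 *
 *     save_pmc[i] = virtual & 0x7FFFFFFF;
 *     pmcovfl[i]  = virtual >> 31;
 *     virtual     = ((uint64_t)pmcovfl[i] << 31) | save_pmc[i];
 *
 * e.g. a virtual count of 0x123456789 round-trips as pmcovfl = 0x2,
 * save_pmc = 0x23456789.
 */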

int perfmon_read_counters(thread_act_t thr_act, uint64_t *pmcs)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

	/* retrieve from thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		pmcs[curPMC] = thr_act->mact.pmcovfl[curPMC];
		pmcs[curPMC] = pmcs[curPMC]<<31;
		pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
	}

	/* zero any unused counters on this platform */
	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			pmcs[PMC_7] = 0;
			pmcs[PMC_8] = 0;
			break;
		default:
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	return KERN_SUCCESS;
}

int perfmon_start_counters(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				/* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
				mmcr0_reg.field.on_pmi_stop_counting = FALSE;
				mmcr0_reg.field.enable_pmi = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

int perfmon_stop_counters(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

int perfmon_set_event(thread_act_t thr_act, int pmc, int event)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event before (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				ppc64_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_7:
						mmcr1_reg.field.pmc7_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_8:
						mmcr1_reg.field.pmc8_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

int perfmon_set_event_func(thread_act_t thr_act, uint32_t f)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event_func - func=%s\n",
		f==PPC_PERFMON_FUNC_FPU ? "FPU" :
		f==PPC_PERFMON_FUNC_ISU ? "ISU" :
		f==PPC_PERFMON_FUNC_IFU ? "IFU" :
		f==PPC_PERFMON_FUNC_VMX ? "VMX" :
		f==PPC_PERFMON_FUNC_IDU ? "IDU" :
		f==PPC_PERFMON_FUNC_GPS ? "GPS" :
		f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
		f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
		f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
		f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
		f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
		f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
		"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			retval = KERN_FAILURE; /* event functional unit only applies to 970 */
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr1_reg_t mmcr1_reg;
				ppc_func_unit_t func_unit;

				func_unit.value = f;
				mmcr1_reg.value = sv->save_mmcr1;

				mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
				mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
				mmcr1_reg.field.ttm2_select = 0; /* not used */
				mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
				mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
				mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

				sv->save_mmcr1 = mmcr1_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	return retval;
}

int perfmon_set_threshold(thread_act_t thr_act, int threshold)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* no multiplier on 750 */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr2_reg_t mmcr2_reg;
				int multiplier;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr2_reg.value = sv->save_mmcr2;

				if(threshold<=(2*63)) { /* 2x multiplier */
					if(threshold%2 != 0) {
						int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					multiplier = 2;
					mmcr2_reg.field.threshold_multiplier = 0;
				} else if(threshold<=(32*63)) { /* 32x multiplier */
					if(threshold%32 != 0) {
						int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					multiplier = 32;
					mmcr2_reg.field.threshold_multiplier = 1;
				} else {
					int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
					multiplier = 32;
					mmcr2_reg.field.threshold_multiplier = 1;
				}
				/* the 6-bit hardware field holds the threshold divided by the selected multiplier */
				mmcr0_reg.field.threshold_value = threshold/multiplier;

				sv->save_mmcr0 = mmcr0_reg.value;
				sv->save_mmcr2 = mmcr2_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
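
/*
 * Worked example for the 7400/7450 path above: a requested threshold of
 * 100 events fits the 2x range (100 <= 126) and is already even, so the
 * multiplier bit stays at 2x and the hardware field is programmed with
 * 100/2 = 50.  A request of 1000 falls in the 32x range, is rounded down
 * to 992, and is programmed as 992/32 = 31.
 */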

int perfmon_set_tbsel(thread_act_t thr_act, int tbsel)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}

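/*
 * perfmon_control() is the entry point reached from user space; arguments
 * arrive in the trap savearea registers as decoded below (r3 = target
 * thread port name, r4 = action, r5 = PMC number, r6 = value, r7 = user
 * pointer to an array of MAX_CPUPMC_COUNT uint64_t counters).  A minimal
 * caller-side sketch, assuming a wrapper that issues the trap with this
 * register convention:
 *
 *     uint64_t pmcs[MAX_CPUPMC_COUNT];
 *     perfmon_control_trap(mach_thread_self(),
 *                          PPC_PERFMON_READ_COUNTERS, 0, 0, pmcs);
 *
 * perfmon_control_trap() here is a hypothetical user-level stub, not
 * something defined in this file.
 */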
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_act_t thr_act = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thr_act = (thread_act_t) port_name_to_act(thr_port); // convert user space thread port name to a thread_act_t
	if(!thr_act) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	if(thr_act!=current_act()) {
		thread_suspend(thr_act);
	}

#ifdef HWPERFMON_DEBUG
	//	kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require the perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thr_act);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thr_act);
	}

	else { /* individual actions which do require the perfmon facility to be enabled */
		if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thr_act, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thr_act, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thr_act, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thr_act, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			if(val) {
				thr_act->mact.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thr_act->mact.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thr_act);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thr_act);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				if((error = copyin((void *)usr_pmcs_p, (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t)))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thr_act, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thr_act, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				if((error = copyout((void *)kern_pmcs, (void *)usr_pmcs_p, MAX_CPUPMC_COUNT*sizeof(uint64_t)))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thr_act);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thr_act!=current_act()) {
		thread_resume(thr_act);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}

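/*
 * PMI flow in perfmon_handle_pmi() below, in brief: a hardware PMC that has
 * crossed into its overflow bit (0x80000000) is folded into the thread's
 * pmcovfl[] word and reset to zero, so the hardware keeps counting while the
 * 64-bit virtual count accumulates in software.  Only when an overflow word
 * is itself about to wrap (0xFFFFFFFF) and the thread has opted in via
 * PPC_PERFMON_ENABLE_PMI_BRKPT is the event raised to the task as a
 * breakpoint exception.
 */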
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_act_t thr_act = current_act();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thr_act->mact.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thr_act->mact.pmcovfl[curPMC]==0xFFFFFFFF && (thr_act->mact.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			} else {
				thr_act->mact.pmcovfl[curPMC]++;
				thr_act->mact.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		switch(machine_slot[0].cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_7450:
				{
					ppc32_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			case CPU_SUBTYPE_POWERPC_970:
				{
					ppc64_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			default:
				retval = KERN_FAILURE;
				break;
		}
	}

	return retval;
}