/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/hw_perfmon.h>
#include <ppc/hw_perfmon_mmcr.h>
decl_simple_lock_data(,hw_perfmon_lock)
static task_t hw_perfmon_owner = TASK_NULL;
static int hw_perfmon_thread_count = 0;
/* Notes:
 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *  (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * -virtual counter PMI is passed up as a breakpoint exception
 */
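
/* A minimal sketch (values hypothetical) of the per-thread virtualization the
 * notes above refer to: each 64-bit virtual counter is kept as a 31-bit
 * hardware PMC image plus a 32-bit overflow word, as in
 *
 *   uint32_t pmc  = virt & 0x7FFFFFFF;          // low 31 bits, lives in save_pmc[]
 *   uint32_t ovfl = (virt >> 31) & 0xFFFFFFFF;  // remainder, lives in mact.pmcovfl[]
 *
 * (see perfmon_read_counters/perfmon_write_counters below).
 */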
int perfmon_init(void)
{
	simple_lock_init(&hw_perfmon_lock, FALSE);
	return KERN_SUCCESS;
}
/* PMC Facility Owner:
 * TASK_NULL - no one owns it
 * kernel_task - owned by hw_perfmon
 * other task - owned by another task
 */
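
/* A hedged usage sketch (hypothetical caller) of the ownership protocol,
 * pairing every successful acquire with a release:
 *
 *   if (perfmon_acquire_facility(current_task()) == KERN_SUCCESS) {
 *       ... program and read the PMCs ...
 *       perfmon_release_facility(current_task());
 *   }
 */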
int perfmon_acquire_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;

	simple_lock(&hw_perfmon_lock);

	if(hw_perfmon_owner==task) {
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
#endif
		retval = KERN_SUCCESS;
	} else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
		hw_perfmon_owner = task;
		hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
#endif
		retval = KERN_SUCCESS;
	} else { /* someone already owns it */
		if(hw_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
				hw_perfmon_owner = task;
				hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
#endif
				retval = KERN_SUCCESS;
			} else { /* kernel is owner and facility is in use */
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
#endif
				retval = KERN_RESOURCE_SHORTAGE;
			}
		} else { /* non-kernel owner */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
#endif
			retval = KERN_RESOURCE_SHORTAGE;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
int perfmon_release_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;
	task_t old_perfmon_owner = hw_perfmon_owner;

	simple_lock(&hw_perfmon_lock);

	if(task!=hw_perfmon_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if(old_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
				retval = KERN_NO_ACCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
				hw_perfmon_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
			hw_perfmon_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
int perfmon_enable(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;
	int curPMC;

	if(thr_act->mact.specFlags & perfMonitor) {
		return KERN_SUCCESS; /* already enabled */
	} else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE; /* facility is in use */
	} else { /* kernel_task owns the facility and this thread has not yet been counted */
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;
		simple_unlock(&hw_perfmon_lock);
	}

	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	if(retval==KERN_SUCCESS) {
		for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
			sv->save_pmc[curPMC] = 0;
			thr_act->mact.pmcovfl[curPMC] = 0;
		}
		thr_act->mact.perfmonFlags = 0;
		thr_act->mact.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
		if(thr_act==current_act()) {
			per_proc_info[cpu_number()].spcFlags |= perfMonitor; /* update per_proc */
		}
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_disable(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

	if(!(thr_act->mact.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	}

	simple_lock(&hw_perfmon_lock);
	hw_perfmon_thread_count--;
	simple_unlock(&hw_perfmon_lock);
	perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */

	thr_act->mact.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thr_act==current_act()) {
		per_proc_info[cpu_number()].spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thr_act->mact.pmcovfl[curPMC] = 0;
	}
	thr_act->mact.perfmonFlags = 0;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return KERN_SUCCESS;
}
int perfmon_clear_counters(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
#endif

	/* clear thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thr_act->mact.pmcovfl[curPMC] = 0;
	}

	return KERN_SUCCESS;
}
int perfmon_write_counters(thread_act_t thr_act, uint64_t *pmcs)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	/* update thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;               /* low 31 bits go to the hardware PMC image */
		thr_act->mact.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF; /* remaining bits go to the virtual overflow word */
	}

	return KERN_SUCCESS;
}
int perfmon_read_counters(thread_act_t thr_act, uint64_t *pmcs)
{
	struct savearea *sv = thr_act->mact.pcb;
	int curPMC;

	/* retrieve from thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		pmcs[curPMC] = thr_act->mact.pmcovfl[curPMC];
		pmcs[curPMC] = pmcs[curPMC]<<31;
		pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
	}

	/* zero any unused counters on this platform */
	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			pmcs[PMC_7] = 0; /* only six PMCs on these CPUs */
			pmcs[PMC_8] = 0;
			break;
		default:
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	return KERN_SUCCESS;
}
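
/* A minimal sketch (hypothetical values) of the split/reassembly performed by
 * perfmon_write_counters() and perfmon_read_counters() above:
 *
 *   uint64_t virt = 0x123456789ULL;                // virtual 64-bit count
 *   uint32_t pmc  = virt & 0x7FFFFFFF;             // stored in sv->save_pmc[]
 *   uint32_t ovfl = (virt >> 31) & 0xFFFFFFFF;     // stored in mact.pmcovfl[]
 *   uint64_t back = ((uint64_t)ovfl << 31) | pmc;  // back == virt
 */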
int perfmon_start_counters(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				/* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
				mmcr0_reg.field.on_pmi_stop_counting = FALSE;
				mmcr0_reg.field.enable_pmi = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_stop_counters(thread_act_t thr_act)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_set_event(thread_act_t thr_act, int pmc, int event)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				/* PMC1 and PMC2 events live in MMCR0; PMC3 and PMC4 events live in MMCR1 */
				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				ppc64_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_7:
						mmcr1_reg.field.pmc7_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_8:
						mmcr1_reg.field.pmc8_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
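
/* Hedged usage sketch (the event number 2 is a placeholder, not a documented
 * event encoding): route an event to PMC1 of the current thread, then start
 * counting:
 *
 *   perfmon_set_event(current_act(), PMC_1, 2);
 *   perfmon_start_counters(current_act());
 */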
int perfmon_set_event_func(thread_act_t thr_act, uint32_t f)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event_func - func=%s\n",
		f==PPC_PERFMON_FUNC_FPU ? "FPU" :
		f==PPC_PERFMON_FUNC_ISU ? "ISU" :
		f==PPC_PERFMON_FUNC_IFU ? "IFU" :
		f==PPC_PERFMON_FUNC_VMX ? "VMX" :
		f==PPC_PERFMON_FUNC_IDU ? "IDU" :
		f==PPC_PERFMON_FUNC_GPS ? "GPS" :
		f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
		f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
		f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
		f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
		f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
		f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
		"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			retval = KERN_FAILURE; /* event functional unit only applies to 970 */
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr1_reg_t mmcr1_reg;
				ppc_func_unit_t func_unit;

				func_unit.value = f;
				mmcr1_reg.value = sv->save_mmcr1;

				mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
				mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
				mmcr1_reg.field.ttm2_select = 0; /* not used */
				mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
				mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
				mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

				sv->save_mmcr1 = mmcr1_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	return retval;
}
int perfmon_set_threshold(thread_act_t thr_act, int threshold)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* no multiplier on 750 */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr2_reg_t mmcr2_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr2_reg.value = sv->save_mmcr2;

				if(threshold<=(2*63)) { /* 2x multiplier */
					if(threshold%2 != 0) {
						int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 0;
				} else if(threshold<=(32*63)) { /* 32x multiplier */
					if(threshold%32 != 0) {
						int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 1;
				} else {
					int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
					mmcr2_reg.field.threshold_multiplier = 1;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
				sv->save_mmcr2 = mmcr2_reg.value;
			}
			break;

		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;

		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
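
/* Worked examples of the 7400/7450 rounding above: a supplied threshold of 45
 * falls in the 2x range (45 <= 126) and rounds down to 44 with the multiplier
 * bit clear; a threshold of 200 falls in the 32x range (200 <= 2016) and
 * rounds down to 192 with the multiplier bit set; anything above 32*63 = 2016
 * clamps to 2016.
 */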
int perfmon_set_tbsel(thread_act_t thr_act, int tbsel)
{
	struct savearea *sv = thr_act->mact.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(machine_slot[0].cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_act_t thr_act = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thr_act = (thread_act_t) port_name_to_act(thr_port); // convert user space thread port name to a thread_act_t
	if(!thr_act) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	if(thr_act!=current_act()) {
		thread_suspend(thr_act);
	}

#ifdef HWPERFMON_DEBUG
	// kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thr_act);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thr_act);
	}

	else { /* individual actions which do require perfmon facility to be enabled */
		if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thr_act, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thr_act, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thr_act, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thr_act, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			if(val) {
				thr_act->mact.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thr_act->mact.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thr_act);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thr_act);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				if(error = copyin((void *)usr_pmcs_p, (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thr_act, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thr_act, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				if(error = copyout((void *)kern_pmcs, (void *)usr_pmcs_p, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thr_act);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thr_act!=current_act()) {
		thread_resume(thr_act);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}
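
/* A hedged sketch of how a user-space caller might drive this trap, assuming
 * a syscall stub named ppc_perfmon() that loads r3..r7 in order (the stub
 * name is hypothetical; only the register layout above is from this file):
 *
 *   uint64_t pmcs[MAX_CPUPMC_COUNT];
 *   ppc_perfmon(mach_thread_self(), PPC_PERFMON_ENABLE, 0, 0, NULL);
 *   ppc_perfmon(mach_thread_self(), PPC_PERFMON_SET_EVENT, PMC_1, event, NULL);
 *   ppc_perfmon(mach_thread_self(),
 *               PPC_PERFMON_CLEAR_COUNTERS | PPC_PERFMON_START_COUNTERS, 0, 0, NULL);
 *   ... run the workload being measured ...
 *   ppc_perfmon(mach_thread_self(),
 *               PPC_PERFMON_STOP_COUNTERS | PPC_PERFMON_READ_COUNTERS, 0, 0, pmcs);
 */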
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_act_t thr_act = current_act();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thr_act->mact.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thr_act->mact.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thr_act->mact.pmcovfl[curPMC]==0xFFFFFFFF && (thr_act->mact.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			} else {
				thr_act->mact.pmcovfl[curPMC]++;
				thr_act->mact.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		switch(machine_slot[0].cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_7450:
				{
					ppc32_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			case CPU_SUBTYPE_POWERPC_970:
				{
					ppc64_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thr_act->mact.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thr_act->mact.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			default:
				retval = KERN_FAILURE;
				break;
		}
	}

	return retval;
}