/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/thread.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
#include <ppc/hw_perfmon.h>
#include <ppc/hw_perfmon_mmcr.h>

decl_simple_lock_data(,hw_perfmon_lock)
static task_t hw_perfmon_owner = TASK_NULL;
static int hw_perfmon_thread_count = 0;

/* Notes:
 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *  (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * -virtual counter PMI is passed up as a breakpoint exception
 */

int perfmon_init(void)
{
	simple_lock_init(&hw_perfmon_lock, FALSE);
	return KERN_SUCCESS;
}

/* PMC Facility Owner:
 * TASK_NULL - no one owns it
 * kernel_task - owned by hw_perfmon
 * other task - owned by another task
 */

int perfmon_acquire_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;

	simple_lock(&hw_perfmon_lock);

	if(hw_perfmon_owner==task) {
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
#endif
		retval = KERN_SUCCESS;
	} else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
		hw_perfmon_owner = task;
		hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
#endif
		retval = KERN_SUCCESS;
	} else { /* someone already owns it */
		if(hw_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
				hw_perfmon_owner = task;
				hw_perfmon_thread_count = 0;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
#endif
				retval = KERN_SUCCESS;
			} else { /* kernel owns it and the facility is in use */
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
#endif
				retval = KERN_RESOURCE_SHORTAGE;
			}
		} else { /* non-kernel owner */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
#endif
			retval = KERN_RESOURCE_SHORTAGE;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
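
/* perfmon_release_facility: give up ownership of the PMC facility. Only the
 * current owner may release it; a kernel_task owner is held as long as any
 * enabled perfmon threads remain (see perfmon_disable below). */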
int perfmon_release_facility(task_t task)
{
	kern_return_t retval = KERN_SUCCESS;
	task_t old_perfmon_owner = hw_perfmon_owner;

	simple_lock(&hw_perfmon_lock);

	if(task!=hw_perfmon_owner) {
		retval = KERN_NO_ACCESS;
	} else {
		if(old_perfmon_owner==kernel_task) {
			if(hw_perfmon_thread_count>0) {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
#endif
				retval = KERN_NO_ACCESS;
			} else {
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
#endif
				hw_perfmon_owner = TASK_NULL;
				retval = KERN_SUCCESS;
			}
		} else {
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
#endif
			hw_perfmon_owner = TASK_NULL;
			retval = KERN_SUCCESS;
		}
	}

	simple_unlock(&hw_perfmon_lock);
	return retval;
}
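
/* perfmon_enable: give one thread a virtualized set of PMCs. Acquires the
 * facility on behalf of kernel_task, counts the thread, and initializes the
 * thread's saved MMCR0 image with all counters frozen until
 * perfmon_start_counters is called. */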
int perfmon_enable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;
	int curPMC;

	if(thread->machine.specFlags & perfMonitor) {
		return KERN_SUCCESS; /* already enabled */
	} else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
		return KERN_RESOURCE_SHORTAGE; /* facility is in use */
	} else { /* kernel_task owns the facility and this thread has not yet been counted */
		simple_lock(&hw_perfmon_lock);
		hw_perfmon_thread_count++;
		simple_unlock(&hw_perfmon_lock);
	}

	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = 0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	if(retval==KERN_SUCCESS) {
		for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
			sv->save_pmc[curPMC] = 0;
			thread->machine.pmcovfl[curPMC] = 0;
		}
		thread->machine.perfmonFlags = 0;
		thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
		if(thread==current_thread()) {
			getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
		}
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
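
/* perfmon_disable: undo perfmon_enable for one thread. Drops the facility
 * thread count (perfmon_release_facility releases it at zero), clears the
 * thread's perfMonitor flag, and zeroes its saved MMCR and PMC images. */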
int perfmon_disable(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

	if(!(thread->machine.specFlags & perfMonitor)) {
		return KERN_NO_ACCESS; /* not enabled */
	}

	simple_lock(&hw_perfmon_lock);
	hw_perfmon_thread_count--;
	simple_unlock(&hw_perfmon_lock);
	perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */

	thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
	if(thread==current_thread()) {
		PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
	}
	sv->save_mmcr0 = 0;
	sv->save_mmcr1 = 0;
	sv->save_mmcr2 = 0;

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
	}
	thread->machine.perfmonFlags = 0;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return KERN_SUCCESS;
}
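
/* perfmon_clear_counters: zero the thread's virtualized counters - the saved
 * PMC images plus the software overflow words. Presumably the hardware PMCs
 * pick this up when the thread's state is next context switched in. */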
int perfmon_clear_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
#endif

	/* clear thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = 0;
		thread->machine.pmcovfl[curPMC] = 0;
	}

	return KERN_SUCCESS;
}
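
/* perfmon_write_counters: load user-supplied 64-bit counter values into the
 * thread's virtualized counters. Each value is split into a 31-bit hardware
 * PMC image (bit 0x80000000 is reserved as the overflow/PMI trigger) and a
 * 32-bit software overflow word; e.g. writing 0x123456789 stores 0x23456789
 * in save_pmc[] and 0x2 in pmcovfl[]. */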
int perfmon_write_counters(thread_t thread, uint64_t *pmcs)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	/* update thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
		thread->machine.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
	}

	return KERN_SUCCESS;
}
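
/* perfmon_read_counters: reassemble the 64-bit view of each counter:
 * pmcs[n] = ((uint64_t)pmcovfl[n] << 31) | (save_pmc[n] & 0x7FFFFFFF). */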
int perfmon_read_counters(thread_t thread, uint64_t *pmcs)
{
	struct savearea *sv = thread->machine.pcb;
	int curPMC;

	/* retrieve from thread copy */
	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		pmcs[curPMC] = thread->machine.pmcovfl[curPMC];
		pmcs[curPMC] = pmcs[curPMC]<<31;
		pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
	}

	/* zero any unused counters on this platform */
	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			/* these processors implement at most 6 PMCs */
			pmcs[PMC_7] = 0;
			pmcs[PMC_8] = 0;
			break;
		default:
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
#endif

	return KERN_SUCCESS;
}
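
/* perfmon_start_counters: unfreeze the thread's counters by clearing
 * disable_counters_always in its saved MMCR0 image. PMI is enabled only on
 * processors where it works (see the 750/7400 errata note below). */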
int perfmon_start_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				/* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
				mmcr0_reg.field.on_pmi_stop_counting = FALSE;
				mmcr0_reg.field.enable_pmi = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
				mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.on_pmi_stop_counting = TRUE;
				mmcr0_reg.field.enable_pmi = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
				mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
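
/* perfmon_stop_counters: freeze the thread's counters by setting
 * disable_counters_always in its saved MMCR0 image. */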
int perfmon_stop_counters(thread_t thread)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = TRUE;
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
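
/* perfmon_set_event: route an event selector to one PMC. PMC1/PMC2 events
 * are encoded in MMCR0 and the remaining counters in MMCR1; the number of
 * programmable counters varies by CPU (4 on 750/7400, 6 on 7450, 8 on 970). */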
int perfmon_set_event(thread_t thread, int pmc, int event)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;
				ppc64_mmcr1_reg_t mmcr1_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr1_reg.value = sv->save_mmcr1;

				switch(pmc) {
					case PMC_1:
						mmcr0_reg.field.pmc1_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_2:
						mmcr0_reg.field.pmc2_event = event;
						sv->save_mmcr0 = mmcr0_reg.value;
						break;
					case PMC_3:
						mmcr1_reg.field.pmc3_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_4:
						mmcr1_reg.field.pmc4_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_5:
						mmcr1_reg.field.pmc5_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_6:
						mmcr1_reg.field.pmc6_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_7:
						mmcr1_reg.field.pmc7_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					case PMC_8:
						mmcr1_reg.field.pmc8_event = event;
						sv->save_mmcr1 = mmcr1_reg.value;
						break;
					default:
						retval = KERN_FAILURE;
						break;
				}
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
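
/* perfmon_set_event_func: select which functional unit feeds the performance
 * monitor event bus (970 only). The packed unit selector is unpacked into the
 * MMCR1 TTMx/lane/speculative routing fields. */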
int perfmon_set_event_func(thread_t thread, uint32_t f)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event_func - func=%s\n",
			f==PPC_PERFMON_FUNC_FPU ? "FPU" :
			f==PPC_PERFMON_FUNC_ISU ? "ISU" :
			f==PPC_PERFMON_FUNC_IFU ? "IFU" :
			f==PPC_PERFMON_FUNC_VMX ? "VMX" :
			f==PPC_PERFMON_FUNC_IDU ? "IDU" :
			f==PPC_PERFMON_FUNC_GPS ? "GPS" :
			f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
			f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
			f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
			f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
			f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
			f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
			"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			retval = KERN_FAILURE; /* event functional unit only applies to 970 */
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr1_reg_t mmcr1_reg;
				ppc_func_unit_t func_unit;

				func_unit.value = f;
				mmcr1_reg.value = sv->save_mmcr1;

				mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
				mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
				mmcr1_reg.field.ttm2_select = 0; /* not used */
				mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
				mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
				mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
				mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

				sv->save_mmcr1 = mmcr1_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

	return retval;
}
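
/* perfmon_set_threshold: program the MMCR0 threshold field. The raw field
 * holds 0-63; on 7400/7450 a 2x or 32x multiplier in MMCR2 extends the range,
 * so out-of-range or non-divisible requests are rounded or clamped with a
 * debug warning. */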
int perfmon_set_threshold(thread_t thread, int threshold)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* no multiplier on 750 */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;
				ppc32_mmcr2_reg_t mmcr2_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				mmcr2_reg.value = sv->save_mmcr2;

				if(threshold<=(2*63)) { /* 2x multiplier */
					if(threshold%2 != 0) {
						int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 0;
				} else if(threshold<=(32*63)) { /* 32x multiplier */
					if(threshold%32 != 0) {
						int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
						kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
						threshold = newThreshold;
					}
					mmcr2_reg.field.threshold_multiplier = 1;
				} else {
					int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
					mmcr2_reg.field.threshold_multiplier = 1;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
				sv->save_mmcr2 = mmcr2_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;

				if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
					int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr0_reg.field.threshold_value = threshold;

				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
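
/* perfmon_set_tbsel: choose which timebase bit transition generates a
 * timebase event (selectors 0x0-0x3). */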
int perfmon_set_tbsel(thread_t thread, int tbsel)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = sv->save_mmcr0;
				switch(tbsel) {
					case 0x0:
					case 0x1:
					case 0x2:
					case 0x3:
						mmcr0_reg.field.timebase_bit_selector = tbsel;
						break;
					default:
						retval = KERN_FAILURE;
				}
				sv->save_mmcr0 = mmcr0_reg.value;
			}
			break;
		default:
			retval = KERN_FAILURE;
			break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
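
/* perfmon_control: dispatcher for user-level perfmon requests. Arguments
 * arrive in the saved registers: r3 = thread port name, r4 = action, r5 = pmc,
 * r6 = val, r7 = user buffer of MAX_CPUPMC_COUNT uint64_t counters.
 * ENABLE/DISABLE stand alone; the SET_* actions require the facility to be
 * enabled for the thread; the STOP/CLEAR/WRITE/READ/START counter actions are
 * bit flags and may be combined in a single call. A hypothetical user-side
 * sequence (the trap wrapper "perfmon" below is illustrative, not part of
 * this file):
 *
 *   perfmon(mach_thread_self(), PPC_PERFMON_ENABLE, 0, 0, NULL);
 *   perfmon(mach_thread_self(), PPC_PERFMON_SET_EVENT, PMC_1, event, NULL);
 *   perfmon(mach_thread_self(), PPC_PERFMON_START_COUNTERS, 0, 0, NULL);
 *   ...
 *   perfmon(mach_thread_self(), PPC_PERFMON_READ_COUNTERS, 0, 0, pmcs);
 */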
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_t thread = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thread = (thread_t) port_name_to_thread(thr_port); // convert user space thread port name to a thread_t
	if(!thread) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	if(thread!=current_thread()) {
		thread_suspend(thread);
	}

#ifdef HWPERFMON_DEBUG
	//	kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thread);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thread);
	}
	else { /* individual actions which do require perfmon facility to be enabled */
		if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thread, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thread, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thread, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thread, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			if(val) {
				thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				if(error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				if(error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p), MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thread!=current_thread()) {
		thread_resume(thread);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}
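
/* perfmon_handle_pmi: performance monitor interrupt handler. A hardware PMC
 * whose high bit (0x80000000) is set has overflowed: the handler bumps the
 * thread's software overflow word and restarts the counter at zero, which is
 * what virtualizes each PMC beyond 32 bits. When the overflow word saturates
 * at 0xFFFFFFFF and the thread requested PMI breakpoints, the event is
 * delivered as an EXC_BREAKPOINT/EXC_PPC_PERFMON exception instead. */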
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_t thread = current_thread();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thread->machine.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thread->machine.pmcovfl[curPMC]==0xFFFFFFFF && (thread->machine.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				retval = KERN_FAILURE;
			} else {
				thread->machine.pmcovfl[curPMC]++;
				thread->machine.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_7450:
				{
					ppc32_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			case CPU_SUBTYPE_POWERPC_970:
				{
					ppc64_mmcr0_reg_t mmcr0_reg;

					mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
					mmcr0_reg.field.disable_counters_always = FALSE;
					mmcr0_reg.field.enable_pmi = TRUE;
					thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
				}
				retval = KERN_SUCCESS;
				break;
			default:
				retval = KERN_FAILURE;
				break;
		}
	}

	return retval;
}