/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <kern/thread.h>
30 #include <kern/ipc_tt.h>
31 #include <ppc/exception.h>
32 #include <ppc/savearea.h>
33 #include <ppc/hw_perfmon.h>
34 #include <ppc/hw_perfmon_mmcr.h>
36 #include <mach/thread_act.h>
38 decl_simple_lock_data(,hw_perfmon_lock
)
39 static task_t hw_perfmon_owner
= TASK_NULL
;
40 static int hw_perfmon_thread_count
= 0;
/* Notes:
 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
 *  (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
 * -virtual counter PMI is passed up as a breakpoint exception
 */
49 int perfmon_init(void)
51 simple_lock_init(&hw_perfmon_lock
, FALSE
);
/* PMC Facility Owner:
 * TASK_NULL - no one owns it
 * kernel_task - owned by hw_perfmon
 * other task - owned by another task
 */
61 int perfmon_acquire_facility(task_t task
)
63 kern_return_t retval
= KERN_SUCCESS
;
65 simple_lock(&hw_perfmon_lock
);
67 if(hw_perfmon_owner
==task
) {
68 #ifdef HWPERFMON_DEBUG
69 kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
71 retval
= KERN_SUCCESS
;
73 } else if(hw_perfmon_owner
==TASK_NULL
) { /* no one owns it */
74 hw_perfmon_owner
= task
;
75 hw_perfmon_thread_count
= 0;
76 #ifdef HWPERFMON_DEBUG
77 kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
79 retval
= KERN_SUCCESS
;
80 } else { /* someone already owns it */
81 if(hw_perfmon_owner
==kernel_task
) {
82 if(hw_perfmon_thread_count
==0) { /* kernel owns it but no threads using it */
83 hw_perfmon_owner
= task
;
84 hw_perfmon_thread_count
= 0;
85 #ifdef HWPERFMON_DEBUG
86 kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
88 retval
= KERN_SUCCESS
;
90 #ifdef HWPERFMON_DEBUG
91 kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
93 retval
= KERN_RESOURCE_SHORTAGE
;
95 } else { /* non-kernel owner */
96 #ifdef HWPERFMON_DEBUG
97 kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
99 retval
= KERN_RESOURCE_SHORTAGE
;
103 simple_unlock(&hw_perfmon_lock
);
107 int perfmon_release_facility(task_t task
)
109 kern_return_t retval
= KERN_SUCCESS
;
110 task_t old_perfmon_owner
= hw_perfmon_owner
;
112 simple_lock(&hw_perfmon_lock
);
114 if(task
!=hw_perfmon_owner
) {
115 retval
= KERN_NO_ACCESS
;
117 if(old_perfmon_owner
==kernel_task
) {
118 if(hw_perfmon_thread_count
>0) {
119 #ifdef HWPERFMON_DEBUG
120 kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
122 retval
= KERN_NO_ACCESS
;
124 #ifdef HWPERFMON_DEBUG
125 kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
127 hw_perfmon_owner
= TASK_NULL
;
128 retval
= KERN_SUCCESS
;
131 #ifdef HWPERFMON_DEBUG
132 kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
134 hw_perfmon_owner
= TASK_NULL
;
135 retval
= KERN_SUCCESS
;
139 simple_unlock(&hw_perfmon_lock
);
144 perfmon_enable(thread_t thread
)
146 struct savearea
*sv
= thread
->machine
.pcb
;
147 kern_return_t retval
= KERN_SUCCESS
;
150 if(thread
->machine
.specFlags
& perfMonitor
) {
151 return KERN_SUCCESS
; /* already enabled */
152 } else if(perfmon_acquire_facility(kernel_task
)!=KERN_SUCCESS
) {
153 return KERN_RESOURCE_SHORTAGE
; /* facility is in use */
154 } else { /* kernel_task owns the faciltity and this thread has not yet been counted */
155 simple_lock(&hw_perfmon_lock
);
156 hw_perfmon_thread_count
++;
157 simple_unlock(&hw_perfmon_lock
);
163 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
164 case CPU_SUBTYPE_POWERPC_750
:
165 case CPU_SUBTYPE_POWERPC_7400
:
166 case CPU_SUBTYPE_POWERPC_7450
:
168 ppc32_mmcr0_reg_t mmcr0_reg
;
171 mmcr0_reg
.field
.disable_counters_always
= TRUE
;
172 mmcr0_reg
.field
.disable_counters_supervisor
= TRUE
; /* no choice */
173 sv
->save_mmcr0
= mmcr0_reg
.value
;
176 case CPU_SUBTYPE_POWERPC_970
:
178 ppc64_mmcr0_reg_t mmcr0_reg
;
181 mmcr0_reg
.field
.disable_counters_always
= TRUE
;
182 mmcr0_reg
.field
.disable_counters_supervisor
= TRUE
; /* no choice */
183 sv
->save_mmcr0
= mmcr0_reg
.value
;
187 retval
= KERN_FAILURE
;
191 if(retval
==KERN_SUCCESS
) {
192 for(curPMC
=0; curPMC
<MAX_CPUPMC_COUNT
; curPMC
++) {
193 sv
->save_pmc
[curPMC
] = 0;
194 thread
->machine
.pmcovfl
[curPMC
] = 0;
196 thread
->machine
.perfmonFlags
= 0;
197 thread
->machine
.specFlags
|= perfMonitor
; /* enable perf monitor facility for this thread */
198 if(thread
==current_thread()) {
199 getPerProc()->spcFlags
|= perfMonitor
; /* update per_proc */
203 #ifdef HWPERFMON_DEBUG
204 kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
210 int perfmon_disable(thread_t thread
)
212 struct savearea
*sv
= thread
->machine
.pcb
;
215 if(!(thread
->machine
.specFlags
& perfMonitor
)) {
216 return KERN_NO_ACCESS
; /* not enabled */
218 simple_lock(&hw_perfmon_lock
);
219 hw_perfmon_thread_count
--;
220 simple_unlock(&hw_perfmon_lock
);
221 perfmon_release_facility(kernel_task
); /* will release if hw_perfmon_thread_count is 0 */
224 thread
->machine
.specFlags
&= ~perfMonitor
; /* disable perf monitor facility for this thread */
225 if(thread
==current_thread()) {
226 PerProcTable
[cpu_number()].ppe_vaddr
->spcFlags
&= ~perfMonitor
; /* update per_proc */
232 for(curPMC
=0; curPMC
<MAX_CPUPMC_COUNT
; curPMC
++) {
233 sv
->save_pmc
[curPMC
] = 0;
234 thread
->machine
.pmcovfl
[curPMC
] = 0;
235 thread
->machine
.perfmonFlags
= 0;
238 #ifdef HWPERFMON_DEBUG
239 kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
246 perfmon_clear_counters(thread_t thread
)
248 struct savearea
*sv
= thread
->machine
.pcb
;
251 #ifdef HWPERFMON_DEBUG
252 kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
255 /* clear thread copy */
256 for(curPMC
=0; curPMC
<MAX_CPUPMC_COUNT
; curPMC
++) {
257 sv
->save_pmc
[curPMC
] = 0;
258 thread
->machine
.pmcovfl
[curPMC
] = 0;
265 perfmon_write_counters(thread_t thread
, uint64_t *pmcs
)
267 struct savearea
*sv
= thread
->machine
.pcb
;
270 #ifdef HWPERFMON_DEBUG
271 kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv
->save_mmcr0
, pmcs
[PMC_1
], pmcs
[PMC_2
], pmcs
[PMC_3
], pmcs
[PMC_4
], pmcs
[PMC_5
], pmcs
[PMC_6
], pmcs
[PMC_7
], pmcs
[PMC_8
]);
274 /* update thread copy */
275 for(curPMC
=0; curPMC
<MAX_CPUPMC_COUNT
; curPMC
++) {
276 sv
->save_pmc
[curPMC
] = pmcs
[curPMC
] & 0x7FFFFFFF;
277 thread
->machine
.pmcovfl
[curPMC
] = (pmcs
[curPMC
]>>31) & 0xFFFFFFFF;
284 perfmon_read_counters(thread_t thread
, uint64_t *pmcs
)
286 struct savearea
*sv
= thread
->machine
.pcb
;
289 /* retrieve from thread copy */
290 for(curPMC
=0; curPMC
<MAX_CPUPMC_COUNT
; curPMC
++) {
291 pmcs
[curPMC
] = thread
->machine
.pmcovfl
[curPMC
];
292 pmcs
[curPMC
] = pmcs
[curPMC
]<<31;
293 pmcs
[curPMC
] |= (sv
->save_pmc
[curPMC
] & 0x7FFFFFFF);
296 /* zero any unused counters on this platform */
297 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
298 case CPU_SUBTYPE_POWERPC_750
:
299 case CPU_SUBTYPE_POWERPC_7400
:
300 case CPU_SUBTYPE_POWERPC_7450
:
308 #ifdef HWPERFMON_DEBUG
309 kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv
->save_mmcr0
, pmcs
[PMC_1
], pmcs
[PMC_2
], pmcs
[PMC_3
], pmcs
[PMC_4
], pmcs
[PMC_5
], pmcs
[PMC_6
], pmcs
[PMC_7
], pmcs
[PMC_8
]);
316 perfmon_start_counters(thread_t thread
)
318 struct savearea
*sv
= thread
->machine
.pcb
;
319 kern_return_t retval
= KERN_SUCCESS
;
321 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
322 case CPU_SUBTYPE_POWERPC_750
:
323 case CPU_SUBTYPE_POWERPC_7400
:
325 ppc32_mmcr0_reg_t mmcr0_reg
;
326 mmcr0_reg
.value
= sv
->save_mmcr0
;
327 mmcr0_reg
.field
.disable_counters_always
= FALSE
;
328 /* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
329 mmcr0_reg
.field
.on_pmi_stop_counting
= FALSE
;
330 mmcr0_reg
.field
.enable_pmi
= FALSE
;
331 mmcr0_reg
.field
.enable_pmi_on_pmc1
= FALSE
;
332 mmcr0_reg
.field
.enable_pmi_on_pmcn
= FALSE
;
333 sv
->save_mmcr0
= mmcr0_reg
.value
;
336 case CPU_SUBTYPE_POWERPC_7450
:
338 ppc32_mmcr0_reg_t mmcr0_reg
;
339 mmcr0_reg
.value
= sv
->save_mmcr0
;
340 mmcr0_reg
.field
.disable_counters_always
= FALSE
;
341 mmcr0_reg
.field
.on_pmi_stop_counting
= TRUE
;
342 mmcr0_reg
.field
.enable_pmi
= TRUE
;
343 mmcr0_reg
.field
.enable_pmi_on_pmc1
= TRUE
;
344 mmcr0_reg
.field
.enable_pmi_on_pmcn
= TRUE
;
345 sv
->save_mmcr0
= mmcr0_reg
.value
;
348 case CPU_SUBTYPE_POWERPC_970
:
350 ppc64_mmcr0_reg_t mmcr0_reg
;
351 mmcr0_reg
.value
= sv
->save_mmcr0
;
352 mmcr0_reg
.field
.disable_counters_always
= FALSE
;
353 mmcr0_reg
.field
.on_pmi_stop_counting
= TRUE
;
354 mmcr0_reg
.field
.enable_pmi
= TRUE
;
355 mmcr0_reg
.field
.enable_pmi_on_pmc1
= TRUE
;
356 mmcr0_reg
.field
.enable_pmi_on_pmcn
= TRUE
;
357 sv
->save_mmcr0
= mmcr0_reg
.value
;
361 retval
= KERN_FAILURE
;
365 #ifdef HWPERFMON_DEBUG
366 kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
373 perfmon_stop_counters(thread_t thread
)
375 struct savearea
*sv
= thread
->machine
.pcb
;
376 kern_return_t retval
= KERN_SUCCESS
;
378 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
379 case CPU_SUBTYPE_POWERPC_750
:
380 case CPU_SUBTYPE_POWERPC_7400
:
381 case CPU_SUBTYPE_POWERPC_7450
:
383 ppc32_mmcr0_reg_t mmcr0_reg
;
384 mmcr0_reg
.value
= sv
->save_mmcr0
;
385 mmcr0_reg
.field
.disable_counters_always
= TRUE
;
386 sv
->save_mmcr0
= mmcr0_reg
.value
;
389 case CPU_SUBTYPE_POWERPC_970
:
391 ppc64_mmcr0_reg_t mmcr0_reg
;
392 mmcr0_reg
.value
= sv
->save_mmcr0
;
393 mmcr0_reg
.field
.disable_counters_always
= TRUE
;
394 sv
->save_mmcr0
= mmcr0_reg
.value
;
398 retval
= KERN_FAILURE
;
402 #ifdef HWPERFMON_DEBUG
403 kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
410 perfmon_set_event(thread_t thread
, int pmc
, int event
)
412 struct savearea
*sv
= thread
->machine
.pcb
;
413 kern_return_t retval
= KERN_SUCCESS
;
415 #ifdef HWPERFMON_DEBUG
416 kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc
, event
, sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
419 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
420 case CPU_SUBTYPE_POWERPC_750
:
421 case CPU_SUBTYPE_POWERPC_7400
:
423 ppc32_mmcr0_reg_t mmcr0_reg
;
424 ppc32_mmcr1_reg_t mmcr1_reg
;
426 mmcr0_reg
.value
= sv
->save_mmcr0
;
427 mmcr1_reg
.value
= sv
->save_mmcr1
;
431 mmcr0_reg
.field
.pmc1_event
= event
;
432 sv
->save_mmcr0
= mmcr0_reg
.value
;
435 mmcr0_reg
.field
.pmc2_event
= event
;
436 sv
->save_mmcr0
= mmcr0_reg
.value
;
439 mmcr1_reg
.field
.pmc3_event
= event
;
440 sv
->save_mmcr1
= mmcr1_reg
.value
;
443 mmcr1_reg
.field
.pmc4_event
= event
;
444 sv
->save_mmcr1
= mmcr1_reg
.value
;
447 retval
= KERN_FAILURE
;
452 case CPU_SUBTYPE_POWERPC_7450
:
454 ppc32_mmcr0_reg_t mmcr0_reg
;
455 ppc32_mmcr1_reg_t mmcr1_reg
;
457 mmcr0_reg
.value
= sv
->save_mmcr0
;
458 mmcr1_reg
.value
= sv
->save_mmcr1
;
462 mmcr0_reg
.field
.pmc1_event
= event
;
463 sv
->save_mmcr0
= mmcr0_reg
.value
;
466 mmcr0_reg
.field
.pmc2_event
= event
;
467 sv
->save_mmcr0
= mmcr0_reg
.value
;
470 mmcr1_reg
.field
.pmc3_event
= event
;
471 sv
->save_mmcr1
= mmcr1_reg
.value
;
474 mmcr1_reg
.field
.pmc4_event
= event
;
475 sv
->save_mmcr1
= mmcr1_reg
.value
;
478 mmcr1_reg
.field
.pmc5_event
= event
;
479 sv
->save_mmcr1
= mmcr1_reg
.value
;
482 mmcr1_reg
.field
.pmc6_event
= event
;
483 sv
->save_mmcr1
= mmcr1_reg
.value
;
486 retval
= KERN_FAILURE
;
491 case CPU_SUBTYPE_POWERPC_970
:
493 ppc64_mmcr0_reg_t mmcr0_reg
;
494 ppc64_mmcr1_reg_t mmcr1_reg
;
496 mmcr0_reg
.value
= sv
->save_mmcr0
;
497 mmcr1_reg
.value
= sv
->save_mmcr1
;
501 mmcr0_reg
.field
.pmc1_event
= event
;
502 sv
->save_mmcr0
= mmcr0_reg
.value
;
505 mmcr0_reg
.field
.pmc2_event
= event
;
506 sv
->save_mmcr0
= mmcr0_reg
.value
;
509 mmcr1_reg
.field
.pmc3_event
= event
;
510 sv
->save_mmcr1
= mmcr1_reg
.value
;
513 mmcr1_reg
.field
.pmc4_event
= event
;
514 sv
->save_mmcr1
= mmcr1_reg
.value
;
517 mmcr1_reg
.field
.pmc5_event
= event
;
518 sv
->save_mmcr1
= mmcr1_reg
.value
;
521 mmcr1_reg
.field
.pmc6_event
= event
;
522 sv
->save_mmcr1
= mmcr1_reg
.value
;
525 mmcr1_reg
.field
.pmc7_event
= event
;
526 sv
->save_mmcr1
= mmcr1_reg
.value
;
529 mmcr1_reg
.field
.pmc8_event
= event
;
530 sv
->save_mmcr1
= mmcr1_reg
.value
;
533 retval
= KERN_FAILURE
;
539 retval
= KERN_FAILURE
;
543 #ifdef HWPERFMON_DEBUG
544 kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc
, event
, sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
551 perfmon_set_event_func(thread_t thread
, uint32_t f
)
553 struct savearea
*sv
= thread
->machine
.pcb
;
554 kern_return_t retval
= KERN_SUCCESS
;
556 #ifdef HWPERFMON_DEBUG
557 kprintf("perfmon_set_event_func - func=%s\n",
558 f
==PPC_PERFMON_FUNC_FPU
? "FUNC" :
559 f
==PPC_PERFMON_FUNC_ISU
? "ISU" :
560 f
==PPC_PERFMON_FUNC_IFU
? "IFU" :
561 f
==PPC_PERFMON_FUNC_VMX
? "VMX" :
562 f
==PPC_PERFMON_FUNC_IDU
? "IDU" :
563 f
==PPC_PERFMON_FUNC_GPS
? "GPS" :
564 f
==PPC_PERFMON_FUNC_LSU0
? "LSU0" :
565 f
==PPC_PERFMON_FUNC_LSU1A
? "LSU1A" :
566 f
==PPC_PERFMON_FUNC_LSU1B
? "LSU1B" :
567 f
==PPC_PERFMON_FUNC_SPECA
? "SPECA" :
568 f
==PPC_PERFMON_FUNC_SPECB
? "SPECB" :
569 f
==PPC_PERFMON_FUNC_SPECC
? "SPECC" :
571 #endif /* HWPERFMON_DEBUG */
573 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
574 case CPU_SUBTYPE_POWERPC_750
:
575 case CPU_SUBTYPE_POWERPC_7400
:
576 case CPU_SUBTYPE_POWERPC_7450
:
577 retval
= KERN_FAILURE
; /* event functional unit only applies to 970 */
579 case CPU_SUBTYPE_POWERPC_970
:
581 ppc64_mmcr1_reg_t mmcr1_reg
;
582 ppc_func_unit_t func_unit
;
585 mmcr1_reg
.value
= sv
->save_mmcr1
;
587 mmcr1_reg
.field
.ttm0_select
= func_unit
.field
.TTM0SEL
;
588 mmcr1_reg
.field
.ttm1_select
= func_unit
.field
.TTM1SEL
;
589 mmcr1_reg
.field
.ttm2_select
= 0; /* not used */
590 mmcr1_reg
.field
.ttm3_select
= func_unit
.field
.TTM3SEL
;
591 mmcr1_reg
.field
.speculative_event
= func_unit
.field
.SPECSEL
;
592 mmcr1_reg
.field
.lane0_select
= func_unit
.field
.TD_CP_DBGxSEL
;
593 mmcr1_reg
.field
.lane1_select
= func_unit
.field
.TD_CP_DBGxSEL
;
594 mmcr1_reg
.field
.lane2_select
= func_unit
.field
.TD_CP_DBGxSEL
;
595 mmcr1_reg
.field
.lane3_select
= func_unit
.field
.TD_CP_DBGxSEL
;
597 sv
->save_mmcr1
= mmcr1_reg
.value
;
601 retval
= KERN_FAILURE
;
609 perfmon_set_threshold(thread_t thread
, int threshold
)
611 struct savearea
*sv
= thread
->machine
.pcb
;
612 kern_return_t retval
= KERN_SUCCESS
;
614 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
615 case CPU_SUBTYPE_POWERPC_750
:
617 ppc32_mmcr0_reg_t mmcr0_reg
;
619 mmcr0_reg
.value
= sv
->save_mmcr0
;
621 if(threshold
>63) { /* no multiplier on 750 */
622 int newThreshold
= 63;
623 #ifdef HWPERFMON_DEBUG
624 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold
, newThreshold
);
626 threshold
= newThreshold
;
628 mmcr0_reg
.field
.threshold_value
= threshold
;
630 sv
->save_mmcr0
= mmcr0_reg
.value
;
634 case CPU_SUBTYPE_POWERPC_7400
:
635 case CPU_SUBTYPE_POWERPC_7450
:
637 ppc32_mmcr0_reg_t mmcr0_reg
;
638 ppc32_mmcr2_reg_t mmcr2_reg
;
640 mmcr0_reg
.value
= sv
->save_mmcr0
;
641 mmcr2_reg
.value
= sv
->save_mmcr2
;
643 if(threshold
<=(2*63)) { /* 2x multiplier */
644 if(threshold%2
!= 0) {
645 int newThreshold
= 2*(threshold
/2);
646 #ifdef HWPERFMON_DEBUG
647 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold
, newThreshold
);
649 threshold
= newThreshold
;
651 mmcr2_reg
.field
.threshold_multiplier
= 0;
652 } else if(threshold
<=(32*63)) { /* 32x multiplier */
653 if(threshold%32
!= 0) {
654 int newThreshold
= 32*(threshold
/32);
655 #ifdef HWPERFMON_DEBUG
656 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold
, newThreshold
);
658 threshold
= newThreshold
;
660 mmcr2_reg
.field
.threshold_multiplier
= 1;
662 int newThreshold
= 32*63;
663 #ifdef HWPERFMON_DEBUG
664 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold
, newThreshold
);
666 threshold
= newThreshold
;
667 mmcr2_reg
.field
.threshold_multiplier
= 1;
669 mmcr0_reg
.field
.threshold_value
= threshold
;
671 sv
->save_mmcr0
= mmcr0_reg
.value
;
672 sv
->save_mmcr2
= mmcr2_reg
.value
;
676 case CPU_SUBTYPE_POWERPC_970
:
678 ppc64_mmcr0_reg_t mmcr0_reg
;
680 mmcr0_reg
.value
= sv
->save_mmcr0
;
682 if(threshold
>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
683 int newThreshold
= 63;
684 #ifdef HWPERFMON_DEBUG
685 kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold
, newThreshold
);
687 threshold
= newThreshold
;
689 mmcr0_reg
.field
.threshold_value
= threshold
;
691 sv
->save_mmcr0
= mmcr0_reg
.value
;
695 retval
= KERN_FAILURE
;
699 #ifdef HWPERFMON_DEBUG
700 kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold
, sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
707 perfmon_set_tbsel(thread_t thread
, int tbsel
)
709 struct savearea
*sv
= thread
->machine
.pcb
;
710 kern_return_t retval
= KERN_SUCCESS
;
712 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
713 case CPU_SUBTYPE_POWERPC_750
:
714 case CPU_SUBTYPE_POWERPC_7400
:
715 case CPU_SUBTYPE_POWERPC_7450
:
717 ppc32_mmcr0_reg_t mmcr0_reg
;
719 mmcr0_reg
.value
= sv
->save_mmcr0
;
725 mmcr0_reg
.field
.timebase_bit_selector
= tbsel
;
728 retval
= KERN_FAILURE
;
730 sv
->save_mmcr0
= mmcr0_reg
.value
;
733 case CPU_SUBTYPE_POWERPC_970
:
735 ppc64_mmcr0_reg_t mmcr0_reg
;
737 mmcr0_reg
.value
= sv
->save_mmcr0
;
743 mmcr0_reg
.field
.timebase_bit_selector
= tbsel
;
746 retval
= KERN_FAILURE
;
748 sv
->save_mmcr0
= mmcr0_reg
.value
;
752 retval
= KERN_FAILURE
;
756 #ifdef HWPERFMON_DEBUG
757 kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel
, sv
->save_mmcr0
, sv
->save_mmcr1
, sv
->save_mmcr2
);
763 int perfmon_control(struct savearea
*ssp
)
765 mach_port_name_t thr_port
= CAST_DOWN(mach_port_name_t
, ssp
->save_r3
);
766 int action
= (int)ssp
->save_r4
;
767 int pmc
= (int)ssp
->save_r5
;
768 int val
= (int)ssp
->save_r6
;
769 uint64_t *usr_pmcs_p
= CAST_DOWN(uint64_t *, ssp
->save_r7
);
770 thread_t thread
= THREAD_NULL
;
771 uint64_t kern_pmcs
[MAX_CPUPMC_COUNT
];
772 kern_return_t retval
= KERN_SUCCESS
;
776 thread
= port_name_to_thread(thr_port
); // convert user space thread port name to a thread_t
778 ssp
->save_r3
= KERN_INVALID_ARGUMENT
;
779 return 1; /* Return and check for ASTs... */
782 if(thread
!=current_thread()) {
783 thread_suspend(thread
);
786 #ifdef HWPERFMON_DEBUG
787 // kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
790 oldlevel
= ml_set_interrupts_enabled(FALSE
);
792 /* individual actions which do not require perfmon facility to be enabled */
793 if(action
==PPC_PERFMON_DISABLE
) {
794 retval
= perfmon_disable(thread
);
796 else if(action
==PPC_PERFMON_ENABLE
) {
797 retval
= perfmon_enable(thread
);
800 else { /* individual actions which do require perfmon facility to be enabled */
801 if(!(thread
->machine
.specFlags
& perfMonitor
)) { /* perfmon not enabled */
802 #ifdef HWPERFMON_DEBUG
803 kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
805 retval
= KERN_NO_ACCESS
;
809 if(action
==PPC_PERFMON_SET_EVENT
) {
810 retval
= perfmon_set_event(thread
, pmc
, val
);
812 else if(action
==PPC_PERFMON_SET_THRESHOLD
) {
813 retval
= perfmon_set_threshold(thread
, val
);
815 else if(action
==PPC_PERFMON_SET_TBSEL
) {
816 retval
= perfmon_set_tbsel(thread
, val
);
818 else if(action
==PPC_PERFMON_SET_EVENT_FUNC
) {
819 retval
= perfmon_set_event_func(thread
, val
);
821 else if(action
==PPC_PERFMON_ENABLE_PMI_BRKPT
) {
823 thread
->machine
.perfmonFlags
|= PERFMONFLAG_BREAKPOINT_FOR_PMI
;
825 thread
->machine
.perfmonFlags
&= ~PERFMONFLAG_BREAKPOINT_FOR_PMI
;
827 retval
= KERN_SUCCESS
;
830 /* combinable actions */
832 if(action
& PPC_PERFMON_STOP_COUNTERS
) {
833 error
= perfmon_stop_counters(thread
);
834 if(error
!=KERN_SUCCESS
) {
839 if(action
& PPC_PERFMON_CLEAR_COUNTERS
) {
840 error
= perfmon_clear_counters(thread
);
841 if(error
!=KERN_SUCCESS
) {
846 if(action
& PPC_PERFMON_WRITE_COUNTERS
) {
847 if((error
= copyin(CAST_USER_ADDR_T(usr_pmcs_p
), (void *)kern_pmcs
, MAX_CPUPMC_COUNT
*sizeof(uint64_t)))) {
851 error
= perfmon_write_counters(thread
, kern_pmcs
);
852 if(error
!=KERN_SUCCESS
) {
857 if(action
& PPC_PERFMON_READ_COUNTERS
) {
858 error
= perfmon_read_counters(thread
, kern_pmcs
);
859 if(error
!=KERN_SUCCESS
) {
863 if((error
= copyout((void *)kern_pmcs
, CAST_USER_ADDR_T(usr_pmcs_p
), MAX_CPUPMC_COUNT
*sizeof(uint64_t)))) {
868 if(action
& PPC_PERFMON_START_COUNTERS
) {
869 error
= perfmon_start_counters(thread
);
870 if(error
!=KERN_SUCCESS
) {
879 ml_set_interrupts_enabled(oldlevel
);
881 #ifdef HWPERFMON_DEBUG
882 kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp
->save_mmcr0
, ssp
->save_pmc
[PMC_1
], ssp
->save_pmc
[PMC_2
], ssp
->save_pmc
[PMC_3
], ssp
->save_pmc
[PMC_4
], ssp
->save_pmc
[PMC_5
], ssp
->save_pmc
[PMC_6
], ssp
->save_pmc
[PMC_7
], ssp
->save_pmc
[PMC_8
]);
885 if(thread
!=current_thread()) {
886 thread_resume(thread
);
889 #ifdef HWPERFMON_DEBUG
890 if(retval
!=KERN_SUCCESS
) {
891 kprintf("perfmon_control - ERROR: retval=%d\n", retval
);
893 #endif /* HWPERFMON_DEBUG */
895 ssp
->save_r3
= retval
;
896 return 1; /* Return and check for ASTs... */
899 int perfmon_handle_pmi(struct savearea
*ssp
)
902 kern_return_t retval
= KERN_SUCCESS
;
903 thread_t thread
= current_thread();
905 #ifdef HWPERFMON_DEBUG
906 kprintf("perfmon_handle_pmi: got rupt\n");
909 if(!(thread
->machine
.specFlags
& perfMonitor
)) { /* perfmon not enabled */
910 #ifdef HWPERFMON_DEBUG
911 kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
916 for(curPMC
=0; curPMC
<MAX_CPUPMC_COUNT
; curPMC
++) {
917 if(thread
->machine
.pcb
->save_pmc
[curPMC
] & 0x80000000) {
918 if(thread
->machine
.pmcovfl
[curPMC
]==0xFFFFFFFF && (thread
->machine
.perfmonFlags
& PERFMONFLAG_BREAKPOINT_FOR_PMI
)) {
919 doexception(EXC_BREAKPOINT
, EXC_PPC_PERFMON
, (unsigned int)ssp
->save_srr0
); // pass up a breakpoint exception
922 thread
->machine
.pmcovfl
[curPMC
]++;
923 thread
->machine
.pcb
->save_pmc
[curPMC
] = 0;
928 if(retval
==KERN_SUCCESS
) {
929 switch(PerProcTable
[0].ppe_vaddr
->cpu_subtype
) {
930 case CPU_SUBTYPE_POWERPC_7450
:
932 ppc32_mmcr0_reg_t mmcr0_reg
;
934 mmcr0_reg
.value
= thread
->machine
.pcb
->save_mmcr0
;
935 mmcr0_reg
.field
.disable_counters_always
= FALSE
;
936 mmcr0_reg
.field
.enable_pmi
= TRUE
;
937 thread
->machine
.pcb
->save_mmcr0
= mmcr0_reg
.value
;
939 retval
= KERN_SUCCESS
;
941 case CPU_SUBTYPE_POWERPC_970
:
943 ppc64_mmcr0_reg_t mmcr0_reg
;
945 mmcr0_reg
.value
= thread
->machine
.pcb
->save_mmcr0
;
946 mmcr0_reg
.field
.disable_counters_always
= FALSE
;
947 mmcr0_reg
.field
.enable_pmi
= TRUE
;
948 thread
->machine
.pcb
->save_mmcr0
= mmcr0_reg
.value
;
950 retval
= KERN_SUCCESS
;
953 retval
= KERN_FAILURE
;