]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/hw_perfmon.c
492592f06562a03450694cbd10976c7fd866e5f2
[apple/xnu.git] / osfmk / ppc / hw_perfmon.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/thread.h>
30 #include <ppc/exception.h>
31 #include <ppc/savearea.h>
32 #include <ppc/hw_perfmon.h>
33 #include <ppc/hw_perfmon_mmcr.h>
34
/* Protects the two facility-ownership variables below */
decl_simple_lock_data(,hw_perfmon_lock)
/* Task that currently owns the PMC facility: TASK_NULL = free,
 * kernel_task = owned by hw_perfmon (per-thread virtualized counters),
 * anything else = owned exclusively by that task. */
static task_t hw_perfmon_owner = TASK_NULL;
/* Number of threads with perfmon enabled while kernel_task is the owner */
static int hw_perfmon_thread_count = 0;
38
39 /* Notes:
40 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
41 * (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
43 * -virtual counter PMI is passed up as a breakpoint exception
44 */
45
/*
 * One-time initialization of the perfmon facility lock.
 * Called before any facility acquire/release can occur; always
 * returns KERN_SUCCESS.
 */
int perfmon_init(void)
{
	simple_lock_init(&hw_perfmon_lock, FALSE);
	return KERN_SUCCESS;
}
51
52 /* PMC Facility Owner:
53 * TASK_NULL - no one owns it
54 * kernel_task - owned by hw_perfmon
55 * other task - owned by another task
56 */
57
58 int perfmon_acquire_facility(task_t task)
59 {
60 kern_return_t retval = KERN_SUCCESS;
61
62 simple_lock(&hw_perfmon_lock);
63
64 if(hw_perfmon_owner==task) {
65 #ifdef HWPERFMON_DEBUG
66 kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
67 #endif
68 retval = KERN_SUCCESS;
69 /* already own it */
70 } else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
71 hw_perfmon_owner = task;
72 hw_perfmon_thread_count = 0;
73 #ifdef HWPERFMON_DEBUG
74 kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
75 #endif
76 retval = KERN_SUCCESS;
77 } else { /* someone already owns it */
78 if(hw_perfmon_owner==kernel_task) {
79 if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
80 hw_perfmon_owner = task;
81 hw_perfmon_thread_count = 0;
82 #ifdef HWPERFMON_DEBUG
83 kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
84 #endif
85 retval = KERN_SUCCESS;
86 } else {
87 #ifdef HWPERFMON_DEBUG
88 kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
89 #endif
90 retval = KERN_RESOURCE_SHORTAGE;
91 }
92 } else { /* non-kernel owner */
93 #ifdef HWPERFMON_DEBUG
94 kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
95 #endif
96 retval = KERN_RESOURCE_SHORTAGE;
97 }
98 }
99
100 simple_unlock(&hw_perfmon_lock);
101 return retval;
102 }
103
104 int perfmon_release_facility(task_t task)
105 {
106 kern_return_t retval = KERN_SUCCESS;
107 task_t old_perfmon_owner = hw_perfmon_owner;
108
109 simple_lock(&hw_perfmon_lock);
110
111 if(task!=hw_perfmon_owner) {
112 retval = KERN_NO_ACCESS;
113 } else {
114 if(old_perfmon_owner==kernel_task) {
115 if(hw_perfmon_thread_count>0) {
116 #ifdef HWPERFMON_DEBUG
117 kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
118 #endif
119 retval = KERN_NO_ACCESS;
120 } else {
121 #ifdef HWPERFMON_DEBUG
122 kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
123 #endif
124 hw_perfmon_owner = TASK_NULL;
125 retval = KERN_SUCCESS;
126 }
127 } else {
128 #ifdef HWPERFMON_DEBUG
129 kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
130 #endif
131 hw_perfmon_owner = TASK_NULL;
132 retval = KERN_SUCCESS;
133 }
134 }
135
136 simple_unlock(&hw_perfmon_lock);
137 return retval;
138 }
139
140 int perfmon_enable(thread_t thread)
141 {
142 struct savearea *sv = thread->machine.pcb;
143 kern_return_t kr;
144 kern_return_t retval = KERN_SUCCESS;
145 int curPMC;
146
147 if(thread->machine.specFlags & perfMonitor) {
148 return KERN_SUCCESS; /* already enabled */
149 } else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
150 return KERN_RESOURCE_SHORTAGE; /* facility is in use */
151 } else { /* kernel_task owns the faciltity and this thread has not yet been counted */
152 simple_lock(&hw_perfmon_lock);
153 hw_perfmon_thread_count++;
154 simple_unlock(&hw_perfmon_lock);
155 }
156
157 sv->save_mmcr1 = 0;
158 sv->save_mmcr2 = 0;
159
160 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
161 case CPU_SUBTYPE_POWERPC_750:
162 case CPU_SUBTYPE_POWERPC_7400:
163 case CPU_SUBTYPE_POWERPC_7450:
164 {
165 ppc32_mmcr0_reg_t mmcr0_reg;
166
167 mmcr0_reg.value = 0;
168 mmcr0_reg.field.disable_counters_always = TRUE;
169 mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
170 sv->save_mmcr0 = mmcr0_reg.value;
171 }
172 break;
173 case CPU_SUBTYPE_POWERPC_970:
174 {
175 ppc64_mmcr0_reg_t mmcr0_reg;
176
177 mmcr0_reg.value = 0;
178 mmcr0_reg.field.disable_counters_always = TRUE;
179 mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
180 sv->save_mmcr0 = mmcr0_reg.value;
181 }
182 break;
183 default:
184 retval = KERN_FAILURE;
185 break;
186 }
187
188 if(retval==KERN_SUCCESS) {
189 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
190 sv->save_pmc[curPMC] = 0;
191 thread->machine.pmcovfl[curPMC] = 0;
192 }
193 thread->machine.perfmonFlags = 0;
194 thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
195 if(thread==current_thread()) {
196 getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
197 }
198 }
199
200 #ifdef HWPERFMON_DEBUG
201 kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
202 #endif
203
204 return retval;
205 }
206
207 int perfmon_disable(thread_t thread)
208 {
209 struct savearea *sv = thread->machine.pcb;
210 int curPMC;
211
212 if(!(thread->machine.specFlags & perfMonitor)) {
213 return KERN_NO_ACCESS; /* not enabled */
214 } else {
215 simple_lock(&hw_perfmon_lock);
216 hw_perfmon_thread_count--;
217 simple_unlock(&hw_perfmon_lock);
218 perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
219 }
220
221 thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
222 if(thread==current_thread()) {
223 PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
224 }
225 sv->save_mmcr0 = 0;
226 sv->save_mmcr1 = 0;
227 sv->save_mmcr2 = 0;
228
229 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
230 sv->save_pmc[curPMC] = 0;
231 thread->machine.pmcovfl[curPMC] = 0;
232 thread->machine.perfmonFlags = 0;
233 }
234
235 #ifdef HWPERFMON_DEBUG
236 kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
237 #endif
238
239 return KERN_SUCCESS;
240 }
241
242 int perfmon_clear_counters(thread_t thread)
243 {
244 struct savearea *sv = thread->machine.pcb;
245 int curPMC;
246
247 #ifdef HWPERFMON_DEBUG
248 kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
249 #endif
250
251 /* clear thread copy */
252 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
253 sv->save_pmc[curPMC] = 0;
254 thread->machine.pmcovfl[curPMC] = 0;
255 }
256
257 return KERN_SUCCESS;
258 }
259
260 int perfmon_write_counters(thread_t thread, uint64_t *pmcs)
261 {
262 struct savearea *sv = thread->machine.pcb;
263 int curPMC;
264
265 #ifdef HWPERFMON_DEBUG
266 kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
267 #endif
268
269 /* update thread copy */
270 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
271 sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
272 thread->machine.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
273 }
274
275 return KERN_SUCCESS;
276 }
277
278 int perfmon_read_counters(thread_t thread, uint64_t *pmcs)
279 {
280 struct savearea *sv = thread->machine.pcb;
281 int curPMC;
282
283 /* retrieve from thread copy */
284 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
285 pmcs[curPMC] = thread->machine.pmcovfl[curPMC];
286 pmcs[curPMC] = pmcs[curPMC]<<31;
287 pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
288 }
289
290 /* zero any unused counters on this platform */
291 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
292 case CPU_SUBTYPE_POWERPC_750:
293 case CPU_SUBTYPE_POWERPC_7400:
294 case CPU_SUBTYPE_POWERPC_7450:
295 pmcs[PMC_7] = 0;
296 pmcs[PMC_8] = 0;
297 break;
298 default:
299 break;
300 }
301
302 #ifdef HWPERFMON_DEBUG
303 kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
304 #endif
305
306 return KERN_SUCCESS;
307 }
308
309 int perfmon_start_counters(thread_t thread)
310 {
311 struct savearea *sv = thread->machine.pcb;
312 kern_return_t retval = KERN_SUCCESS;
313
314 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
315 case CPU_SUBTYPE_POWERPC_750:
316 case CPU_SUBTYPE_POWERPC_7400:
317 {
318 ppc32_mmcr0_reg_t mmcr0_reg;
319 mmcr0_reg.value = sv->save_mmcr0;
320 mmcr0_reg.field.disable_counters_always = FALSE;
321 /* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
322 mmcr0_reg.field.on_pmi_stop_counting = FALSE;
323 mmcr0_reg.field.enable_pmi = FALSE;
324 mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
325 mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
326 sv->save_mmcr0 = mmcr0_reg.value;
327 }
328 break;
329 case CPU_SUBTYPE_POWERPC_7450:
330 {
331 ppc32_mmcr0_reg_t mmcr0_reg;
332 mmcr0_reg.value = sv->save_mmcr0;
333 mmcr0_reg.field.disable_counters_always = FALSE;
334 mmcr0_reg.field.on_pmi_stop_counting = TRUE;
335 mmcr0_reg.field.enable_pmi = TRUE;
336 mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
337 mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
338 sv->save_mmcr0 = mmcr0_reg.value;
339 }
340 break;
341 case CPU_SUBTYPE_POWERPC_970:
342 {
343 ppc64_mmcr0_reg_t mmcr0_reg;
344 mmcr0_reg.value = sv->save_mmcr0;
345 mmcr0_reg.field.disable_counters_always = FALSE;
346 mmcr0_reg.field.on_pmi_stop_counting = TRUE;
347 mmcr0_reg.field.enable_pmi = TRUE;
348 mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
349 mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
350 sv->save_mmcr0 = mmcr0_reg.value;
351 }
352 break;
353 default:
354 retval = KERN_FAILURE;
355 break;
356 }
357
358 #ifdef HWPERFMON_DEBUG
359 kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
360 #endif
361
362 return retval;
363 }
364
365 int perfmon_stop_counters(thread_t thread)
366 {
367 struct savearea *sv = thread->machine.pcb;
368 kern_return_t retval = KERN_SUCCESS;
369
370 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
371 case CPU_SUBTYPE_POWERPC_750:
372 case CPU_SUBTYPE_POWERPC_7400:
373 case CPU_SUBTYPE_POWERPC_7450:
374 {
375 ppc32_mmcr0_reg_t mmcr0_reg;
376 mmcr0_reg.value = sv->save_mmcr0;
377 mmcr0_reg.field.disable_counters_always = TRUE;
378 sv->save_mmcr0 = mmcr0_reg.value;
379 }
380 break;
381 case CPU_SUBTYPE_POWERPC_970:
382 {
383 ppc64_mmcr0_reg_t mmcr0_reg;
384 mmcr0_reg.value = sv->save_mmcr0;
385 mmcr0_reg.field.disable_counters_always = TRUE;
386 sv->save_mmcr0 = mmcr0_reg.value;
387 }
388 break;
389 default:
390 retval = KERN_FAILURE;
391 break;
392 }
393
394 #ifdef HWPERFMON_DEBUG
395 kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
396 #endif
397
398 return retval;
399 }
400
/*
 * Select the event counted by virtualized counter 'pmc' for 'thread'.
 * The event selector for PMC1/PMC2 lives in MMCR0 and for PMC3 and
 * above in MMCR1; which PMCs exist depends on the processor family
 * (750/7400: 4, 7450: 6, 970: 8).  Only the thread's saved register
 * images are touched; the hardware picks them up at context switch.
 * Returns KERN_FAILURE for a pmc the part does not have or for an
 * unsupported cpu_subtype.  'event' is written into a register bitfield
 * unvalidated; out-of-range values are silently truncated.
 */
int perfmon_set_event(thread_t thread, int pmc, int event)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
	case CPU_SUBTYPE_POWERPC_7400:
		{
			/* 750/7400: PMC1-2 in MMCR0, PMC3-4 in MMCR1 */
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	case CPU_SUBTYPE_POWERPC_7450:
		{
			/* 7450: PMC1-2 in MMCR0, PMC3-6 in MMCR1 */
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_5:
				mmcr1_reg.field.pmc5_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_6:
				mmcr1_reg.field.pmc6_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			/* 970: PMC1-2 in MMCR0, PMC3-8 in MMCR1 (64-bit register images) */
			ppc64_mmcr0_reg_t mmcr0_reg;
			ppc64_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_5:
				mmcr1_reg.field.pmc5_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_6:
				mmcr1_reg.field.pmc6_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_7:
				mmcr1_reg.field.pmc7_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_8:
				mmcr1_reg.field.pmc8_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
540
/*
 * Route a functional unit's event signals to the PMCs (970 only).  The
 * packed selector 'f' is unpacked into the MMCR1 TTMx/lane/speculative
 * select fields in the thread's saved MMCR1 image.  32-bit parts have
 * no functional-unit routing, so they return KERN_FAILURE.
 */
int perfmon_set_event_func(thread_t thread, uint32_t f)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	/* NOTE(review): FPU prints "FUNC" rather than "FPU" - looks like a
	   debug-string typo; confirm before changing since it is runtime text */
	kprintf("perfmon_set_event_func - func=%s\n",
			f==PPC_PERFMON_FUNC_FPU ? "FUNC" :
			f==PPC_PERFMON_FUNC_ISU ? "ISU" :
			f==PPC_PERFMON_FUNC_IFU ? "IFU" :
			f==PPC_PERFMON_FUNC_VMX ? "VMX" :
			f==PPC_PERFMON_FUNC_IDU ? "IDU" :
			f==PPC_PERFMON_FUNC_GPS ? "GPS" :
			f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
			f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
			f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
			f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
			f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
			f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
			"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
	case CPU_SUBTYPE_POWERPC_7400:
	case CPU_SUBTYPE_POWERPC_7450:
		retval = KERN_FAILURE; /* event functional unit only applies to 970 */
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			ppc64_mmcr1_reg_t mmcr1_reg;
			ppc_func_unit_t func_unit;

			/* unpack the user-supplied selector into MMCR1 routing fields */
			func_unit.value = f;
			mmcr1_reg.value = sv->save_mmcr1;

			mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
			mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
			mmcr1_reg.field.ttm2_select = 0; /* not used */
			mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
			mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
			/* all four byte lanes take the same debug-bus select value */
			mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
			mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
			mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
			mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

			sv->save_mmcr1 = mmcr1_reg.value;
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

	return retval;
}
597
/*
 * Set the MMCR0 threshold value for 'thread'.  The 6-bit hardware field
 * holds values 0-63; parts with a threshold multiplier (7400/7450, via
 * MMCR2) can express larger thresholds in units of 2 or 32.  Supplied
 * values that exceed the representable maximum are clamped, and values
 * not divisible by the chosen multiplier are rounded down; both cases
 * warn in debug builds but still return KERN_SUCCESS.  Returns
 * KERN_FAILURE only for an unsupported cpu_subtype.
 */
int perfmon_set_threshold(thread_t thread, int threshold)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
		{
			ppc32_mmcr0_reg_t mmcr0_reg;

			mmcr0_reg.value = sv->save_mmcr0;

			if(threshold>63) { /* no multiplier on 750 */
				int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
				threshold = newThreshold;
			}
			mmcr0_reg.field.threshold_value = threshold;

			sv->save_mmcr0 = mmcr0_reg.value;
		}
		break;

	case CPU_SUBTYPE_POWERPC_7400:
	case CPU_SUBTYPE_POWERPC_7450:
		{
			/* 7400/7450: MMCR2 selects a 2x or 32x threshold multiplier */
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr2_reg_t mmcr2_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr2_reg.value = sv->save_mmcr2;

			if(threshold<=(2*63)) { /* 2x multiplier */
				if(threshold%2 != 0) {
					/* round down to a multiple of the multiplier */
					int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr2_reg.field.threshold_multiplier = 0;
			} else if(threshold<=(32*63)) { /* 32x multiplier */
				if(threshold%32 != 0) {
					int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr2_reg.field.threshold_multiplier = 1;
			} else {
				/* beyond 32*63: clamp to the maximum expressible value */
				int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
				threshold = newThreshold;
				mmcr2_reg.field.threshold_multiplier = 1;
			}
			mmcr0_reg.field.threshold_value = threshold;

			sv->save_mmcr0 = mmcr0_reg.value;
			sv->save_mmcr2 = mmcr2_reg.value;

		}
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			ppc64_mmcr0_reg_t mmcr0_reg;

			mmcr0_reg.value = sv->save_mmcr0;

			if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
				int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
				threshold = newThreshold;
			}
			mmcr0_reg.field.threshold_value = threshold;

			sv->save_mmcr0 = mmcr0_reg.value;
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
694
695 int perfmon_set_tbsel(thread_t thread, int tbsel)
696 {
697 struct savearea *sv = thread->machine.pcb;
698 kern_return_t retval = KERN_SUCCESS;
699
700 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
701 case CPU_SUBTYPE_POWERPC_750:
702 case CPU_SUBTYPE_POWERPC_7400:
703 case CPU_SUBTYPE_POWERPC_7450:
704 {
705 ppc32_mmcr0_reg_t mmcr0_reg;
706
707 mmcr0_reg.value = sv->save_mmcr0;
708 switch(tbsel) {
709 case 0x0:
710 case 0x1:
711 case 0x2:
712 case 0x3:
713 mmcr0_reg.field.timebase_bit_selector = tbsel;
714 break;
715 default:
716 retval = KERN_FAILURE;
717 }
718 sv->save_mmcr0 = mmcr0_reg.value;
719 }
720 break;
721 case CPU_SUBTYPE_POWERPC_970:
722 {
723 ppc64_mmcr0_reg_t mmcr0_reg;
724
725 mmcr0_reg.value = sv->save_mmcr0;
726 switch(tbsel) {
727 case 0x0:
728 case 0x1:
729 case 0x2:
730 case 0x3:
731 mmcr0_reg.field.timebase_bit_selector = tbsel;
732 break;
733 default:
734 retval = KERN_FAILURE;
735 }
736 sv->save_mmcr0 = mmcr0_reg.value;
737 }
738 break;
739 default:
740 retval = KERN_FAILURE;
741 break;
742 }
743
744 #ifdef HWPERFMON_DEBUG
745 kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
746 #endif
747
748 return retval;
749 }
750
/*
 * System-call entry point for the perfmon facility.  Unpacks the user's
 * arguments from the savearea registers (r3=thread port, r4=action,
 * r5=pmc, r6=value, r7=user PMC array pointer), dispatches to the
 * perfmon_* helpers, and returns the result in r3.  The target thread
 * is suspended around the operation unless it is the caller, and
 * interrupts are disabled while the per-thread PMC state is touched.
 * Always returns 1 so the trap handler checks for ASTs.
 *
 * NOTE(review): port_name_to_thread's returned thread reference is not
 * explicitly deallocated here - verify the reference is dropped
 * elsewhere or this leaks a thread reference per call.
 */
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_t thread = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];  /* kernel-side staging buffer for copyin/copyout */
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thread = (thread_t) port_name_to_thread(thr_port); // convert user space thread port name to a thread_t
	if(!thread) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	/* freeze the target so its PMC state cannot change underneath us */
	if(thread!=current_thread()) {
		thread_suspend(thread);
	}

#ifdef HWPERFMON_DEBUG
	// kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thread);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thread);
	}

	else { /* individual actions which do require perfmon facility to be enabled */
		if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thread, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thread, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thread, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thread, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			/* toggle delivery of a breakpoint exception on virtual counter overflow */
			if(val) {
				thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions - action is a bitmask; each requested step
		 * runs in stop/clear/write/read/start order, bailing on first error */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				/* intentional assignment-in-condition: nonzero copyin result is the error */
				if(error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				/* intentional assignment-in-condition, as above */
				if(error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p), MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

 perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thread!=current_thread()) {
		thread_resume(thread);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}
886
/*
 * Performance-monitor interrupt handler for the current thread.  For
 * each saved PMC whose bit 0x80000000 is set (hardware counter
 * overflowed), bump the 32-bit software overflow accumulator and reset
 * the counter image; if the accumulator itself is about to wrap
 * (0xFFFFFFFF) and the thread asked for PMI breakpoints, deliver an
 * EXC_BREAKPOINT/EXC_PPC_PERFMON exception instead.  Finally re-arm
 * counting in the saved MMCR0 on parts that stop on PMI (7450/970).
 * Returns KERN_FAILURE if perfmon is not enabled for the thread or
 * the cpu_subtype is unsupported.
 */
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_t thread = current_thread();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		if(thread->machine.pcb->save_pmc[curPMC] & 0x80000000) {
			/* overflow accumulator full + breakpoint requested: hand the
			   event to user space rather than silently wrapping */
			if(thread->machine.pmcovfl[curPMC]==0xFFFFFFFF && (thread->machine.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			} else {
				/* fold the overflow into software state and restart the counter */
				thread->machine.pmcovfl[curPMC]++;
				thread->machine.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		/* re-enable counting and PMIs in the saved MMCR0 image */
		switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.enable_pmi = TRUE;
				thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
			}
			retval = KERN_SUCCESS;
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.enable_pmi = TRUE;
				thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
			}
			retval = KERN_SUCCESS;
			break;
		default:
			retval = KERN_FAILURE;
			break;
		}
	}

	return retval;
}