]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/hw_perfmon.c
xnu-792.12.6.tar.gz
[apple/xnu.git] / osfmk / ppc / hw_perfmon.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <kern/thread.h>
32 #include <ppc/exception.h>
33 #include <ppc/savearea.h>
34 #include <ppc/hw_perfmon.h>
35 #include <ppc/hw_perfmon_mmcr.h>
36
37 decl_simple_lock_data(,hw_perfmon_lock)
38 static task_t hw_perfmon_owner = TASK_NULL;
39 static int hw_perfmon_thread_count = 0;
40
41 /* Notes:
42 * -supervisor/user level filtering is unnecessary because of the way PMCs and MMCRs are context switched
43 * (can only count user events anyway)
 * -marked filtering is unnecessary because each thread has its own virtualized set of PMCs and MMCRs
45 * -virtual counter PMI is passed up as a breakpoint exception
46 */
47
48 int perfmon_init(void)
49 {
50 simple_lock_init(&hw_perfmon_lock, FALSE);
51 return KERN_SUCCESS;
52 }
53
54 /* PMC Facility Owner:
55 * TASK_NULL - no one owns it
56 * kernel_task - owned by hw_perfmon
57 * other task - owned by another task
58 */
59
60 int perfmon_acquire_facility(task_t task)
61 {
62 kern_return_t retval = KERN_SUCCESS;
63
64 simple_lock(&hw_perfmon_lock);
65
66 if(hw_perfmon_owner==task) {
67 #ifdef HWPERFMON_DEBUG
68 kprintf("perfmon_acquire_facility - ACQUIRED: already owner\n");
69 #endif
70 retval = KERN_SUCCESS;
71 /* already own it */
72 } else if(hw_perfmon_owner==TASK_NULL) { /* no one owns it */
73 hw_perfmon_owner = task;
74 hw_perfmon_thread_count = 0;
75 #ifdef HWPERFMON_DEBUG
76 kprintf("perfmon_acquire_facility - ACQUIRED: no current owner - made new owner\n");
77 #endif
78 retval = KERN_SUCCESS;
79 } else { /* someone already owns it */
80 if(hw_perfmon_owner==kernel_task) {
81 if(hw_perfmon_thread_count==0) { /* kernel owns it but no threads using it */
82 hw_perfmon_owner = task;
83 hw_perfmon_thread_count = 0;
84 #ifdef HWPERFMON_DEBUG
85 kprintf("perfmon_acquire_facility - ACQUIRED: kernel is current owner but no threads using it\n");
86 #endif
87 retval = KERN_SUCCESS;
88 } else {
89 #ifdef HWPERFMON_DEBUG
90 kprintf("perfmon_acquire_facility - DENIED: kernel is current owner and facility in use\n");
91 #endif
92 retval = KERN_RESOURCE_SHORTAGE;
93 }
94 } else { /* non-kernel owner */
95 #ifdef HWPERFMON_DEBUG
96 kprintf("perfmon_acquire_facility - DENIED: another active task owns the facility\n");
97 #endif
98 retval = KERN_RESOURCE_SHORTAGE;
99 }
100 }
101
102 simple_unlock(&hw_perfmon_lock);
103 return retval;
104 }
105
106 int perfmon_release_facility(task_t task)
107 {
108 kern_return_t retval = KERN_SUCCESS;
109 task_t old_perfmon_owner = hw_perfmon_owner;
110
111 simple_lock(&hw_perfmon_lock);
112
113 if(task!=hw_perfmon_owner) {
114 retval = KERN_NO_ACCESS;
115 } else {
116 if(old_perfmon_owner==kernel_task) {
117 if(hw_perfmon_thread_count>0) {
118 #ifdef HWPERFMON_DEBUG
119 kprintf("perfmon_release_facility - NOT RELEASED: kernel task is owner and has active perfmon threads\n");
120 #endif
121 retval = KERN_NO_ACCESS;
122 } else {
123 #ifdef HWPERFMON_DEBUG
124 kprintf("perfmon_release_facility - RELEASED: kernel task was owner\n");
125 #endif
126 hw_perfmon_owner = TASK_NULL;
127 retval = KERN_SUCCESS;
128 }
129 } else {
130 #ifdef HWPERFMON_DEBUG
131 kprintf("perfmon_release_facility - RELEASED: user task was owner\n");
132 #endif
133 hw_perfmon_owner = TASK_NULL;
134 retval = KERN_SUCCESS;
135 }
136 }
137
138 simple_unlock(&hw_perfmon_lock);
139 return retval;
140 }
141
142 int perfmon_enable(thread_t thread)
143 {
144 struct savearea *sv = thread->machine.pcb;
145 kern_return_t kr;
146 kern_return_t retval = KERN_SUCCESS;
147 int curPMC;
148
149 if(thread->machine.specFlags & perfMonitor) {
150 return KERN_SUCCESS; /* already enabled */
151 } else if(perfmon_acquire_facility(kernel_task)!=KERN_SUCCESS) {
152 return KERN_RESOURCE_SHORTAGE; /* facility is in use */
153 } else { /* kernel_task owns the faciltity and this thread has not yet been counted */
154 simple_lock(&hw_perfmon_lock);
155 hw_perfmon_thread_count++;
156 simple_unlock(&hw_perfmon_lock);
157 }
158
159 sv->save_mmcr1 = 0;
160 sv->save_mmcr2 = 0;
161
162 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
163 case CPU_SUBTYPE_POWERPC_750:
164 case CPU_SUBTYPE_POWERPC_7400:
165 case CPU_SUBTYPE_POWERPC_7450:
166 {
167 ppc32_mmcr0_reg_t mmcr0_reg;
168
169 mmcr0_reg.value = 0;
170 mmcr0_reg.field.disable_counters_always = TRUE;
171 mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
172 sv->save_mmcr0 = mmcr0_reg.value;
173 }
174 break;
175 case CPU_SUBTYPE_POWERPC_970:
176 {
177 ppc64_mmcr0_reg_t mmcr0_reg;
178
179 mmcr0_reg.value = 0;
180 mmcr0_reg.field.disable_counters_always = TRUE;
181 mmcr0_reg.field.disable_counters_supervisor = TRUE; /* no choice */
182 sv->save_mmcr0 = mmcr0_reg.value;
183 }
184 break;
185 default:
186 retval = KERN_FAILURE;
187 break;
188 }
189
190 if(retval==KERN_SUCCESS) {
191 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
192 sv->save_pmc[curPMC] = 0;
193 thread->machine.pmcovfl[curPMC] = 0;
194 }
195 thread->machine.perfmonFlags = 0;
196 thread->machine.specFlags |= perfMonitor; /* enable perf monitor facility for this thread */
197 if(thread==current_thread()) {
198 getPerProc()->spcFlags |= perfMonitor; /* update per_proc */
199 }
200 }
201
202 #ifdef HWPERFMON_DEBUG
203 kprintf("perfmon_enable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
204 #endif
205
206 return retval;
207 }
208
209 int perfmon_disable(thread_t thread)
210 {
211 struct savearea *sv = thread->machine.pcb;
212 int curPMC;
213
214 if(!(thread->machine.specFlags & perfMonitor)) {
215 return KERN_NO_ACCESS; /* not enabled */
216 } else {
217 simple_lock(&hw_perfmon_lock);
218 hw_perfmon_thread_count--;
219 simple_unlock(&hw_perfmon_lock);
220 perfmon_release_facility(kernel_task); /* will release if hw_perfmon_thread_count is 0 */
221 }
222
223 thread->machine.specFlags &= ~perfMonitor; /* disable perf monitor facility for this thread */
224 if(thread==current_thread()) {
225 PerProcTable[cpu_number()].ppe_vaddr->spcFlags &= ~perfMonitor; /* update per_proc */
226 }
227 sv->save_mmcr0 = 0;
228 sv->save_mmcr1 = 0;
229 sv->save_mmcr2 = 0;
230
231 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
232 sv->save_pmc[curPMC] = 0;
233 thread->machine.pmcovfl[curPMC] = 0;
234 thread->machine.perfmonFlags = 0;
235 }
236
237 #ifdef HWPERFMON_DEBUG
238 kprintf("perfmon_disable - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
239 #endif
240
241 return KERN_SUCCESS;
242 }
243
244 int perfmon_clear_counters(thread_t thread)
245 {
246 struct savearea *sv = thread->machine.pcb;
247 int curPMC;
248
249 #ifdef HWPERFMON_DEBUG
250 kprintf("perfmon_clear_counters (CPU%d)\n", cpu_number());
251 #endif
252
253 /* clear thread copy */
254 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
255 sv->save_pmc[curPMC] = 0;
256 thread->machine.pmcovfl[curPMC] = 0;
257 }
258
259 return KERN_SUCCESS;
260 }
261
262 int perfmon_write_counters(thread_t thread, uint64_t *pmcs)
263 {
264 struct savearea *sv = thread->machine.pcb;
265 int curPMC;
266
267 #ifdef HWPERFMON_DEBUG
268 kprintf("perfmon_write_counters (CPU%d): mmcr0 = %016llX, pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
269 #endif
270
271 /* update thread copy */
272 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
273 sv->save_pmc[curPMC] = pmcs[curPMC] & 0x7FFFFFFF;
274 thread->machine.pmcovfl[curPMC] = (pmcs[curPMC]>>31) & 0xFFFFFFFF;
275 }
276
277 return KERN_SUCCESS;
278 }
279
280 int perfmon_read_counters(thread_t thread, uint64_t *pmcs)
281 {
282 struct savearea *sv = thread->machine.pcb;
283 int curPMC;
284
285 /* retrieve from thread copy */
286 for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
287 pmcs[curPMC] = thread->machine.pmcovfl[curPMC];
288 pmcs[curPMC] = pmcs[curPMC]<<31;
289 pmcs[curPMC] |= (sv->save_pmc[curPMC] & 0x7FFFFFFF);
290 }
291
292 /* zero any unused counters on this platform */
293 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
294 case CPU_SUBTYPE_POWERPC_750:
295 case CPU_SUBTYPE_POWERPC_7400:
296 case CPU_SUBTYPE_POWERPC_7450:
297 pmcs[PMC_7] = 0;
298 pmcs[PMC_8] = 0;
299 break;
300 default:
301 break;
302 }
303
304 #ifdef HWPERFMON_DEBUG
305 kprintf("perfmon_read_counters (CPU%d): mmcr0 = %016llX pmc1=%llX pmc2=%llX pmc3=%llX pmc4=%llX pmc5=%llX pmc6=%llX pmc7=%llX pmc8=%llX\n", cpu_number(), sv->save_mmcr0, pmcs[PMC_1], pmcs[PMC_2], pmcs[PMC_3], pmcs[PMC_4], pmcs[PMC_5], pmcs[PMC_6], pmcs[PMC_7], pmcs[PMC_8]);
306 #endif
307
308 return KERN_SUCCESS;
309 }
310
311 int perfmon_start_counters(thread_t thread)
312 {
313 struct savearea *sv = thread->machine.pcb;
314 kern_return_t retval = KERN_SUCCESS;
315
316 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
317 case CPU_SUBTYPE_POWERPC_750:
318 case CPU_SUBTYPE_POWERPC_7400:
319 {
320 ppc32_mmcr0_reg_t mmcr0_reg;
321 mmcr0_reg.value = sv->save_mmcr0;
322 mmcr0_reg.field.disable_counters_always = FALSE;
323 /* XXXXX PMI broken on 750, 750CX, 750FX, 7400 and 7410 v1.2 and earlier XXXXX */
324 mmcr0_reg.field.on_pmi_stop_counting = FALSE;
325 mmcr0_reg.field.enable_pmi = FALSE;
326 mmcr0_reg.field.enable_pmi_on_pmc1 = FALSE;
327 mmcr0_reg.field.enable_pmi_on_pmcn = FALSE;
328 sv->save_mmcr0 = mmcr0_reg.value;
329 }
330 break;
331 case CPU_SUBTYPE_POWERPC_7450:
332 {
333 ppc32_mmcr0_reg_t mmcr0_reg;
334 mmcr0_reg.value = sv->save_mmcr0;
335 mmcr0_reg.field.disable_counters_always = FALSE;
336 mmcr0_reg.field.on_pmi_stop_counting = TRUE;
337 mmcr0_reg.field.enable_pmi = TRUE;
338 mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
339 mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
340 sv->save_mmcr0 = mmcr0_reg.value;
341 }
342 break;
343 case CPU_SUBTYPE_POWERPC_970:
344 {
345 ppc64_mmcr0_reg_t mmcr0_reg;
346 mmcr0_reg.value = sv->save_mmcr0;
347 mmcr0_reg.field.disable_counters_always = FALSE;
348 mmcr0_reg.field.on_pmi_stop_counting = TRUE;
349 mmcr0_reg.field.enable_pmi = TRUE;
350 mmcr0_reg.field.enable_pmi_on_pmc1 = TRUE;
351 mmcr0_reg.field.enable_pmi_on_pmcn = TRUE;
352 sv->save_mmcr0 = mmcr0_reg.value;
353 }
354 break;
355 default:
356 retval = KERN_FAILURE;
357 break;
358 }
359
360 #ifdef HWPERFMON_DEBUG
361 kprintf("perfmon_start_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
362 #endif
363
364 return retval;
365 }
366
367 int perfmon_stop_counters(thread_t thread)
368 {
369 struct savearea *sv = thread->machine.pcb;
370 kern_return_t retval = KERN_SUCCESS;
371
372 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
373 case CPU_SUBTYPE_POWERPC_750:
374 case CPU_SUBTYPE_POWERPC_7400:
375 case CPU_SUBTYPE_POWERPC_7450:
376 {
377 ppc32_mmcr0_reg_t mmcr0_reg;
378 mmcr0_reg.value = sv->save_mmcr0;
379 mmcr0_reg.field.disable_counters_always = TRUE;
380 sv->save_mmcr0 = mmcr0_reg.value;
381 }
382 break;
383 case CPU_SUBTYPE_POWERPC_970:
384 {
385 ppc64_mmcr0_reg_t mmcr0_reg;
386 mmcr0_reg.value = sv->save_mmcr0;
387 mmcr0_reg.field.disable_counters_always = TRUE;
388 sv->save_mmcr0 = mmcr0_reg.value;
389 }
390 break;
391 default:
392 retval = KERN_FAILURE;
393 break;
394 }
395
396 #ifdef HWPERFMON_DEBUG
397 kprintf("perfmon_stop_counters (CPU%d) - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
398 #endif
399
400 return retval;
401 }
402
/*
 * Select the event number counted by a given PMC in the thread's saved
 * register image.  PMC1/PMC2 event selectors live in MMCR0, the remaining
 * PMCs' selectors live in MMCR1; which PMCs exist depends on CPU subtype
 * (750/7400: PMC1-4, 7450: PMC1-6, 970: PMC1-8).
 * Returns KERN_FAILURE for an out-of-range pmc or unsupported CPU.
 */
int perfmon_set_event(thread_t thread, int pmc, int event)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event b4 (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
	case CPU_SUBTYPE_POWERPC_7400:
		{
			/* 750/7400: four PMCs; selectors split across MMCR0/MMCR1 */
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	case CPU_SUBTYPE_POWERPC_7450:
		{
			/* 7450: six PMCs */
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_5:
				mmcr1_reg.field.pmc5_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_6:
				mmcr1_reg.field.pmc6_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			/* 970: eight PMCs; 64-bit register layout */
			ppc64_mmcr0_reg_t mmcr0_reg;
			ppc64_mmcr1_reg_t mmcr1_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr1_reg.value = sv->save_mmcr1;

			switch(pmc) {
			case PMC_1:
				mmcr0_reg.field.pmc1_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_2:
				mmcr0_reg.field.pmc2_event = event;
				sv->save_mmcr0 = mmcr0_reg.value;
				break;
			case PMC_3:
				mmcr1_reg.field.pmc3_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_4:
				mmcr1_reg.field.pmc4_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_5:
				mmcr1_reg.field.pmc5_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_6:
				mmcr1_reg.field.pmc6_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_7:
				mmcr1_reg.field.pmc7_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			case PMC_8:
				mmcr1_reg.field.pmc8_event = event;
				sv->save_mmcr1 = mmcr1_reg.value;
				break;
			default:
				retval = KERN_FAILURE;
				break;
			}
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event (CPU%d) - pmc=%d, event=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", cpu_number(), pmc, event, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
542
/*
 * Route a functional unit's event signals to the PMCs (PPC 970 only).
 * The packed functional-unit selector 'f' is unpacked into the TTM and
 * debug-bus lane select fields of the thread's saved MMCR1.
 * Returns KERN_FAILURE on all 32-bit CPUs, where this concept does not
 * apply.
 */
int perfmon_set_event_func(thread_t thread, uint32_t f)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_event_func - func=%s\n",
		f==PPC_PERFMON_FUNC_FPU ? "FUNC" :
		f==PPC_PERFMON_FUNC_ISU ? "ISU" :
		f==PPC_PERFMON_FUNC_IFU ? "IFU" :
		f==PPC_PERFMON_FUNC_VMX ? "VMX" :
		f==PPC_PERFMON_FUNC_IDU ? "IDU" :
		f==PPC_PERFMON_FUNC_GPS ? "GPS" :
		f==PPC_PERFMON_FUNC_LSU0 ? "LSU0" :
		f==PPC_PERFMON_FUNC_LSU1A ? "LSU1A" :
		f==PPC_PERFMON_FUNC_LSU1B ? "LSU1B" :
		f==PPC_PERFMON_FUNC_SPECA ? "SPECA" :
		f==PPC_PERFMON_FUNC_SPECB ? "SPECB" :
		f==PPC_PERFMON_FUNC_SPECC ? "SPECC" :
		"UNKNOWN");
#endif /* HWPERFMON_DEBUG */

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
	case CPU_SUBTYPE_POWERPC_7400:
	case CPU_SUBTYPE_POWERPC_7450:
		retval = KERN_FAILURE; /* event functional unit only applies to 970 */
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			ppc64_mmcr1_reg_t mmcr1_reg;
			ppc_func_unit_t func_unit;

			func_unit.value = f;
			mmcr1_reg.value = sv->save_mmcr1;

			mmcr1_reg.field.ttm0_select = func_unit.field.TTM0SEL;
			mmcr1_reg.field.ttm1_select = func_unit.field.TTM1SEL;
			mmcr1_reg.field.ttm2_select = 0; /* not used */
			mmcr1_reg.field.ttm3_select = func_unit.field.TTM3SEL;
			mmcr1_reg.field.speculative_event = func_unit.field.SPECSEL;
			/* NOTE(review): the same TD_CP_DBGxSEL value is fanned out to
			 * all four byte-lane selects - presumably intentional so every
			 * lane observes the chosen unit; confirm against the 970
			 * performance-monitor documentation. */
			mmcr1_reg.field.lane0_select = func_unit.field.TD_CP_DBGxSEL;
			mmcr1_reg.field.lane1_select = func_unit.field.TD_CP_DBGxSEL;
			mmcr1_reg.field.lane2_select = func_unit.field.TD_CP_DBGxSEL;
			mmcr1_reg.field.lane3_select = func_unit.field.TD_CP_DBGxSEL;

			sv->save_mmcr1 = mmcr1_reg.value;
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

	return retval;
}
599
/*
 * Set the threshold value in the thread's saved MMCR0.  The hardware field
 * holds at most 63, so larger requests are scaled via the threshold
 * multiplier where one exists (7400/7450: 2x or 32x, chosen in MMCR2) or
 * clamped to the maximum otherwise (750, and 970 where the multiplier
 * lives in HID1 which is not context switched).
 */
int perfmon_set_threshold(thread_t thread, int threshold)
{
	struct savearea *sv = thread->machine.pcb;
	kern_return_t retval = KERN_SUCCESS;

	switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
	case CPU_SUBTYPE_POWERPC_750:
		{
			ppc32_mmcr0_reg_t mmcr0_reg;

			mmcr0_reg.value = sv->save_mmcr0;

			if(threshold>63) { /* no multiplier on 750 */
				int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
				threshold = newThreshold;
			}
			mmcr0_reg.field.threshold_value = threshold;

			sv->save_mmcr0 = mmcr0_reg.value;
		}
		break;

	case CPU_SUBTYPE_POWERPC_7400:
	case CPU_SUBTYPE_POWERPC_7450:
		{
			ppc32_mmcr0_reg_t mmcr0_reg;
			ppc32_mmcr2_reg_t mmcr2_reg;

			mmcr0_reg.value = sv->save_mmcr0;
			mmcr2_reg.value = sv->save_mmcr2;

			if(threshold<=(2*63)) { /* 2x multiplier */
				/* round down to an even value so threshold/2 fits exactly */
				if(threshold%2 != 0) {
					int newThreshold = 2*(threshold/2);
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 2x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr2_reg.field.threshold_multiplier = 0;
			} else if(threshold<=(32*63)) { /* 32x multiplier */
				/* round down to a multiple of 32 */
				if(threshold%32 != 0) {
					int newThreshold = 32*(threshold/32);
#ifdef HWPERFMON_DEBUG
					kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) is not evenly divisible by 32x multiplier - using threshold of %d instead\n", threshold, newThreshold);
#endif
					threshold = newThreshold;
				}
				mmcr2_reg.field.threshold_multiplier = 1;
			} else {
				/* beyond 32*63: clamp to the largest representable value */
				int newThreshold = 32*63;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
				threshold = newThreshold;
				mmcr2_reg.field.threshold_multiplier = 1;
			}
			/* NOTE(review): the raw (rounded) threshold is written to the
			 * 6-bit field rather than threshold/multiplier - presumably the
			 * bitfield truncation yields the scaled value; confirm against
			 * the 7400/7450 MMCR0 definition. */
			mmcr0_reg.field.threshold_value = threshold;

			sv->save_mmcr0 = mmcr0_reg.value;
			sv->save_mmcr2 = mmcr2_reg.value;

		}
		break;
	case CPU_SUBTYPE_POWERPC_970:
		{
			ppc64_mmcr0_reg_t mmcr0_reg;

			mmcr0_reg.value = sv->save_mmcr0;

			if(threshold>63) { /* multiplier is in HID1 on 970 - not context switching HID1 so always 1x */
				int newThreshold = 63;
#ifdef HWPERFMON_DEBUG
				kprintf("perfmon_set_threshold - WARNING: supplied threshold (%d) exceeds max threshold value - clamping to %d\n", threshold, newThreshold);
#endif
				threshold = newThreshold;
			}
			mmcr0_reg.field.threshold_value = threshold;

			sv->save_mmcr0 = mmcr0_reg.value;
		}
		break;
	default:
		retval = KERN_FAILURE;
		break;
	}

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_set_threshold - threshold=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", threshold, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
#endif

	return retval;
}
696
697 int perfmon_set_tbsel(thread_t thread, int tbsel)
698 {
699 struct savearea *sv = thread->machine.pcb;
700 kern_return_t retval = KERN_SUCCESS;
701
702 switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
703 case CPU_SUBTYPE_POWERPC_750:
704 case CPU_SUBTYPE_POWERPC_7400:
705 case CPU_SUBTYPE_POWERPC_7450:
706 {
707 ppc32_mmcr0_reg_t mmcr0_reg;
708
709 mmcr0_reg.value = sv->save_mmcr0;
710 switch(tbsel) {
711 case 0x0:
712 case 0x1:
713 case 0x2:
714 case 0x3:
715 mmcr0_reg.field.timebase_bit_selector = tbsel;
716 break;
717 default:
718 retval = KERN_FAILURE;
719 }
720 sv->save_mmcr0 = mmcr0_reg.value;
721 }
722 break;
723 case CPU_SUBTYPE_POWERPC_970:
724 {
725 ppc64_mmcr0_reg_t mmcr0_reg;
726
727 mmcr0_reg.value = sv->save_mmcr0;
728 switch(tbsel) {
729 case 0x0:
730 case 0x1:
731 case 0x2:
732 case 0x3:
733 mmcr0_reg.field.timebase_bit_selector = tbsel;
734 break;
735 default:
736 retval = KERN_FAILURE;
737 }
738 sv->save_mmcr0 = mmcr0_reg.value;
739 }
740 break;
741 default:
742 retval = KERN_FAILURE;
743 break;
744 }
745
746 #ifdef HWPERFMON_DEBUG
747 kprintf("perfmon_set_tbsel - tbsel=%d - mmcr0=0x%llx mmcr1=0x%llx mmcr2=0x%llx\n", tbsel, sv->save_mmcr0, sv->save_mmcr1, sv->save_mmcr2);
748 #endif
749
750 return retval;
751 }
752
/*
 * Trap entry point: dispatch a user perfmon request against the thread
 * named by the port in save_r3.  r4 = action, r5 = pmc, r6 = value,
 * r7 = user pointer to an array of MAX_CPUPMC_COUNT uint64_t counters.
 * The kern_return_t status is passed back in save_r3; the function itself
 * always returns 1 ("return and check for ASTs").
 */
int perfmon_control(struct savearea *ssp)
{
	mach_port_t thr_port = CAST_DOWN(mach_port_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_t thread = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	thread = (thread_t) port_name_to_thread(thr_port); // convert user space thread port name to a thread_t
	if(!thread) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1; /* Return and check for ASTs... */
	}

	/* NOTE(review): port_name_to_thread() presumably returns a referenced
	 * thread, and no matching deallocate is visible in this function -
	 * confirm against the Mach IPC sources. */

	/* Suspend a foreign target so its saved PCB state is stable while we
	 * read/modify it; resumed before returning. */
	if(thread!=current_thread()) {
		thread_suspend(thread);
	}

#ifdef HWPERFMON_DEBUG
	//  kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require perfmon facility to be enabled */
	if(action==PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thread);
	}
	else if(action==PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thread);
	}

	else { /* individual actions which do require perfmon facility to be enabled */
		if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if(action==PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thread, pmc, val);
		}
		else if(action==PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thread, val);
		}
		else if(action==PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thread, val);
		}
		else if(action==PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thread, val);
		}
		else if(action==PPC_PERFMON_ENABLE_PMI_BRKPT) {
			/* toggle delivery of a breakpoint exception on virtual-counter
			 * overflow (consumed in perfmon_handle_pmi) */
			if(val) {
				thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		}

		/* combinable actions */
		else {
			if(action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_WRITE_COUNTERS) {
				/* note: assignment inside the condition is intentional */
				if(error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs, MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thread, kern_pmcs);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				/* note: assignment inside the condition is intentional */
				if(error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p), MAX_CPUPMC_COUNT*sizeof(uint64_t))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if(action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thread);
				if(error!=KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

 perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n", cpu_number(), ssp->save_mmcr0, ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2], ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4], ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6], ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if(thread!=current_thread()) {
		thread_resume(thread);
	}

#ifdef HWPERFMON_DEBUG
	if(retval!=KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1; /* Return and check for ASTs... */
}
888
/*
 * Performance-monitor interrupt handler for the current thread.
 * For each PMC whose top bit (0x80000000) indicates hardware overflow,
 * either fold the overflow into the 32-bit software accumulator and reset
 * the counter, or - if the accumulator has saturated at 0xFFFFFFFF and the
 * thread requested it - deliver a breakpoint exception to user space.
 * Finally re-arms the PMI and un-freezes the counters in the saved MMCR0.
 */
int perfmon_handle_pmi(struct savearea *ssp)
{
	int curPMC;
	kern_return_t retval = KERN_SUCCESS;
	thread_t thread = current_thread();

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_handle_pmi: got rupt\n");
#endif

	if(!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
		kprintf("perfmon_handle_pmi: ERROR - perfmon not enabled for this thread\n");
#endif
		return KERN_FAILURE;
	}

	for(curPMC=0; curPMC<MAX_CPUPMC_COUNT; curPMC++) {
		/* top bit set => this PMC overflowed and raised the PMI */
		if(thread->machine.pcb->save_pmc[curPMC] & 0x80000000) {
			if(thread->machine.pmcovfl[curPMC]==0xFFFFFFFF && (thread->machine.perfmonFlags & PERFMONFLAG_BREAKPOINT_FOR_PMI)) {
				/* virtual 64-bit counter overflowed; counters are left
				 * frozen since we return before re-arming below */
				doexception(EXC_BREAKPOINT, EXC_PPC_PERFMON, (unsigned int)ssp->save_srr0); // pass up a breakpoint exception
				return KERN_SUCCESS;
			} else {
				/* absorb the hardware overflow into the software count */
				thread->machine.pmcovfl[curPMC]++;
				thread->machine.pcb->save_pmc[curPMC] = 0;
			}
		}
	}

	if(retval==KERN_SUCCESS) {
		/* re-enable counting and PMI delivery in the saved MMCR0; only
		 * PMI-capable CPU subtypes are expected here */
		switch(PerProcTable[0].ppe_vaddr->cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_7450:
			{
				ppc32_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.enable_pmi = TRUE;
				thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
			}
			retval = KERN_SUCCESS;
			break;
		case CPU_SUBTYPE_POWERPC_970:
			{
				ppc64_mmcr0_reg_t mmcr0_reg;

				mmcr0_reg.value = thread->machine.pcb->save_mmcr0;
				mmcr0_reg.field.disable_counters_always = FALSE;
				mmcr0_reg.field.enable_pmi = TRUE;
				thread->machine.pcb->save_mmcr0 = mmcr0_reg.value;
			}
			retval = KERN_SUCCESS;
			break;
		default:
			retval = KERN_FAILURE;
			break;
		}
	}

	return retval;
}