apple/xnu (xnu-792.18.15) - osfmk/ppc/cpu.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/machine.h>
31 #include <mach/processor_info.h>
32
33 #include <kern/kalloc.h>
34 #include <kern/kern_types.h>
35 #include <kern/machine.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread.h>
38 #include <kern/sched_prim.h>
39 #include <kern/processor.h>
40 #include <kern/pms.h>
41
42 #include <vm/pmap.h>
43 #include <IOKit/IOHibernatePrivate.h>
44
45 #include <ppc/proc_reg.h>
46 #include <ppc/misc_protos.h>
47 #include <ppc/machine_routines.h>
48 #include <ppc/cpu_internal.h>
49 #include <ppc/exception.h>
50 #include <ppc/asm.h>
51 #include <ppc/hw_perfmon.h>
52 #include <pexpert/pexpert.h>
53 #include <kern/cpu_data.h>
54 #include <ppc/mappings.h>
55 #include <ppc/Diagnostics.h>
56 #include <ppc/trap.h>
57 #include <ppc/machine_cpu.h>
58 #include <ppc/rtclock.h>
59
60 decl_mutex_data(static,ppt_lock);
61
62 unsigned int real_ncpus = 1;
63 unsigned int max_ncpus = MAX_CPUS;
64
65 decl_simple_lock_data(static,rht_lock);
66
67 static unsigned int rht_state = 0;
68 #define RHT_WAIT 0x01
69 #define RHT_BUSY 0x02
70
71 decl_simple_lock_data(static,SignalReadyLock);
72
73 struct SIGtimebase {
74 volatile boolean_t avail;
75 volatile boolean_t ready;
76 volatile boolean_t done;
77 uint64_t abstime;
78 };
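/*
 * Handshake carried by this block (see cpu_sync_timebase and
 * cpu_timebase_signal_handler below): the requesting processor clears all
 * three flags and signals the master with CPRQtimebase; the master disables
 * its timebase via the platform's time_base_enable callback (when one is
 * provided), copies the current value into abstime and sets avail; the
 * requester writes that value into its own timebase and sets ready; the
 * master re-enables the timebase and sets done, releasing the requester.
 */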
79
80 perfCallback perfCpuSigHook = 0; /* Pointer to CHUD cpu signal hook routine */
81
82 extern int debugger_sync;
83
84 /*
85 * Forward definitions
86 */
87
88 void cpu_sync_timebase(
89 void);
90
91 void cpu_timebase_signal_handler(
92 struct per_proc_info *proc_info,
93 struct SIGtimebase *timebaseAddr);
94
95 /*
96 * Routine: cpu_bootstrap
97 * Function:	Initialize the locks used by this module.
98 */
99 void
100 cpu_bootstrap(
101 void)
102 {
103 simple_lock_init(&rht_lock,0);
104 simple_lock_init(&SignalReadyLock,0);
105 mutex_init(&ppt_lock,0);
106 }
107
108
109 /*
110 * Routine: cpu_init
111 * Function:	Restore the timebase if one was saved across sleep, reset the decrementer bookkeeping, and record the processor's type information.
112 */
113 void
114 cpu_init(
115 void)
116 {
117 struct per_proc_info *proc_info;
118
119 proc_info = getPerProc();
120
121 /*
122 * Restore the TBR; TBL is zeroed first so that a carry cannot ripple into TBU between the two writes.
123 */
124 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
125 mttb(0);
126 mttbu(proc_info->save_tbu);
127 mttb(proc_info->save_tbl);
128 }
129
130 proc_info->rtcPop = EndOfAllTime; /* forget any existing decrementer setting */
131 etimer_resync_deadlines(); /* Now that the time base is sort of correct, request the next timer pop */
132
133 proc_info->cpu_type = CPU_TYPE_POWERPC;
134 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
135 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
136 proc_info->running = TRUE;
137
138 }
139
140 /*
141 * Routine: cpu_machine_init
142 * Function:	Complete per-processor startup: platform initialization, hibernate wakeup handling, timebase synchronization for secondary processors, and interrupt setup.
143 */
144 void
145 cpu_machine_init(
146 void)
147 {
148 struct per_proc_info *proc_info;
149 volatile struct per_proc_info *mproc_info;
150
151
152 proc_info = getPerProc();
153 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
154
155 if (proc_info != mproc_info) {
156 simple_lock(&rht_lock);
157 if (rht_state & RHT_WAIT)
158 thread_wakeup(&rht_state);
159 rht_state &= ~(RHT_BUSY|RHT_WAIT);
160 simple_unlock(&rht_lock);
161 }
162
163 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
164
165 if (proc_info->hibernate) {
166 uint32_t tbu, tbl;
167
168 do {
169 tbu = mftbu();
170 tbl = mftb();
171 } while (mftbu() != tbu);
172
173 proc_info->hibernate = 0;
174 hibernate_machine_init();
175
176 // hibernate_machine_init() could take minutes and we don't want timeouts
177 // to fire as soon as scheduling starts. Reset timebase so it appears
178 // no time has elapsed, as it would for regular sleep.
179 mttb(0);
180 mttbu(tbu);
181 mttb(tbl);
182 }
183
184 if (proc_info != mproc_info) {
185 while (!((mproc_info->cpu_flags) & SignalReady))
186 continue;
187 cpu_sync_timebase();
188 }
189
190 ml_init_interrupt();
191 if (proc_info != mproc_info)
192 simple_lock(&SignalReadyLock);
193 proc_info->cpu_flags |= BootDone|SignalReady;
194 if (proc_info != mproc_info) {
195 if (proc_info->ppXFlags & SignalReadyWait) {
196 hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
197 thread_wakeup(&proc_info->cpu_flags);
198 }
199 simple_unlock(&SignalReadyLock);
200 pmsPark(); /* Timers should be cool now, park the power management stepper */
201 }
202 }
203
204
205 /*
206 * Routine: cpu_per_proc_alloc
207 * Function:	Allocate and initialize a per_proc_info structure along with its interrupt and debugger stacks.
208 */
209 struct per_proc_info *
210 cpu_per_proc_alloc(
211 void)
212 {
213 struct per_proc_info *proc_info=0;
214 void *interrupt_stack=0;
215 void *debugger_stack=0;
216
217 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
218 return (struct per_proc_info *)NULL;
219 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
220 kfree(proc_info, sizeof(struct per_proc_info));
221 return (struct per_proc_info *)NULL;
222 }
223
224 if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
225 kfree(proc_info, sizeof(struct per_proc_info));
226 kfree(interrupt_stack, INTSTACK_SIZE);
227 return (struct per_proc_info *)NULL;
228 }
229
230 bzero((void *)proc_info, sizeof(struct per_proc_info));
231
232 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT; /* Set physical address of the second page */
233 proc_info->next_savearea = (uint64_t)save_get_init();
234 proc_info->pf = BootProcInfo.pf;
235 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
236 proc_info->intstack_top_ss = proc_info->istackptr;
237 proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
238 proc_info->debstack_top_ss = proc_info->debstackptr;
239
240 return proc_info;
241
242 }
243
244
245 /*
246 * Routine: cpu_per_proc_free
247 * Function:	Free a per_proc_info structure and its stacks; the master processor's per_proc is never freed.
248 */
249 void
250 cpu_per_proc_free(
251 struct per_proc_info *proc_info
252 )
253 {
254 if (proc_info->cpu_number == master_cpu)
255 return;
256 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
257 kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
258 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
259 }
260
261
262 /*
263 * Routine: cpu_per_proc_register
264 * Function:	Assign a cpu number to the per_proc and enter it into PerProcTable.
265 */
266 kern_return_t
267 cpu_per_proc_register(
268 struct per_proc_info *proc_info
269 )
270 {
271 int cpu;
272
273 mutex_lock(&ppt_lock);
274 if (real_ncpus >= max_ncpus) {
275 mutex_unlock(&ppt_lock);
276 return KERN_FAILURE;
277 }
278 cpu = real_ncpus;
279 proc_info->cpu_number = cpu;
280 PerProcTable[cpu].ppe_vaddr = proc_info;
281 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
282 eieio();
283 real_ncpus++;
284 mutex_unlock(&ppt_lock);
285 return KERN_SUCCESS;
286 }
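/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * platform layer bringing up an additional processor would typically
 * allocate a per_proc, register it to obtain a cpu number, and then start
 * that cpu:
 *
 *	struct per_proc_info *pp = cpu_per_proc_alloc();
 *	if ((pp != NULL) && (cpu_per_proc_register(pp) == KERN_SUCCESS))
 *		(void) cpu_start(pp->cpu_number);
 */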
287
288
289 /*
290 * Routine: cpu_start
291 * Function:	Start the specified processor.
292 */
293 kern_return_t
294 cpu_start(
295 int cpu)
296 {
297 struct per_proc_info *proc_info;
298 kern_return_t ret;
299 mapping_t *mp;
300
301 proc_info = PerProcTable[cpu].ppe_vaddr;
302
303 if (cpu == cpu_number()) {
304 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
305 ml_init_interrupt();
306 proc_info->cpu_flags |= BootDone|SignalReady;
307
308 return KERN_SUCCESS;
309 } else {
310 proc_info->cpu_flags &= BootDone;
311 proc_info->interrupts_enabled = 0;
312 proc_info->pending_ast = AST_NONE;
313 proc_info->istackptr = proc_info->intstack_top_ss;
314 proc_info->rtcPop = EndOfAllTime;
315 proc_info->FPU_owner = 0;
316 proc_info->VMX_owner = 0;
317 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
318 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
319 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
320 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
321 mp = (mapping_t *)(&proc_info->ppUMWmp);
322 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
323 mp->mpSpace = invalSpace;
324
325 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
326
327 simple_lock(&rht_lock);
328 while (rht_state & RHT_BUSY) {
329 rht_state |= RHT_WAIT;
330 thread_sleep_usimple_lock((event_t)&rht_state,
331 &rht_lock, THREAD_UNINT);
332 }
333 rht_state |= RHT_BUSY;
334 simple_unlock(&rht_lock);
335
336 ml_phys_write((vm_offset_t)&ResetHandler + 0,
337 RESET_HANDLER_START);
338 ml_phys_write((vm_offset_t)&ResetHandler + 4,
339 (vm_offset_t)_start_cpu);
340 ml_phys_write((vm_offset_t)&ResetHandler + 8,
341 (vm_offset_t)&PerProcTable[cpu]);
342 }
343 /*
344 * Note: we pass the current time to the other processor here. He will load it
345 * as early as possible so that there is a chance that it is close to accurate.
346 * After the machine is up a while, we will officially resync the clocks so
347 * that all processors are the same. This is just to get close.
348 */
349
350 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
351
352 __asm__ volatile("sync"); /* Commit to storage */
353 __asm__ volatile("isync"); /* Wait a second */
354 ret = PE_cpu_start(proc_info->cpu_id,
355 proc_info->start_paddr, (vm_offset_t)proc_info);
356
357 if (ret != KERN_SUCCESS) {
358 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
359 simple_lock(&rht_lock);
360 if (rht_state & RHT_WAIT)
361 thread_wakeup(&rht_state);
362 rht_state &= ~(RHT_BUSY|RHT_WAIT);
363 simple_unlock(&rht_lock);
364 };
365 } else {
366 simple_lock(&SignalReadyLock);
367 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
368 hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
369 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
370 &SignalReadyLock, THREAD_UNINT);
371 }
372 simple_unlock(&SignalReadyLock);
373
374 }
375 return(ret);
376 }
377 }
378
379 /*
380 * Routine: cpu_exit_wait
381 * Function:	Spin until the specified processor has entered the sleep state.
382 */
383 void
384 cpu_exit_wait(
385 int cpu)
386 {
387 struct per_proc_info *tpproc;
388
389 if ( cpu != master_cpu) {
390 tpproc = PerProcTable[cpu].ppe_vaddr;
391 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
392 }
393 }
394
395
396 /*
397 * Routine: cpu_doshutdown
398 * Function:	Shut down the current processor by taking it offline.
399 */
400 void
401 cpu_doshutdown(
402 void)
403 {
404 enable_preemption();
405 processor_offline(current_processor());
406 }
407
408
409 /*
410 * Routine: cpu_sleep
411 * Function:	Prepare the current processor for sleep: save floating point and vector state, save the timebase, and quiesce; the master additionally waits for all other processors to reach the sleep state.
412 */
413 void
414 cpu_sleep(
415 void)
416 {
417 struct per_proc_info *proc_info;
418 unsigned int i;
419 unsigned int wait_ncpus_sleep, ncpus_sleep;
420 facility_context *fowner;
421
422 proc_info = getPerProc();
423
424 proc_info->running = FALSE;
425
426 fowner = proc_info->FPU_owner; /* Cache this */
427 if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
428 proc_info->FPU_owner = 0; /* Set no fpu owner now */
429
430 fowner = proc_info->VMX_owner; /* Cache this */
431 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
432 proc_info->VMX_owner = 0; /* Set no vector owner now */
433
434 if (proc_info->cpu_number == master_cpu) {
435 proc_info->cpu_flags &= BootDone;
436 proc_info->interrupts_enabled = 0;
437 proc_info->pending_ast = AST_NONE;
438
439 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
440 ml_phys_write((vm_offset_t)&ResetHandler + 0,
441 RESET_HANDLER_START);
442 ml_phys_write((vm_offset_t)&ResetHandler + 4,
443 (vm_offset_t)_start_cpu);
444 ml_phys_write((vm_offset_t)&ResetHandler + 8,
445 (vm_offset_t)&PerProcTable[master_cpu]);
446
447 __asm__ volatile("sync");
448 __asm__ volatile("isync");
449 }
450
451 wait_ncpus_sleep = real_ncpus-1;
452 ncpus_sleep = 0;
453 while (wait_ncpus_sleep != ncpus_sleep) {
454 ncpus_sleep = 0;
455 for(i=1; i < real_ncpus ; i++) {
456 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
457 ncpus_sleep++;
458 }
459 }
460
461 }
462
463 /*
464 * Save the TBR before stopping; TBU is re-read so a TBL wrap between the two reads is caught.
465 */
466 do {
467 proc_info->save_tbu = mftbu();
468 proc_info->save_tbl = mftb();
469 } while (mftbu() != proc_info->save_tbu);
470
471 PE_cpu_machine_quiesce(proc_info->cpu_id);
472 }
473
474
475 /*
476 * Routine: cpu_signal
477 * Function:
478 * Here is where we send a message to another processor. So far we only have two:
479 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
480 * currently disabled). SIGPdebug is used to enter the debugger.
481 *
482 * We set up the SIGP function to indicate that this is a simple message and set the
483 * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
484 * block for the target, we lock the message block. Then we set the parameter(s).
485 * Next we change the lock (also called "busy") to "passing" and finally signal
486 * the other processor. Note that we only wait about half a millisecond to get the message lock.
487 * If we time out, we return failure to our caller. It is their responsibility to
488 * recover.
489 */
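/*
 * For illustration (hypothetical caller): asking another processor to run
 * its AST check would look something like
 *
 *	(void) cpu_signal(target_cpu, SIGPast, 0, 0);
 *
 * the two extra parameters are ignored by the SIGPast case of
 * cpu_signal_handler below.
 */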
490 kern_return_t
491 cpu_signal(
492 int target,
493 int signal,
494 unsigned int p1,
495 unsigned int p2)
496 {
497
498 unsigned int holdStat;
499 struct per_proc_info *tpproc, *mpproc;
500 int busybitset=0;
501
502 #if DEBUG
503 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
504 #endif
505
506 mpproc = getPerProc(); /* Point to our block */
507 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
508 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
509
510 if(!tpproc->running) return KERN_FAILURE;
511
512 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
513
514 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
515
516 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
517 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
518 return KERN_SUCCESS;
519 }
520
521 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
522 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
523 return KERN_SUCCESS; /* Don't bother to send this one... */
524 }
525
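/*
 * If the pending message is only a wake and has not been accepted yet
 * (busy and pass set, ack still clear), reclaim the block by flipping it
 * back to busy; our signal then reuses it, and the wake is still delivered
 * by the interrupt that was already sent, so no new kick is needed.
 */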
526 if (tpproc->MPsigpParm0 == SIGPwake) {
527 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
528 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
529 busybitset = 1;
530 mpproc->hwCtr.numSIGPmwake++;
531 }
532 }
533 }
534
535 if((busybitset == 0) &&
536 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
537 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
538 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
539 return KERN_FAILURE; /* Timed out, take your ball and go home... */
540 }
541
542 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
543 tpproc->MPsigpParm0 = signal; /* Set message order */
544 tpproc->MPsigpParm1 = p1; /* Set additional parm */
545 tpproc->MPsigpParm2 = p2; /* Set additional parm */
546
547 __asm__ volatile("sync"); /* Make sure it's all there */
548
549 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
550 __asm__ volatile("eieio"); /* I'm a paraniod freak */
551
552 if (busybitset == 0)
553 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
554
555 return KERN_SUCCESS; /* All is goodness and rainbows... */
556 }
557
558
559 /*
560 * Routine: cpu_signal_handler
561 * Function:
562 * Here is where we implement the receiver of the signaling protocol.
563 * We wait for the signal status area to be passed to us. Then we snarf
564 * up the status, the sender, and the 3 potential parms. Next we release
565 * the lock and signal the other guy.
566 */
567 void
568 cpu_signal_handler(
569 void)
570 {
571
572 unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
573 unsigned int *parmAddr;
574 struct per_proc_info *proc_info;
575 int cpu;
576 broadcastFunc xfunc;
577 cpu = cpu_number(); /* Get the CPU number */
578
579 proc_info = getPerProc();
580
581 /*
582 * Since we've been signaled, wait about 31 ms for the signal lock to pass
583 */
584 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
585 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
586 panic("cpu_signal_handler: Lock pass timed out\n");
587 }
588
589 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
590 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
591 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
592 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
593
594 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
595
596 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
597
598 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
599
600 case MPsigpIdle: /* Was function cancelled? */
601 return; /* Yup... */
602
603 case MPsigpSigp: /* Signal Processor message? */
604
605 switch (holdParm0) { /* Decode SIGP message order */
606
607 case SIGPast: /* Should we do an AST? */
608 proc_info->hwCtr.numSIGPast++; /* Count this one */
609 #if 0
610 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
611 #endif
612 ast_check((processor_t)proc_info->processor);
613 return; /* All done... */
614
615 case SIGPcpureq: /* CPU specific function? */
616
617 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
618 switch (holdParm1) { /* Select specific function */
619
620 case CPRQtimebase:
621
622 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
623 return;
624
625 case CPRQsegload:
626 return;
627
628 case CPRQchud:
629 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
630 if(perfCpuSigHook) {
631 struct savearea *ssp = current_thread()->machine.pcb;
632 if(ssp) {
633 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
634 }
635 }
636 parmAddr[1] = 0;
637 parmAddr[0] = 0; /* Show we're done */
638 return;
639
640 case CPRQscom:
641 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
642 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
643 }
644 else { /* No, reading... */
645 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
646 }
647 return;
648
649 case CPRQsps:
650 {
651 ml_set_processor_speed_slave(holdParm2);
652 return;
653 }
654 default:
655 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
656 return;
657 }
658
659
660 case SIGPdebug: /* Enter the debugger? */
661
662 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
663 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
664 hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
665 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
666 return; /* All done now... */
667
668 case SIGPwake: /* Wake up CPU */
669 proc_info->hwCtr.numSIGPwake++; /* Count this one */
670 return; /* No need to do anything, the interrupt does it all... */
671
672 case SIGPcall: /* Call function on CPU */
673 proc_info->hwCtr.numSIGPcall++; /* Count this one */
674 xfunc = holdParm1; /* Do this since I can't seem to figure C out */
675 xfunc(holdParm2); /* Call the passed function */
676 return; /* Done... */
677
678 default:
679 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
680 return;
681
682 }
683
684 default:
685 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
686 return;
687
688 }
689 panic("cpu_signal_handler: we should never get here\n");
690 }
691
692
693 /*
694 * Routine: cpu_sync_timebase
695 * Function:	Synchronize this processor's timebase with the master processor's.
696 */
697 void
698 cpu_sync_timebase(
699 void)
700 {
701 natural_t tbu, tbl;
702 boolean_t intr;
703 struct SIGtimebase syncClkSpot;
704
705 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
706
707 syncClkSpot.avail = FALSE;
708 syncClkSpot.ready = FALSE;
709 syncClkSpot.done = FALSE;
710
711 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
712 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
713 continue;
714
715 while (syncClkSpot.avail == FALSE)
716 continue;
717
718 isync();
719
720 /*
721 * Split abstime into TBU and TBL up front so the compiler does not
722 * generate extra code inside the timebase-set sequence.
723 */
724 tbu = syncClkSpot.abstime >> 32;
725 tbl = (uint32_t)syncClkSpot.abstime;
726
727 mttb(0);
728 mttbu(tbu);
729 mttb(tbl);
730
731 syncClkSpot.ready = TRUE;
732
733 while (syncClkSpot.done == FALSE)
734 continue;
735
736 etimer_resync_deadlines(); /* Start the timer */
737 (void)ml_set_interrupts_enabled(intr);
738 }
739
740
741 /*
742 * Routine: cpu_timebase_signal_handler
743 * Function:	Master-side handler for a timebase synchronization request; supplies the current timebase to the requesting processor.
744 */
745 void
746 cpu_timebase_signal_handler(
747 struct per_proc_info *proc_info,
748 struct SIGtimebase *timebaseAddr)
749 {
750 unsigned int tbu, tbu2, tbl;
751
752 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
753 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
754
755 timebaseAddr->abstime = 0; /* Touch to force into cache */
756 sync();
757
758 do {
759 asm volatile(" mftbu %0" : "=r" (tbu));
760 asm volatile(" mftb %0" : "=r" (tbl));
761 asm volatile(" mftbu %0" : "=r" (tbu2));
762 } while (tbu != tbu2);
763
764 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
765 sync(); /* Force order */
766
767 timebaseAddr->avail = TRUE;
768
769 while (timebaseAddr->ready == FALSE)
770 continue;
771
772 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
773 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
774
775 timebaseAddr->done = TRUE;
776 }
777
778
779 /*
780 * Routine: cpu_control
781 * Function:	Processor control interface; currently handles performance monitor commands.
782 */
783 kern_return_t
784 cpu_control(
785 int slot_num,
786 processor_info_t info,
787 unsigned int count)
788 {
789 struct per_proc_info *proc_info;
790 cpu_type_t tcpu_type;
791 cpu_subtype_t tcpu_subtype;
792 processor_pm_regs_t perf_regs;
793 processor_control_cmd_t cmd;
794 boolean_t oldlevel;
795 #define MMCR0_SUPPORT_MASK 0xf83f1fff
796 #define MMCR1_SUPPORT_MASK 0xffc00000
797 #define MMCR2_SUPPORT_MASK 0x80000000
798
799 proc_info = PerProcTable[slot_num].ppe_vaddr;
800 tcpu_type = proc_info->cpu_type;
801 tcpu_subtype = proc_info->cpu_subtype;
802 cmd = (processor_control_cmd_t) info;
803
804 if (count < PROCESSOR_CONTROL_CMD_COUNT)
805 return(KERN_FAILURE);
806
807 if ( tcpu_type != cmd->cmd_cpu_type ||
808 tcpu_subtype != cmd->cmd_cpu_subtype)
809 return(KERN_FAILURE);
810
811 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
812 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
813 }
814
815 switch (cmd->cmd_op)
816 {
817 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
818 switch (tcpu_subtype)
819 {
820 case CPU_SUBTYPE_POWERPC_750:
821 case CPU_SUBTYPE_POWERPC_7400:
822 case CPU_SUBTYPE_POWERPC_7450:
823 {
824 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
825 mtpmc1(0x0);
826 mtpmc2(0x0);
827 mtpmc3(0x0);
828 mtpmc4(0x0);
829 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
830 return(KERN_SUCCESS);
831 }
832 default:
833 return(KERN_FAILURE);
834 } /* tcpu_subtype */
835 case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
836 switch (tcpu_subtype)
837 {
838 case CPU_SUBTYPE_POWERPC_750:
839 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
840 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
841 return(KERN_FAILURE);
842 else
843 {
844 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
845 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
846 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
847 mtpmc1(PERFMON_PMC1(perf_regs));
848 mtpmc2(PERFMON_PMC2(perf_regs));
849 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
850 mtpmc3(PERFMON_PMC3(perf_regs));
851 mtpmc4(PERFMON_PMC4(perf_regs));
852 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
853 return(KERN_SUCCESS);
854 }
855 case CPU_SUBTYPE_POWERPC_7400:
856 case CPU_SUBTYPE_POWERPC_7450:
857 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
858 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
859 return(KERN_FAILURE);
860 else
861 {
862 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
863 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
864 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
865 mtpmc1(PERFMON_PMC1(perf_regs));
866 mtpmc2(PERFMON_PMC2(perf_regs));
867 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
868 mtpmc3(PERFMON_PMC3(perf_regs));
869 mtpmc4(PERFMON_PMC4(perf_regs));
870 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
871 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
872 return(KERN_SUCCESS);
873 }
874 default:
875 return(KERN_FAILURE);
876 } /* switch tcpu_subtype */
877 case PROCESSOR_PM_SET_MMCR:
878 switch (tcpu_subtype)
879 {
880 case CPU_SUBTYPE_POWERPC_750:
881 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
882 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
883 return(KERN_FAILURE);
884 else
885 {
886 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
887 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
888 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
889 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
890 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
891 return(KERN_SUCCESS);
892 }
893 case CPU_SUBTYPE_POWERPC_7400:
894 case CPU_SUBTYPE_POWERPC_7450:
895 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
896 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
897 return(KERN_FAILURE);
898 else
899 {
900 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
901 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
902 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
903 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
904 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
905 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
906 return(KERN_SUCCESS);
907 }
908 default:
909 return(KERN_FAILURE);
910 } /* tcpu_subtype */
911 default:
912 return(KERN_FAILURE);
913 } /* switch cmd_op */
914 }
915
916
917 /*
918 * Routine: cpu_info_count
919 * Function:	Return the number of info words for the given processor flavor.
920 */
921 kern_return_t
922 cpu_info_count(
923 processor_flavor_t flavor,
924 unsigned int *count)
925 {
926 cpu_subtype_t tcpu_subtype;
927
928 /*
929 * For now, we just assume that all CPUs are of the same type
930 */
931 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
932 switch (flavor) {
933 case PROCESSOR_PM_REGS_INFO:
934 switch (tcpu_subtype) {
935 case CPU_SUBTYPE_POWERPC_750:
936
937 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
938 return(KERN_SUCCESS);
939
940 case CPU_SUBTYPE_POWERPC_7400:
941 case CPU_SUBTYPE_POWERPC_7450:
942
943 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
944 return(KERN_SUCCESS);
945
946 default:
947 *count = 0;
948 return(KERN_INVALID_ARGUMENT);
949 } /* switch tcpu_subtype */
950
951 case PROCESSOR_TEMPERATURE:
952 *count = PROCESSOR_TEMPERATURE_COUNT;
953 return (KERN_SUCCESS);
954
955 default:
956 *count = 0;
957 return(KERN_INVALID_ARGUMENT);
958
959 }
960 }
961
962
963 /*
964 * Routine: cpu_info
965 * Function:	Return processor information (performance monitor registers or temperature) for the given flavor and slot.
966 */
967 kern_return_t
968 cpu_info(
969 processor_flavor_t flavor,
970 int slot_num,
971 processor_info_t info,
972 unsigned int *count)
973 {
974 cpu_subtype_t tcpu_subtype;
975 processor_pm_regs_t perf_regs;
976 boolean_t oldlevel;
977
978 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
979
980 switch (flavor) {
981 case PROCESSOR_PM_REGS_INFO:
982
983 perf_regs = (processor_pm_regs_t) info;
984
985 switch (tcpu_subtype) {
986 case CPU_SUBTYPE_POWERPC_750:
987
988 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
989 return(KERN_FAILURE);
990
991 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
992 PERFMON_MMCR0(perf_regs) = mfmmcr0();
993 PERFMON_PMC1(perf_regs) = mfpmc1();
994 PERFMON_PMC2(perf_regs) = mfpmc2();
995 PERFMON_MMCR1(perf_regs) = mfmmcr1();
996 PERFMON_PMC3(perf_regs) = mfpmc3();
997 PERFMON_PMC4(perf_regs) = mfpmc4();
998 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
999
1000 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
1001 return(KERN_SUCCESS);
1002
1003 case CPU_SUBTYPE_POWERPC_7400:
1004 case CPU_SUBTYPE_POWERPC_7450:
1005
1006 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1007 return(KERN_FAILURE);
1008
1009 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1010 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1011 PERFMON_PMC1(perf_regs) = mfpmc1();
1012 PERFMON_PMC2(perf_regs) = mfpmc2();
1013 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1014 PERFMON_PMC3(perf_regs) = mfpmc3();
1015 PERFMON_PMC4(perf_regs) = mfpmc4();
1016 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1017 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1018
1019 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1020 return(KERN_SUCCESS);
1021
1022 default:
1023 return(KERN_FAILURE);
1024 } /* switch tcpu_subtype */
1025
1026 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1027
1028 *info = -1; /* Temperature reporting is not supported */
1029 return(KERN_FAILURE);
1030
1031 default:
1032 return(KERN_INVALID_ARGUMENT);
1033
1034 } /* flavor */
1035 }
1036
1037
1038 /*
1039 * Routine: cpu_to_processor
1040 * Function:
1041 */
1042 processor_t
1043 cpu_to_processor(
1044 int cpu)
1045 {
1046 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1047 }
1048
1049
1050 /*
1051 * Routine: slot_type
1052 * Function:
1053 */
1054 cpu_type_t
1055 slot_type(
1056 int slot_num)
1057 {
1058 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1059 }
1060
1061
1062 /*
1063 * Routine: slot_subtype
1064 * Function:
1065 */
1066 cpu_subtype_t
1067 slot_subtype(
1068 int slot_num)
1069 {
1070 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1071 }
1072
1073
1074 /*
1075 * Routine: slot_threadtype
1076 * Function:
1077 */
1078 cpu_threadtype_t
1079 slot_threadtype(
1080 int slot_num)
1081 {
1082 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1083 }
1084
1085
1086 /*
1087 * Routine: cpu_type
1088 * Function:
1089 */
1090 cpu_type_t
1091 cpu_type(void)
1092 {
1093 return (getPerProc()->cpu_type);
1094 }
1095
1096
1097 /*
1098 * Routine: cpu_subtype
1099 * Function:
1100 */
1101 cpu_subtype_t
1102 cpu_subtype(void)
1103 {
1104 return (getPerProc()->cpu_subtype);
1105 }
1106
1107
1108 /*
1109 * Routine: cpu_threadtype
1110 * Function:
1111 */
1112 cpu_threadtype_t
1113 cpu_threadtype(void)
1114 {
1115 return (getPerProc()->cpu_threadtype);
1116 }
1117
1118 /*
1119 * Call a function on all running processors
1120 *
1121 * Note that the synch parameter is used to wait until all functions are complete.
1122 * It is not passed to the other processor and must be known by the called function.
1123 * The called function must do a thread_wakeup on the synch if it decrements the
1124 * synch count to 0.
1125 */
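/*
 * A minimal sketch of a broadcast target (hypothetical, for illustration
 * only).  The synch word is shared by convention rather than passed through
 * SIGPcall, and the sketch assumes hw_atomic_sub returns the decremented
 * value:
 *
 *	uint32_t my_synch;				// agreed on out of band
 *
 *	void my_broadcast_func(uint32_t parm) {
 *		do_percpu_work(parm);			// hypothetical helper
 *		if (hw_atomic_sub(&my_synch, 1) == 0)	// last one wakes the caller
 *			thread_wakeup((event_t)&my_synch);
 *	}
 */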
1126
1127
1128 int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
1129
1130 int sigproc, cpu, ocpu;
1131
1132 cpu = cpu_number(); /* Who are we? */
1133 sigproc = 0; /* Clear called processor count */
1134
1135 if(real_ncpus > 1) { /* Are we just a uni? */
1136
1137 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1138
1139 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1140 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1141 hw_atomic_add(synch, 1); /* Tentatively bump synchronizer */
1142 sigproc++; /* Tentatively bump signal sent count */
1143 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1144 hw_atomic_sub(synch, 1); /* Other guy isn't really there, ignore it */
1145 sigproc--; /* and don't count it */
1146 }
1147 }
1148
1149 if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
1150 else thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1151 }
1152
1153 return sigproc; /* Return the number of guys actually signalled */
1154
1155 }