apple/xnu (xnu-792.12.6): osfmk/ppc/cpu.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <mach/mach_types.h>
32 #include <mach/machine.h>
33 #include <mach/processor_info.h>
34
35 #include <kern/kalloc.h>
36 #include <kern/kern_types.h>
37 #include <kern/machine.h>
38 #include <kern/misc_protos.h>
39 #include <kern/thread.h>
40 #include <kern/sched_prim.h>
41 #include <kern/processor.h>
42
43 #include <vm/pmap.h>
44 #include <IOKit/IOHibernatePrivate.h>
45
46 #include <ppc/proc_reg.h>
47 #include <ppc/misc_protos.h>
48 #include <ppc/machine_routines.h>
49 #include <ppc/cpu_internal.h>
50 #include <ppc/exception.h>
51 #include <ppc/asm.h>
52 #include <ppc/hw_perfmon.h>
53 #include <pexpert/pexpert.h>
54 #include <kern/cpu_data.h>
55 #include <ppc/mappings.h>
56 #include <ppc/Diagnostics.h>
57 #include <ppc/trap.h>
58 #include <ppc/machine_cpu.h>
59 #include <ppc/pms.h>
60 #include <ppc/rtclock.h>
61
62 decl_mutex_data(static,ppt_lock);
63
64 unsigned int real_ncpus = 1;
65 unsigned int max_ncpus = MAX_CPUS;
66
67 decl_simple_lock_data(static,rht_lock);
68
69 static unsigned int rht_state = 0;
70 #define RHT_WAIT 0x01
71 #define RHT_BUSY 0x02
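/*
 * rht_state serializes use of the shared reset handler: cpu_start() sets
 * RHT_BUSY while it starts a processor through the reset vector, other
 * starters set RHT_WAIT and sleep on rht_state, and the newly started
 * processor clears both bits in cpu_machine_init().
 */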
72
73 decl_simple_lock_data(static,SignalReadyLock);
74
75 struct SIGtimebase {
76 boolean_t avail;
77 boolean_t ready;
78 boolean_t done;
79 uint64_t abstime;
80 };
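/*
 * Handshake cell for the cross-cpu timebase sync (see cpu_sync_timebase()
 * and cpu_timebase_signal_handler() below): the requesting cpu passes a
 * pointer to one of these with a CPRQtimebase request; the master fills in
 * abstime and sets avail, the requester loads its TBR and sets ready, and
 * the master re-enables its timebase and sets done.
 */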
81
82 perfCallback perfCpuSigHook = 0; /* Pointer to CHUD cpu signal hook routine */
83
84 extern int debugger_sync;
85
86 /*
87 * Forward definitions
88 */
89
90 void cpu_sync_timebase(
91 void);
92
93 void cpu_timebase_signal_handler(
94 struct per_proc_info *proc_info,
95 struct SIGtimebase *timebaseAddr);
96
97 /*
98 * Routine: cpu_bootstrap
 99  * Function:	Initialize the module locks (reset handler, signal-ready, and per_proc table).
100 */
101 void
102 cpu_bootstrap(
103 void)
104 {
105 simple_lock_init(&rht_lock,0);
106 simple_lock_init(&SignalReadyLock,0);
107 mutex_init(&ppt_lock,0);
108 }
109
110
111 /*
112 * Routine: cpu_init
113  * Function:	Restore the timebase if one was saved, request the next timer pop, and
114  *		record this cpu's type, subtype, and running state.
114 */
115 void
116 cpu_init(
117 void)
118 {
119 struct per_proc_info *proc_info;
120
121 proc_info = getPerProc();
122
123 /*
124 * Restore the TBR.
125 */
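	/*
	 * (TBL is written as zero first so that a carry out of the low word
	 * cannot change TBU between the two writes that follow; the saved
	 * low word is loaded last.)
	 */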
126 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
127 mttb(0);
128 mttbu(proc_info->save_tbu);
129 mttb(proc_info->save_tbl);
130 }
131
132 setTimerReq(); /* Now that the time base is sort of correct, request the next timer pop */
133
134 proc_info->cpu_type = CPU_TYPE_POWERPC;
135 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
136 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
137 proc_info->running = TRUE;
138
139 }
140
141 /*
142 * Routine: cpu_machine_init
143  * Function:	Finish bringing a cpu up: release the reset handler, run platform init,
144  *		handle hibernate wake, sync the timebase with the master, and mark the
145  *		cpu BootDone/SignalReady.
144 */
145 void
146 cpu_machine_init(
147 void)
148 {
149 struct per_proc_info *proc_info;
150 volatile struct per_proc_info *mproc_info;
151
152
153 proc_info = getPerProc();
154 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
155
156 if (proc_info != mproc_info) {
157 simple_lock(&rht_lock);
158 if (rht_state & RHT_WAIT)
159 thread_wakeup(&rht_state);
160 rht_state &= ~(RHT_BUSY|RHT_WAIT);
161 simple_unlock(&rht_lock);
162 }
163
164 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
165
166 if (proc_info->hibernate) {
167 uint32_t tbu, tbl;
168
169 do {
170 tbu = mftbu();
171 tbl = mftb();
172 } while (mftbu() != tbu);
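		/*
		 * (Re-reading TBU and retrying guards against TBL carrying into
		 * TBU between the two reads; this is the standard way to take a
		 * coherent 64-bit timebase snapshot.)
		 */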
173
174 proc_info->hibernate = 0;
175 hibernate_machine_init();
176
177 // hibernate_machine_init() could take minutes and we don't want timeouts
178 // to fire as soon as scheduling starts. Reset timebase so it appears
179 // no time has elapsed, as it would for regular sleep.
180 mttb(0);
181 mttbu(tbu);
182 mttb(tbl);
183 }
184
185 if (proc_info != mproc_info) {
186 while (!((mproc_info->cpu_flags) & SignalReady))
187 continue;
188 cpu_sync_timebase();
189 }
190
191 ml_init_interrupt();
192 if (proc_info != mproc_info)
193 simple_lock(&SignalReadyLock);
194 proc_info->cpu_flags |= BootDone|SignalReady;
195 if (proc_info != mproc_info) {
196 if (proc_info->ppXFlags & SignalReadyWait) {
197 hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
198 thread_wakeup(&proc_info->cpu_flags);
199 }
200 simple_unlock(&SignalReadyLock);
201 pmsPark(); /* Timers should be cool now, park the power management stepper */
202 }
203 }
204
205
206 /*
207 * Routine: cpu_per_proc_alloc
208  * Function:	Allocate and initialize a per_proc, including its interrupt and debugger stacks.
209 */
210 struct per_proc_info *
211 cpu_per_proc_alloc(
212 void)
213 {
214 struct per_proc_info *proc_info=0;
215 void *interrupt_stack=0;
216 void *debugger_stack=0;
217
218 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
219 return (struct per_proc_info *)NULL;
220 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
221 kfree(proc_info, sizeof(struct per_proc_info));
222 return (struct per_proc_info *)NULL;
223 }
224
225 if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
226 kfree(proc_info, sizeof(struct per_proc_info));
227 kfree(interrupt_stack, INTSTACK_SIZE);
228 return (struct per_proc_info *)NULL;
229 }
230
231 bzero((void *)proc_info, sizeof(struct per_proc_info));
232
233 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT; /* Set physical address of the second page */
234 proc_info->next_savearea = (uint64_t)save_get_init();
235 proc_info->pf = BootProcInfo.pf;
236 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
237 proc_info->intstack_top_ss = proc_info->istackptr;
238 proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
239 proc_info->debstack_top_ss = proc_info->debstackptr;
240
241 return proc_info;
242
243 }
244
245
246 /*
247 * Routine: cpu_per_proc_free
248  * Function:	Free a per_proc and its interrupt and debugger stacks (never the master cpu's).
249 */
250 void
251 cpu_per_proc_free(
252 struct per_proc_info *proc_info
253 )
254 {
255 if (proc_info->cpu_number == master_cpu)
256 return;
257 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
258 kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
259 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
260 }
261
262
263 /*
264 * Routine: cpu_per_proc_register
265  * Function:	Assign the next available cpu number to the per_proc and enter it in PerProcTable.
266 */
267 kern_return_t
268 cpu_per_proc_register(
269 struct per_proc_info *proc_info
270 )
271 {
272 int cpu;
273
274 mutex_lock(&ppt_lock);
275 if (real_ncpus >= max_ncpus) {
276 mutex_unlock(&ppt_lock);
277 return KERN_FAILURE;
278 }
279 cpu = real_ncpus;
280 proc_info->cpu_number = cpu;
281 PerProcTable[cpu].ppe_vaddr = proc_info;
282 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
283 eieio();
284 real_ncpus++;
285 mutex_unlock(&ppt_lock);
286 return KERN_SUCCESS;
287 }
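/*
 * Illustrative bring-up order (hypothetical caller; the real caller lives in
 * the platform/machine layer, not in this file):
 *
 *	struct per_proc_info *pp = cpu_per_proc_alloc();
 *
 *	if (pp != NULL && cpu_per_proc_register(pp) == KERN_SUCCESS)
 *		(void)cpu_start(pp->cpu_number);
 */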
288
289
290 /*
291 * Routine: cpu_start
292  * Function:	Start the given cpu: finish init if it is the current cpu, otherwise reset
293  *		its per_proc state, install the reset handler if needed, and ask the
294  *		platform to start it.
293 */
294 kern_return_t
295 cpu_start(
296 int cpu)
297 {
298 struct per_proc_info *proc_info;
299 kern_return_t ret;
300 mapping_t *mp;
301
302 proc_info = PerProcTable[cpu].ppe_vaddr;
303
304 if (cpu == cpu_number()) {
305 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
306 ml_init_interrupt();
307 proc_info->cpu_flags |= BootDone|SignalReady;
308
309 return KERN_SUCCESS;
310 } else {
311 proc_info->cpu_flags &= BootDone;
312 proc_info->interrupts_enabled = 0;
313 proc_info->pending_ast = AST_NONE;
314 proc_info->istackptr = proc_info->intstack_top_ss;
315 proc_info->rtcPop = EndOfAllTime;
316 proc_info->FPU_owner = 0;
317 proc_info->VMX_owner = 0;
318 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
319 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
320 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
321 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
322 mp = (mapping_t *)(&proc_info->ppUMWmp);
323 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
324 mp->mpSpace = invalSpace;
325
326 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
327
328 simple_lock(&rht_lock);
329 while (rht_state & RHT_BUSY) {
330 rht_state |= RHT_WAIT;
331 thread_sleep_usimple_lock((event_t)&rht_state,
332 &rht_lock, THREAD_UNINT);
333 }
334 rht_state |= RHT_BUSY;
335 simple_unlock(&rht_lock);
336
337 ml_phys_write((vm_offset_t)&ResetHandler + 0,
338 RESET_HANDLER_START);
339 ml_phys_write((vm_offset_t)&ResetHandler + 4,
340 (vm_offset_t)_start_cpu);
341 ml_phys_write((vm_offset_t)&ResetHandler + 8,
342 (vm_offset_t)&PerProcTable[cpu]);
343 }
344 /*
345 * Note: we pass the current time to the other processor here. He will load it
346 * as early as possible so that there is a chance that it is close to accurate.
347 * After the machine is up a while, we will officially resync the clocks so
348 * that all processors are the same. This is just to get close.
349 */
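		/* (The official resync is the cpu_sync_timebase() handshake, run by the started cpu in cpu_machine_init().) */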
350
351 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
352
353 __asm__ volatile("sync"); /* Commit to storage */
354 __asm__ volatile("isync"); /* Wait a second */
355 ret = PE_cpu_start(proc_info->cpu_id,
356 proc_info->start_paddr, (vm_offset_t)proc_info);
357
358 if (ret != KERN_SUCCESS) {
359 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
360 simple_lock(&rht_lock);
361 if (rht_state & RHT_WAIT)
362 thread_wakeup(&rht_state);
363 rht_state &= ~(RHT_BUSY|RHT_WAIT);
364 simple_unlock(&rht_lock);
365 };
366 } else {
367 simple_lock(&SignalReadyLock);
368 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
369 hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
370 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
371 &SignalReadyLock, THREAD_UNINT);
372 }
373 simple_unlock(&SignalReadyLock);
374
375 }
376 return(ret);
377 }
378 }
379
380 /*
381 * Routine: cpu_exit_wait
382  * Function:	Spin until the specified cpu has reached SleepState (no-op for the master).
383 */
384 void
385 cpu_exit_wait(
386 int cpu)
387 {
388 struct per_proc_info *tpproc;
389
390 if ( cpu != master_cpu) {
391 tpproc = PerProcTable[cpu].ppe_vaddr;
392 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
393 }
394 }
395
396
397 /*
398 * Routine: cpu_doshutdown
399  * Function:	Shut down the current processor by taking it offline.
400 */
401 void
402 cpu_doshutdown(
403 void)
404 {
405 enable_preemption();
406 processor_offline(current_processor());
407 }
408
409
410 /*
411 * Routine: cpu_sleep
412  * Function:	Prepare this cpu for sleep: save FPU/vector state, (on the master) wait for
413  *		all other cpus to sleep, save the timebase, and quiesce.
413 */
414 void
415 cpu_sleep(
416 void)
417 {
418 struct per_proc_info *proc_info;
419 unsigned int i;
420 unsigned int wait_ncpus_sleep, ncpus_sleep;
421 facility_context *fowner;
422
423 proc_info = getPerProc();
424
425 proc_info->running = FALSE;
426
427 fowner = proc_info->FPU_owner; /* Cache this */
428 if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
429 proc_info->FPU_owner = 0; /* Set no fpu owner now */
430
431 fowner = proc_info->VMX_owner; /* Cache this */
432 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
433 proc_info->VMX_owner = 0; /* Set no vector owner now */
434
435 if (proc_info->cpu_number == master_cpu) {
436 proc_info->cpu_flags &= BootDone;
437 proc_info->interrupts_enabled = 0;
438 proc_info->pending_ast = AST_NONE;
439
440 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
441 ml_phys_write((vm_offset_t)&ResetHandler + 0,
442 RESET_HANDLER_START);
443 ml_phys_write((vm_offset_t)&ResetHandler + 4,
444 (vm_offset_t)_start_cpu);
445 ml_phys_write((vm_offset_t)&ResetHandler + 8,
446 (vm_offset_t)&PerProcTable[master_cpu]);
447
448 __asm__ volatile("sync");
449 __asm__ volatile("isync");
450 }
451
452 wait_ncpus_sleep = real_ncpus-1;
453 ncpus_sleep = 0;
454 while (wait_ncpus_sleep != ncpus_sleep) {
455 ncpus_sleep = 0;
456 for(i=1; i < real_ncpus ; i++) {
457 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
458 ncpus_sleep++;
459 }
460 }
461
462 }
463
464 /*
465 * Save the TBR before stopping.
466 */
467 do {
468 proc_info->save_tbu = mftbu();
469 proc_info->save_tbl = mftb();
470 } while (mftbu() != proc_info->save_tbu);
471
472 PE_cpu_machine_quiesce(proc_info->cpu_id);
473 }
474
475
476 /*
477 * Routine: cpu_signal
478 * Function:
479  * Here is where we send a message to another processor. The message orders in use
480  * are SIGPast, SIGPdebug, SIGPwake, SIGPcall, and SIGPcpureq (see cpu_signal_handler()
481  * below). SIGPast is used to preempt and kick off threads; SIGPdebug enters the debugger.
482 *
483 * We set up the SIGP function to indicate that this is a simple message and set the
484  * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
485 * block for the target, we lock the message block. Then we set the parameter(s).
486 * Next we change the lock (also called "busy") to "passing" and finally signal
487 * the other processor. Note that we only wait about 1ms to get the message lock.
488 * If we time out, we return failure to our caller. It is their responsibility to
489 * recover.
490 */
491 kern_return_t
492 cpu_signal(
493 int target,
494 int signal,
495 unsigned int p1,
496 unsigned int p2)
497 {
498
499 unsigned int holdStat;
500 struct per_proc_info *tpproc, *mpproc;
501 int busybitset=0;
502
503 #if DEBUG
504 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
505 #endif
506
507 mpproc = getPerProc(); /* Point to our block */
508 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
509 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
510
511 if(!tpproc->running) return KERN_FAILURE;
512
513 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
514
515 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
516
517 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
518 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
519 return KERN_SUCCESS;
520 }
521
522 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
523 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
524 return KERN_SUCCESS; /* Don't bother to send this one... */
525 }
526
527 if (tpproc->MPsigpParm0 == SIGPwake) {
528 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
529 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
530 busybitset = 1;
531 mpproc->hwCtr.numSIGPmwake++;
532 }
533 }
534 }
535
536 if((busybitset == 0) &&
537 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
538 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
539 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
540 return KERN_FAILURE; /* Timed out, take your ball and go home... */
541 }
542
543 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
544 tpproc->MPsigpParm0 = signal; /* Set message order */
545 tpproc->MPsigpParm1 = p1; /* Set additional parm */
546 tpproc->MPsigpParm2 = p2; /* Set additional parm */
547
548 __asm__ volatile("sync"); /* Make sure it's all there */
549
550 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
551 	__asm__ volatile("eieio"); /* I'm a paranoid freak */
552
553 if (busybitset == 0)
554 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
555
556 return KERN_SUCCESS; /* All is goodness and rainbows... */
557 }
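/*
 * Illustrative caller sketch (not in the original source); "target_cpu" is a
 * hypothetical variable. As noted above, a timeout is returned to the caller,
 * who must recover, for example by retrying:
 *
 *	if (cpu_signal(target_cpu, SIGPast, 0, 0) != KERN_SUCCESS) {
 *		... the signal was not delivered; retry or fall back ...
 *	}
 */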
558
559
560 /*
561 * Routine: cpu_signal_handler
562 * Function:
563 * Here is where we implement the receiver of the signaling protocol.
564 * We wait for the signal status area to be passed to us. Then we snarf
565 * up the status, the sender, and the 3 potential parms. Next we release
566 * the lock and signal the other guy.
567 */
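/*
 * Summary of the MPsigpStat handshake as implemented here and in cpu_signal()
 * above (the message field is the MPsigpMsgp mask, i.e. the busy/pass bits):
 *
 *	free:	neither MPsigpBusy nor MPsigpPass	block available to senders
 *	locked:	MPsigpBusy				sender owns the block, filling parms
 *	passed:	MPsigpBusy | MPsigpPass			sender has handed the message off
 *	taken:	MPsigpBusy | MPsigpPass | MPsigpAck	receiver has picked it up
 *
 * The receiver then clears the message and ack bits to release the block.
 */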
568 void
569 cpu_signal_handler(
570 void)
571 {
572
573 unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
574 unsigned int *parmAddr;
575 struct per_proc_info *proc_info;
576 int cpu;
577 broadcastFunc xfunc;
578 cpu = cpu_number(); /* Get the CPU number */
579
580 proc_info = getPerProc();
581
582 /*
583 * Since we've been signaled, wait about 31 ms for the signal lock to pass
584 */
585 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
586 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
587 panic("cpu_signal_handler: Lock pass timed out\n");
588 }
589
590 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
591 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
592 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
593 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
594
595 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
596
597 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
598
599 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
600
601 case MPsigpIdle: /* Was function cancelled? */
602 return; /* Yup... */
603
604 case MPsigpSigp: /* Signal Processor message? */
605
606 switch (holdParm0) { /* Decode SIGP message order */
607
608 case SIGPast: /* Should we do an AST? */
609 proc_info->hwCtr.numSIGPast++; /* Count this one */
610 #if 0
611 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
612 #endif
613 ast_check((processor_t)proc_info->processor);
614 return; /* All done... */
615
616 case SIGPcpureq: /* CPU specific function? */
617
618 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
619 switch (holdParm1) { /* Select specific function */
620
621 case CPRQtimebase:
622
623 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
624 return;
625
626 case CPRQsegload:
627 return;
628
629 case CPRQchud:
630 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
631 if(perfCpuSigHook) {
632 struct savearea *ssp = current_thread()->machine.pcb;
633 if(ssp) {
634 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
635 }
636 }
637 parmAddr[1] = 0;
638 parmAddr[0] = 0; /* Show we're done */
639 return;
640
641 case CPRQscom:
642 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
643 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
644 }
645 else { /* No, reading... */
646 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
647 }
648 return;
649
650 case CPRQsps:
651 {
652 ml_set_processor_speed_slave(holdParm2);
653 return;
654 }
655 default:
656 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
657 return;
658 }
659
660
661 case SIGPdebug: /* Enter the debugger? */
662
663 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
664 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
665 hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
666 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
667 return; /* All done now... */
668
669 case SIGPwake: /* Wake up CPU */
670 proc_info->hwCtr.numSIGPwake++; /* Count this one */
671 return; /* No need to do anything, the interrupt does it all... */
672
673 case SIGPcall: /* Call function on CPU */
674 proc_info->hwCtr.numSIGPcall++; /* Count this one */
675 xfunc = holdParm1; /* Do this since I can't seem to figure C out */
676 xfunc(holdParm2); /* Call the passed function */
677 return; /* Done... */
678
679 default:
680 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
681 return;
682
683 }
684
685 default:
686 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
687 return;
688
689 }
690 panic("cpu_signal_handler: we should never get here\n");
691 }
692
693
694 /*
695 * Routine: cpu_sync_timebase
696  * Function:	Slave side of the timebase handshake: obtain the master's timebase and
697  *		load it into this processor's TBR.
697 */
698 void
699 cpu_sync_timebase(
700 void)
701 {
702 natural_t tbu, tbl;
703 boolean_t intr;
704 struct SIGtimebase syncClkSpot;
705
706 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
707
708 syncClkSpot.avail = FALSE;
709 syncClkSpot.ready = FALSE;
710 syncClkSpot.done = FALSE;
711
712 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
713 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
714 continue;
715
716 while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
717 continue;
718
719 isync();
720
721 /*
722  * We split the value into halves here to keep the compiler from generating
723  * extra code in the timebase-set sequence below.
724 */
725 tbu = syncClkSpot.abstime >> 32;
726 tbl = (uint32_t)syncClkSpot.abstime;
727
728 mttb(0);
729 mttbu(tbu);
730 mttb(tbl);
731
732 syncClkSpot.ready = TRUE;
733
734 while (*(volatile int *)&(syncClkSpot.done) == FALSE)
735 continue;
736
737 setTimerReq(); /* Start the timer */
738
739 (void)ml_set_interrupts_enabled(intr);
740 }
741
742
743 /*
744 * Routine: cpu_timebase_signal_handler
745  * Function:	Master side of the timebase handshake: capture the current timebase and
746  *		hand it to the requesting processor.
746 */
747 void
748 cpu_timebase_signal_handler(
749 struct per_proc_info *proc_info,
750 struct SIGtimebase *timebaseAddr)
751 {
752 unsigned int tbu, tbu2, tbl;
753
754 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
755 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
756
757 timebaseAddr->abstime = 0; /* Touch to force into cache */
758 sync();
759
760 do {
761 asm volatile(" mftbu %0" : "=r" (tbu));
762 asm volatile(" mftb %0" : "=r" (tbl));
763 asm volatile(" mftbu %0" : "=r" (tbu2));
764 } while (tbu != tbu2);
765
766 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
767 sync(); /* Force order */
768
769 timebaseAddr->avail = TRUE;
770
771 while (*(volatile int *)&(timebaseAddr->ready) == FALSE);
772
773 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
774 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
775
776 timebaseAddr->done = TRUE;
777 }
778
779
780 /*
781 * Routine: cpu_control
782  * Function:	Performance monitor control: clear the PMCs or load PMC/MMCR values on the given slot.
783 */
784 kern_return_t
785 cpu_control(
786 int slot_num,
787 processor_info_t info,
788 unsigned int count)
789 {
790 struct per_proc_info *proc_info;
791 cpu_type_t tcpu_type;
792 cpu_subtype_t tcpu_subtype;
793 processor_pm_regs_t perf_regs;
794 processor_control_cmd_t cmd;
795 boolean_t oldlevel;
796 #define MMCR0_SUPPORT_MASK 0xf83f1fff
797 #define MMCR1_SUPPORT_MASK 0xffc00000
798 #define MMCR2_SUPPORT_MASK 0x80000000
799
800 proc_info = PerProcTable[slot_num].ppe_vaddr;
801 tcpu_type = proc_info->cpu_type;
802 tcpu_subtype = proc_info->cpu_subtype;
803 cmd = (processor_control_cmd_t) info;
804
805 if (count < PROCESSOR_CONTROL_CMD_COUNT)
806 return(KERN_FAILURE);
807
808 if ( tcpu_type != cmd->cmd_cpu_type ||
809 tcpu_subtype != cmd->cmd_cpu_subtype)
810 return(KERN_FAILURE);
811
812 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
813 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
814 }
815
816 switch (cmd->cmd_op)
817 {
818 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
819 switch (tcpu_subtype)
820 {
821 case CPU_SUBTYPE_POWERPC_750:
822 case CPU_SUBTYPE_POWERPC_7400:
823 case CPU_SUBTYPE_POWERPC_7450:
824 {
825 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
826 mtpmc1(0x0);
827 mtpmc2(0x0);
828 mtpmc3(0x0);
829 mtpmc4(0x0);
830 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
831 return(KERN_SUCCESS);
832 }
833 default:
834 return(KERN_FAILURE);
835 } /* tcpu_subtype */
836 	case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
837 switch (tcpu_subtype)
838 {
839 case CPU_SUBTYPE_POWERPC_750:
840 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
841 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
842 return(KERN_FAILURE);
843 else
844 {
845 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
846 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
847 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
848 mtpmc1(PERFMON_PMC1(perf_regs));
849 mtpmc2(PERFMON_PMC2(perf_regs));
850 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
851 mtpmc3(PERFMON_PMC3(perf_regs));
852 mtpmc4(PERFMON_PMC4(perf_regs));
853 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
854 return(KERN_SUCCESS);
855 }
856 case CPU_SUBTYPE_POWERPC_7400:
857 case CPU_SUBTYPE_POWERPC_7450:
858 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
859 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
860 return(KERN_FAILURE);
861 else
862 {
863 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
864 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
865 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
866 mtpmc1(PERFMON_PMC1(perf_regs));
867 mtpmc2(PERFMON_PMC2(perf_regs));
868 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
869 mtpmc3(PERFMON_PMC3(perf_regs));
870 mtpmc4(PERFMON_PMC4(perf_regs));
871 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
872 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
873 return(KERN_SUCCESS);
874 }
875 default:
876 return(KERN_FAILURE);
877 } /* switch tcpu_subtype */
878 case PROCESSOR_PM_SET_MMCR:
879 switch (tcpu_subtype)
880 {
881 case CPU_SUBTYPE_POWERPC_750:
882 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
883 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
884 return(KERN_FAILURE);
885 else
886 {
887 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
888 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
889 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
890 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
891 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
892 return(KERN_SUCCESS);
893 }
894 case CPU_SUBTYPE_POWERPC_7400:
895 case CPU_SUBTYPE_POWERPC_7450:
896 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
897 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
898 return(KERN_FAILURE);
899 else
900 {
901 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
902 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
903 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
904 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
905 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
906 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
907 return(KERN_SUCCESS);
908 }
909 default:
910 return(KERN_FAILURE);
911 } /* tcpu_subtype */
912 default:
913 return(KERN_FAILURE);
914 } /* switch cmd_op */
915 }
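/*
 * Illustrative (hypothetical) clear-counters request; the command layout is
 * assumed from the fields referenced above, with the exact declaration living
 * in the Mach processor_info headers:
 *
 *	struct processor_control_cmd cmd;
 *
 *	cmd.cmd_cpu_type    = CPU_TYPE_POWERPC;
 *	cmd.cmd_cpu_subtype = CPU_SUBTYPE_POWERPC_7450;
 *	cmd.cmd_op          = PROCESSOR_PM_CLR_PMC;
 *	(void)cpu_control(slot_num, (processor_info_t)&cmd, PROCESSOR_CONTROL_CMD_COUNT);
 */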
916
917
918 /*
919 * Routine: cpu_info_count
920  * Function:	Return the expected info array size for the given processor info flavor.
921 */
922 kern_return_t
923 cpu_info_count(
924 processor_flavor_t flavor,
925 unsigned int *count)
926 {
927 cpu_subtype_t tcpu_subtype;
928
929 /*
930 * For now, we just assume that all CPUs are of the same type
931 */
932 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
933 switch (flavor) {
934 case PROCESSOR_PM_REGS_INFO:
935 switch (tcpu_subtype) {
936 case CPU_SUBTYPE_POWERPC_750:
937
938 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
939 return(KERN_SUCCESS);
940
941 case CPU_SUBTYPE_POWERPC_7400:
942 case CPU_SUBTYPE_POWERPC_7450:
943
944 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
945 return(KERN_SUCCESS);
946
947 default:
948 *count = 0;
949 return(KERN_INVALID_ARGUMENT);
950 } /* switch tcpu_subtype */
951
952 case PROCESSOR_TEMPERATURE:
953 *count = PROCESSOR_TEMPERATURE_COUNT;
954 return (KERN_SUCCESS);
955
956 default:
957 *count = 0;
958 return(KERN_INVALID_ARGUMENT);
959
960 }
961 }
962
963
964 /*
965 * Routine: cpu_info
966  * Function:	Return processor information (performance monitor registers or temperature)
967  *		for the given slot.
967 */
968 kern_return_t
969 cpu_info(
970 processor_flavor_t flavor,
971 int slot_num,
972 processor_info_t info,
973 unsigned int *count)
974 {
975 cpu_subtype_t tcpu_subtype;
976 processor_pm_regs_t perf_regs;
977 boolean_t oldlevel;
978
979 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
980
981 switch (flavor) {
982 case PROCESSOR_PM_REGS_INFO:
983
984 perf_regs = (processor_pm_regs_t) info;
985
986 switch (tcpu_subtype) {
987 case CPU_SUBTYPE_POWERPC_750:
988
989 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
990 return(KERN_FAILURE);
991
992 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
993 PERFMON_MMCR0(perf_regs) = mfmmcr0();
994 PERFMON_PMC1(perf_regs) = mfpmc1();
995 PERFMON_PMC2(perf_regs) = mfpmc2();
996 PERFMON_MMCR1(perf_regs) = mfmmcr1();
997 PERFMON_PMC3(perf_regs) = mfpmc3();
998 PERFMON_PMC4(perf_regs) = mfpmc4();
999 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1000
1001 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
1002 return(KERN_SUCCESS);
1003
1004 case CPU_SUBTYPE_POWERPC_7400:
1005 case CPU_SUBTYPE_POWERPC_7450:
1006
1007 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1008 return(KERN_FAILURE);
1009
1010 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1011 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1012 PERFMON_PMC1(perf_regs) = mfpmc1();
1013 PERFMON_PMC2(perf_regs) = mfpmc2();
1014 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1015 PERFMON_PMC3(perf_regs) = mfpmc3();
1016 PERFMON_PMC4(perf_regs) = mfpmc4();
1017 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1018 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1019
1020 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1021 return(KERN_SUCCESS);
1022
1023 default:
1024 return(KERN_FAILURE);
1025 } /* switch tcpu_subtype */
1026
1027 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1028
1029 		*info = -1; /* Temperature reporting not supported */
1030 return(KERN_FAILURE);
1031
1032 default:
1033 return(KERN_INVALID_ARGUMENT);
1034
1035 } /* flavor */
1036 }
1037
1038
1039 /*
1040 * Routine: cpu_to_processor
1041 * Function:
1042 */
1043 processor_t
1044 cpu_to_processor(
1045 int cpu)
1046 {
1047 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1048 }
1049
1050
1051 /*
1052 * Routine: slot_type
1053 * Function:
1054 */
1055 cpu_type_t
1056 slot_type(
1057 int slot_num)
1058 {
1059 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1060 }
1061
1062
1063 /*
1064 * Routine: slot_subtype
1065 * Function:
1066 */
1067 cpu_subtype_t
1068 slot_subtype(
1069 int slot_num)
1070 {
1071 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1072 }
1073
1074
1075 /*
1076 * Routine: slot_threadtype
1077 * Function:
1078 */
1079 cpu_threadtype_t
1080 slot_threadtype(
1081 int slot_num)
1082 {
1083 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1084 }
1085
1086
1087 /*
1088 * Routine: cpu_type
1089 * Function:
1090 */
1091 cpu_type_t
1092 cpu_type(void)
1093 {
1094 return (getPerProc()->cpu_type);
1095 }
1096
1097
1098 /*
1099 * Routine: cpu_subtype
1100 * Function:
1101 */
1102 cpu_subtype_t
1103 cpu_subtype(void)
1104 {
1105 return (getPerProc()->cpu_subtype);
1106 }
1107
1108
1109 /*
1110 * Routine: cpu_threadtype
1111 * Function:
1112 */
1113 cpu_threadtype_t
1114 cpu_threadtype(void)
1115 {
1116 return (getPerProc()->cpu_threadtype);
1117 }
1118
1119 /*
1120 * Call a function on all running processors
1121 *
1122  * Note that the synch parameter is used to wait until all functions are complete.
1123 * It is not passed to the other processor and must be known by the called function.
1124 * The called function must do a thread_wakeup on the synch if it decrements the
1125 * synch count to 0.
1126 */
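/*
 * Illustrative target sketch (hypothetical names, not in the original source)
 * honoring the synch contract described above; hw_atomic_sub is assumed to
 * return the new counter value:
 *
 *	static uint32_t xcallSync;				// shared counter passed as synch
 *
 *	static void xcall_do_work(uint32_t parm)
 *	{
 *		// ... per-processor work ...
 *		if (hw_atomic_sub(&xcallSync, 1) == 0)		// last one finished
 *			thread_wakeup((event_t)&xcallSync);	// release the broadcaster
 *	}
 *
 *	xcallSync = 0;
 *	(void)cpu_broadcast(&xcallSync, xcall_do_work, 0);
 */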
1127
1128
1129 int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
1130
1131 int sigproc, cpu, ocpu;
1132
1133 cpu = cpu_number(); /* Who are we? */
1134 sigproc = 0; /* Clear called processor count */
1135
1136 if(real_ncpus > 1) { /* Are we just a uni? */
1137
1138 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1139
1140 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1141 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1142 hw_atomic_add(synch, 1); /* Tentatively bump synchronizer */
1143 sigproc++; /* Tentatively bump signal sent count */
1144 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1145 hw_atomic_sub(synch, 1); /* Other guy isn't really there, ignore it */
1146 sigproc--; /* and don't count it */
1147 }
1148 }
1149
1150 if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
1151 else thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1152 }
1153
1154 return sigproc; /* Return the number of guys actually signalled */
1155
1156 }