1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <mach/mach_types.h>
32 #include <mach/machine.h>
33 #include <mach/processor_info.h>
34
35 #include <kern/kalloc.h>
36 #include <kern/kern_types.h>
37 #include <kern/machine.h>
38 #include <kern/misc_protos.h>
39 #include <kern/thread.h>
40 #include <kern/sched_prim.h>
41 #include <kern/processor.h>
42 #include <kern/pms.h>
43
44 #include <vm/pmap.h>
45 #include <IOKit/IOHibernatePrivate.h>
46
47 #include <ppc/proc_reg.h>
48 #include <ppc/misc_protos.h>
49 #include <ppc/machine_routines.h>
50 #include <ppc/cpu_internal.h>
51 #include <ppc/exception.h>
52 #include <ppc/asm.h>
53 #include <ppc/hw_perfmon.h>
54 #include <pexpert/pexpert.h>
55 #include <kern/cpu_data.h>
56 #include <ppc/mappings.h>
57 #include <ppc/Diagnostics.h>
58 #include <ppc/trap.h>
59 #include <ppc/machine_cpu.h>
60 #include <ppc/rtclock.h>
61
62 decl_mutex_data(static,ppt_lock);
63
64 unsigned int real_ncpus = 1;
65 unsigned int max_ncpus = MAX_CPUS;
66
67 decl_simple_lock_data(static,rht_lock);
68
69 static unsigned int rht_state = 0;
70 #define RHT_WAIT 0x01
71 #define RHT_BUSY 0x02
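/*
 * rht_state, protected by rht_lock, serializes use of the shared reset handler
 * vector: cpu_start() marks it busy while pointing the vector at a processor
 * being started, and cpu_machine_init() (or a failed start) clears it and wakes
 * any waiters.
 */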
72
73 decl_simple_lock_data(static,SignalReadyLock);
74
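/*
 * Scratch area for the timebase-sync handshake between a starting processor and
 * the master (see cpu_sync_timebase and cpu_timebase_signal_handler below): the
 * requester passes this block's address with a SIGPcpureq/CPRQtimebase signal,
 * the master fills in abstime and sets avail, the requester loads the value and
 * sets ready, and the master sets done once its timebase is re-enabled.
 */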
75 struct SIGtimebase {
76 volatile boolean_t avail;
77 volatile boolean_t ready;
78 volatile boolean_t done;
79 uint64_t abstime;
80 };
81
82 perfCallback perfCpuSigHook = 0; /* Pointer to CHUD cpu signal hook routine */
83
84 extern int debugger_sync;
85
86 /*
87 * Forward declarations
88 */
89
90 void cpu_sync_timebase(
91 void);
92
93 void cpu_timebase_signal_handler(
94 struct per_proc_info *proc_info,
95 struct SIGtimebase *timebaseAddr);
96
97 /*
98 * Routine: cpu_bootstrap
99 * Function:	Initialize the locks used by this module; called once during early boot.
100 */
101 void
102 cpu_bootstrap(
103 void)
104 {
105 simple_lock_init(&rht_lock,0);
106 simple_lock_init(&SignalReadyLock,0);
107 mutex_init(&ppt_lock,0);
108 }
109
110
111 /*
112 * Routine: cpu_init
113 * Function:	Per-processor initialization: restore any saved timebase, reset the decrementer pop, and record the processor's type and subtype.
114 */
115 void
116 cpu_init(
117 void)
118 {
119 struct per_proc_info *proc_info;
120
121 proc_info = getPerProc();
122
123 /*
124 * Restore the TBR.
125 */
126 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
127 mttb(0);
128 mttbu(proc_info->save_tbu);
129 mttb(proc_info->save_tbl);
130 }
131
132 proc_info->rtcPop = EndOfAllTime; /* forget any existing decrementer setting */
133 etimer_resync_deadlines(); /* Now that the time base is sort of correct, request the next timer pop */
134
135 proc_info->cpu_type = CPU_TYPE_POWERPC;
136 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
137 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
138 proc_info->running = TRUE;
139
140 }
141
142 /*
143 * Routine: cpu_machine_init
144 * Function:	Complete processor bring-up: handle wake from hibernation, sync a secondary's timebase with the master, and mark the processor BootDone/SignalReady.
145 */
146 void
147 cpu_machine_init(
148 void)
149 {
150 struct per_proc_info *proc_info;
151 volatile struct per_proc_info *mproc_info;
152
153
154 proc_info = getPerProc();
155 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
156
157 if (proc_info != mproc_info) {
158 simple_lock(&rht_lock);
159 if (rht_state & RHT_WAIT)
160 thread_wakeup(&rht_state);
161 rht_state &= ~(RHT_BUSY|RHT_WAIT);
162 simple_unlock(&rht_lock);
163 }
164
165 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
166
167 if (proc_info->hibernate) {
168 uint32_t tbu, tbl;
169
170 do {
171 tbu = mftbu();
172 tbl = mftb();
173 } while (mftbu() != tbu);
174
175 proc_info->hibernate = 0;
176 hibernate_machine_init();
177
178 // hibernate_machine_init() could take minutes and we don't want timeouts
179 // to fire as soon as scheduling starts. Reset timebase so it appears
180 // no time has elapsed, as it would for regular sleep.
181 mttb(0);
182 mttbu(tbu);
183 mttb(tbl);
184 }
185
186 if (proc_info != mproc_info) {
187 while (!((mproc_info->cpu_flags) & SignalReady))
188 continue;
189 cpu_sync_timebase();
190 }
191
192 ml_init_interrupt();
193 if (proc_info != mproc_info)
194 simple_lock(&SignalReadyLock);
195 proc_info->cpu_flags |= BootDone|SignalReady;
196 if (proc_info != mproc_info) {
197 if (proc_info->ppXFlags & SignalReadyWait) {
198 hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
199 thread_wakeup(&proc_info->cpu_flags);
200 }
201 simple_unlock(&SignalReadyLock);
202 pmsPark(); /* Timers should be cool now, park the power management stepper */
203 }
204 }
205
206
207 /*
208 * Routine: cpu_per_proc_alloc
209 * Function:	Allocate and initialize a per_proc_info structure along with its interrupt and debugger stacks.
210 */
211 struct per_proc_info *
212 cpu_per_proc_alloc(
213 void)
214 {
215 struct per_proc_info *proc_info=0;
216 void *interrupt_stack=0;
217 void *debugger_stack=0;
218
219 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
220 return (struct per_proc_info *)NULL;
221 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
222 kfree(proc_info, sizeof(struct per_proc_info));
223 return (struct per_proc_info *)NULL;
224 }
225
226 if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
227 kfree(proc_info, sizeof(struct per_proc_info));
228 kfree(interrupt_stack, INTSTACK_SIZE);
229 return (struct per_proc_info *)NULL;
230 }
231
232 bzero((void *)proc_info, sizeof(struct per_proc_info));
233
234 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT; /* Set physical address of the second page */
235 proc_info->next_savearea = (uint64_t)save_get_init();
236 proc_info->pf = BootProcInfo.pf;
237 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
238 proc_info->intstack_top_ss = proc_info->istackptr;
239 proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
240 proc_info->debstack_top_ss = proc_info->debstackptr;
241
242 return proc_info;
243
244 }
245
246
247 /*
248 * Routine: cpu_per_proc_free
249 * Function:	Free a per_proc_info structure and its stacks; the master processor's is never freed.
250 */
251 void
252 cpu_per_proc_free(
253 struct per_proc_info *proc_info
254 )
255 {
256 if (proc_info->cpu_number == master_cpu)
257 return;
258 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
259 kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
260 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
261 }
262
263
264 /*
265 * Routine: cpu_per_proc_register
266 * Function:	Enter a per_proc_info structure into PerProcTable and assign it the next cpu number.
267 */
268 kern_return_t
269 cpu_per_proc_register(
270 struct per_proc_info *proc_info
271 )
272 {
273 int cpu;
274
275 mutex_lock(&ppt_lock);
276 if (real_ncpus >= max_ncpus) {
277 mutex_unlock(&ppt_lock);
278 return KERN_FAILURE;
279 }
280 cpu = real_ncpus;
281 proc_info->cpu_number = cpu;
282 PerProcTable[cpu].ppe_vaddr = proc_info;
283 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
284 eieio();
285 real_ncpus++;
286 mutex_unlock(&ppt_lock);
287 return KERN_SUCCESS;
288 }
289
290
291 /*
292 * Routine: cpu_start
293 * Function:	Start the given processor: set up its per-processor state, point the reset vector at it if needed, and wait for it to signal readiness.
294 */
295 kern_return_t
296 cpu_start(
297 int cpu)
298 {
299 struct per_proc_info *proc_info;
300 kern_return_t ret;
301 mapping_t *mp;
302
303 proc_info = PerProcTable[cpu].ppe_vaddr;
304
305 if (cpu == cpu_number()) {
306 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
307 ml_init_interrupt();
308 proc_info->cpu_flags |= BootDone|SignalReady;
309
310 return KERN_SUCCESS;
311 } else {
312 proc_info->cpu_flags &= BootDone;
313 proc_info->interrupts_enabled = 0;
314 proc_info->pending_ast = AST_NONE;
315 proc_info->istackptr = proc_info->intstack_top_ss;
316 proc_info->rtcPop = EndOfAllTime;
317 proc_info->FPU_owner = 0;
318 proc_info->VMX_owner = 0;
319 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
320 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
321 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
322 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
323 mp = (mapping_t *)(&proc_info->ppUMWmp);
324 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
325 mp->mpSpace = invalSpace;
326
327 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
328
329 simple_lock(&rht_lock);
330 while (rht_state & RHT_BUSY) {
331 rht_state |= RHT_WAIT;
332 thread_sleep_usimple_lock((event_t)&rht_state,
333 &rht_lock, THREAD_UNINT);
334 }
335 rht_state |= RHT_BUSY;
336 simple_unlock(&rht_lock);
337
338 ml_phys_write((vm_offset_t)&ResetHandler + 0,
339 RESET_HANDLER_START);
340 ml_phys_write((vm_offset_t)&ResetHandler + 4,
341 (vm_offset_t)_start_cpu);
342 ml_phys_write((vm_offset_t)&ResetHandler + 8,
343 (vm_offset_t)&PerProcTable[cpu]);
344 }
345 /*
346 * Note: we pass the current time to the other processor here. It will load it
347 * as early as possible so that it has a chance of being close to accurate.
348 * After the machine is up a while, we will officially resync the clocks so
349 * that all processors are the same. This is just to get close.
350 */
351
352 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
353
354 __asm__ volatile("sync"); /* Commit to storage */
355 __asm__ volatile("isync"); /* Context synchronize before starting the other processor */
356 ret = PE_cpu_start(proc_info->cpu_id,
357 proc_info->start_paddr, (vm_offset_t)proc_info);
358
359 if (ret != KERN_SUCCESS) {
360 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
361 simple_lock(&rht_lock);
362 if (rht_state & RHT_WAIT)
363 thread_wakeup(&rht_state);
364 rht_state &= ~(RHT_BUSY|RHT_WAIT);
365 simple_unlock(&rht_lock);
366 }
367 } else {
368 simple_lock(&SignalReadyLock);
369 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
370 hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
371 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
372 &SignalReadyLock, THREAD_UNINT);
373 }
374 simple_unlock(&SignalReadyLock);
375
376 }
377 return(ret);
378 }
379 }
380
381 /*
382 * Routine: cpu_exit_wait
383 * Function:	Spin until the given processor has entered its sleep state.
384 */
385 void
386 cpu_exit_wait(
387 int cpu)
388 {
389 struct per_proc_info *tpproc;
390
391 if ( cpu != master_cpu) {
392 tpproc = PerProcTable[cpu].ppe_vaddr;
393 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
394 }
395 }
396
397
398 /*
399 * Routine: cpu_doshutdown
400 * Function:	Shut down the current processor by taking it offline.
401 */
402 void
403 cpu_doshutdown(
404 void)
405 {
406 enable_preemption();
407 processor_offline(current_processor());
408 }
409
410
411 /*
412 * Routine: cpu_sleep
413 * Function:	Prepare the current processor for sleep: save FPU/VMX state, wait for the other processors (on the master), then save the timebase and quiesce.
414 */
415 void
416 cpu_sleep(
417 void)
418 {
419 struct per_proc_info *proc_info;
420 unsigned int i;
421 unsigned int wait_ncpus_sleep, ncpus_sleep;
422 facility_context *fowner;
423
424 proc_info = getPerProc();
425
426 proc_info->running = FALSE;
427
428 fowner = proc_info->FPU_owner; /* Cache this */
429 if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
430 proc_info->FPU_owner = 0; /* Set no fpu owner now */
431
432 fowner = proc_info->VMX_owner; /* Cache this */
433 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
434 proc_info->VMX_owner = 0; /* Set no vector owner now */
435
436 if (proc_info->cpu_number == master_cpu) {
437 proc_info->cpu_flags &= BootDone;
438 proc_info->interrupts_enabled = 0;
439 proc_info->pending_ast = AST_NONE;
440
441 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
442 ml_phys_write((vm_offset_t)&ResetHandler + 0,
443 RESET_HANDLER_START);
444 ml_phys_write((vm_offset_t)&ResetHandler + 4,
445 (vm_offset_t)_start_cpu);
446 ml_phys_write((vm_offset_t)&ResetHandler + 8,
447 (vm_offset_t)&PerProcTable[master_cpu]);
448
449 __asm__ volatile("sync");
450 __asm__ volatile("isync");
451 }
452
453 wait_ncpus_sleep = real_ncpus-1;
454 ncpus_sleep = 0;
455 while (wait_ncpus_sleep != ncpus_sleep) {
456 ncpus_sleep = 0;
457 for(i=1; i < real_ncpus ; i++) {
458 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
459 ncpus_sleep++;
460 }
461 }
462
463 }
464
465 /*
466 * Save the TBR before stopping.
467 */
468 do {
469 proc_info->save_tbu = mftbu();
470 proc_info->save_tbl = mftb();
471 } while (mftbu() != proc_info->save_tbu);
472
473 PE_cpu_machine_quiesce(proc_info->cpu_id);
474 }
475
476
477 /*
478 * Routine: cpu_signal
479 * Function:
480 * Here is where we send a message to another processor. Two of the orders are
481 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
482 * currently disabled). SIGPdebug is used to enter the debugger.
483 *
484 * We set up the SIGP function to indicate that this is a simple message and set the
485 * order code (MPsigpParm0) to SIGPast, SIGPdebug, or whichever order was requested.
486 * After finding the per_processor block for the target, we lock the message block.
487 * Then we set the parameter(s). Next we change the lock (also called "busy") to
488 * "passing" and finally signal the other processor. Note that we only wait about
489 * half a millisecond to get the message lock. If we time out, we return failure
490 * to our caller. It is their responsibility to recover.
491 */
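/*
 * For example (illustrative only; the variable name is hypothetical), a caller
 * that wants another processor to check for pending ASTs would send:
 *
 *	(void)cpu_signal(target_cpu, SIGPast, 0, 0);
 *
 * SIGPast carries no parameters, so p1 and p2 are ignored by the handler.
 */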
492 kern_return_t
493 cpu_signal(
494 int target,
495 int signal,
496 unsigned int p1,
497 unsigned int p2)
498 {
499
500 unsigned int holdStat;
501 struct per_proc_info *tpproc, *mpproc;
502 int busybitset=0;
503
504 #if DEBUG
505 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
506 #endif
507
508 mpproc = getPerProc(); /* Point to our block */
509 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
510 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
511
512 if(!tpproc->running) return KERN_FAILURE;
513
514 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
515
516 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
517
518 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
519 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
520 return KERN_SUCCESS;
521 }
522
523 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
524 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
525 return KERN_SUCCESS; /* Don't bother to send this one... */
526 }
527
528 if (tpproc->MPsigpParm0 == SIGPwake) {
529 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
530 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
531 busybitset = 1;
532 mpproc->hwCtr.numSIGPmwake++;
533 }
534 }
535 }
536
537 if((busybitset == 0) &&
538 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
539 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
540 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
541 return KERN_FAILURE; /* Timed out, take your ball and go home... */
542 }
543
544 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
545 tpproc->MPsigpParm0 = signal; /* Set message order */
546 tpproc->MPsigpParm1 = p1; /* Set additional parm */
547 tpproc->MPsigpParm2 = p2; /* Set additional parm */
548
549 __asm__ volatile("sync"); /* Make sure it's all there */
550
551 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
552 __asm__ volatile("eieio"); /* I'm a paranoid freak */
553
554 if (busybitset == 0)
555 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
556
557 return KERN_SUCCESS; /* All is goodness and rainbows... */
558 }
559
560
561 /*
562 * Routine: cpu_signal_handler
563 * Function:
564 * Here is where we implement the receiver of the signaling protocol.
565 * We wait for the signal status area to be passed to us. Then we snarf
566 * up the status, the sender, and the 3 potential parms. Next we release
567 * the lock and dispatch on the function code and message order.
568 */
569 void
570 cpu_signal_handler(
571 void)
572 {
573
574 unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
575 unsigned int *parmAddr;
576 struct per_proc_info *proc_info;
577 int cpu;
578 broadcastFunc xfunc;
579 cpu = cpu_number(); /* Get the CPU number */
580
581 proc_info = getPerProc();
582
583 /*
584 * Since we've been signaled, wait about 31 ms for the signal lock to pass
585 */
586 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
587 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
588 panic("cpu_signal_handler: Lock pass timed out\n");
589 }
590
591 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
592 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
593 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
594 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
595
596 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
597
598 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
599
600 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
601
602 case MPsigpIdle: /* Was function cancelled? */
603 return; /* Yup... */
604
605 case MPsigpSigp: /* Signal Processor message? */
606
607 switch (holdParm0) { /* Decode SIGP message order */
608
609 case SIGPast: /* Should we do an AST? */
610 proc_info->hwCtr.numSIGPast++; /* Count this one */
611 #if 0
612 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
613 #endif
614 ast_check((processor_t)proc_info->processor);
615 return; /* All done... */
616
617 case SIGPcpureq: /* CPU specific function? */
618
619 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
620 switch (holdParm1) { /* Select specific function */
621
622 case CPRQtimebase:
623
624 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
625 return;
626
627 case CPRQsegload:
628 return;
629
630 case CPRQchud:
631 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
632 if(perfCpuSigHook) {
633 struct savearea *ssp = current_thread()->machine.pcb;
634 if(ssp) {
635 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
636 }
637 }
638 parmAddr[1] = 0;
639 parmAddr[0] = 0; /* Show we're done */
640 return;
641
642 case CPRQscom:
643 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
644 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
645 }
646 else { /* No, reading... */
647 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
648 }
649 return;
650
651 case CPRQsps:
652 {
653 ml_set_processor_speed_slave(holdParm2);
654 return;
655 }
656 default:
657 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
658 return;
659 }
660
661
662 case SIGPdebug: /* Enter the debugger? */
663
664 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
665 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
666 hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
667 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
668 return; /* All done now... */
669
670 case SIGPwake: /* Wake up CPU */
671 proc_info->hwCtr.numSIGPwake++; /* Count this one */
672 return; /* No need to do anything, the interrupt does it all... */
673
674 case SIGPcall: /* Call function on CPU */
675 proc_info->hwCtr.numSIGPcall++; /* Count this one */
676 xfunc = (broadcastFunc)holdParm1; /* Convert the parameter word back into a function pointer */
677 xfunc(holdParm2); /* Call the passed function */
678 return; /* Done... */
679
680 default:
681 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
682 return;
683
684 }
685
686 default:
687 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
688 return;
689
690 }
691 panic("cpu_signal_handler: we should never get here\n");
692 }
693
694
695 /*
696 * Routine: cpu_sync_timebase
697 * Function:	Slave side of the timebase synchronization handshake: request the master's timebase and load it locally.
698 */
699 void
700 cpu_sync_timebase(
701 void)
702 {
703 natural_t tbu, tbl;
704 boolean_t intr;
705 struct SIGtimebase syncClkSpot;
706
707 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
708
709 syncClkSpot.avail = FALSE;
710 syncClkSpot.ready = FALSE;
711 syncClkSpot.done = FALSE;
712
713 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
714 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
715 continue;
716
717 while (syncClkSpot.avail == FALSE)
718 continue;
719
720 isync();
721
722 /*
723 * We do the following to keep the compiler from generating extra stuff
724 * in tb set part
725 */
726 tbu = syncClkSpot.abstime >> 32;
727 tbl = (uint32_t)syncClkSpot.abstime;
728
729 mttb(0);
730 mttbu(tbu);
731 mttb(tbl);
732
733 syncClkSpot.ready = TRUE;
734
735 while (syncClkSpot.done == FALSE)
736 continue;
737
738 etimer_resync_deadlines(); /* Start the timer */
739 (void)ml_set_interrupts_enabled(intr);
740 }
741
742
743 /*
744 * Routine: cpu_timebase_signal_handler
745 * Function:	Master side of the timebase synchronization handshake: publish the current timebase for a requesting processor.
746 */
747 void
748 cpu_timebase_signal_handler(
749 struct per_proc_info *proc_info,
750 struct SIGtimebase *timebaseAddr)
751 {
752 unsigned int tbu, tbu2, tbl;
753
754 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
755 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
756
757 timebaseAddr->abstime = 0; /* Touch to force into cache */
758 sync();
759
760 do {
761 asm volatile(" mftbu %0" : "=r" (tbu));
762 asm volatile(" mftb %0" : "=r" (tbl));
763 asm volatile(" mftbu %0" : "=r" (tbu2));
764 } while (tbu != tbu2);
765
766 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
767 sync(); /* Force order */
768
769 timebaseAddr->avail = TRUE;
770
771 while (timebaseAddr->ready == FALSE)
772 continue;
773
774 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
775 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
776
777 timebaseAddr->done = TRUE;
778 }
779
780
781 /*
782 * Routine: cpu_control
783 * Function:	Handle processor_control() requests, which program the performance monitor registers.
784 */
785 kern_return_t
786 cpu_control(
787 int slot_num,
788 processor_info_t info,
789 unsigned int count)
790 {
791 struct per_proc_info *proc_info;
792 cpu_type_t tcpu_type;
793 cpu_subtype_t tcpu_subtype;
794 processor_pm_regs_t perf_regs;
795 processor_control_cmd_t cmd;
796 boolean_t oldlevel;
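/* Only the MMCR bits covered by these masks may be set through processor_control() */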
797 #define MMCR0_SUPPORT_MASK 0xf83f1fff
798 #define MMCR1_SUPPORT_MASK 0xffc00000
799 #define MMCR2_SUPPORT_MASK 0x80000000
800
801 proc_info = PerProcTable[slot_num].ppe_vaddr;
802 tcpu_type = proc_info->cpu_type;
803 tcpu_subtype = proc_info->cpu_subtype;
804 cmd = (processor_control_cmd_t) info;
805
806 if (count < PROCESSOR_CONTROL_CMD_COUNT)
807 return(KERN_FAILURE);
808
809 if ( tcpu_type != cmd->cmd_cpu_type ||
810 tcpu_subtype != cmd->cmd_cpu_subtype)
811 return(KERN_FAILURE);
812
813 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
814 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
815 }
816
817 switch (cmd->cmd_op)
818 {
819 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
820 switch (tcpu_subtype)
821 {
822 case CPU_SUBTYPE_POWERPC_750:
823 case CPU_SUBTYPE_POWERPC_7400:
824 case CPU_SUBTYPE_POWERPC_7450:
825 {
826 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
827 mtpmc1(0x0);
828 mtpmc2(0x0);
829 mtpmc3(0x0);
830 mtpmc4(0x0);
831 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
832 return(KERN_SUCCESS);
833 }
834 default:
835 return(KERN_FAILURE);
836 } /* tcpu_subtype */
837 case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
838 switch (tcpu_subtype)
839 {
840 case CPU_SUBTYPE_POWERPC_750:
841 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
842 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
843 return(KERN_FAILURE);
844 else
845 {
846 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
847 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
848 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
849 mtpmc1(PERFMON_PMC1(perf_regs));
850 mtpmc2(PERFMON_PMC2(perf_regs));
851 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
852 mtpmc3(PERFMON_PMC3(perf_regs));
853 mtpmc4(PERFMON_PMC4(perf_regs));
854 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
855 return(KERN_SUCCESS);
856 }
857 case CPU_SUBTYPE_POWERPC_7400:
858 case CPU_SUBTYPE_POWERPC_7450:
859 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
860 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
861 return(KERN_FAILURE);
862 else
863 {
864 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
865 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
866 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
867 mtpmc1(PERFMON_PMC1(perf_regs));
868 mtpmc2(PERFMON_PMC2(perf_regs));
869 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
870 mtpmc3(PERFMON_PMC3(perf_regs));
871 mtpmc4(PERFMON_PMC4(perf_regs));
872 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
873 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
874 return(KERN_SUCCESS);
875 }
876 default:
877 return(KERN_FAILURE);
878 } /* switch tcpu_subtype */
879 case PROCESSOR_PM_SET_MMCR:
880 switch (tcpu_subtype)
881 {
882 case CPU_SUBTYPE_POWERPC_750:
883 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
884 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
885 return(KERN_FAILURE);
886 else
887 {
888 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
889 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
890 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
891 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
892 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
893 return(KERN_SUCCESS);
894 }
895 case CPU_SUBTYPE_POWERPC_7400:
896 case CPU_SUBTYPE_POWERPC_7450:
897 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
898 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
899 return(KERN_FAILURE);
900 else
901 {
902 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
903 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
904 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
905 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
906 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
907 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
908 return(KERN_SUCCESS);
909 }
910 default:
911 return(KERN_FAILURE);
912 } /* tcpu_subtype */
913 default:
914 return(KERN_FAILURE);
915 } /* switch cmd_op */
916 }
917
918
919 /*
920 * Routine: cpu_info_count
921 * Function:	Return the processor_info count for the given flavor.
922 */
923 kern_return_t
924 cpu_info_count(
925 processor_flavor_t flavor,
926 unsigned int *count)
927 {
928 cpu_subtype_t tcpu_subtype;
929
930 /*
931 * For now, we just assume that all CPUs are of the same type
932 */
933 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
934 switch (flavor) {
935 case PROCESSOR_PM_REGS_INFO:
936 switch (tcpu_subtype) {
937 case CPU_SUBTYPE_POWERPC_750:
938
939 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
940 return(KERN_SUCCESS);
941
942 case CPU_SUBTYPE_POWERPC_7400:
943 case CPU_SUBTYPE_POWERPC_7450:
944
945 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
946 return(KERN_SUCCESS);
947
948 default:
949 *count = 0;
950 return(KERN_INVALID_ARGUMENT);
951 } /* switch tcpu_subtype */
952
953 case PROCESSOR_TEMPERATURE:
954 *count = PROCESSOR_TEMPERATURE_COUNT;
955 return (KERN_SUCCESS);
956
957 default:
958 *count = 0;
959 return(KERN_INVALID_ARGUMENT);
960
961 }
962 }
963
964
965 /*
966 * Routine: cpu_info
967 * Function:	Return processor information (performance monitor registers, temperature) for the given flavor.
968 */
969 kern_return_t
970 cpu_info(
971 processor_flavor_t flavor,
972 int slot_num,
973 processor_info_t info,
974 unsigned int *count)
975 {
976 cpu_subtype_t tcpu_subtype;
977 processor_pm_regs_t perf_regs;
978 boolean_t oldlevel;
979
980 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
981
982 switch (flavor) {
983 case PROCESSOR_PM_REGS_INFO:
984
985 perf_regs = (processor_pm_regs_t) info;
986
987 switch (tcpu_subtype) {
988 case CPU_SUBTYPE_POWERPC_750:
989
990 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
991 return(KERN_FAILURE);
992
993 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
994 PERFMON_MMCR0(perf_regs) = mfmmcr0();
995 PERFMON_PMC1(perf_regs) = mfpmc1();
996 PERFMON_PMC2(perf_regs) = mfpmc2();
997 PERFMON_MMCR1(perf_regs) = mfmmcr1();
998 PERFMON_PMC3(perf_regs) = mfpmc3();
999 PERFMON_PMC4(perf_regs) = mfpmc4();
1000 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1001
1002 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
1003 return(KERN_SUCCESS);
1004
1005 case CPU_SUBTYPE_POWERPC_7400:
1006 case CPU_SUBTYPE_POWERPC_7450:
1007
1008 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1009 return(KERN_FAILURE);
1010
1011 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1012 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1013 PERFMON_PMC1(perf_regs) = mfpmc1();
1014 PERFMON_PMC2(perf_regs) = mfpmc2();
1015 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1016 PERFMON_PMC3(perf_regs) = mfpmc3();
1017 PERFMON_PMC4(perf_regs) = mfpmc4();
1018 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1019 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1020
1021 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1022 return(KERN_SUCCESS);
1023
1024 default:
1025 return(KERN_FAILURE);
1026 } /* switch tcpu_subtype */
1027
1028 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1029
1030 *info = -1; /* Temperature reporting is not supported */
1031 return(KERN_FAILURE);
1032
1033 default:
1034 return(KERN_INVALID_ARGUMENT);
1035
1036 } /* flavor */
1037 }
1038
1039
1040 /*
1041 * Routine: cpu_to_processor
1042 * Function:	Return the processor_t for the given cpu number.
1043 */
1044 processor_t
1045 cpu_to_processor(
1046 int cpu)
1047 {
1048 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1049 }
1050
1051
1052 /*
1053 * Routine: slot_type
1054 * Function:	Return the cpu_type of the processor in the given slot.
1055 */
1056 cpu_type_t
1057 slot_type(
1058 int slot_num)
1059 {
1060 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1061 }
1062
1063
1064 /*
1065 * Routine: slot_subtype
1066 * Function:	Return the cpu_subtype of the processor in the given slot.
1067 */
1068 cpu_subtype_t
1069 slot_subtype(
1070 int slot_num)
1071 {
1072 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1073 }
1074
1075
1076 /*
1077 * Routine: slot_threadtype
1078 * Function:	Return the cpu_threadtype of the processor in the given slot.
1079 */
1080 cpu_threadtype_t
1081 slot_threadtype(
1082 int slot_num)
1083 {
1084 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1085 }
1086
1087
1088 /*
1089 * Routine: cpu_type
1090 * Function:	Return the cpu_type of the current processor.
1091 */
1092 cpu_type_t
1093 cpu_type(void)
1094 {
1095 return (getPerProc()->cpu_type);
1096 }
1097
1098
1099 /*
1100 * Routine: cpu_subtype
1101 * Function:	Return the cpu_subtype of the current processor.
1102 */
1103 cpu_subtype_t
1104 cpu_subtype(void)
1105 {
1106 return (getPerProc()->cpu_subtype);
1107 }
1108
1109
1110 /*
1111 * Routine: cpu_threadtype
1112 * Function:	Return the cpu_threadtype of the current processor.
1113 */
1114 cpu_threadtype_t
1115 cpu_threadtype(void)
1116 {
1117 return (getPerProc()->cpu_threadtype);
1118 }
1119
1120 /*
1121 * Call a function on all running processors
1122 *
1123 * Note that the synch parameter is used to wait until all functions are complete.
1124 * It is not passed to the other processor and must be known by the called function.
1125 * The called function must do a thread_wakeup on the synch if it decrements the
1126 * synch count to 0.
1127 */
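/*
 * A minimal usage sketch (hypothetical; the names below are illustrative). The
 * caller and the broadcast function share the synch word, for example through a
 * static, since parm is not interpreted by cpu_broadcast itself:
 *
 *	static uint32_t example_synch;
 *
 *	static void example_rendezvous(uint32_t parm)
 *	{
 *		// ... per-processor work using parm ...
 *		if (hw_atomic_sub(&example_synch, 1) == 0)
 *			thread_wakeup((event_t)&example_synch);	// last one out wakes the caller
 *	}
 *
 *	// Caller, with example_synch starting at 0:
 *	(void)cpu_broadcast(&example_synch, example_rendezvous, parm);
 */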
1128
1129
1130 int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
1131
1132 int sigproc, cpu, ocpu;
1133
1134 cpu = cpu_number(); /* Who are we? */
1135 sigproc = 0; /* Clear called processor count */
1136
1137 if(real_ncpus > 1) { /* Are we just a uni? */
1138
1139 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1140
1141 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1142 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1143 hw_atomic_add(synch, 1); /* Tentatively bump synchronizer */
1144 sigproc++; /* Tentatively bump signal sent count */
1145 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1146 hw_atomic_sub(synch, 1); /* Other guy isn't really there, ignore it */
1147 sigproc--; /* and don't count it */
1148 }
1149 }
1150
1151 if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
1152 else thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1153 }
1154
1155 return sigproc; /* Return the number of guys actually signalled */
1156
1157 }