osfmk/ppc/cpu.c (apple/xnu, xnu-1228.3.13)
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/machine.h>
31 #include <mach/processor_info.h>
32
33 #include <kern/kalloc.h>
34 #include <kern/kern_types.h>
35 #include <kern/machine.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread.h>
38 #include <kern/sched_prim.h>
39 #include <kern/processor.h>
40 #include <kern/pms.h>
41
42 #include <vm/pmap.h>
43 #include <IOKit/IOHibernatePrivate.h>
44
45 #include <ppc/proc_reg.h>
46 #include <ppc/misc_protos.h>
47 #include <ppc/fpu_protos.h>
48 #include <ppc/machine_routines.h>
49 #include <ppc/cpu_internal.h>
50 #include <ppc/exception.h>
51 #include <ppc/asm.h>
52 #include <ppc/hw_perfmon.h>
53 #include <pexpert/pexpert.h>
54 #include <kern/cpu_data.h>
55 #include <ppc/mappings.h>
56 #include <ppc/Diagnostics.h>
57 #include <ppc/trap.h>
58 #include <ppc/machine_cpu.h>
59 #include <ppc/rtclock.h>
60
61 decl_mutex_data(static,ppt_lock);
62
63 unsigned int real_ncpus = 1;
64 unsigned int max_ncpus = MAX_CPUS;
65
66 decl_simple_lock_data(static,rht_lock);
67
68 static unsigned int rht_state = 0;
69 #define RHT_WAIT 0x01
70 #define RHT_BUSY 0x02
71
72 decl_simple_lock_data(static,SignalReadyLock);
73
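/*
 * Handshake area used when a processor synchronizes its timebase with the
 * master (see cpu_sync_timebase and cpu_timebase_signal_handler below):
 * the processor servicing the request publishes its timebase in abstime
 * and sets avail; the requesting processor loads it and sets ready; the
 * servicing processor then sets done to let the requester continue.
 */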
74 struct SIGtimebase {
75 volatile boolean_t avail;
76 volatile boolean_t ready;
77 volatile boolean_t done;
78 uint64_t abstime;
79 };
80
81 perfCallback perfCpuSigHook; /* Pointer to CHUD cpu signal hook routine */
82
83 extern uint32_t debugger_sync;
84
85 /*
86 * Forward declarations
87 */
88
89 void cpu_sync_timebase(
90 void);
91
92 void cpu_timebase_signal_handler(
93 struct per_proc_info *proc_info,
94 struct SIGtimebase *timebaseAddr);
95
96 /*
97 * Routine: cpu_bootstrap
98 * Function:
99 */
100 void
101 cpu_bootstrap(
102 void)
103 {
104 simple_lock_init(&rht_lock,0);
105 simple_lock_init(&SignalReadyLock,0);
106 mutex_init(&ppt_lock,0);
107 }
108
109
110 /*
111 * Routine: cpu_init
112 * Function:
113 */
114 void
115 cpu_init(
116 void)
117 {
118 struct per_proc_info *proc_info;
119
120 proc_info = getPerProc();
121
122 /*
123 * Restore the TBR.
124 */
125 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
126 mttb(0);
127 mttbu(proc_info->save_tbu);
128 mttb(proc_info->save_tbl);
129 }
130
131 proc_info->rtcPop = EndOfAllTime; /* forget any existing decrementer setting */
132 etimer_resync_deadlines(); /* Now that the time base is sort of correct, request the next timer pop */
133
134 proc_info->cpu_type = CPU_TYPE_POWERPC;
135 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
136 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
137 proc_info->running = TRUE;
138
139 }
140
141 /*
142 * Routine: cpu_machine_init
143 * Function:
144 */
145 void
146 cpu_machine_init(
147 void)
148 {
149 struct per_proc_info *proc_info;
150 volatile struct per_proc_info *mproc_info;
151
152
153 proc_info = getPerProc();
154 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
155
156 if (proc_info != mproc_info) {
157 simple_lock(&rht_lock);
158 if (rht_state & RHT_WAIT)
159 thread_wakeup(&rht_state);
160 rht_state &= ~(RHT_BUSY|RHT_WAIT);
161 simple_unlock(&rht_lock);
162 }
163
164 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
165
166 if (proc_info->hibernate) {
167 uint32_t tbu, tbl;
168
169 do {
170 tbu = mftbu();
171 tbl = mftb();
172 } while (mftbu() != tbu);
173
174 proc_info->hibernate = 0;
175 hibernate_machine_init();
176
177 // hibernate_machine_init() could take minutes and we don't want timeouts
178 // to fire as soon as scheduling starts. Reset timebase so it appears
179 // no time has elapsed, as it would for regular sleep.
180 mttb(0);
181 mttbu(tbu);
182 mttb(tbl);
183 }
184
185 if (proc_info != mproc_info) {
186 while (!((mproc_info->cpu_flags) & SignalReady))
187 continue;
188 cpu_sync_timebase();
189 }
190
191 ml_init_interrupt();
192 if (proc_info != mproc_info)
193 simple_lock(&SignalReadyLock);
194 proc_info->cpu_flags |= BootDone|SignalReady;
195 if (proc_info != mproc_info) {
196 if (proc_info->ppXFlags & SignalReadyWait) {
197 (void)hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
198 thread_wakeup(&proc_info->cpu_flags);
199 }
200 simple_unlock(&SignalReadyLock);
201 pmsPark(); /* Timers should be cool now, park the power management stepper */
202 }
203 }
204
205
206 /*
207 * Routine: cpu_per_proc_alloc
208 * Function:
209 */
210 struct per_proc_info *
211 cpu_per_proc_alloc(
212 void)
213 {
214 struct per_proc_info *proc_info = NULL;
215 void *interrupt_stack = NULL;
216 void *debugger_stack = NULL;
217
218 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
219 return (struct per_proc_info *)NULL;
220 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
221 kfree(proc_info, sizeof(struct per_proc_info));
222 return (struct per_proc_info *)NULL;
223 }
224
225 if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
226 kfree(proc_info, sizeof(struct per_proc_info));
227 kfree(interrupt_stack, INTSTACK_SIZE);
228 return (struct per_proc_info *)NULL;
229 }
230
231 bzero((void *)proc_info, sizeof(struct per_proc_info));
232
233 /* Set physical address of the second page */
234 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
235 ((addr64_t)(unsigned int)proc_info) + 0x1000)
236 << PAGE_SHIFT;
237 proc_info->next_savearea = (uint64_t)save_get_init();
238 proc_info->pf = BootProcInfo.pf;
239 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
240 proc_info->intstack_top_ss = proc_info->istackptr;
241 proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
242 proc_info->debstack_top_ss = proc_info->debstackptr;
243
244 return proc_info;
245
246 }
247
248
249 /*
250 * Routine: cpu_per_proc_free
251 * Function:
252 */
253 void
254 cpu_per_proc_free(
255 struct per_proc_info *proc_info
256 )
257 {
258 if (proc_info->cpu_number == master_cpu)
259 return;
260 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
261 kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
262 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
263 }
264
265
266 /*
267 * Routine: cpu_per_proc_register
268 * Function:
269 */
270 kern_return_t
271 cpu_per_proc_register(
272 struct per_proc_info *proc_info
273 )
274 {
275 int cpu;
276
277 mutex_lock(&ppt_lock);
278 if (real_ncpus >= max_ncpus) {
279 mutex_unlock(&ppt_lock);
280 return KERN_FAILURE;
281 }
282 cpu = real_ncpus;
283 proc_info->cpu_number = cpu;
284 PerProcTable[cpu].ppe_vaddr = proc_info;
285 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;
286 eieio();
287 real_ncpus++;
288 mutex_unlock(&ppt_lock);
289 return KERN_SUCCESS;
290 }
291
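/*
 * For illustration only: a minimal sketch of how platform bring-up code
 * might chain the allocation, registration, and start routines above.
 * The real caller (the machine-dependent processor registration path)
 * does considerably more setup and error handling than shown here.
 */
#if 0	/* illustrative sketch, not compiled */
static kern_return_t
bring_up_cpu_example(void)
{
	struct per_proc_info *proc_info;

	if ((proc_info = cpu_per_proc_alloc()) == NULL)		/* per_proc plus its stacks */
		return KERN_RESOURCE_SHORTAGE;

	if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)	/* claim a slot in PerProcTable */
		return KERN_FAILURE;				/* table full; cleanup elided */

	return cpu_start(proc_info->cpu_number);		/* kick the new processor */
}
#endif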
292
293 /*
294 * Routine: cpu_start
295 * Function:
296 */
297 kern_return_t
298 cpu_start(
299 int cpu)
300 {
301 struct per_proc_info *proc_info;
302 kern_return_t ret;
303 mapping_t *mp;
304
305 proc_info = PerProcTable[cpu].ppe_vaddr;
306
307 if (cpu == cpu_number()) {
308 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
309 ml_init_interrupt();
310 proc_info->cpu_flags |= BootDone|SignalReady;
311
312 return KERN_SUCCESS;
313 } else {
314 proc_info->cpu_flags &= BootDone;
315 proc_info->interrupts_enabled = 0;
316 proc_info->pending_ast = AST_NONE;
317 proc_info->istackptr = proc_info->intstack_top_ss;
318 proc_info->rtcPop = EndOfAllTime;
319 proc_info->FPU_owner = NULL;
320 proc_info->VMX_owner = NULL;
321 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
322 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
323 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
324 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
325 mp = (mapping_t *)(&proc_info->ppUMWmp);
326 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
327 mp->mpSpace = invalSpace;
328
329 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
330
331 simple_lock(&rht_lock);
332 while (rht_state & RHT_BUSY) {
333 rht_state |= RHT_WAIT;
334 thread_sleep_usimple_lock((event_t)&rht_state,
335 &rht_lock, THREAD_UNINT);
336 }
337 rht_state |= RHT_BUSY;
338 simple_unlock(&rht_lock);
339
340 ml_phys_write((vm_offset_t)&ResetHandler + 0,
341 RESET_HANDLER_START);
342 ml_phys_write((vm_offset_t)&ResetHandler + 4,
343 (vm_offset_t)_start_cpu);
344 ml_phys_write((vm_offset_t)&ResetHandler + 8,
345 (vm_offset_t)&PerProcTable[cpu]);
346 }
347 /*
348 * Note: we pass the current time to the other processor here. It will load it
349 * as early as possible so that there is a chance that it is close to accurate.
350 * After the machine has been up a while, we will officially resync the clocks so
351 * that all processors are the same. This is just to get close.
352 */
353
354 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
355
356 __asm__ volatile("sync"); /* Commit to storage */
357 __asm__ volatile("isync"); /* Ensure context synchronization before starting the processor */
358 ret = PE_cpu_start(proc_info->cpu_id,
359 proc_info->start_paddr, (vm_offset_t)proc_info);
360
361 if (ret != KERN_SUCCESS) {
362 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
363 simple_lock(&rht_lock);
364 if (rht_state & RHT_WAIT)
365 thread_wakeup(&rht_state);
366 rht_state &= ~(RHT_BUSY|RHT_WAIT);
367 simple_unlock(&rht_lock);
368 }
369 } else {
370 simple_lock(&SignalReadyLock);
371 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
372 (void)hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
373 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
374 &SignalReadyLock, THREAD_UNINT);
375 }
376 simple_unlock(&SignalReadyLock);
377
378 }
379 return(ret);
380 }
381 }
382
383 /*
384 * Routine: cpu_exit_wait
385 * Function:
386 */
387 void
388 cpu_exit_wait(
389 int cpu)
390 {
391 struct per_proc_info *tpproc;
392
393 if ( cpu != master_cpu) {
394 tpproc = PerProcTable[cpu].ppe_vaddr;
395 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
396 }
397 }
398
399
400 /*
401 * Routine: cpu_doshutdown
402 * Function:
403 */
404 void
405 cpu_doshutdown(
406 void)
407 {
408 enable_preemption();
409 processor_offline(current_processor());
410 }
411
412
413 /*
414 * Routine: cpu_sleep
415 * Function:
416 */
417 void
418 cpu_sleep(
419 void)
420 {
421 struct per_proc_info *proc_info;
422 unsigned int i;
423 unsigned int wait_ncpus_sleep, ncpus_sleep;
424 facility_context *fowner;
425
426 proc_info = getPerProc();
427
428 proc_info->running = FALSE;
429
430 fowner = proc_info->FPU_owner; /* Cache this */
431 if(fowner) /* If anyone owns FPU, save it */
432 fpu_save(fowner);
433 proc_info->FPU_owner = NULL; /* Set no fpu owner now */
434
435 fowner = proc_info->VMX_owner; /* Cache this */
436 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
437 proc_info->VMX_owner = NULL; /* Set no vector owner now */
438
439 if (proc_info->cpu_number == master_cpu) {
440 proc_info->cpu_flags &= BootDone;
441 proc_info->interrupts_enabled = 0;
442 proc_info->pending_ast = AST_NONE;
443
444 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
445 ml_phys_write((vm_offset_t)&ResetHandler + 0,
446 RESET_HANDLER_START);
447 ml_phys_write((vm_offset_t)&ResetHandler + 4,
448 (vm_offset_t)_start_cpu);
449 ml_phys_write((vm_offset_t)&ResetHandler + 8,
450 (vm_offset_t)&PerProcTable[master_cpu]);
451
452 __asm__ volatile("sync");
453 __asm__ volatile("isync");
454 }
455
456 wait_ncpus_sleep = real_ncpus-1;
457 ncpus_sleep = 0;
458 while (wait_ncpus_sleep != ncpus_sleep) {
459 ncpus_sleep = 0;
460 for(i=1; i < real_ncpus ; i++) {
461 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
462 ncpus_sleep++;
463 }
464 }
465
466 }
467
468 /*
469 * Save the TBR before stopping.
470 */
471 do {
472 proc_info->save_tbu = mftbu();
473 proc_info->save_tbl = mftb();
474 } while (mftbu() != proc_info->save_tbu);
475
476 PE_cpu_machine_quiesce(proc_info->cpu_id);
477 }
478
479
480 /*
481 * Routine: cpu_signal
482 * Function:
483 * Here is where we send a message to another processor. SIGPast is used to preempt
484 * and kick off threads (this is currently disabled) and SIGPdebug is used to enter
485 * the debugger; the other orders (SIGPcpureq, SIGPwake, and SIGPcall) are decoded
486 * in cpu_signal_handler below.
487 *
488 * We set up the SIGP function to indicate that this is a simple message and set the
489 * order code (MPsigpParm0) to the requested signal. After finding the per_proc block
490 * for the target, we lock the message block. Then we set the parameter(s). Next we
491 * change the lock (also called "busy") to "passing" and finally signal the other
492 * processor. Note that we only wait about half a millisecond to get the message lock.
493 * If we time out, we return failure to our caller. It is their responsibility to recover.
494 */
495 kern_return_t
496 cpu_signal(
497 int target,
498 int signal,
499 unsigned int p1,
500 unsigned int p2)
501 {
502
503 unsigned int holdStat;
504 struct per_proc_info *tpproc, *mpproc;
505 int busybitset=0;
506
507 #if DEBUG
508 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
509 #endif
510
511 mpproc = getPerProc(); /* Point to our block */
512 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
513 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
514
515 if(!tpproc->running) return KERN_FAILURE;
516
517 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
518
519 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
520
521 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
522 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
523 return KERN_SUCCESS;
524 }
525
526 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
527 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
528 return KERN_SUCCESS; /* Don't bother to send this one... */
529 }
530
531 if (tpproc->MPsigpParm0 == SIGPwake) {
532 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
533 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
534 busybitset = 1;
535 mpproc->hwCtr.numSIGPmwake++;
536 }
537 }
538 }
539
540 if((busybitset == 0) &&
541 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
542 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
543 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
544 return KERN_FAILURE; /* Timed out, take your ball and go home... */
545 }
546
547 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
548 tpproc->MPsigpParm0 = signal; /* Set message order */
549 tpproc->MPsigpParm1 = p1; /* Set additional parm */
550 tpproc->MPsigpParm2 = p2; /* Set additional parm */
551
552 __asm__ volatile("sync"); /* Make sure it's all there */
553
554 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
555 __asm__ volatile("eieio"); /* I'm a paranoid freak */
556
557 if (busybitset == 0)
558 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
559
560 return KERN_SUCCESS; /* All is goodness and rainbows... */
561 }
562
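/*
 * For illustration only: a hypothetical sender that pokes another processor
 * to run an AST check.  SIGPast carries no meaningful parameters, so zeros
 * are passed; the receive side is cpu_signal_handler below.
 */
#if 0	/* illustrative sketch, not compiled */
static boolean_t
kick_cpu_for_ast_example(int target_cpu)
{
	if (cpu_signal(target_cpu, SIGPast, 0, 0) == KERN_SUCCESS)
		return TRUE;				/* message lock obtained and signal sent */

	/*
	 * The target's message block stayed busy for about half a millisecond;
	 * it is the caller's job to retry or fall back on the next timer pop.
	 */
	return FALSE;
}
#endif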
563
564 /*
565 * Routine: cpu_signal_handler
566 * Function:
567 * Here is where we implement the receiver of the signaling protocol.
568 * We wait for the signal status area to be passed to us. Then we snarf
569 * up the status, the sender, and the 3 potential parms. Next we release
570 * the lock and signal the other guy.
571 */
572 void
573 cpu_signal_handler(void)
574 {
575 unsigned int holdStat, holdParm0, holdParm1, holdParm2;
576 unsigned int *parmAddr;
577 struct per_proc_info *proc_info;
578 int cpu;
579 broadcastFunc xfunc;
580 cpu = cpu_number(); /* Get the CPU number */
581
582 proc_info = getPerProc();
583
584 /*
585 * Since we've been signaled, wait about 31 ms for the signal lock to pass
586 */
587 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
588 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
589 panic("cpu_signal_handler: Lock pass timed out\n");
590 }
591
592 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
593 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
594 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
595 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
596
597 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
598
599 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
600
601 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
602
603 case MPsigpIdle: /* Was function cancelled? */
604 return; /* Yup... */
605
606 case MPsigpSigp: /* Signal Processor message? */
607
608 switch (holdParm0) { /* Decode SIGP message order */
609
610 case SIGPast: /* Should we do an AST? */
611 proc_info->hwCtr.numSIGPast++; /* Count this one */
612 #if 0
613 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
614 #endif
615 ast_check((processor_t)proc_info->processor);
616 return; /* All done... */
617
618 case SIGPcpureq: /* CPU specific function? */
619
620 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
621 switch (holdParm1) { /* Select specific function */
622
623 case CPRQtimebase:
624
625 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
626 return;
627
628 case CPRQsegload:
629 return;
630
631 case CPRQchud:
632 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
633 if(perfCpuSigHook) {
634 struct savearea *ssp = current_thread()->machine.pcb;
635 if(ssp) {
636 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
637 }
638 }
639 parmAddr[1] = 0;
640 parmAddr[0] = 0; /* Show we're done */
641 return;
642
643 case CPRQscom:
644 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
645 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
646 }
647 else { /* No, reading... */
648 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
649 }
650 return;
651
652 case CPRQsps:
653 {
654 ml_set_processor_speed_slave(holdParm2);
655 return;
656 }
657 default:
658 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
659 return;
660 }
661
662
663 case SIGPdebug: /* Enter the debugger? */
664
665 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
666 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
667 (void)hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
668 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
669 return; /* All done now... */
670
671 case SIGPwake: /* Wake up CPU */
672 proc_info->hwCtr.numSIGPwake++; /* Count this one */
673 return; /* No need to do anything, the interrupt does it all... */
674
675 case SIGPcall: /* Call function on CPU */
676 proc_info->hwCtr.numSIGPcall++; /* Count this one */
677 xfunc = (broadcastFunc)holdParm1; /* Do this since I can't seem to figure C out */
678 xfunc(holdParm2); /* Call the passed function */
679 return; /* Done... */
680
681 default:
682 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
683 return;
684
685 }
686
687 default:
688 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
689 return;
690
691 }
692 panic("cpu_signal_handler: we should never get here\n");
693 }
694
695
696 /*
697 * Routine: cpu_sync_timebase
698 * Function:
699 */
700 void
701 cpu_sync_timebase(
702 void)
703 {
704 natural_t tbu, tbl;
705 boolean_t intr;
706 struct SIGtimebase syncClkSpot;
707
708 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
709
710 syncClkSpot.avail = FALSE;
711 syncClkSpot.ready = FALSE;
712 syncClkSpot.done = FALSE;
713
714 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
715 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
716 continue;
717
718 while (syncClkSpot.avail == FALSE)
719 continue;
720
721 isync();
722
723 /*
724 * We do the following to keep the compiler from generating extra code
725 * in the timebase-set sequence below.
726 */
727 tbu = syncClkSpot.abstime >> 32;
728 tbl = (uint32_t)syncClkSpot.abstime;
729
730 mttb(0);
731 mttbu(tbu);
732 mttb(tbl);
733
734 syncClkSpot.ready = TRUE;
735
736 while (syncClkSpot.done == FALSE)
737 continue;
738
739 etimer_resync_deadlines(); /* Start the timer */
740 (void)ml_set_interrupts_enabled(intr);
741 }
742
743
744 /*
745 * Routine: cpu_timebase_signal_handler
746 * Function:
747 */
748 void
749 cpu_timebase_signal_handler(
750 struct per_proc_info *proc_info,
751 struct SIGtimebase *timebaseAddr)
752 {
753 unsigned int tbu, tbu2, tbl;
754
755 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
756 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
757
758 timebaseAddr->abstime = 0; /* Touch to force into cache */
759 sync();
760
761 do {
762 asm volatile(" mftbu %0" : "=r" (tbu));
763 asm volatile(" mftb %0" : "=r" (tbl));
764 asm volatile(" mftbu %0" : "=r" (tbu2));
765 } while (tbu != tbu2);
766
767 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
768 sync(); /* Force order */
769
770 timebaseAddr->avail = TRUE;
771
772 while (timebaseAddr->ready == FALSE)
773 continue;
774
775 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
776 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
777
778 timebaseAddr->done = TRUE;
779 }
780
781
782 /*
783 * Routine: cpu_control
784 * Function:
785 */
786 kern_return_t
787 cpu_control(
788 int slot_num,
789 processor_info_t info,
790 unsigned int count)
791 {
792 struct per_proc_info *proc_info;
793 cpu_type_t tcpu_type;
794 cpu_subtype_t tcpu_subtype;
795 processor_pm_regs_t perf_regs;
796 processor_control_cmd_t cmd;
797 boolean_t oldlevel;
798 #define MMCR0_SUPPORT_MASK 0xf83f1fff
799 #define MMCR1_SUPPORT_MASK 0xffc00000
800 #define MMCR2_SUPPORT_MASK 0x80000000
801
802 proc_info = PerProcTable[slot_num].ppe_vaddr;
803 tcpu_type = proc_info->cpu_type;
804 tcpu_subtype = proc_info->cpu_subtype;
805 cmd = (processor_control_cmd_t) info;
806
807 if (count < PROCESSOR_CONTROL_CMD_COUNT)
808 return(KERN_FAILURE);
809
810 if ( tcpu_type != cmd->cmd_cpu_type ||
811 tcpu_subtype != cmd->cmd_cpu_subtype)
812 return(KERN_FAILURE);
813
814 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
815 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
816 }
817
818 switch (cmd->cmd_op)
819 {
820 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
821 switch (tcpu_subtype)
822 {
823 case CPU_SUBTYPE_POWERPC_750:
824 case CPU_SUBTYPE_POWERPC_7400:
825 case CPU_SUBTYPE_POWERPC_7450:
826 {
827 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
828 mtpmc1(0x0);
829 mtpmc2(0x0);
830 mtpmc3(0x0);
831 mtpmc4(0x0);
832 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
833 return(KERN_SUCCESS);
834 }
835 default:
836 return(KERN_FAILURE);
837 } /* tcpu_subtype */
838 case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
839 switch (tcpu_subtype)
840 {
841 case CPU_SUBTYPE_POWERPC_750:
842 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
843 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
844 return(KERN_FAILURE);
845 else
846 {
847 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
848 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
849 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
850 mtpmc1(PERFMON_PMC1(perf_regs));
851 mtpmc2(PERFMON_PMC2(perf_regs));
852 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
853 mtpmc3(PERFMON_PMC3(perf_regs));
854 mtpmc4(PERFMON_PMC4(perf_regs));
855 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
856 return(KERN_SUCCESS);
857 }
858 case CPU_SUBTYPE_POWERPC_7400:
859 case CPU_SUBTYPE_POWERPC_7450:
860 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
861 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
862 return(KERN_FAILURE);
863 else
864 {
865 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
866 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
867 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
868 mtpmc1(PERFMON_PMC1(perf_regs));
869 mtpmc2(PERFMON_PMC2(perf_regs));
870 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
871 mtpmc3(PERFMON_PMC3(perf_regs));
872 mtpmc4(PERFMON_PMC4(perf_regs));
873 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
874 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
875 return(KERN_SUCCESS);
876 }
877 default:
878 return(KERN_FAILURE);
879 } /* switch tcpu_subtype */
880 case PROCESSOR_PM_SET_MMCR:
881 switch (tcpu_subtype)
882 {
883 case CPU_SUBTYPE_POWERPC_750:
884 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
885 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
886 return(KERN_FAILURE);
887 else
888 {
889 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
890 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
891 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
892 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
893 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
894 return(KERN_SUCCESS);
895 }
896 case CPU_SUBTYPE_POWERPC_7400:
897 case CPU_SUBTYPE_POWERPC_7450:
898 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
899 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
900 return(KERN_FAILURE);
901 else
902 {
903 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
904 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
905 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
906 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
907 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
908 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
909 return(KERN_SUCCESS);
910 }
911 default:
912 return(KERN_FAILURE);
913 } /* tcpu_subtype */
914 default:
915 return(KERN_FAILURE);
916 } /* switch cmd_op */
917 }
918
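/*
 * For illustration only: the shape of a PROCESSOR_PM_CLR_PMC request as
 * cpu_control above expects it.  The struct tag is assumed from the
 * processor_control_cmd_t typedef; a user task would normally reach this
 * through the Mach processor_control() interface rather than calling
 * cpu_control directly.
 */
#if 0	/* illustrative sketch, not compiled */
static kern_return_t
clear_pmcs_example(int slot_num)
{
	struct processor_control_cmd cmd;			/* tag assumed from processor_control_cmd_t */

	cmd.cmd_op          = PROCESSOR_PM_CLR_PMC;		/* clear the performance monitor counters */
	cmd.cmd_cpu_type    = slot_type(slot_num);		/* must match the target processor... */
	cmd.cmd_cpu_subtype = slot_subtype(slot_num);		/* ...or cpu_control returns KERN_FAILURE */

	return cpu_control(slot_num, (processor_info_t)&cmd, PROCESSOR_CONTROL_CMD_COUNT);
}
#endif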
919
920 /*
921 * Routine: cpu_info_count
922 * Function:
923 */
924 kern_return_t
925 cpu_info_count(
926 processor_flavor_t flavor,
927 unsigned int *count)
928 {
929 cpu_subtype_t tcpu_subtype;
930
931 /*
932 * For now, we just assume that all CPUs are of the same type
933 */
934 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
935 switch (flavor) {
936 case PROCESSOR_PM_REGS_INFO:
937 switch (tcpu_subtype) {
938 case CPU_SUBTYPE_POWERPC_750:
939
940 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
941 return(KERN_SUCCESS);
942
943 case CPU_SUBTYPE_POWERPC_7400:
944 case CPU_SUBTYPE_POWERPC_7450:
945
946 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
947 return(KERN_SUCCESS);
948
949 default:
950 *count = 0;
951 return(KERN_INVALID_ARGUMENT);
952 } /* switch tcpu_subtype */
953
954 case PROCESSOR_TEMPERATURE:
955 *count = PROCESSOR_TEMPERATURE_COUNT;
956 return (KERN_SUCCESS);
957
958 default:
959 *count = 0;
960 return(KERN_INVALID_ARGUMENT);
961
962 }
963 }
964
965
966 /*
967 * Routine: cpu_info
968 * Function:
969 */
970 kern_return_t
971 cpu_info(
972 processor_flavor_t flavor,
973 int slot_num,
974 processor_info_t info,
975 unsigned int *count)
976 {
977 cpu_subtype_t tcpu_subtype;
978 processor_pm_regs_t perf_regs;
979 boolean_t oldlevel;
980
981 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
982
983 switch (flavor) {
984 case PROCESSOR_PM_REGS_INFO:
985
986 perf_regs = (processor_pm_regs_t) info;
987
988 switch (tcpu_subtype) {
989 case CPU_SUBTYPE_POWERPC_750:
990
991 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
992 return(KERN_FAILURE);
993
994 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
995 PERFMON_MMCR0(perf_regs) = mfmmcr0();
996 PERFMON_PMC1(perf_regs) = mfpmc1();
997 PERFMON_PMC2(perf_regs) = mfpmc2();
998 PERFMON_MMCR1(perf_regs) = mfmmcr1();
999 PERFMON_PMC3(perf_regs) = mfpmc3();
1000 PERFMON_PMC4(perf_regs) = mfpmc4();
1001 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1002
1003 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
1004 return(KERN_SUCCESS);
1005
1006 case CPU_SUBTYPE_POWERPC_7400:
1007 case CPU_SUBTYPE_POWERPC_7450:
1008
1009 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1010 return(KERN_FAILURE);
1011
1012 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1013 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1014 PERFMON_PMC1(perf_regs) = mfpmc1();
1015 PERFMON_PMC2(perf_regs) = mfpmc2();
1016 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1017 PERFMON_PMC3(perf_regs) = mfpmc3();
1018 PERFMON_PMC4(perf_regs) = mfpmc4();
1019 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1020 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1021
1022 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1023 return(KERN_SUCCESS);
1024
1025 default:
1026 return(KERN_FAILURE);
1027 } /* switch tcpu_subtype */
1028
1029 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1030
1031 *info = -1; /* Temperature is not available; return a bogus value and fail */
1032 return(KERN_FAILURE);
1033
1034 default:
1035 return(KERN_INVALID_ARGUMENT);
1036
1037 } /* flavor */
1038 }
1039
1040
1041 /*
1042 * Routine: cpu_to_processor
1043 * Function:
1044 */
1045 processor_t
1046 cpu_to_processor(
1047 int cpu)
1048 {
1049 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1050 }
1051
1052
1053 /*
1054 * Routine: slot_type
1055 * Function:
1056 */
1057 cpu_type_t
1058 slot_type(
1059 int slot_num)
1060 {
1061 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1062 }
1063
1064
1065 /*
1066 * Routine: slot_subtype
1067 * Function:
1068 */
1069 cpu_subtype_t
1070 slot_subtype(
1071 int slot_num)
1072 {
1073 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1074 }
1075
1076
1077 /*
1078 * Routine: slot_threadtype
1079 * Function:
1080 */
1081 cpu_threadtype_t
1082 slot_threadtype(
1083 int slot_num)
1084 {
1085 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1086 }
1087
1088
1089 /*
1090 * Routine: cpu_type
1091 * Function:
1092 */
1093 cpu_type_t
1094 cpu_type(void)
1095 {
1096 return (getPerProc()->cpu_type);
1097 }
1098
1099
1100 /*
1101 * Routine: cpu_subtype
1102 * Function:
1103 */
1104 cpu_subtype_t
1105 cpu_subtype(void)
1106 {
1107 return (getPerProc()->cpu_subtype);
1108 }
1109
1110
1111 /*
1112 * Routine: cpu_threadtype
1113 * Function:
1114 */
1115 cpu_threadtype_t
1116 cpu_threadtype(void)
1117 {
1118 return (getPerProc()->cpu_threadtype);
1119 }
1120
1121 /*
1122 * Call a function on all running processors
1123 *
1124 * Note that the synch parameter is used to wait until all functions are complete.
1125 * It is not passed to the other processors and must be known by the called function.
1126 * The called function must do a thread_wakeup on the synch if it decrements the
1127 * synch count to 0.
1128 *
1129 * We start by initializing the synchronizer to the number of registered cpus (real_ncpus).
1130 * Then we signal each of the other processors.
1131 * If a signal fails, we count it. We also skip ourselves.
1132 * When we are finished signaling, we adjust the synchronizer count down by the number of failed signals.
1133 * Because the signaled processors are also decrementing the synchronizer count, the adjustment may result in 0.
1134 * If this happens, all other processors are finished with the function.
1135 * If so, we clear the wait and continue.
1136 * Otherwise, we block waiting for the other processor(s) to finish.
1137 *
1138 * Meanwhile, the other processors decrement the synchronizer when they are done.
1139 * If it goes to zero, thread_wakeup is called to run the broadcaster.
1140 *
1141 * Note that because we account for the broadcaster in the synchronization count, we will not get any
1142 * premature wakeup calls.
1143 *
1144 * Also note that when we do the adjustment of the synchronization count, if the result is 0, it means that
1145 * all of the other processors are finished. Otherwise, we know that there is at least one more.
1146 * When that thread decrements the synchronizer to zero, it will do a thread_wakeup.
1147 *
1148 */
1149
1150 int32_t
1151 cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
1152 {
1153 int failsig;
1154 unsigned int cpu, ocpu;
1155
1156 cpu = cpu_number(); /* Who are we? */
1157 failsig = 0; /* Clear called processor count */
1158
1159 if(real_ncpus > 1) { /* Are we just a uni? */
1160
1161 *synch = real_ncpus; /* Set how many we are going to try */
1162 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1163
1164 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1165
1166 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1167
1168 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1169 failsig++; /* Count failed signals */
1170 }
1171 }
1172
1173 if (hw_atomic_sub(synch, failsig + 1) == 0)
1174 clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled or all of the others finished */
1175 else
1176 thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1177 }
1178
1179 return (real_ncpus - failsig - 1); /* Return the number of guys actually signalled... */
1180 }
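
/*
 * For illustration only: a hypothetical pairing of cpu_broadcast with a
 * target function.  As the comment above explains, the synch word is not
 * passed through the signal, so the called function must know it on its
 * own (a file-scope variable is assumed here); the void (uint32_t)
 * signature is assumed from the call site in cpu_signal_handler.
 */
#if 0	/* illustrative sketch, not compiled */
static uint32_t example_synch;				/* shared synchronizer word */

static void
example_broadcast_target(uint32_t parm)			/* runs from the signal interrupt on each target */
{
	/* ... do the per-processor work selected by parm ... */

	if (hw_atomic_sub(&example_synch, 1) == 0)	/* last one out... */
		thread_wakeup((event_t)&example_synch);	/* ...wakes the broadcaster */
}

static void
example_broadcast_caller(void)
{
	/* cpu_broadcast initializes the synchronizer itself and blocks until all targets finish */
	(void)cpu_broadcast(&example_synch, example_broadcast_target, 0);
}
#endif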