1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/machine.h>
31 #include <mach/processor_info.h>
32
33 #include <kern/kalloc.h>
34 #include <kern/kern_types.h>
35 #include <kern/machine.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread.h>
38 #include <kern/sched_prim.h>
39 #include <kern/processor.h>
40
41 #include <vm/pmap.h>
42 #include <IOKit/IOHibernatePrivate.h>
43
44 #include <ppc/proc_reg.h>
45 #include <ppc/misc_protos.h>
46 #include <ppc/machine_routines.h>
47 #include <ppc/cpu_internal.h>
48 #include <ppc/exception.h>
49 #include <ppc/asm.h>
50 #include <ppc/hw_perfmon.h>
51 #include <pexpert/pexpert.h>
52 #include <kern/cpu_data.h>
53 #include <ppc/mappings.h>
54 #include <ppc/Diagnostics.h>
55 #include <ppc/trap.h>
56 #include <ppc/machine_cpu.h>
57 #include <ppc/pms.h>
58 #include <ppc/rtclock.h>
59
60 decl_mutex_data(static,ppt_lock);
61
62 unsigned int real_ncpus = 1;
63 unsigned int max_ncpus = MAX_CPUS;
64
65 decl_simple_lock_data(static,rht_lock);
66
67 static unsigned int rht_state = 0;
68 #define RHT_WAIT 0x01
69 #define RHT_BUSY 0x02
70
71 decl_simple_lock_data(static,SignalReadyLock);
72
73 struct SIGtimebase {
74 boolean_t avail;
75 boolean_t ready;
76 boolean_t done;
77 uint64_t abstime;
78 };
79
80 perfCallback perfCpuSigHook = 0; /* Pointer to CHUD cpu signal hook routine */
81
82 extern int debugger_sync;
83
84 /*
85 * Forward definitions
86 */
87
88 void cpu_sync_timebase(
89 void);
90
91 void cpu_timebase_signal_handler(
92 struct per_proc_info *proc_info,
93 struct SIGtimebase *timebaseAddr);
94
95 /*
96 * Routine: cpu_bootstrap
97 * Function:
98 */
99 void
100 cpu_bootstrap(
101 void)
102 {
103 simple_lock_init(&rht_lock,0);
104 simple_lock_init(&SignalReadyLock,0);
105 mutex_init(&ppt_lock,0);
106 }
107
108
109 /*
110 * Routine: cpu_init
111 * Function:
112 */
113 void
114 cpu_init(
115 void)
116 {
117 struct per_proc_info *proc_info;
118
119 proc_info = getPerProc();
120
121 /*
122 * Restore the TBR.
123 */
124 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
125 mttb(0);
126 mttbu(proc_info->save_tbu);
127 mttb(proc_info->save_tbl);
128 }
129
130 setTimerReq(); /* Now that the time base is sort of correct, request the next timer pop */
131
132 proc_info->cpu_type = CPU_TYPE_POWERPC;
133 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
134 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
135 proc_info->running = TRUE;
136
137 }
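/*
 * Illustrative sketch, for exposition only (helper names are hypothetical):
 * the carry-safe pattern this file uses for the 64-bit timebase, split into
 * its two 32-bit halves.  Reads retry until the upper half is stable across
 * the read of the lower half; writes zero TBL first so no carry can bump TBU
 * between the two moves.  Only mftbu/mftb/mttbu/mttb come from this file.
 */
#if 0
static uint64_t
tb_read_sketch(void)
{
	uint32_t tbu, tbl;

	do {						/* Retry if TBL carried into TBU mid-read */
		tbu = mftbu();
		tbl = mftb();
	} while (mftbu() != tbu);

	return (((uint64_t)tbu << 32) | tbl);
}

static void
tb_write_sketch(uint64_t tb)
{
	mttb(0);					/* Zero TBL so TBU cannot be bumped by a carry */
	mttbu((uint32_t)(tb >> 32));
	mttb((uint32_t)tb);
}
#endif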
138
139 /*
140 * Routine: cpu_machine_init
141 * Function:
142 */
143 void
144 cpu_machine_init(
145 void)
146 {
147 struct per_proc_info *proc_info;
148 volatile struct per_proc_info *mproc_info;
149
150
151 proc_info = getPerProc();
152 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
153
154 if (proc_info != mproc_info) {
155 simple_lock(&rht_lock);
156 if (rht_state & RHT_WAIT)
157 thread_wakeup(&rht_state);
158 rht_state &= ~(RHT_BUSY|RHT_WAIT);
159 simple_unlock(&rht_lock);
160 }
161
162 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
163
164 if (proc_info->hibernate) {
165 uint32_t tbu, tbl;
166
167 do {
168 tbu = mftbu();
169 tbl = mftb();
170 } while (mftbu() != tbu);
171
172 proc_info->hibernate = 0;
173 hibernate_machine_init();
174
175 // hibernate_machine_init() could take minutes and we don't want timeouts
176 // to fire as soon as scheduling starts. Reset timebase so it appears
177 // no time has elapsed, as it would for regular sleep.
178 mttb(0);
179 mttbu(tbu);
180 mttb(tbl);
181 }
182
183 if (proc_info != mproc_info) {
184 while (!((mproc_info->cpu_flags) & SignalReady))
185 continue;
186 cpu_sync_timebase();
187 }
188
189 ml_init_interrupt();
190 if (proc_info != mproc_info)
191 simple_lock(&SignalReadyLock);
192 proc_info->cpu_flags |= BootDone|SignalReady;
193 if (proc_info != mproc_info) {
194 if (proc_info->ppXFlags & SignalReadyWait) {
195 hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
196 thread_wakeup(&proc_info->cpu_flags);
197 }
198 simple_unlock(&SignalReadyLock);
199 pmsPark(); /* Timers should be cool now, park the power management stepper */
200 }
201 }
202
203
204 /*
205 * Routine: cpu_per_proc_alloc
206 * Function:
207 */
208 struct per_proc_info *
209 cpu_per_proc_alloc(
210 void)
211 {
212 struct per_proc_info *proc_info=0;
213 void *interrupt_stack=0;
214 void *debugger_stack=0;
215
216 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
217 return (struct per_proc_info *)NULL;
218 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
219 kfree(proc_info, sizeof(struct per_proc_info));
220 return (struct per_proc_info *)NULL;
221 }
222
223 if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
224 kfree(proc_info, sizeof(struct per_proc_info));
225 kfree(interrupt_stack, INTSTACK_SIZE);
226 return (struct per_proc_info *)NULL;
227 }
228
229 bzero((void *)proc_info, sizeof(struct per_proc_info));
230
231 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT; /* Set physical address of the second page */
232 proc_info->next_savearea = (uint64_t)save_get_init();
233 proc_info->pf = BootProcInfo.pf;
234 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
235 proc_info->intstack_top_ss = proc_info->istackptr;
236 proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
237 proc_info->debstack_top_ss = proc_info->debstackptr;
238
239 return proc_info;
240
241 }
242
243
244 /*
245 * Routine: cpu_per_proc_free
246 * Function:
247 */
248 void
249 cpu_per_proc_free(
250 struct per_proc_info *proc_info
251 )
252 {
253 if (proc_info->cpu_number == master_cpu)
254 return;
255 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
256 kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
257 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
258 }
259
260
261 /*
262 * Routine: cpu_per_proc_register
263 * Function:
264 */
265 kern_return_t
266 cpu_per_proc_register(
267 struct per_proc_info *proc_info
268 )
269 {
270 int cpu;
271
272 mutex_lock(&ppt_lock);
273 if (real_ncpus >= max_ncpus) {
274 mutex_unlock(&ppt_lock);
275 return KERN_FAILURE;
276 }
277 cpu = real_ncpus;
278 proc_info->cpu_number = cpu;
279 PerProcTable[cpu].ppe_vaddr = proc_info;
280 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
281 eieio();
282 real_ncpus++;
283 mutex_unlock(&ppt_lock);
284 return KERN_SUCCESS;
285 }
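/*
 * Illustrative sketch, for exposition only: how cpu_per_proc_alloc(),
 * cpu_per_proc_register() and cpu_start() (below) fit together when a new
 * processor is brought into the system.  The wrapper and its error handling
 * are hypothetical, and it assumes the platform code has already filled in
 * fields such as cpu_id and start_paddr before cpu_start() is called.
 */
#if 0
static kern_return_t
cpu_bringup_sketch(void)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;

	if ((proc_info = cpu_per_proc_alloc()) == NULL)		/* per_proc plus interrupt and debugger stacks */
		return KERN_RESOURCE_SHORTAGE;

	ret = cpu_per_proc_register(proc_info);			/* Claim the next slot in PerProcTable */
	if (ret != KERN_SUCCESS) {
		cpu_per_proc_free(proc_info);			/* No slot available, give it all back */
		return ret;
	}

	return cpu_start(proc_info->cpu_number);		/* Launch the processor through _start_cpu */
}
#endif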
286
287
288 /*
289 * Routine: cpu_start
290 * Function:
291 */
292 kern_return_t
293 cpu_start(
294 int cpu)
295 {
296 struct per_proc_info *proc_info;
297 kern_return_t ret;
298 mapping_t *mp;
299
300 proc_info = PerProcTable[cpu].ppe_vaddr;
301
302 if (cpu == cpu_number()) {
303 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
304 ml_init_interrupt();
305 proc_info->cpu_flags |= BootDone|SignalReady;
306
307 return KERN_SUCCESS;
308 } else {
309 proc_info->cpu_flags &= BootDone; /* Keep only the BootDone flag */
310 proc_info->interrupts_enabled = 0;
311 proc_info->pending_ast = AST_NONE;
312 proc_info->istackptr = proc_info->intstack_top_ss;
313 proc_info->rtcPop = EndOfAllTime;
314 proc_info->FPU_owner = 0;
315 proc_info->VMX_owner = 0;
316 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
317 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
318 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
319 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
320 mp = (mapping_t *)(&proc_info->ppUMWmp);
321 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
322 mp->mpSpace = invalSpace;
323
324 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
325
326 simple_lock(&rht_lock);
327 while (rht_state & RHT_BUSY) {
328 rht_state |= RHT_WAIT;
329 thread_sleep_usimple_lock((event_t)&rht_state,
330 &rht_lock, THREAD_UNINT);
331 }
332 rht_state |= RHT_BUSY;
333 simple_unlock(&rht_lock);
334
335 ml_phys_write((vm_offset_t)&ResetHandler + 0,
336 RESET_HANDLER_START);
337 ml_phys_write((vm_offset_t)&ResetHandler + 4,
338 (vm_offset_t)_start_cpu);
339 ml_phys_write((vm_offset_t)&ResetHandler + 8,
340 (vm_offset_t)&PerProcTable[cpu]);
341 }
342 /*
343 * Note: we pass the current time to the other processor here. It will load it
344 * as early as possible so that there is a chance that it is close to accurate.
345 * After the machine is up a while, we will officially resync the clocks so
346 * that all processors are the same. This is just to get close.
347 */
348
349 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
350
351 __asm__ volatile("sync"); /* Commit to storage */
352 __asm__ volatile("isync"); /* Wait a second */
353 ret = PE_cpu_start(proc_info->cpu_id,
354 proc_info->start_paddr, (vm_offset_t)proc_info);
355
356 if (ret != KERN_SUCCESS) {
357 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
358 simple_lock(&rht_lock);
359 if (rht_state & RHT_WAIT)
360 thread_wakeup(&rht_state);
361 rht_state &= ~(RHT_BUSY|RHT_WAIT);
362 simple_unlock(&rht_lock);
363 }
364 } else {
365 simple_lock(&SignalReadyLock);
366 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
367 hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
368 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
369 &SignalReadyLock, THREAD_UNINT);
370 }
371 simple_unlock(&SignalReadyLock);
372
373 }
374 return(ret);
375 }
376 }
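/*
 * Illustrative sketch, for exposition only: the three words that cpu_start()
 * (and cpu_sleep() below) plant at ResetHandler when a processor is brought
 * up through the reset vector.  The struct and field names are hypothetical;
 * the layout is simply what the ml_phys_write() calls above store at offsets
 * 0, 4 and 8.
 */
#if 0
struct reset_handler_words_sketch {
	unsigned int	rh_marker;	/* RESET_HANDLER_START */
	unsigned int	rh_entry;	/* Address of _start_cpu, where the woken CPU branches */
	unsigned int	rh_perproc;	/* Address of the target's PerProcTable entry */
};
#endif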
377
378 /*
379 * Routine: cpu_exit_wait
380 * Function:
381 */
382 void
383 cpu_exit_wait(
384 int cpu)
385 {
386 struct per_proc_info *tpproc;
387
388 if ( cpu != master_cpu) {
389 tpproc = PerProcTable[cpu].ppe_vaddr;
390 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
391 }
392 }
393
394
395 /*
396 * Routine: cpu_doshutdown
397 * Function:
398 */
399 void
400 cpu_doshutdown(
401 void)
402 {
403 enable_preemption();
404 processor_offline(current_processor());
405 }
406
407
408 /*
409 * Routine: cpu_sleep
410 * Function:
411 */
412 void
413 cpu_sleep(
414 void)
415 {
416 struct per_proc_info *proc_info;
417 unsigned int i;
418 unsigned int wait_ncpus_sleep, ncpus_sleep;
419 facility_context *fowner;
420
421 proc_info = getPerProc();
422
423 proc_info->running = FALSE;
424
425 fowner = proc_info->FPU_owner; /* Cache this */
426 if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
427 proc_info->FPU_owner = 0; /* Set no fpu owner now */
428
429 fowner = proc_info->VMX_owner; /* Cache this */
430 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
431 proc_info->VMX_owner = 0; /* Set no vector owner now */
432
433 if (proc_info->cpu_number == master_cpu) {
434 proc_info->cpu_flags &= BootDone; /* Keep only the BootDone flag */
435 proc_info->interrupts_enabled = 0;
436 proc_info->pending_ast = AST_NONE;
437
438 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
439 ml_phys_write((vm_offset_t)&ResetHandler + 0,
440 RESET_HANDLER_START);
441 ml_phys_write((vm_offset_t)&ResetHandler + 4,
442 (vm_offset_t)_start_cpu);
443 ml_phys_write((vm_offset_t)&ResetHandler + 8,
444 (vm_offset_t)&PerProcTable[master_cpu]);
445
446 __asm__ volatile("sync");
447 __asm__ volatile("isync");
448 }
449
450 wait_ncpus_sleep = real_ncpus-1;
451 ncpus_sleep = 0;
452 while (wait_ncpus_sleep != ncpus_sleep) {
453 ncpus_sleep = 0;
454 for(i=1; i < real_ncpus ; i++) {
455 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
456 ncpus_sleep++;
457 }
458 }
459
460 }
461
462 /*
463 * Save the TBR before stopping.
464 */
465 do {
466 proc_info->save_tbu = mftbu();
467 proc_info->save_tbl = mftb();
468 } while (mftbu() != proc_info->save_tbu);
469
470 PE_cpu_machine_quiesce(proc_info->cpu_id);
471 }
472
473
474 /*
475 * Routine: cpu_signal
476 * Function:
477 * Here is where we send a message to another processor. So far we only have two:
478 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
479 * currently disabled). SIGPdebug is used to enter the debugger.
480 *
481 * We set up the SIGP function to indicate that this is a simple message and set the
82 * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
483 * block for the target, we lock the message block. Then we set the parameter(s).
484 * Next we change the lock (also called "busy") to "passing" and finally signal
85 * the other processor. Note that we only wait about .5ms to get the message lock.
486 * If we time out, we return failure to our caller. It is their responsibility to
487 * recover.
488 */
489 kern_return_t
490 cpu_signal(
491 int target,
492 int signal,
493 unsigned int p1,
494 unsigned int p2)
495 {
496
497 unsigned int holdStat;
498 struct per_proc_info *tpproc, *mpproc;
499 int busybitset=0;
500
501 #if DEBUG
502 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
503 #endif
504
505 mpproc = getPerProc(); /* Point to our block */
506 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
507 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
508
509 if(!tpproc->running) return KERN_FAILURE;
510
511 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
512
513 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
514
515 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
516 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
517 return KERN_SUCCESS;
518 }
519
520 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
521 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
522 return KERN_SUCCESS; /* Don't bother to send this one... */
523 }
524
525 if (tpproc->MPsigpParm0 == SIGPwake) {
526 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
527 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
528 busybitset = 1;
529 mpproc->hwCtr.numSIGPmwake++;
530 }
531 }
532 }
533
534 if((busybitset == 0) &&
535 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
536 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
537 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
538 return KERN_FAILURE; /* Timed out, take your ball and go home... */
539 }
540
541 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
542 tpproc->MPsigpParm0 = signal; /* Set message order */
543 tpproc->MPsigpParm1 = p1; /* Set additional parm */
544 tpproc->MPsigpParm2 = p2; /* Set additional parm */
545
546 __asm__ volatile("sync"); /* Make sure it's all there */
547
548 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
549 __asm__ volatile("eieio"); /* I'm a paranoid freak */
550
551 if (busybitset == 0)
552 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
553
554 return KERN_SUCCESS; /* All is goodness and rainbows... */
555 }
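/*
 * Illustrative sketch, for exposition only: a typical sender of the protocol
 * described above.  The wrapper name is hypothetical; cpu_signal() and
 * SIGPwake are the ones used in this file.  A failure only means the message
 * lock timed out, and the caller decides whether to retry or give up.
 */
#if 0
static void
cpu_kick_sketch(int target_cpu)
{
	if (cpu_signal(target_cpu, SIGPwake, 0, 0) != KERN_SUCCESS)	/* Poke the target awake */
		kprintf("cpu_kick_sketch: cpu %d did not take the wake signal\n", target_cpu);
}
#endif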
556
557
558 /*
559 * Routine: cpu_signal_handler
560 * Function:
561 * Here is where we implement the receiver of the signaling protocol.
562 * We wait for the signal status area to be passed to us. Then we snarf
563 * up the status, the sender, and the 3 potential parms. Next we release
564 * the lock and signal the other guy.
565 */
566 void
567 cpu_signal_handler(
568 void)
569 {
570
571 unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
572 unsigned int *parmAddr;
573 struct per_proc_info *proc_info;
574 int cpu;
575 broadcastFunc xfunc;
576 cpu = cpu_number(); /* Get the CPU number */
577
578 proc_info = getPerProc();
579
580 /*
581 * Since we've been signaled, wait about 31 ms for the signal lock to pass
582 */
583 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
584 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
585 panic("cpu_signal_handler: Lock pass timed out\n");
586 }
587
588 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
589 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
590 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
591 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
592
593 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
594
595 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
596
597 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
598
599 case MPsigpIdle: /* Was function cancelled? */
600 return; /* Yup... */
601
602 case MPsigpSigp: /* Signal Processor message? */
603
604 switch (holdParm0) { /* Decode SIGP message order */
605
606 case SIGPast: /* Should we do an AST? */
607 proc_info->hwCtr.numSIGPast++; /* Count this one */
608 #if 0
609 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
610 #endif
611 ast_check((processor_t)proc_info->processor);
612 return; /* All done... */
613
614 case SIGPcpureq: /* CPU specific function? */
615
616 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
617 switch (holdParm1) { /* Select specific function */
618
619 case CPRQtimebase:
620
621 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
622 return;
623
624 case CPRQsegload:
625 return;
626
627 case CPRQchud:
628 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
629 if(perfCpuSigHook) {
630 struct savearea *ssp = current_thread()->machine.pcb;
631 if(ssp) {
632 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
633 }
634 }
635 parmAddr[1] = 0;
636 parmAddr[0] = 0; /* Show we're done */
637 return;
638
639 case CPRQscom:
640 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
641 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
642 }
643 else { /* No, reading... */
644 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
645 }
646 return;
647
648 case CPRQsps:
649 {
650 ml_set_processor_speed_slave(holdParm2);
651 return;
652 }
653 default:
654 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
655 return;
656 }
657
658
659 case SIGPdebug: /* Enter the debugger? */
660
661 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
662 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
663 hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
664 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
665 return; /* All done now... */
666
667 case SIGPwake: /* Wake up CPU */
668 proc_info->hwCtr.numSIGPwake++; /* Count this one */
669 return; /* No need to do anything, the interrupt does it all... */
670
671 case SIGPcall: /* Call function on CPU */
672 proc_info->hwCtr.numSIGPcall++; /* Count this one */
673 xfunc = (broadcastFunc)holdParm1; /* Recover the function pointer from the signal parameter */
674 xfunc(holdParm2); /* Call the passed function */
675 return; /* Done... */
676
677 default:
678 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
679 return;
680
681 }
682
683 default:
684 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
685 return;
686
687 }
688 panic("cpu_signal_handler: we should never get here\n");
689 }
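/*
 * Illustrative sketch, for exposition only: how the status word carried in
 * MPsigpStat is assembled by cpu_signal() and decoded above.  The helper is
 * hypothetical; the exact bit assignments live with the MPsigp* definitions,
 * not here.
 */
#if 0
static unsigned int
mpsigp_stat_sketch(int sender_cpu)
{
	/* Busy plus pass hands the block to the receiver; the function code
	   (here MPsigpSigp) is shifted up by 8 bits and the sender's cpu
	   number sits in the low-order bits. */
	return (MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | sender_cpu);
}
#endif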
690
691
692 /*
693 * Routine: cpu_sync_timebase
694 * Function:
695 */
696 void
697 cpu_sync_timebase(
698 void)
699 {
700 natural_t tbu, tbl;
701 boolean_t intr;
702 struct SIGtimebase syncClkSpot;
703
704 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
705
706 syncClkSpot.avail = FALSE;
707 syncClkSpot.ready = FALSE;
708 syncClkSpot.done = FALSE;
709
710 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
711 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
712 continue;
713
714 while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
715 continue;
716
717 isync();
718
719 /*
720 * We do the following to keep the compiler from generating extra code
721 * in the timebase-set sequence below
722 */
723 tbu = syncClkSpot.abstime >> 32;
724 tbl = (uint32_t)syncClkSpot.abstime;
725
726 mttb(0);
727 mttbu(tbu);
728 mttb(tbl);
729
730 syncClkSpot.ready = TRUE;
731
732 while (*(volatile int *)&(syncClkSpot.done) == FALSE)
733 continue;
734
735 setTimerReq(); /* Start the timer */
736
737 (void)ml_set_interrupts_enabled(intr);
738 }
739
740
741 /*
742 * Routine: cpu_timebase_signal_handler
743 * Function:
744 */
745 void
746 cpu_timebase_signal_handler(
747 struct per_proc_info *proc_info,
748 struct SIGtimebase *timebaseAddr)
749 {
750 unsigned int tbu, tbu2, tbl;
751
752 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
753 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
754
755 timebaseAddr->abstime = 0; /* Touch to force into cache */
756 sync();
757
758 do {
759 asm volatile(" mftbu %0" : "=r" (tbu));
760 asm volatile(" mftb %0" : "=r" (tbl));
761 asm volatile(" mftbu %0" : "=r" (tbu2));
762 } while (tbu != tbu2);
763
764 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
765 sync(); /* Force order */
766
767 timebaseAddr->avail = TRUE;
768
769 while (*(volatile int *)&(timebaseAddr->ready) == FALSE);
770
771 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
772 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
773
774 timebaseAddr->done = TRUE;
775 }
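/*
 * For exposition: cpu_sync_timebase() and cpu_timebase_signal_handler() above
 * form a three-flag handshake through the shared struct SIGtimebase.  The
 * joining CPU sends SIGPcpureq/CPRQtimebase with the structure's address and
 * spins on avail; the master (optionally freezing the timebase through
 * time_base_enable) samples its timebase into abstime, sets avail and spins
 * on ready; the joining CPU loads the value into its own timebase, sets ready
 * and spins on done; the master re-enables the timebase and sets done.
 */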
776
777
778 /*
779 * Routine: cpu_control
780 * Function:
781 */
782 kern_return_t
783 cpu_control(
784 int slot_num,
785 processor_info_t info,
786 unsigned int count)
787 {
788 struct per_proc_info *proc_info;
789 cpu_type_t tcpu_type;
790 cpu_subtype_t tcpu_subtype;
791 processor_pm_regs_t perf_regs;
792 processor_control_cmd_t cmd;
793 boolean_t oldlevel;
794 #define MMCR0_SUPPORT_MASK 0xf83f1fff
795 #define MMCR1_SUPPORT_MASK 0xffc00000
796 #define MMCR2_SUPPORT_MASK 0x80000000
797
798 proc_info = PerProcTable[slot_num].ppe_vaddr;
799 tcpu_type = proc_info->cpu_type;
800 tcpu_subtype = proc_info->cpu_subtype;
801 cmd = (processor_control_cmd_t) info;
802
803 if (count < PROCESSOR_CONTROL_CMD_COUNT)
804 return(KERN_FAILURE);
805
806 if ( tcpu_type != cmd->cmd_cpu_type ||
807 tcpu_subtype != cmd->cmd_cpu_subtype)
808 return(KERN_FAILURE);
809
810 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
811 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
812 }
813
814 switch (cmd->cmd_op)
815 {
816 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
817 switch (tcpu_subtype)
818 {
819 case CPU_SUBTYPE_POWERPC_750:
820 case CPU_SUBTYPE_POWERPC_7400:
821 case CPU_SUBTYPE_POWERPC_7450:
822 {
823 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
824 mtpmc1(0x0);
825 mtpmc2(0x0);
826 mtpmc3(0x0);
827 mtpmc4(0x0);
828 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
829 return(KERN_SUCCESS);
830 }
831 default:
832 return(KERN_FAILURE);
833 } /* tcpu_subtype */
834 case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
835 switch (tcpu_subtype)
836 {
837 case CPU_SUBTYPE_POWERPC_750:
838 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
839 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
840 return(KERN_FAILURE);
841 else
842 {
843 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
844 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
845 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
846 mtpmc1(PERFMON_PMC1(perf_regs));
847 mtpmc2(PERFMON_PMC2(perf_regs));
848 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
849 mtpmc3(PERFMON_PMC3(perf_regs));
850 mtpmc4(PERFMON_PMC4(perf_regs));
851 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
852 return(KERN_SUCCESS);
853 }
854 case CPU_SUBTYPE_POWERPC_7400:
855 case CPU_SUBTYPE_POWERPC_7450:
856 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
857 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
858 return(KERN_FAILURE);
859 else
860 {
861 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
862 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
863 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
864 mtpmc1(PERFMON_PMC1(perf_regs));
865 mtpmc2(PERFMON_PMC2(perf_regs));
866 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
867 mtpmc3(PERFMON_PMC3(perf_regs));
868 mtpmc4(PERFMON_PMC4(perf_regs));
869 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
870 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
871 return(KERN_SUCCESS);
872 }
873 default:
874 return(KERN_FAILURE);
875 } /* switch tcpu_subtype */
876 case PROCESSOR_PM_SET_MMCR:
877 switch (tcpu_subtype)
878 {
879 case CPU_SUBTYPE_POWERPC_750:
880 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
881 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
882 return(KERN_FAILURE);
883 else
884 {
885 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
886 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
887 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
888 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
889 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
890 return(KERN_SUCCESS);
891 }
892 case CPU_SUBTYPE_POWERPC_7400:
893 case CPU_SUBTYPE_POWERPC_7450:
894 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
895 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
896 return(KERN_FAILURE);
897 else
898 {
899 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
900 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
901 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
902 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
903 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
904 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
905 return(KERN_SUCCESS);
906 }
907 default:
908 return(KERN_FAILURE);
909 } /* tcpu_subtype */
910 default:
911 return(KERN_FAILURE);
912 } /* switch cmd_op */
913 }
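/*
 * Illustrative sketch, for exposition only: clearing the performance monitor
 * counters of a slot through cpu_control().  The wrapper and the local
 * command layout are assumptions; the fields and constants used (cmd_op,
 * cmd_cpu_type, cmd_cpu_subtype, PROCESSOR_PM_CLR_PMC,
 * PROCESSOR_CONTROL_CMD_COUNT) are the ones the routine above checks.
 */
#if 0
static kern_return_t
clear_pmcs_sketch(int slot_num)
{
	struct processor_control_cmd	cmd;			/* Assumed to be what processor_control_cmd_t points at */

	cmd.cmd_op          = PROCESSOR_PM_CLR_PMC;		/* Zero PMC1-PMC4 on the target */
	cmd.cmd_cpu_type    = slot_type(slot_num);		/* Must match the target or cpu_control() fails */
	cmd.cmd_cpu_subtype = slot_subtype(slot_num);

	return cpu_control(slot_num, (processor_info_t)&cmd, PROCESSOR_CONTROL_CMD_COUNT);
}
#endif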
914
915
916 /*
917 * Routine: cpu_info_count
918 * Function:
919 */
920 kern_return_t
921 cpu_info_count(
922 processor_flavor_t flavor,
923 unsigned int *count)
924 {
925 cpu_subtype_t tcpu_subtype;
926
927 /*
928 * For now, we just assume that all CPUs are of the same type
929 */
930 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
931 switch (flavor) {
932 case PROCESSOR_PM_REGS_INFO:
933 switch (tcpu_subtype) {
934 case CPU_SUBTYPE_POWERPC_750:
935
936 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
937 return(KERN_SUCCESS);
938
939 case CPU_SUBTYPE_POWERPC_7400:
940 case CPU_SUBTYPE_POWERPC_7450:
941
942 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
943 return(KERN_SUCCESS);
944
945 default:
946 *count = 0;
947 return(KERN_INVALID_ARGUMENT);
948 } /* switch tcpu_subtype */
949
950 case PROCESSOR_TEMPERATURE:
951 *count = PROCESSOR_TEMPERATURE_COUNT;
952 return (KERN_SUCCESS);
953
954 default:
955 *count = 0;
956 return(KERN_INVALID_ARGUMENT);
957
958 }
959 }
960
961
962 /*
963 * Routine: cpu_info
964 * Function:
965 */
966 kern_return_t
967 cpu_info(
968 processor_flavor_t flavor,
969 int slot_num,
970 processor_info_t info,
971 unsigned int *count)
972 {
973 cpu_subtype_t tcpu_subtype;
974 processor_pm_regs_t perf_regs;
975 boolean_t oldlevel;
976
977 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
978
979 switch (flavor) {
980 case PROCESSOR_PM_REGS_INFO:
981
982 perf_regs = (processor_pm_regs_t) info;
983
984 switch (tcpu_subtype) {
985 case CPU_SUBTYPE_POWERPC_750:
986
987 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
988 return(KERN_FAILURE);
989
990 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
991 PERFMON_MMCR0(perf_regs) = mfmmcr0();
992 PERFMON_PMC1(perf_regs) = mfpmc1();
993 PERFMON_PMC2(perf_regs) = mfpmc2();
994 PERFMON_MMCR1(perf_regs) = mfmmcr1();
995 PERFMON_PMC3(perf_regs) = mfpmc3();
996 PERFMON_PMC4(perf_regs) = mfpmc4();
997 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
998
999 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
1000 return(KERN_SUCCESS);
1001
1002 case CPU_SUBTYPE_POWERPC_7400:
1003 case CPU_SUBTYPE_POWERPC_7450:
1004
1005 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1006 return(KERN_FAILURE);
1007
1008 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1009 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1010 PERFMON_PMC1(perf_regs) = mfpmc1();
1011 PERFMON_PMC2(perf_regs) = mfpmc2();
1012 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1013 PERFMON_PMC3(perf_regs) = mfpmc3();
1014 PERFMON_PMC4(perf_regs) = mfpmc4();
1015 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1016 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1017
1018 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1019 return(KERN_SUCCESS);
1020
1021 default:
1022 return(KERN_FAILURE);
1023 } /* switch tcpu_subtype */
1024
1025 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1026
1027 *info = -1; /* Temperature reporting is not supported */
1028 return(KERN_FAILURE);
1029
1030 default:
1031 return(KERN_INVALID_ARGUMENT);
1032
1033 } /* flavor */
1034 }
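/*
 * Illustrative sketch, for exposition only: reading a slot's performance
 * monitor registers with cpu_info_count() and cpu_info() above.  The wrapper
 * is hypothetical, and it assumes the counts are in 32-bit words, which is
 * how the comparisons above treat them.
 */
#if 0
static void
dump_pm_regs_sketch(int slot_num)
{
	unsigned int	count;
	unsigned int	regs[PROCESSOR_PM_REGS_COUNT_POWERPC_7400];	/* Largest flavor reported above */

	if (cpu_info_count(PROCESSOR_PM_REGS_INFO, &count) != KERN_SUCCESS)
		return;

	if (cpu_info(PROCESSOR_PM_REGS_INFO, slot_num, (processor_info_t)regs, &count) == KERN_SUCCESS)
		kprintf("slot %d MMCR0 = %08X\n", slot_num,
		    PERFMON_MMCR0((processor_pm_regs_t)regs));
}
#endif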
1035
1036
1037 /*
1038 * Routine: cpu_to_processor
1039 * Function:
1040 */
1041 processor_t
1042 cpu_to_processor(
1043 int cpu)
1044 {
1045 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1046 }
1047
1048
1049 /*
1050 * Routine: slot_type
1051 * Function:
1052 */
1053 cpu_type_t
1054 slot_type(
1055 int slot_num)
1056 {
1057 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1058 }
1059
1060
1061 /*
1062 * Routine: slot_subtype
1063 * Function:
1064 */
1065 cpu_subtype_t
1066 slot_subtype(
1067 int slot_num)
1068 {
1069 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1070 }
1071
1072
1073 /*
1074 * Routine: slot_threadtype
1075 * Function:
1076 */
1077 cpu_threadtype_t
1078 slot_threadtype(
1079 int slot_num)
1080 {
1081 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1082 }
1083
1084
1085 /*
1086 * Routine: cpu_type
1087 * Function:
1088 */
1089 cpu_type_t
1090 cpu_type(void)
1091 {
1092 return (getPerProc()->cpu_type);
1093 }
1094
1095
1096 /*
1097 * Routine: cpu_subtype
1098 * Function:
1099 */
1100 cpu_subtype_t
1101 cpu_subtype(void)
1102 {
1103 return (getPerProc()->cpu_subtype);
1104 }
1105
1106
1107 /*
1108 * Routine: cpu_threadtype
1109 * Function:
1110 */
1111 cpu_threadtype_t
1112 cpu_threadtype(void)
1113 {
1114 return (getPerProc()->cpu_threadtype);
1115 }
1116
1117 /*
1118 * Call a function on all running processors
1119 *
1120 * Note that the synch parameter is used to wait until all functions are complete.
1121 * It is not passed to the other processor and must be known by the called function.
1122 * The called function must do a thread_wakeup on the synch if it decrements the
1123 * synch count to 0.
1124 */
1125
1126
1127 int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
1128
1129 int sigproc, cpu, ocpu;
1130
1131 cpu = cpu_number(); /* Who are we? */
1132 sigproc = 0; /* Clear called processor count */
1133
1134 if(real_ncpus > 1) { /* Are we just a uni? */
1135
1136 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1137
1138 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1139 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1140 hw_atomic_add(synch, 1); /* Tentatively bump synchronizer */
1141 sigproc++; /* Tentatively bump signal sent count */
1142 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1143 hw_atomic_sub(synch, 1); /* Other guy isn't really there, ignore it */
1144 sigproc--; /* and don't count it */
1145 }
1146 }
1147
1148 if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
1149 else thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1150 }
1151
1152 return sigproc; /* Return the number of guys actually signalled */
1153
1154 }
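
/*
 * Illustrative sketch, for exposition only: a function run on the other
 * processors via cpu_broadcast(), together with the caller that waits for all
 * of them.  The names are hypothetical, the broadcastFunc signature (a single
 * uint32_t argument) is inferred from the dispatch in cpu_signal_handler(),
 * and it assumes hw_atomic_sub() returns the new value.  The synch handling
 * follows the rule stated above: the called function decrements the counter
 * and does a thread_wakeup() on it when the count reaches zero.
 */
#if 0
static uint32_t bcast_synch_sketch;				/* Synchronizer known to both sides */

static void
bcast_target_sketch(uint32_t parm)
{
	kprintf("cpu %d got broadcast parm %08X\n", cpu_number(), parm);

	if (hw_atomic_sub(&bcast_synch_sketch, 1) == 0)		/* Last one out wakes the caller */
		thread_wakeup((event_t)&bcast_synch_sketch);
}

static void
bcast_caller_sketch(void)
{
	bcast_synch_sketch = 0;					/* cpu_broadcast() bumps this once per CPU it signals */
	(void)cpu_broadcast(&bcast_synch_sketch, bcast_target_sketch, 0x1234);
}
#endif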