1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <mach/mach_types.h>
24 #include <mach/machine.h>
25 #include <mach/processor_info.h>
26
27 #include <kern/kalloc.h>
28 #include <kern/kern_types.h>
29 #include <kern/machine.h>
30 #include <kern/misc_protos.h>
31 #include <kern/thread.h>
32 #include <kern/sched_prim.h>
33 #include <kern/processor.h>
34
35 #include <vm/pmap.h>
36 #include <IOKit/IOHibernatePrivate.h>
37
38 #include <ppc/proc_reg.h>
39 #include <ppc/misc_protos.h>
40 #include <ppc/machine_routines.h>
41 #include <ppc/cpu_internal.h>
42 #include <ppc/exception.h>
43 #include <ppc/asm.h>
44 #include <ppc/hw_perfmon.h>
45 #include <pexpert/pexpert.h>
46 #include <kern/cpu_data.h>
47 #include <ppc/mappings.h>
48 #include <ppc/Diagnostics.h>
49 #include <ppc/trap.h>
50 #include <ppc/machine_cpu.h>
51 #include <ppc/pms.h>
52 #include <ppc/rtclock.h>
53
54 decl_mutex_data(static,ppt_lock);
55
56 unsigned int real_ncpus = 1;
57 unsigned int max_ncpus = MAX_CPUS;
58
59 decl_simple_lock_data(static,rht_lock);
60
61 static unsigned int rht_state = 0;
62 #define RHT_WAIT 0x01
63 #define RHT_BUSY 0x02
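/*
 * rht_state (guarded by rht_lock) serializes use of the shared reset handler
 * while a processor is brought up through the reset vector; see cpu_start()
 * and cpu_machine_init().
 */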
64
65 decl_simple_lock_data(static,SignalReadyLock);
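/*
 * SignalReadyLock protects the SignalReady/SignalReadyWait handshake between
 * cpu_start() and the processor it is starting.
 */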
66
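/*
 * Handshake area for timebase synchronization: the master fills in abstime
 * and sets avail, the slave loads the value and sets ready, and the master
 * sets done once it has re-enabled its timebase.  See cpu_sync_timebase()
 * and cpu_timebase_signal_handler().
 */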
67 struct SIGtimebase {
68 boolean_t avail;
69 boolean_t ready;
70 boolean_t done;
71 uint64_t abstime;
72 };
73
74 perfCallback perfCpuSigHook = 0; /* Pointer to CHUD cpu signal hook routine */
75
76 extern int debugger_sync;
77
78 /*
79 * Forward definitions
80 */
81
82 void cpu_sync_timebase(
83 void);
84
85 void cpu_timebase_signal_handler(
86 struct per_proc_info *proc_info,
87 struct SIGtimebase *timebaseAddr);
88
89 /*
90 * Routine: cpu_bootstrap
91  * Function:	Initialize the locks (rht_lock, SignalReadyLock and ppt_lock) used by this module; called once during early boot.
92 */
93 void
94 cpu_bootstrap(
95 void)
96 {
97 simple_lock_init(&rht_lock,0);
98 simple_lock_init(&SignalReadyLock,0);
99 mutex_init(&ppt_lock,0);
100 }
101
102
103 /*
104 * Routine: cpu_init
105  * Function:	Restore the timebase saved across sleep (if any), request the next timer pop, and record this processor's type, subtype and running state.
106 */
107 void
108 cpu_init(
109 void)
110 {
111 struct per_proc_info *proc_info;
112
113 proc_info = getPerProc();
114
115 /*
116 * Restore the TBR.
117 */
118 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
119 mttb(0);
120 mttbu(proc_info->save_tbu);
121 mttb(proc_info->save_tbl);
122 }
123
124 setTimerReq(); /* Now that the time base is sort of correct, request the next timer pop */
125
126 proc_info->cpu_type = CPU_TYPE_POWERPC;
127 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
128 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
129 proc_info->running = TRUE;
130
131 }
132
133 /*
134 * Routine: cpu_machine_init
135  * Function:	Complete per-processor initialization: release the reset handler, handle a pending hibernate resume, synchronize the timebase with the master (non-boot processors), and mark the processor BootDone/SignalReady.
136 */
137 void
138 cpu_machine_init(
139 void)
140 {
141 struct per_proc_info *proc_info;
142 volatile struct per_proc_info *mproc_info;
143
144
145 proc_info = getPerProc();
146 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
147
148 if (proc_info != mproc_info) {
149 simple_lock(&rht_lock);
150 if (rht_state & RHT_WAIT)
151 thread_wakeup(&rht_state);
152 rht_state &= ~(RHT_BUSY|RHT_WAIT);
153 simple_unlock(&rht_lock);
154 }
155
156 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
157
158 if (proc_info->hibernate) {
159 uint32_t tbu, tbl;
160
161 do {
162 tbu = mftbu();
163 tbl = mftb();
164 } while (mftbu() != tbu);
165
166 proc_info->hibernate = 0;
167 hibernate_machine_init();
168
169 // hibernate_machine_init() could take minutes and we don't want timeouts
170 // to fire as soon as scheduling starts. Reset timebase so it appears
171 // no time has elapsed, as it would for regular sleep.
172 mttb(0);
173 mttbu(tbu);
174 mttb(tbl);
175 }
176
177 if (proc_info != mproc_info) {
178 while (!((mproc_info->cpu_flags) & SignalReady))
179 continue;
180 cpu_sync_timebase();
181 }
182
183 ml_init_interrupt();
184 if (proc_info != mproc_info)
185 simple_lock(&SignalReadyLock);
186 proc_info->cpu_flags |= BootDone|SignalReady;
187 if (proc_info != mproc_info) {
188 if (proc_info->ppXFlags & SignalReadyWait) {
189 hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
190 thread_wakeup(&proc_info->cpu_flags);
191 }
192 simple_unlock(&SignalReadyLock);
193 pmsPark(); /* Timers should be cool now, park the power management stepper */
194 }
195 }
196
197
198 /*
199 * Routine: cpu_per_proc_alloc
200  * Function:	Allocate and zero a per_proc_info structure together with its interrupt and debugger stacks.
201 */
202 struct per_proc_info *
203 cpu_per_proc_alloc(
204 void)
205 {
206 struct per_proc_info *proc_info=0;
207 void *interrupt_stack=0;
208 void *debugger_stack=0;
209
210 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
211 return (struct per_proc_info *)NULL;
212 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
213 kfree(proc_info, sizeof(struct per_proc_info));
214 return (struct per_proc_info *)NULL;
215 }
216
217 if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
218 kfree(proc_info, sizeof(struct per_proc_info));
219 kfree(interrupt_stack, INTSTACK_SIZE);
220 return (struct per_proc_info *)NULL;
221 }
222
223 bzero((void *)proc_info, sizeof(struct per_proc_info));
224
225 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT; /* Set physical address of the second page */
226 proc_info->next_savearea = (uint64_t)save_get_init();
227 proc_info->pf = BootProcInfo.pf;
228 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
229 proc_info->intstack_top_ss = proc_info->istackptr;
230 proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
231 proc_info->debstack_top_ss = proc_info->debstackptr;
232
233 return proc_info;
234
235 }
236
237
238 /*
239 * Routine: cpu_per_proc_free
240  * Function:	Release a per_proc_info structure and its stacks; the master processor's block is never freed.
241 */
242 void
243 cpu_per_proc_free(
244 struct per_proc_info *proc_info
245 )
246 {
247 if (proc_info->cpu_number == master_cpu)
248 return;
249 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
250 kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
251 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
252 }
253
254
255 /*
256 * Routine: cpu_per_proc_register
257  * Function:	Assign the next available cpu number to a per_proc_info block and enter it in PerProcTable.
258 */
259 kern_return_t
260 cpu_per_proc_register(
261 struct per_proc_info *proc_info
262 )
263 {
264 int cpu;
265
266 mutex_lock(&ppt_lock);
267 if (real_ncpus >= max_ncpus) {
268 mutex_unlock(&ppt_lock);
269 return KERN_FAILURE;
270 }
271 cpu = real_ncpus;
272 proc_info->cpu_number = cpu;
273 PerProcTable[cpu].ppe_vaddr = proc_info;
274 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
275 eieio();
276 real_ncpus++;
277 mutex_unlock(&ppt_lock);
278 return KERN_SUCCESS;
279 }
280
281
282 /*
283 * Routine: cpu_start
284  * Function:	Start a processor: reset its per_proc state, install the reset handler if it starts from the reset vector, and kick it off through PE_cpu_start().
285 */
286 kern_return_t
287 cpu_start(
288 int cpu)
289 {
290 struct per_proc_info *proc_info;
291 kern_return_t ret;
292 mapping_t *mp;
293
294 proc_info = PerProcTable[cpu].ppe_vaddr;
295
296 if (cpu == cpu_number()) {
297 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
298 ml_init_interrupt();
299 proc_info->cpu_flags |= BootDone|SignalReady;
300
301 return KERN_SUCCESS;
302 } else {
303 proc_info->cpu_flags &= BootDone;
304 proc_info->interrupts_enabled = 0;
305 proc_info->pending_ast = AST_NONE;
306 proc_info->istackptr = proc_info->intstack_top_ss;
307 proc_info->rtcPop = EndOfAllTime;
308 proc_info->FPU_owner = 0;
309 proc_info->VMX_owner = 0;
310 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
311 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
312 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
313 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
314 mp = (mapping_t *)(&proc_info->ppUMWmp);
315 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
316 mp->mpSpace = invalSpace;
317
318 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
319
320 simple_lock(&rht_lock);
321 while (rht_state & RHT_BUSY) {
322 rht_state |= RHT_WAIT;
323 thread_sleep_usimple_lock((event_t)&rht_state,
324 &rht_lock, THREAD_UNINT);
325 }
326 rht_state |= RHT_BUSY;
327 simple_unlock(&rht_lock);
328
329 ml_phys_write((vm_offset_t)&ResetHandler + 0,
330 RESET_HANDLER_START);
331 ml_phys_write((vm_offset_t)&ResetHandler + 4,
332 (vm_offset_t)_start_cpu);
333 ml_phys_write((vm_offset_t)&ResetHandler + 8,
334 (vm_offset_t)&PerProcTable[cpu]);
335 }
336 /*
337 * Note: we pass the current time to the other processor here. He will load it
338 * as early as possible so that there is a chance that it is close to accurate.
339 * After the machine is up a while, we will officially resync the clocks so
340 * that all processors are the same. This is just to get close.
341 */
342
343 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
344
345 __asm__ volatile("sync"); /* Commit to storage */
346         __asm__ volatile("isync");                              /* Force context synchronization */
347 ret = PE_cpu_start(proc_info->cpu_id,
348 proc_info->start_paddr, (vm_offset_t)proc_info);
349
350 if (ret != KERN_SUCCESS) {
351 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
352 simple_lock(&rht_lock);
353 if (rht_state & RHT_WAIT)
354 thread_wakeup(&rht_state);
355 rht_state &= ~(RHT_BUSY|RHT_WAIT);
356 simple_unlock(&rht_lock);
357                         }
358 } else {
359 simple_lock(&SignalReadyLock);
360 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
361 hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
362 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
363 &SignalReadyLock, THREAD_UNINT);
364 }
365 simple_unlock(&SignalReadyLock);
366
367 }
368 return(ret);
369 }
370 }
371
372 /*
373 * Routine: cpu_exit_wait
374  * Function:	Spin until the specified processor reports SleepState.
375 */
376 void
377 cpu_exit_wait(
378 int cpu)
379 {
380 struct per_proc_info *tpproc;
381
382 if ( cpu != master_cpu) {
383 tpproc = PerProcTable[cpu].ppe_vaddr;
384 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
385 }
386 }
387
388
389 /*
390 * Routine: cpu_doshutdown
391  * Function:	Take the current processor offline.
392 */
393 void
394 cpu_doshutdown(
395 void)
396 {
397 enable_preemption();
398 processor_offline(current_processor());
399 }
400
401
402 /*
403 * Routine: cpu_sleep
404  * Function:	Save the floating point, vector and timebase state, wait for the other processors to sleep (master only), then quiesce this processor.
405 */
406 void
407 cpu_sleep(
408 void)
409 {
410 struct per_proc_info *proc_info;
411 unsigned int i;
412 unsigned int wait_ncpus_sleep, ncpus_sleep;
413 facility_context *fowner;
414
415 proc_info = getPerProc();
416
417 proc_info->running = FALSE;
418
419 fowner = proc_info->FPU_owner; /* Cache this */
420 if(fowner) fpu_save(fowner); /* If anyone owns FPU, save it */
421 proc_info->FPU_owner = 0; /* Set no fpu owner now */
422
423 fowner = proc_info->VMX_owner; /* Cache this */
424 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
425 proc_info->VMX_owner = 0; /* Set no vector owner now */
426
427 if (proc_info->cpu_number == master_cpu) {
428 proc_info->cpu_flags &= BootDone;
429 proc_info->interrupts_enabled = 0;
430 proc_info->pending_ast = AST_NONE;
431
432 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
433 ml_phys_write((vm_offset_t)&ResetHandler + 0,
434 RESET_HANDLER_START);
435 ml_phys_write((vm_offset_t)&ResetHandler + 4,
436 (vm_offset_t)_start_cpu);
437 ml_phys_write((vm_offset_t)&ResetHandler + 8,
438 (vm_offset_t)&PerProcTable[master_cpu]);
439
440 __asm__ volatile("sync");
441 __asm__ volatile("isync");
442 }
443
444 wait_ncpus_sleep = real_ncpus-1;
445 ncpus_sleep = 0;
446 while (wait_ncpus_sleep != ncpus_sleep) {
447 ncpus_sleep = 0;
448 for(i=1; i < real_ncpus ; i++) {
449 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
450 ncpus_sleep++;
451 }
452 }
453
454 }
455
456 /*
457 * Save the TBR before stopping.
458 */
459 do {
460 proc_info->save_tbu = mftbu();
461 proc_info->save_tbl = mftb();
462 } while (mftbu() != proc_info->save_tbu);
463
464 PE_cpu_machine_quiesce(proc_info->cpu_id);
465 }
466
467
468 /*
469 * Routine: cpu_signal
470 * Function:
471  *	Here is where we send a message to another processor. The signals currently sent
472  *	include SIGPast (preempt/AST check), SIGPdebug (enter the debugger), SIGPwake
473  *	(wake a sleeping cpu), SIGPcall (call a function), and SIGPcpureq (cpu-specific
474  *	requests such as timebase synchronization).
475  *
476  *	We set up the SIGP function to indicate that this is a simple message and set the
477  *	order code (MPsigpParm0) to the signal being sent. After finding the per_proc
478  *	block for the target, we lock the message block. Then we set the parameter(s).
479  *	Next we change the lock (also called "busy") to "passing" and finally signal the
480  *	other processor. Note that we only wait about half a millisecond to get the message
481  *	lock. If we time out, we return failure to our caller. It is their responsibility to recover.
482 */
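/*
 * For a concrete use of this interface see cpu_sync_timebase() below, which
 * sends SIGPcpureq/CPRQtimebase to the master cpu with a pointer to a
 * struct SIGtimebase as the second parameter.
 */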
483 kern_return_t
484 cpu_signal(
485 int target,
486 int signal,
487 unsigned int p1,
488 unsigned int p2)
489 {
490
491 unsigned int holdStat;
492 struct per_proc_info *tpproc, *mpproc;
493 int busybitset=0;
494
495 #if DEBUG
496 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
497 #endif
498
499 mpproc = getPerProc(); /* Point to our block */
500 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
501 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
502
503 if(!tpproc->running) return KERN_FAILURE;
504
505 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
506
507 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
508
509 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
510 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
511 return KERN_SUCCESS;
512 }
513
514 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
515 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
516 return KERN_SUCCESS; /* Don't bother to send this one... */
517 }
518
519 if (tpproc->MPsigpParm0 == SIGPwake) {
520 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
521 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
522 busybitset = 1;
523 mpproc->hwCtr.numSIGPmwake++;
524 }
525 }
526 }
527
528 if((busybitset == 0) &&
529 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
530 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
531 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
532 return KERN_FAILURE; /* Timed out, take your ball and go home... */
533 }
534
535 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
536 tpproc->MPsigpParm0 = signal; /* Set message order */
537 tpproc->MPsigpParm1 = p1; /* Set additional parm */
538 tpproc->MPsigpParm2 = p2; /* Set additional parm */
539
540 __asm__ volatile("sync"); /* Make sure it's all there */
541
542 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
543         __asm__ volatile("eieio");                              /* I'm a paranoid freak */
544
545 if (busybitset == 0)
546 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
547
548 return KERN_SUCCESS; /* All is goodness and rainbows... */
549 }
550
551
552 /*
553 * Routine: cpu_signal_handler
554 * Function:
555 * Here is where we implement the receiver of the signaling protocol.
556 * We wait for the signal status area to be passed to us. Then we snarf
557 * up the status, the sender, and the 3 potential parms. Next we release
558 * the lock and signal the other guy.
559 */
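/*
 * The status word decoded below is built by cpu_signal() as
 * MPsigpBusy | MPsigpPass | (function << 8) | sender's cpu number,
 * so (holdStat & MPsigpFunc) >> 8 recovers the function code.
 */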
560 void
561 cpu_signal_handler(
562 void)
563 {
564
565 unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
566 unsigned int *parmAddr;
567 struct per_proc_info *proc_info;
568 int cpu;
569 broadcastFunc xfunc;
570 cpu = cpu_number(); /* Get the CPU number */
571
572 proc_info = getPerProc();
573
574 /*
575 * Since we've been signaled, wait about 31 ms for the signal lock to pass
576 */
577 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
578 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
579 panic("cpu_signal_handler: Lock pass timed out\n");
580 }
581
582 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
583 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
584 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
585 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
586
587 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
588
589 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
590
591 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
592
593 case MPsigpIdle: /* Was function cancelled? */
594 return; /* Yup... */
595
596 case MPsigpSigp: /* Signal Processor message? */
597
598 switch (holdParm0) { /* Decode SIGP message order */
599
600 case SIGPast: /* Should we do an AST? */
601 proc_info->hwCtr.numSIGPast++; /* Count this one */
602 #if 0
603 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
604 #endif
605 ast_check((processor_t)proc_info->processor);
606 return; /* All done... */
607
608 case SIGPcpureq: /* CPU specific function? */
609
610 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
611 switch (holdParm1) { /* Select specific function */
612
613 case CPRQtimebase:
614
615 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
616 return;
617
618 case CPRQsegload:
619 return;
620
621 case CPRQchud:
622 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
623 if(perfCpuSigHook) {
624 struct savearea *ssp = current_thread()->machine.pcb;
625 if(ssp) {
626 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
627 }
628 }
629 parmAddr[1] = 0;
630 parmAddr[0] = 0; /* Show we're done */
631 return;
632
633 case CPRQscom:
634 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
635 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
636 }
637 else { /* No, reading... */
638 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
639 }
640 return;
641
642 case CPRQsps:
643 {
644 ml_set_processor_speed_slave(holdParm2);
645 return;
646 }
647 default:
648 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
649 return;
650 }
651
652
653 case SIGPdebug: /* Enter the debugger? */
654
655 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
656 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
657 hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
658 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
659 return; /* All done now... */
660
661 case SIGPwake: /* Wake up CPU */
662 proc_info->hwCtr.numSIGPwake++; /* Count this one */
663 return; /* No need to do anything, the interrupt does it all... */
664
665 case SIGPcall: /* Call function on CPU */
666 proc_info->hwCtr.numSIGPcall++; /* Count this one */
667                         xfunc = (broadcastFunc)holdParm1;               /* Convert the parameter to the broadcast function pointer */
668 xfunc(holdParm2); /* Call the passed function */
669 return; /* Done... */
670
671 default:
672 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
673 return;
674
675 }
676
677 default:
678 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
679 return;
680
681 }
682 panic("cpu_signal_handler: we should never get here\n");
683 }
684
685
686 /*
687 * Routine: cpu_sync_timebase
688  * Function:	Slave side of timebase synchronization: request the master's timebase with SIGPcpureq/CPRQtimebase, load it locally, and restart the decrementer.
689 */
690 void
691 cpu_sync_timebase(
692 void)
693 {
694 natural_t tbu, tbl;
695 boolean_t intr;
696 struct SIGtimebase syncClkSpot;
697
698 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
699
700 syncClkSpot.avail = FALSE;
701 syncClkSpot.ready = FALSE;
702 syncClkSpot.done = FALSE;
703
704 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
705 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
706 continue;
707
708 while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
709 continue;
710
711 isync();
712
713 /*
714          * Split abstime into its upper and lower halves up front so the compiler
715          * doesn't generate extra code inside the timebase-setting sequence below.
716 */
717 tbu = syncClkSpot.abstime >> 32;
718 tbl = (uint32_t)syncClkSpot.abstime;
719
720 mttb(0);
721 mttbu(tbu);
722 mttb(tbl);
723
724 syncClkSpot.ready = TRUE;
725
726 while (*(volatile int *)&(syncClkSpot.done) == FALSE)
727 continue;
728
729 setTimerReq(); /* Start the timer */
730
731 (void)ml_set_interrupts_enabled(intr);
732 }
733
734
735 /*
736 * Routine: cpu_timebase_signal_handler
737  * Function:	Master side of timebase synchronization: publish the current timebase through the shared SIGtimebase area and handshake with the requester.
738 */
739 void
740 cpu_timebase_signal_handler(
741 struct per_proc_info *proc_info,
742 struct SIGtimebase *timebaseAddr)
743 {
744 unsigned int tbu, tbu2, tbl;
745
746 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
747 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
748
749 timebaseAddr->abstime = 0; /* Touch to force into cache */
750 sync();
751
752 do {
753 asm volatile(" mftbu %0" : "=r" (tbu));
754 asm volatile(" mftb %0" : "=r" (tbl));
755 asm volatile(" mftbu %0" : "=r" (tbu2));
756 } while (tbu != tbu2);
757
758 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
759 sync(); /* Force order */
760
761 timebaseAddr->avail = TRUE;
762
763 while (*(volatile int *)&(timebaseAddr->ready) == FALSE);
764
765 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
766 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
767
768 timebaseAddr->done = TRUE;
769 }
770
771
772 /*
773 * Routine: cpu_control
774  * Function:	Processor control interface; currently used to clear and load the performance monitor registers.
775 */
776 kern_return_t
777 cpu_control(
778 int slot_num,
779 processor_info_t info,
780 unsigned int count)
781 {
782 struct per_proc_info *proc_info;
783 cpu_type_t tcpu_type;
784 cpu_subtype_t tcpu_subtype;
785 processor_pm_regs_t perf_regs;
786 processor_control_cmd_t cmd;
787 boolean_t oldlevel;
788 #define MMCR0_SUPPORT_MASK 0xf83f1fff
789 #define MMCR1_SUPPORT_MASK 0xffc00000
790 #define MMCR2_SUPPORT_MASK 0x80000000
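	/* Only the bits set in these masks may be written to the MMCRs through this
	   interface; reserved bits are cleared before each mtmmcr call below. */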
791
792 proc_info = PerProcTable[slot_num].ppe_vaddr;
793 tcpu_type = proc_info->cpu_type;
794 tcpu_subtype = proc_info->cpu_subtype;
795 cmd = (processor_control_cmd_t) info;
796
797 if (count < PROCESSOR_CONTROL_CMD_COUNT)
798 return(KERN_FAILURE);
799
800 if ( tcpu_type != cmd->cmd_cpu_type ||
801 tcpu_subtype != cmd->cmd_cpu_subtype)
802 return(KERN_FAILURE);
803
804 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
805 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
806 }
807
808 switch (cmd->cmd_op)
809 {
810 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
811 switch (tcpu_subtype)
812 {
813 case CPU_SUBTYPE_POWERPC_750:
814 case CPU_SUBTYPE_POWERPC_7400:
815 case CPU_SUBTYPE_POWERPC_7450:
816 {
817 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
818 mtpmc1(0x0);
819 mtpmc2(0x0);
820 mtpmc3(0x0);
821 mtpmc4(0x0);
822 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
823 return(KERN_SUCCESS);
824 }
825 default:
826 return(KERN_FAILURE);
827 } /* tcpu_subtype */
828                 case PROCESSOR_PM_SET_REGS:   /* Set Performance Monitor Registers */
829 switch (tcpu_subtype)
830 {
831 case CPU_SUBTYPE_POWERPC_750:
832 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
833 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
834 return(KERN_FAILURE);
835 else
836 {
837 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
838 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
839 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
840 mtpmc1(PERFMON_PMC1(perf_regs));
841 mtpmc2(PERFMON_PMC2(perf_regs));
842 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
843 mtpmc3(PERFMON_PMC3(perf_regs));
844 mtpmc4(PERFMON_PMC4(perf_regs));
845 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
846 return(KERN_SUCCESS);
847 }
848 case CPU_SUBTYPE_POWERPC_7400:
849 case CPU_SUBTYPE_POWERPC_7450:
850 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
851 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
852 return(KERN_FAILURE);
853 else
854 {
855 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
856 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
857 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
858 mtpmc1(PERFMON_PMC1(perf_regs));
859 mtpmc2(PERFMON_PMC2(perf_regs));
860 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
861 mtpmc3(PERFMON_PMC3(perf_regs));
862 mtpmc4(PERFMON_PMC4(perf_regs));
863 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
864 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
865 return(KERN_SUCCESS);
866 }
867 default:
868 return(KERN_FAILURE);
869 } /* switch tcpu_subtype */
870 case PROCESSOR_PM_SET_MMCR:
871 switch (tcpu_subtype)
872 {
873 case CPU_SUBTYPE_POWERPC_750:
874 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
875 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
876 return(KERN_FAILURE);
877 else
878 {
879 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
880 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
881 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
882 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
883 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
884 return(KERN_SUCCESS);
885 }
886 case CPU_SUBTYPE_POWERPC_7400:
887 case CPU_SUBTYPE_POWERPC_7450:
888 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
889 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
890 return(KERN_FAILURE);
891 else
892 {
893 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
894 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
895 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
896 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
897 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
898 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
899 return(KERN_SUCCESS);
900 }
901 default:
902 return(KERN_FAILURE);
903 } /* tcpu_subtype */
904 default:
905 return(KERN_FAILURE);
906 } /* switch cmd_op */
907 }
908
909
910 /*
911 * Routine: cpu_info_count
912  * Function:	Return the size of the info array for a processor_info flavor.
913 */
914 kern_return_t
915 cpu_info_count(
916 processor_flavor_t flavor,
917 unsigned int *count)
918 {
919 cpu_subtype_t tcpu_subtype;
920
921 /*
922 * For now, we just assume that all CPUs are of the same type
923 */
924 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
925 switch (flavor) {
926 case PROCESSOR_PM_REGS_INFO:
927 switch (tcpu_subtype) {
928 case CPU_SUBTYPE_POWERPC_750:
929
930 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
931 return(KERN_SUCCESS);
932
933 case CPU_SUBTYPE_POWERPC_7400:
934 case CPU_SUBTYPE_POWERPC_7450:
935
936 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
937 return(KERN_SUCCESS);
938
939 default:
940 *count = 0;
941 return(KERN_INVALID_ARGUMENT);
942 } /* switch tcpu_subtype */
943
944 case PROCESSOR_TEMPERATURE:
945 *count = PROCESSOR_TEMPERATURE_COUNT;
946 return (KERN_SUCCESS);
947
948 default:
949 *count = 0;
950 return(KERN_INVALID_ARGUMENT);
951
952 }
953 }
954
955
956 /*
957 * Routine: cpu_info
958  * Function:	Return processor information for a slot; currently only the performance monitor registers are supported.
959 */
960 kern_return_t
961 cpu_info(
962 processor_flavor_t flavor,
963 int slot_num,
964 processor_info_t info,
965 unsigned int *count)
966 {
967 cpu_subtype_t tcpu_subtype;
968 processor_pm_regs_t perf_regs;
969 boolean_t oldlevel;
970
971 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
972
973 switch (flavor) {
974 case PROCESSOR_PM_REGS_INFO:
975
976 perf_regs = (processor_pm_regs_t) info;
977
978 switch (tcpu_subtype) {
979 case CPU_SUBTYPE_POWERPC_750:
980
981 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
982 return(KERN_FAILURE);
983
984 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
985 PERFMON_MMCR0(perf_regs) = mfmmcr0();
986 PERFMON_PMC1(perf_regs) = mfpmc1();
987 PERFMON_PMC2(perf_regs) = mfpmc2();
988 PERFMON_MMCR1(perf_regs) = mfmmcr1();
989 PERFMON_PMC3(perf_regs) = mfpmc3();
990 PERFMON_PMC4(perf_regs) = mfpmc4();
991 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
992
993 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
994 return(KERN_SUCCESS);
995
996 case CPU_SUBTYPE_POWERPC_7400:
997 case CPU_SUBTYPE_POWERPC_7450:
998
999 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1000 return(KERN_FAILURE);
1001
1002 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1003 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1004 PERFMON_PMC1(perf_regs) = mfpmc1();
1005 PERFMON_PMC2(perf_regs) = mfpmc2();
1006 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1007 PERFMON_PMC3(perf_regs) = mfpmc3();
1008 PERFMON_PMC4(perf_regs) = mfpmc4();
1009 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1010 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1011
1012 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1013 return(KERN_SUCCESS);
1014
1015 default:
1016 return(KERN_FAILURE);
1017 } /* switch tcpu_subtype */
1018
1019 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1020
1021                         *info = -1;                             /* Temperature reporting is not supported */
1022 return(KERN_FAILURE);
1023
1024 default:
1025 return(KERN_INVALID_ARGUMENT);
1026
1027 } /* flavor */
1028 }
1029
1030
1031 /*
1032 * Routine: cpu_to_processor
1033 * Function:
1034 */
1035 processor_t
1036 cpu_to_processor(
1037 int cpu)
1038 {
1039 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1040 }
1041
1042
1043 /*
1044 * Routine: slot_type
1045 * Function:
1046 */
1047 cpu_type_t
1048 slot_type(
1049 int slot_num)
1050 {
1051 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1052 }
1053
1054
1055 /*
1056 * Routine: slot_subtype
1057 * Function:
1058 */
1059 cpu_subtype_t
1060 slot_subtype(
1061 int slot_num)
1062 {
1063 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1064 }
1065
1066
1067 /*
1068 * Routine: slot_threadtype
1069 * Function:
1070 */
1071 cpu_threadtype_t
1072 slot_threadtype(
1073 int slot_num)
1074 {
1075 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1076 }
1077
1078
1079 /*
1080 * Routine: cpu_type
1081 * Function:
1082 */
1083 cpu_type_t
1084 cpu_type(void)
1085 {
1086 return (getPerProc()->cpu_type);
1087 }
1088
1089
1090 /*
1091 * Routine: cpu_subtype
1092 * Function:
1093 */
1094 cpu_subtype_t
1095 cpu_subtype(void)
1096 {
1097 return (getPerProc()->cpu_subtype);
1098 }
1099
1100
1101 /*
1102 * Routine: cpu_threadtype
1103 * Function:
1104 */
1105 cpu_threadtype_t
1106 cpu_threadtype(void)
1107 {
1108 return (getPerProc()->cpu_threadtype);
1109 }
1110
1111 /*
1112 * Call a function on all running processors
1113 *
1114  *	Note that the synch parameter is used to wait until all functions are complete.
1115 * It is not passed to the other processor and must be known by the called function.
1116 * The called function must do a thread_wakeup on the synch if it decrements the
1117 * synch count to 0.
1118 */
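/*
 * Illustrative sketch only (not part of the kernel): a hypothetical callee
 * that honors the contract described above.  The names example_synch and
 * example_xcall are invented for this sketch, and it assumes hw_atomic_sub()
 * returns the new value of the counter.
 *
 *	static uint32_t example_synch;
 *
 *	static void
 *	example_xcall(uint32_t parm)
 *	{
 *		// ... do the per-processor work implied by parm ...
 *		if (hw_atomic_sub(&example_synch, 1) == 0)
 *			thread_wakeup((event_t)&example_synch);	// last one wakes the broadcaster
 *	}
 */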
1119
1120
1121 int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
1122
1123 int sigproc, cpu, ocpu;
1124
1125 cpu = cpu_number(); /* Who are we? */
1126 sigproc = 0; /* Clear called processor count */
1127
1128 if(real_ncpus > 1) { /* Are we just a uni? */
1129
1130 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1131
1132 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1133 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1134 hw_atomic_add(synch, 1); /* Tentatively bump synchronizer */
1135 sigproc++; /* Tentatively bump signal sent count */
1136 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1137 hw_atomic_sub(synch, 1); /* Other guy isn't really there, ignore it */
1138 sigproc--; /* and don't count it */
1139 }
1140 }
1141
1142 if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
1143 else thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1144 }
1145
1146 return sigproc; /* Return the number of guys actually signalled */
1147
1148 }