osfmk/ppc/cpu.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/mach_types.h>
30 #include <mach/machine.h>
31 #include <mach/processor_info.h>
32
33 #include <kern/kalloc.h>
34 #include <kern/kern_types.h>
35 #include <kern/machine.h>
36 #include <kern/misc_protos.h>
37 #include <kern/thread.h>
38 #include <kern/sched_prim.h>
39 #include <kern/timer_queue.h>
40 #include <kern/processor.h>
41 #include <kern/pms.h>
42
43 #include <vm/pmap.h>
44 #include <IOKit/IOHibernatePrivate.h>
45
46 #include <ppc/proc_reg.h>
47 #include <ppc/misc_protos.h>
48 #include <ppc/fpu_protos.h>
49 #include <ppc/machine_routines.h>
50 #include <ppc/cpu_internal.h>
51 #include <ppc/exception.h>
52 #include <ppc/asm.h>
53 #include <ppc/hw_perfmon.h>
54 #include <pexpert/pexpert.h>
55 #include <kern/cpu_data.h>
56 #include <ppc/mappings.h>
57 #include <ppc/Diagnostics.h>
58 #include <ppc/trap.h>
59 #include <ppc/machine_cpu.h>
60 #include <ppc/rtclock.h>
61
62 #include <libkern/OSAtomic.h>
63
64 unsigned int real_ncpus = 1;
65 unsigned int max_ncpus = MAX_CPUS;
66
67 decl_simple_lock_data(static,rht_lock);
68
69 static unsigned int rht_state = 0;
70 #define RHT_WAIT 0x01
71 #define RHT_BUSY 0x02
72
73 decl_simple_lock_data(static,SignalReadyLock);
74
75 struct SIGtimebase {
76 volatile boolean_t avail;
77 volatile boolean_t ready;
78 volatile boolean_t done;
79 uint64_t abstime;
80 };
81
82 perfCallback perfCpuSigHook; /* Pointer to CHUD cpu signal hook routine */
83
84 extern uint32_t debugger_sync;
85
86 /*
87 * Forward definitions
88 */
89
90 void cpu_sync_timebase(
91 void);
92
93 void cpu_timebase_signal_handler(
94 struct per_proc_info *proc_info,
95 struct SIGtimebase *timebaseAddr);
96
97 /*
98 * Routine: cpu_bootstrap
99 * Function:
100 */
101 void
102 cpu_bootstrap(
103 void)
104 {
105 simple_lock_init(&rht_lock,0);
106 simple_lock_init(&SignalReadyLock,0);
107 }
108
109
110 /*
111 * Routine: cpu_init
112 * Function:
113 */
114 void
115 cpu_init(
116 void)
117 {
118 struct per_proc_info *proc_info;
119
120 proc_info = getPerProc();
121
122 /*
123 * Restore the TBR.
124 */
125 if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
126 mttb(0);
127 mttbu(proc_info->save_tbu);
128 mttb(proc_info->save_tbl);
129 }
130
131 proc_info->rtcPop = EndOfAllTime; /* forget any existing decrementer setting */
132 etimer_resync_deadlines(); /* Now that the time base is sort of correct, request the next timer pop */
133
134 proc_info->cpu_type = CPU_TYPE_POWERPC;
135 proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
136 proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
137 proc_info->running = TRUE;
138
139 }
140
141 /*
142 * Routine: cpu_machine_init
143 * Function:
144 */
145 void
146 cpu_machine_init(
147 void)
148 {
149 struct per_proc_info *proc_info;
150 volatile struct per_proc_info *mproc_info;
151
152
153 proc_info = getPerProc();
154 mproc_info = PerProcTable[master_cpu].ppe_vaddr;
155
156 if (proc_info != mproc_info) {
157 simple_lock(&rht_lock);
158 if (rht_state & RHT_WAIT)
159 thread_wakeup(&rht_state);
160 rht_state &= ~(RHT_BUSY|RHT_WAIT);
161 simple_unlock(&rht_lock);
162 }
163
164 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
165
166 if (proc_info->hibernate) {
167 uint32_t tbu, tbl;
168
169 do {
170 tbu = mftbu();
171 tbl = mftb();
172 } while (mftbu() != tbu);
173
174 proc_info->hibernate = 0;
175 hibernate_machine_init();
176
177 // hibernate_machine_init() could take minutes and we don't want timeouts
178 // to fire as soon as scheduling starts. Reset timebase so it appears
179 // no time has elapsed, as it would for regular sleep.
180 mttb(0);
181 mttbu(tbu);
182 mttb(tbl);
183 }
184
185 if (proc_info != mproc_info) {
186 while (!((mproc_info->cpu_flags) & SignalReady))
187 continue;
188 cpu_sync_timebase();
189 }
190
191 ml_init_interrupt();
192 if (proc_info != mproc_info)
193 simple_lock(&SignalReadyLock);
194 proc_info->cpu_flags |= BootDone|SignalReady;
195 if (proc_info != mproc_info) {
196 if (proc_info->ppXFlags & SignalReadyWait) {
197 hw_atomic_and_noret(&proc_info->ppXFlags, ~SignalReadyWait);
198 thread_wakeup(&proc_info->cpu_flags);
199 }
200 simple_unlock(&SignalReadyLock);
201 pmsPark(); /* Timers should be cool now, park the power management stepper */
202 }
203 }
204
205
206 /*
207 * Routine: cpu_per_proc_alloc
208 * Function:
209 */
210 struct per_proc_info *
211 cpu_per_proc_alloc(
212 void)
213 {
214 struct per_proc_info *proc_info = NULL;
215 void *interrupt_stack = NULL;
216 void *debugger_stack = NULL;
217
218 if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
219 return (struct per_proc_info *)NULL;
220 if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
221 kfree(proc_info, sizeof(struct per_proc_info));
222 return (struct per_proc_info *)NULL;
223 }
224
225 if ((debugger_stack = kalloc(kernel_stack_size)) == 0) {
226 kfree(proc_info, sizeof(struct per_proc_info));
227 kfree(interrupt_stack, INTSTACK_SIZE);
228 return (struct per_proc_info *)NULL;
229 }
230
231 bzero((void *)proc_info, sizeof(struct per_proc_info));
232
233 /* Set physical address of the second page */
234 proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
235 ((addr64_t)(unsigned int)proc_info) + 0x1000)
236 << PAGE_SHIFT;
237 proc_info->next_savearea = (uint64_t)save_get_init();
238 proc_info->pf = BootProcInfo.pf;
239 proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
240 proc_info->intstack_top_ss = proc_info->istackptr;
241 proc_info->debstackptr = (vm_offset_t)debugger_stack + kernel_stack_size - FM_SIZE;
242 proc_info->debstack_top_ss = proc_info->debstackptr;
243
244 queue_init(&proc_info->rtclock_timer.queue);
245 proc_info->rtclock_timer.deadline = EndOfAllTime;
246
247 return proc_info;
248
249 }
250
251
252 /*
253 * Routine: cpu_per_proc_free
254 * Function:
255 */
256 void
257 cpu_per_proc_free(
258 struct per_proc_info *proc_info
259 )
260 {
261 if (proc_info->cpu_number == master_cpu)
262 return;
263 kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
264 kfree((void *)(proc_info->debstack_top_ss - kernel_stack_size + FM_SIZE), kernel_stack_size);
265 kfree((void *)proc_info, sizeof(struct per_proc_info)); /* Release the per_proc */
266 }
267
268
269 /*
270 * Routine: cpu_per_proc_register
271 * Function:
272 */
273 kern_return_t
274 cpu_per_proc_register(
275 struct per_proc_info *proc_info
276 )
277 {
278 int cpu;
279
280 cpu = OSIncrementAtomic(&real_ncpus);
281
282 if (real_ncpus > max_ncpus) {
283 return KERN_FAILURE;
284 }
285
286 proc_info->cpu_number = cpu;
287 PerProcTable[cpu].ppe_vaddr = proc_info;
288 PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;
289 eieio();
290 return KERN_SUCCESS;
291 }
292
293
294 /*
295 * Routine: cpu_start
296 * Function:
297 */
298 kern_return_t
299 cpu_start(
300 int cpu)
301 {
302 struct per_proc_info *proc_info;
303 kern_return_t ret;
304 mapping_t *mp;
305
306 proc_info = PerProcTable[cpu].ppe_vaddr;
307
308 if (cpu == cpu_number()) {
309 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
310 ml_init_interrupt();
311 proc_info->cpu_flags |= BootDone|SignalReady;
312
313 return KERN_SUCCESS;
314 } else {
315 proc_info->cpu_flags &= BootDone;
316 proc_info->interrupts_enabled = 0;
317 proc_info->pending_ast = AST_NONE;
318 proc_info->istackptr = proc_info->intstack_top_ss;
319 proc_info->rtcPop = EndOfAllTime;
320 proc_info->FPU_owner = NULL;
321 proc_info->VMX_owner = NULL;
322 proc_info->pms.pmsStamp = 0; /* Dummy transition time */
323 proc_info->pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
324 proc_info->pms.pmsState = pmsParked; /* Park the stepper */
325 proc_info->pms.pmsCSetCmd = pmsCInit; /* Set dummy initial hardware state */
326 mp = (mapping_t *)(&proc_info->ppUMWmp);
327 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
328 mp->mpSpace = invalSpace;
329
330 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
331
332 simple_lock(&rht_lock);
333 while (rht_state & RHT_BUSY) {
334 rht_state |= RHT_WAIT;
335 thread_sleep_usimple_lock((event_t)&rht_state,
336 &rht_lock, THREAD_UNINT);
337 }
338 rht_state |= RHT_BUSY;
339 simple_unlock(&rht_lock);
340
341 ml_phys_write((vm_offset_t)&ResetHandler + 0,
342 RESET_HANDLER_START);
343 ml_phys_write((vm_offset_t)&ResetHandler + 4,
344 (vm_offset_t)_start_cpu);
345 ml_phys_write((vm_offset_t)&ResetHandler + 8,
346 (vm_offset_t)&PerProcTable[cpu]);
347 }
348 /*
349 * Note: we pass the current time to the other processor here. He will load it
350 * as early as possible so that there is a chance that it is close to accurate.
351 * After the machine is up a while, we will officially resync the clocks so
352 * that all processors are the same. This is just to get close.
353 */
354
355 ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);
356
357 __asm__ volatile("sync"); /* Commit to storage */
358 __asm__ volatile("isync"); /* Wait a second */
359 ret = PE_cpu_start(proc_info->cpu_id,
360 proc_info->start_paddr, (vm_offset_t)proc_info);
361
362 if (ret != KERN_SUCCESS) {
363 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
364 simple_lock(&rht_lock);
365 if (rht_state & RHT_WAIT)
366 thread_wakeup(&rht_state);
367 rht_state &= ~(RHT_BUSY|RHT_WAIT);
368 simple_unlock(&rht_lock);
369                         }
370 } else {
371 simple_lock(&SignalReadyLock);
372 if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
373 hw_atomic_or_noret(&proc_info->ppXFlags, SignalReadyWait);
374 thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
375 &SignalReadyLock, THREAD_UNINT);
376 }
377 simple_unlock(&SignalReadyLock);
378
379 }
380 return(ret);
381 }
382 }
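/*
 * Hedged bring-up sketch (not part of the original source): the expected
 * calling sequence for a secondary processor is allocate, register, then
 * start.  The helper name and the minimal error handling are assumptions;
 * cleanup on a failed registration is omitted for brevity.
 */
#if 0	/* illustrative only */
static kern_return_t
example_cpu_bringup(void)
{
	struct per_proc_info *pp;

	if ((pp = cpu_per_proc_alloc()) == NULL)	/* per_proc block plus interrupt/debugger stacks */
		return KERN_RESOURCE_SHORTAGE;
	if (cpu_per_proc_register(pp) != KERN_SUCCESS)	/* claim a slot in PerProcTable */
		return KERN_FAILURE;
	return cpu_start(pp->cpu_number);		/* reset-vector start, then wait for SignalReady */
}
#endif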
383
384 /*
385 * Routine: cpu_exit_wait
386 * Function:
387 */
388 void
389 cpu_exit_wait(
390 int cpu)
391 {
392 struct per_proc_info *tpproc;
393
394 if ( cpu != master_cpu) {
395 tpproc = PerProcTable[cpu].ppe_vaddr;
396 while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
397 }
398 }
399
400
401 /*
402 * Routine: cpu_doshutdown
403 * Function:
404 */
405 void
406 cpu_doshutdown(
407 void)
408 {
409 enable_preemption();
410 processor_offline(current_processor());
411 }
412
413
414 /*
415 * Routine: cpu_sleep
416 * Function:
417 */
418 void
419 cpu_sleep(
420 void)
421 {
422 struct per_proc_info *proc_info;
423 unsigned int i;
424 unsigned int wait_ncpus_sleep, ncpus_sleep;
425 facility_context *fowner;
426
427 proc_info = getPerProc();
428
429 proc_info->running = FALSE;
430
431 timer_queue_shutdown(&proc_info->rtclock_timer.queue);
432 proc_info->rtclock_timer.deadline = EndOfAllTime;
433
434 fowner = proc_info->FPU_owner; /* Cache this */
435 if(fowner) /* If anyone owns FPU, save it */
436 fpu_save(fowner);
437 proc_info->FPU_owner = NULL; /* Set no fpu owner now */
438
439 fowner = proc_info->VMX_owner; /* Cache this */
440 if(fowner) vec_save(fowner); /* If anyone owns vectors, save it */
441 proc_info->VMX_owner = NULL; /* Set no vector owner now */
442
443 if (proc_info->cpu_number == master_cpu) {
444 proc_info->cpu_flags &= BootDone;
445 proc_info->interrupts_enabled = 0;
446 proc_info->pending_ast = AST_NONE;
447
448 if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
449 ml_phys_write((vm_offset_t)&ResetHandler + 0,
450 RESET_HANDLER_START);
451 ml_phys_write((vm_offset_t)&ResetHandler + 4,
452 (vm_offset_t)_start_cpu);
453 ml_phys_write((vm_offset_t)&ResetHandler + 8,
454 (vm_offset_t)&PerProcTable[master_cpu]);
455
456 __asm__ volatile("sync");
457 __asm__ volatile("isync");
458 }
459
460 wait_ncpus_sleep = real_ncpus-1;
461 ncpus_sleep = 0;
462 while (wait_ncpus_sleep != ncpus_sleep) {
463 ncpus_sleep = 0;
464 for(i=1; i < real_ncpus ; i++) {
465 if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
466 ncpus_sleep++;
467 }
468 }
469
470 }
471
472 /*
473 * Save the TBR before stopping.
474 */
475 do {
476 proc_info->save_tbu = mftbu();
477 proc_info->save_tbl = mftb();
478 } while (mftbu() != proc_info->save_tbu);
479
480 PE_cpu_machine_quiesce(proc_info->cpu_id);
481 }
482
483
484 /*
485 * Routine: cpu_signal
486 * Function:
487  * Here is where we send a message to another processor. The messages include SIGPast,
488  * SIGPdebug, SIGPwake, SIGPcall, and SIGPcpureq. SIGPast is used to preempt and kick off threads (this is
489 * currently disabled). SIGPdebug is used to enter the debugger.
490 *
491 * We set up the SIGP function to indicate that this is a simple message and set the
492  * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
493 * block for the target, we lock the message block. Then we set the parameter(s).
494 * Next we change the lock (also called "busy") to "passing" and finally signal
495  * the other processor. Note that we only wait about half a millisecond to get the message lock.
496 * If we time out, we return failure to our caller. It is their responsibility to
497 * recover.
498 */
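/*
 * Timeout arithmetic (editorial note): the waits are expressed in timebase
 * ticks.  timebase_frequency_hz >> 11 is the number of ticks in roughly
 * 1/2048 of a second, i.e. about half a millisecond, which is where the
 * message-lock timeout below comes from; the receiver's wait of
 * timebase_frequency_hz >> 5 (see cpu_signal_handler) is about 1/32 of a
 * second, i.e. roughly 31 ms.
 */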
499 kern_return_t
500 cpu_signal(
501 int target,
502 int signal,
503 unsigned int p1,
504 unsigned int p2)
505 {
506
507 unsigned int holdStat;
508 struct per_proc_info *tpproc, *mpproc;
509 int busybitset=0;
510
511 #if DEBUG
512 if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
513 #endif
514
515 mpproc = getPerProc(); /* Point to our block */
516 tpproc = PerProcTable[target].ppe_vaddr; /* Point to the target's block */
517 if(mpproc == tpproc) return KERN_FAILURE; /* Cannot signal ourselves */
518
519 if(!tpproc->running) return KERN_FAILURE;
520
521 if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
522
523 if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) { /* Is there an unreceived message already pending? */
524
525 if(signal == SIGPwake) { /* SIGPwake can merge into all others... */
526 mpproc->hwCtr.numSIGPmwake++; /* Account for merged wakes */
527 return KERN_SUCCESS;
528 }
529
530 if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) { /* We can merge ASTs */
531 mpproc->hwCtr.numSIGPmast++; /* Account for merged ASTs */
532 return KERN_SUCCESS; /* Don't bother to send this one... */
533 }
534
535 if (tpproc->MPsigpParm0 == SIGPwake) {
536 if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
537 (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
538 busybitset = 1;
539 mpproc->hwCtr.numSIGPmwake++;
540 }
541 }
542 }
543
544 if((busybitset == 0) &&
545 (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
546 (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) { /* Try to lock the message block with a .5ms timeout */
547 mpproc->hwCtr.numSIGPtimo++; /* Account for timeouts */
548 return KERN_FAILURE; /* Timed out, take your ball and go home... */
549 }
550
551 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
552 tpproc->MPsigpParm0 = signal; /* Set message order */
553 tpproc->MPsigpParm1 = p1; /* Set additional parm */
554 tpproc->MPsigpParm2 = p2; /* Set additional parm */
555
556 __asm__ volatile("sync"); /* Make sure it's all there */
557
558 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
559         __asm__ volatile("eieio");                              /* I'm a paranoid freak */
560
561 if (busybitset == 0)
562 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
563
564 return KERN_SUCCESS; /* All is goodness and rainbows... */
565 }
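/*
 * Illustrative sketch (not part of the original source): how a caller might
 * use cpu_signal() and honor the "caller must recover on timeout" contract
 * described above.  The bounded retry policy and the helper name
 * example_cpu_kick are assumptions for illustration only.
 */
#if 0	/* illustrative only */
static kern_return_t
example_cpu_kick(int target)
{
	int tries;

	for (tries = 0; tries < 3; tries++) {			/* retry a few times if the message lock times out */
		if (cpu_signal(target, SIGPast, 0, 0) == KERN_SUCCESS)
			return KERN_SUCCESS;			/* message block locked and signal sent */
	}
	return KERN_FAILURE;					/* target still busy; caller decides how to recover */
}
#endif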
566
567
568 /*
569 * Routine: cpu_signal_handler
570 * Function:
571 * Here is where we implement the receiver of the signaling protocol.
572 * We wait for the signal status area to be passed to us. Then we snarf
573 * up the status, the sender, and the 3 potential parms. Next we release
574 * the lock and signal the other guy.
575 */
576 void
577 cpu_signal_handler(void)
578 {
579 unsigned int holdStat, holdParm0, holdParm1, holdParm2;
580 unsigned int *parmAddr;
581 struct per_proc_info *proc_info;
582 int cpu;
583 broadcastFunc xfunc;
584 cpu = cpu_number(); /* Get the CPU number */
585
586 proc_info = getPerProc();
587
588 /*
589 * Since we've been signaled, wait about 31 ms for the signal lock to pass
590 */
591 if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
592 (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
593 panic("cpu_signal_handler: Lock pass timed out\n");
594 }
595
596 holdStat = proc_info->MPsigpStat; /* Snarf stat word */
597 holdParm0 = proc_info->MPsigpParm0; /* Snarf parameter */
598 holdParm1 = proc_info->MPsigpParm1; /* Snarf parameter */
599 holdParm2 = proc_info->MPsigpParm2; /* Snarf parameter */
600
601 __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */
602
603 proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc); /* Release lock */
604
605 switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */
606
607 case MPsigpIdle: /* Was function cancelled? */
608 return; /* Yup... */
609
610 case MPsigpSigp: /* Signal Processor message? */
611
612 switch (holdParm0) { /* Decode SIGP message order */
613
614 case SIGPast: /* Should we do an AST? */
615 proc_info->hwCtr.numSIGPast++; /* Count this one */
616 #if 0
617 kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
618 #endif
619 ast_check((processor_t)proc_info->processor);
620 return; /* All done... */
621
622 case SIGPcpureq: /* CPU specific function? */
623
624 proc_info->hwCtr.numSIGPcpureq++; /* Count this one */
625 switch (holdParm1) { /* Select specific function */
626
627 case CPRQtimebase:
628
629 cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
630 return;
631
632 case CPRQsegload:
633 return;
634
635 case CPRQchud:
636 parmAddr = (unsigned int *)holdParm2; /* Get the destination address */
637 if(perfCpuSigHook) {
638 struct savearea *ssp = current_thread()->machine.pcb;
639 if(ssp) {
640 (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
641 }
642 }
643 parmAddr[1] = 0;
644 parmAddr[0] = 0; /* Show we're done */
645 return;
646
647 case CPRQscom:
648 if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing */
649 ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
650 }
651 else { /* No, reading... */
652 ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
653 }
654 return;
655
656 case CPRQsps:
657 {
658 ml_set_processor_speed_slave(holdParm2);
659 return;
660 }
661 default:
662 panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
663 return;
664 }
665
666
667 case SIGPdebug: /* Enter the debugger? */
668
669 proc_info->hwCtr.numSIGPdebug++; /* Count this one */
670 proc_info->debugger_is_slave++; /* Bump up the count to show we're here */
671 (void)hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */
672 __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */
673 return; /* All done now... */
674
675 case SIGPwake: /* Wake up CPU */
676 proc_info->hwCtr.numSIGPwake++; /* Count this one */
677 return; /* No need to do anything, the interrupt does it all... */
678
679 case SIGPcall: /* Call function on CPU */
680 proc_info->hwCtr.numSIGPcall++; /* Count this one */
681 xfunc = (broadcastFunc)holdParm1; /* Do this since I can't seem to figure C out */
682 xfunc(holdParm2); /* Call the passed function */
683 return; /* Done... */
684
685 default:
686 panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
687 return;
688
689 }
690
691 default:
692 panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
693 return;
694
695 }
696 panic("cpu_signal_handler: we should never get here\n");
697 }
698
699
700 /*
701 * Routine: cpu_sync_timebase
702 * Function:
703 */
704 void
705 cpu_sync_timebase(
706 void)
707 {
708 natural_t tbu, tbl;
709 boolean_t intr;
710 struct SIGtimebase syncClkSpot;
711
712 intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */
713
714 syncClkSpot.avail = FALSE;
715 syncClkSpot.ready = FALSE;
716 syncClkSpot.done = FALSE;
717
718 while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
719 (unsigned int)&syncClkSpot) != KERN_SUCCESS)
720 continue;
721
722 while (syncClkSpot.avail == FALSE)
723 continue;
724
725 isync();
726
727 /*
728 * We do the following to keep the compiler from generating extra stuff
729  * in the timebase set sequence.
730 */
731 tbu = syncClkSpot.abstime >> 32;
732 tbl = (uint32_t)syncClkSpot.abstime;
733
734 mttb(0);
735 mttbu(tbu);
736 mttb(tbl);
737
738 syncClkSpot.ready = TRUE;
739
740 while (syncClkSpot.done == FALSE)
741 continue;
742
743 etimer_resync_deadlines(); /* Start the timer */
744 (void)ml_set_interrupts_enabled(intr);
745 }
746
747
748 /*
749 * Routine: cpu_timebase_signal_handler
750 * Function:
751 */
752 void
753 cpu_timebase_signal_handler(
754 struct per_proc_info *proc_info,
755 struct SIGtimebase *timebaseAddr)
756 {
757 unsigned int tbu, tbu2, tbl;
758
759 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
760 proc_info->time_base_enable(proc_info->cpu_id, FALSE);
761
762 timebaseAddr->abstime = 0; /* Touch to force into cache */
763 sync();
764
765 do {
766 asm volatile(" mftbu %0" : "=r" (tbu));
767 asm volatile(" mftb %0" : "=r" (tbl));
768 asm volatile(" mftbu %0" : "=r" (tbu2));
769 } while (tbu != tbu2);
770
771 timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
772 sync(); /* Force order */
773
774 timebaseAddr->avail = TRUE;
775
776 while (timebaseAddr->ready == FALSE)
777 continue;
778
779 if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
780 proc_info->time_base_enable(proc_info->cpu_id, TRUE);
781
782 timebaseAddr->done = TRUE;
783 }
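/*
 * Handshake summary (editorial note): the slave's cpu_sync_timebase() and
 * the master's cpu_timebase_signal_handler() rendezvous through the shared
 * SIGtimebase block roughly as follows:
 *
 *	slave (cpu_sync_timebase)		master (signal handler)
 *	-------------------------		-----------------------
 *	avail = ready = done = FALSE
 *	cpu_signal(master, CPRQtimebase) -->	disable TB updates (if supported),
 *						read TB into abstime, avail = TRUE
 *	spin until avail
 *	load abstime into own TB
 *	ready = TRUE			 -->	spin until ready
 *						re-enable TB, done = TRUE
 *	spin until done
 */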
784
785
786 /*
787 * Routine: cpu_control
788 * Function:
789 */
790 kern_return_t
791 cpu_control(
792 int slot_num,
793 processor_info_t info,
794 unsigned int count)
795 {
796 struct per_proc_info *proc_info;
797 cpu_type_t tcpu_type;
798 cpu_subtype_t tcpu_subtype;
799 processor_pm_regs_t perf_regs;
800 processor_control_cmd_t cmd;
801 boolean_t oldlevel;
802 #define MMCR0_SUPPORT_MASK 0xf83f1fff
803 #define MMCR1_SUPPORT_MASK 0xffc00000
804 #define MMCR2_SUPPORT_MASK 0x80000000
805
806 proc_info = PerProcTable[slot_num].ppe_vaddr;
807 tcpu_type = proc_info->cpu_type;
808 tcpu_subtype = proc_info->cpu_subtype;
809 cmd = (processor_control_cmd_t) info;
810
811 if (count < PROCESSOR_CONTROL_CMD_COUNT)
812 return(KERN_FAILURE);
813
814 if ( tcpu_type != cmd->cmd_cpu_type ||
815 tcpu_subtype != cmd->cmd_cpu_subtype)
816 return(KERN_FAILURE);
817
818 if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
819 return(KERN_RESOURCE_SHORTAGE); /* cpu performance facility in use by another task */
820 }
821
822 switch (cmd->cmd_op)
823 {
824 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
825 switch (tcpu_subtype)
826 {
827 case CPU_SUBTYPE_POWERPC_750:
828 case CPU_SUBTYPE_POWERPC_7400:
829 case CPU_SUBTYPE_POWERPC_7450:
830 {
831 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
832 mtpmc1(0x0);
833 mtpmc2(0x0);
834 mtpmc3(0x0);
835 mtpmc4(0x0);
836 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
837 return(KERN_SUCCESS);
838 }
839 default:
840 return(KERN_FAILURE);
841 } /* tcpu_subtype */
842                 case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registers */
843 switch (tcpu_subtype)
844 {
845 case CPU_SUBTYPE_POWERPC_750:
846 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
847 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
848 return(KERN_FAILURE);
849 else
850 {
851 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
852 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
853 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
854 mtpmc1(PERFMON_PMC1(perf_regs));
855 mtpmc2(PERFMON_PMC2(perf_regs));
856 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
857 mtpmc3(PERFMON_PMC3(perf_regs));
858 mtpmc4(PERFMON_PMC4(perf_regs));
859 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
860 return(KERN_SUCCESS);
861 }
862 case CPU_SUBTYPE_POWERPC_7400:
863 case CPU_SUBTYPE_POWERPC_7450:
864 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
865 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
866 return(KERN_FAILURE);
867 else
868 {
869 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
870 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
871 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
872 mtpmc1(PERFMON_PMC1(perf_regs));
873 mtpmc2(PERFMON_PMC2(perf_regs));
874 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
875 mtpmc3(PERFMON_PMC3(perf_regs));
876 mtpmc4(PERFMON_PMC4(perf_regs));
877 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
878 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
879 return(KERN_SUCCESS);
880 }
881 default:
882 return(KERN_FAILURE);
883 } /* switch tcpu_subtype */
884 case PROCESSOR_PM_SET_MMCR:
885 switch (tcpu_subtype)
886 {
887 case CPU_SUBTYPE_POWERPC_750:
888 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
889 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
890 return(KERN_FAILURE);
891 else
892 {
893 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
894 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
895 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
896 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
897 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
898 return(KERN_SUCCESS);
899 }
900 case CPU_SUBTYPE_POWERPC_7400:
901 case CPU_SUBTYPE_POWERPC_7450:
902 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
903 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
904 return(KERN_FAILURE);
905 else
906 {
907 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
908 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
909 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
910 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
911 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
912 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
913 return(KERN_SUCCESS);
914 }
915 default:
916 return(KERN_FAILURE);
917 } /* tcpu_subtype */
918 default:
919 return(KERN_FAILURE);
920 } /* switch cmd_op */
921 }
922
923
924 /*
925 * Routine: cpu_info_count
926 * Function:
927 */
928 kern_return_t
929 cpu_info_count(
930 processor_flavor_t flavor,
931 unsigned int *count)
932 {
933 cpu_subtype_t tcpu_subtype;
934
935 /*
936 * For now, we just assume that all CPUs are of the same type
937 */
938 tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
939 switch (flavor) {
940 case PROCESSOR_PM_REGS_INFO:
941 switch (tcpu_subtype) {
942 case CPU_SUBTYPE_POWERPC_750:
943
944 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
945 return(KERN_SUCCESS);
946
947 case CPU_SUBTYPE_POWERPC_7400:
948 case CPU_SUBTYPE_POWERPC_7450:
949
950 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
951 return(KERN_SUCCESS);
952
953 default:
954 *count = 0;
955 return(KERN_INVALID_ARGUMENT);
956 } /* switch tcpu_subtype */
957
958 case PROCESSOR_TEMPERATURE:
959 *count = PROCESSOR_TEMPERATURE_COUNT;
960 return (KERN_SUCCESS);
961
962 default:
963 *count = 0;
964 return(KERN_INVALID_ARGUMENT);
965
966 }
967 }
968
969
970 /*
971 * Routine: cpu_info
972 * Function:
973 */
974 kern_return_t
975 cpu_info(
976 processor_flavor_t flavor,
977 int slot_num,
978 processor_info_t info,
979 unsigned int *count)
980 {
981 cpu_subtype_t tcpu_subtype;
982 processor_pm_regs_t perf_regs;
983 boolean_t oldlevel;
984
985 tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;
986
987 switch (flavor) {
988 case PROCESSOR_PM_REGS_INFO:
989
990 perf_regs = (processor_pm_regs_t) info;
991
992 switch (tcpu_subtype) {
993 case CPU_SUBTYPE_POWERPC_750:
994
995 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
996 return(KERN_FAILURE);
997
998 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
999 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1000 PERFMON_PMC1(perf_regs) = mfpmc1();
1001 PERFMON_PMC2(perf_regs) = mfpmc2();
1002 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1003 PERFMON_PMC3(perf_regs) = mfpmc3();
1004 PERFMON_PMC4(perf_regs) = mfpmc4();
1005 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1006
1007 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
1008 return(KERN_SUCCESS);
1009
1010 case CPU_SUBTYPE_POWERPC_7400:
1011 case CPU_SUBTYPE_POWERPC_7450:
1012
1013 if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
1014 return(KERN_FAILURE);
1015
1016 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
1017 PERFMON_MMCR0(perf_regs) = mfmmcr0();
1018 PERFMON_PMC1(perf_regs) = mfpmc1();
1019 PERFMON_PMC2(perf_regs) = mfpmc2();
1020 PERFMON_MMCR1(perf_regs) = mfmmcr1();
1021 PERFMON_PMC3(perf_regs) = mfpmc3();
1022 PERFMON_PMC4(perf_regs) = mfpmc4();
1023 PERFMON_MMCR2(perf_regs) = mfmmcr2();
1024 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
1025
1026 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
1027 return(KERN_SUCCESS);
1028
1029 default:
1030 return(KERN_FAILURE);
1031 } /* switch tcpu_subtype */
1032
1033 case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */
1034
1035 *info = -1; /* Get the temperature */
1036 return(KERN_FAILURE);
1037
1038 default:
1039 return(KERN_INVALID_ARGUMENT);
1040
1041 } /* flavor */
1042 }
1043
1044
1045 /*
1046 * Routine: cpu_to_processor
1047 * Function:
1048 */
1049 processor_t
1050 cpu_to_processor(
1051 int cpu)
1052 {
1053 return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
1054 }
1055
1056
1057 /*
1058 * Routine: slot_type
1059 * Function:
1060 */
1061 cpu_type_t
1062 slot_type(
1063 int slot_num)
1064 {
1065 return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
1066 }
1067
1068
1069 /*
1070 * Routine: slot_subtype
1071 * Function:
1072 */
1073 cpu_subtype_t
1074 slot_subtype(
1075 int slot_num)
1076 {
1077 return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
1078 }
1079
1080
1081 /*
1082 * Routine: slot_threadtype
1083 * Function:
1084 */
1085 cpu_threadtype_t
1086 slot_threadtype(
1087 int slot_num)
1088 {
1089 return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
1090 }
1091
1092
1093 /*
1094 * Routine: cpu_type
1095 * Function:
1096 */
1097 cpu_type_t
1098 cpu_type(void)
1099 {
1100 return (getPerProc()->cpu_type);
1101 }
1102
1103
1104 /*
1105 * Routine: cpu_subtype
1106 * Function:
1107 */
1108 cpu_subtype_t
1109 cpu_subtype(void)
1110 {
1111 return (getPerProc()->cpu_subtype);
1112 }
1113
1114
1115 /*
1116 * Routine: cpu_threadtype
1117 * Function:
1118 */
1119 cpu_threadtype_t
1120 cpu_threadtype(void)
1121 {
1122 return (getPerProc()->cpu_threadtype);
1123 }
1124
1125 /*
1126 * Call a function on all running processors
1127 *
1128  * Note that the synch parameter is used to wait until all functions are complete.
1129  * It is not passed to the other processor and must be known by the called function.
1130  * The called function must do a thread_wakeup on the synch if it decrements the
1131  * synch count to 0.
1132  *
1133  * We start by initializing the synchronizer to the number of possible cpus.
1134  * Then we signal each possible processor.
1135  * If the signal fails, we count it. We also skip our own.
1136  * When we are finished signaling, we adjust the synchronizer count down by the number of failed signals.
1137  * Because the signaled processors are also decrementing the synchronizer count, the adjustment may result in 0.
1138  * If this happens, all other processors are finished with the function,
1139  * so we clear the wait and continue.
1140  * Otherwise, we block waiting for the other processor(s) to finish.
1141  *
1142  * Meanwhile, the other processors decrement the synchronizer when they are done.
1143  * If it goes to zero, thread_wakeup is called to run the broadcaster.
1144  *
1145  * Note that because we account for the broadcaster in the synchronization count, we will not get any
1146  * premature wakeup calls.
1147  *
1148  * Also note that when we do the adjustment of the synchronization count, if the result is 0, it means that
1149  * all of the other processors are finished. Otherwise, we know that there is at least one more.
1150  * When that thread decrements the synchronizer to zero, it will do a thread_wakeup.
1151 *
1152 */
1153
1154 int32_t
1155 cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
1156 {
1157 int failsig;
1158 unsigned int cpu, ocpu;
1159
1160 cpu = cpu_number(); /* Who are we? */
1161 failsig = 0; /* Clear called processor count */
1162
1163 if(real_ncpus > 1) { /* Are we just a uni? */
1164
1165 *synch = real_ncpus; /* Set how many we are going to try */
1166 assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
1167
1168 for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
1169
1170 if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
1171
1172 if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
1173 failsig++; /* Count failed signals */
1174 }
1175 }
1176
1177 if (hw_atomic_sub(synch, failsig + 1) == 0)
1178 clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled or all of the others finished */
1179 else
1180 thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
1181 }
1182
1183 return (real_ncpus - failsig - 1); /* Return the number of guys actually signalled... */
1184 }
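/*
 * Hedged usage sketch (not part of the original source): per the contract
 * described above, a caller-supplied broadcast function must decrement the
 * synchronizer itself and wake the broadcaster when the count reaches zero.
 * The names example_sync, example_flush, and example_broadcast_all are
 * hypothetical.
 */
#if 0	/* illustrative only */
static uint32_t example_sync;

static void
example_flush(uint32_t parm)
{
	(void)parm;					/* per-processor work would go here */
	if (hw_atomic_sub(&example_sync, 1) == 0)	/* last processor finished */
		thread_wakeup((event_t)&example_sync);	/* release the broadcaster */
}

static void
example_broadcast_all(void)
{
	(void)cpu_broadcast(&example_sync, example_flush, 0);	/* returns number of CPUs actually signalled */
}
#endif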