/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>

#include <vm/pmap.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
#include <ppc/machine_cpu.h>

decl_mutex_data(static, ppt_lock);

unsigned int real_ncpus = 1;
unsigned int max_ncpus = MAX_CPUS;

decl_simple_lock_data(static, rht_lock);

static unsigned int rht_state = 0;
#define RHT_WAIT    0x01
#define RHT_BUSY    0x02

decl_simple_lock_data(static, SignalReadyLock);

struct SIGtimebase {
    boolean_t   avail;
    boolean_t   ready;
    boolean_t   done;
    uint64_t    abstime;
};
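
/*
 * Summary of the timebase-sync handshake implemented by cpu_sync_timebase()
 * and cpu_timebase_signal_handler() below (a reader's aid added here, not
 * original commentary; derived from those two routines):
 *
 *    slave:  clears avail/ready/done, sends CPRQtimebase to the master
 *    master: disables its time base (when the platform provides
 *            time_base_enable), snapshots it into abstime, sets avail
 *    slave:  spins on avail, loads the snapshot into its own TBR, sets ready
 *    master: spins on ready, re-enables its time base, sets done
 *    slave:  spins on done, then restores interrupts and continues
 */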

perfCallback perfCpuSigHook = 0;                /* Pointer to CHUD cpu signal hook routine */

extern int debugger_sync;

/*
 * Forward definitions
 */

void cpu_sync_timebase(
    void);

void cpu_timebase_signal_handler(
    struct per_proc_info *proc_info,
    struct SIGtimebase *timebaseAddr);

/*
 * Routine: cpu_bootstrap
 * Function:
 */
void
cpu_bootstrap(
    void)
{
    simple_lock_init(&rht_lock, 0);
    simple_lock_init(&SignalReadyLock, 0);
    mutex_init(&ppt_lock, 0);
}


/*
 * Routine: cpu_init
 * Function:
 */
void
cpu_init(
    void)
{
    struct per_proc_info *proc_info;

    proc_info = getPerProc();

    /*
     * Restore the TBR.
     */
    if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
        mttb(0);
        mttbu(proc_info->save_tbu);
        mttb(proc_info->save_tbl);
    }

    proc_info->cpu_type = CPU_TYPE_POWERPC;
    proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
    proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
    proc_info->running = TRUE;
}

/*
 * Routine: cpu_machine_init
 * Function:
 */
void
cpu_machine_init(
    void)
{
    struct per_proc_info *proc_info;
    volatile struct per_proc_info *mproc_info;

    proc_info = getPerProc();
    mproc_info = PerProcTable[master_cpu].ppe_vaddr;

    if (proc_info != mproc_info) {
        simple_lock(&rht_lock);
        if (rht_state & RHT_WAIT)
            thread_wakeup(&rht_state);
        rht_state &= ~(RHT_BUSY|RHT_WAIT);
        simple_unlock(&rht_lock);
    }

    PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

    if (proc_info != mproc_info) {
        while (!((mproc_info->cpu_flags) & SignalReady))
            continue;
        cpu_sync_timebase();
    }

    ml_init_interrupt();
    if (proc_info != mproc_info)
        simple_lock(&SignalReadyLock);
    proc_info->cpu_flags |= BootDone|SignalReady;
    if (proc_info != mproc_info) {
        if (proc_info->ppXFlags & SignalReadyWait) {
            hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
            thread_wakeup(&proc_info->cpu_flags);
        }
        simple_unlock(&SignalReadyLock);
    }
}


/*
 * Routine: cpu_per_proc_alloc
 * Function:
 */
struct per_proc_info *
cpu_per_proc_alloc(
    void)
{
    struct per_proc_info *proc_info = 0;
    void *interrupt_stack = 0;
    void *debugger_stack = 0;

    if ((proc_info = (struct per_proc_info*)kalloc(PAGE_SIZE)) == (struct per_proc_info*)0)
        return (struct per_proc_info *)NULL;
    if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
        kfree(proc_info, PAGE_SIZE);
        return (struct per_proc_info *)NULL;
    }
#if MACH_KDP || MACH_KDB
    if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
        kfree(proc_info, PAGE_SIZE);
        kfree(interrupt_stack, INTSTACK_SIZE);
        return (struct per_proc_info *)NULL;
    }
#endif

    bzero((void *)proc_info, sizeof(struct per_proc_info));

    proc_info->next_savearea = (uint64_t)save_get_init();
    proc_info->pf = BootProcInfo.pf;
    proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
    proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
    proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
    proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
    return proc_info;
}


/*
 * Routine: cpu_per_proc_free
 * Function:
 */
void
cpu_per_proc_free(
    struct per_proc_info *proc_info
)
{
    if (proc_info->cpu_number == master_cpu)
        return;
    kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
    kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
    kfree((void *)proc_info, PAGE_SIZE);
}


/*
 * Routine: cpu_per_proc_register
 * Function:
 */
kern_return_t
cpu_per_proc_register(
    struct per_proc_info *proc_info
)
{
    int cpu;

    mutex_lock(&ppt_lock);
    if (real_ncpus >= max_ncpus) {
        mutex_unlock(&ppt_lock);
        return KERN_FAILURE;
    }
    cpu = real_ncpus;
    proc_info->cpu_number = cpu;
    PerProcTable[cpu].ppe_vaddr = proc_info;
    PerProcTable[cpu].ppe_paddr = ((addr64_t)pmap_find_phys(kernel_pmap, (vm_offset_t)proc_info)) << PAGE_SHIFT;
    eieio();
    real_ncpus++;
    mutex_unlock(&ppt_lock);
    return KERN_SUCCESS;
}
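
/*
 * Illustrative sketch (added for readability, not part of the original file):
 * a platform layer bringing a new processor online would typically chain the
 * routines above roughly like this (error handling omitted):
 *
 *    struct per_proc_info *pp = cpu_per_proc_alloc();
 *    if (pp != NULL && cpu_per_proc_register(pp) == KERN_SUCCESS)
 *        (void) cpu_start(pp->cpu_number);
 *
 * cpu_per_proc_register() assigns pp->cpu_number and publishes the block in
 * PerProcTable, after which cpu_start() can locate it by CPU number.
 */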


/*
 * Routine: cpu_start
 * Function:
 */
kern_return_t
cpu_start(
    int cpu)
{
    struct per_proc_info *proc_info;
    kern_return_t ret;
    mapping_t *mp;

    proc_info = PerProcTable[cpu].ppe_vaddr;

    if (cpu == cpu_number()) {
        PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
        ml_init_interrupt();
        proc_info->cpu_flags |= BootDone|SignalReady;

        return KERN_SUCCESS;
    } else {
        proc_info->cpu_flags &= BootDone;
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;
        proc_info->istackptr = proc_info->intstack_top_ss;
        proc_info->rtcPop = 0xFFFFFFFFFFFFFFFFULL;
        mp = (mapping_t *)(&proc_info->ppUMWmp);
        mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
        mp->mpSpace = invalSpace;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

            simple_lock(&rht_lock);
            while (rht_state & RHT_BUSY) {
                rht_state |= RHT_WAIT;
                thread_sleep_usimple_lock((event_t)&rht_state,
                                          &rht_lock, THREAD_UNINT);
            }
            rht_state |= RHT_BUSY;
            simple_unlock(&rht_lock);

            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[cpu]);
        }
/*
 * Note: we pass the current time to the other processor here. It will load it
 * as early as possible so that there is a chance it is close to accurate.
 * After the machine has been up a while, we will officially resync the clocks
 * so that all processors are the same. This is just to get close.
 */

        ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

        __asm__ volatile("sync");               /* Commit to storage */
        __asm__ volatile("isync");              /* Wait a second */
        ret = PE_cpu_start(proc_info->cpu_id,
                           proc_info->start_paddr, (vm_offset_t)proc_info);

        if (ret != KERN_SUCCESS) {
            if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
                simple_lock(&rht_lock);
                if (rht_state & RHT_WAIT)
                    thread_wakeup(&rht_state);
                rht_state &= ~(RHT_BUSY|RHT_WAIT);
                simple_unlock(&rht_lock);
            }
        } else {
            simple_lock(&SignalReadyLock);
            if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
                hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
                thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
                                         &SignalReadyLock, THREAD_UNINT);
            }
            simple_unlock(&SignalReadyLock);
        }
        return(ret);
    }
}

/*
 * Routine: cpu_exit_wait
 * Function:
 */
void
cpu_exit_wait(
    int cpu)
{
    struct per_proc_info *tpproc;

    if (cpu != master_cpu) {
        tpproc = PerProcTable[cpu].ppe_vaddr;
        while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
    }
}


/*
 * Routine: cpu_doshutdown
 * Function:
 */
void
cpu_doshutdown(
    void)
{
    enable_preemption();
    processor_offline(current_processor());
}


/*
 * Routine: cpu_sleep
 * Function:
 */
void
cpu_sleep(
    void)
{
    struct per_proc_info *proc_info;
    unsigned int i;
    unsigned int wait_ncpus_sleep, ncpus_sleep;
    facility_context *fowner;

    proc_info = getPerProc();

    proc_info->running = FALSE;

    fowner = proc_info->FPU_owner;              /* Cache this */
    if(fowner) fpu_save(fowner);                /* If anyone owns FPU, save it */
    proc_info->FPU_owner = 0;                   /* Set no fpu owner now */

    fowner = proc_info->VMX_owner;              /* Cache this */
    if(fowner) vec_save(fowner);                /* If anyone owns vectors, save it */
    proc_info->VMX_owner = 0;                   /* Set no vector owner now */

    if (proc_info->cpu_number == master_cpu) {
        proc_info->cpu_flags &= BootDone;
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[master_cpu]);

            __asm__ volatile("sync");
            __asm__ volatile("isync");
        }

        wait_ncpus_sleep = real_ncpus - 1;
        ncpus_sleep = 0;
        while (wait_ncpus_sleep != ncpus_sleep) {
            ncpus_sleep = 0;
            for (i = 1; i < real_ncpus; i++) {
                if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
                    ncpus_sleep++;
            }
        }
    }

    /*
     * Save the TBR before stopping.
     */
    do {
        proc_info->save_tbu = mftbu();
        proc_info->save_tbl = mftb();
    } while (mftbu() != proc_info->save_tbu);

    PE_cpu_machine_quiesce(proc_info->cpu_id);
}

/*
 * Routine: cpu_signal
 * Function:
 *  Here is where we send a message to another processor. The message orders
 *  currently handled are SIGPast (preempt and kick off threads), SIGPdebug
 *  (enter the debugger), SIGPwake (wake an idle processor), SIGPcall (call a
 *  function), and SIGPcpureq (CPU-specific requests).
 *
 *  We set up the SIGP function to indicate that this is a simple message and set the
 *  order code (MPsigpParm0) to the requested signal. After finding the per_processor
 *  block for the target, we lock the message block. Then we set the parameter(s).
 *  Next we change the lock (also called "busy") to "passing" and finally signal
 *  the other processor. Note that we only wait about half a millisecond to get the
 *  message lock. If we time out, we return failure to our caller. It is their
 *  responsibility to recover.
 */
kern_return_t
cpu_signal(
    int target,
    int signal,
    unsigned int p1,
    unsigned int p2)
{
    unsigned int holdStat;
    struct per_proc_info *tpproc, *mpproc;
    int busybitset = 0;

#if DEBUG
    if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

    mpproc = getPerProc();                      /* Point to our block */
    tpproc = PerProcTable[target].ppe_vaddr;    /* Point to the target's block */
    if(mpproc == tpproc) return KERN_FAILURE;   /* Cannot signal ourselves */

    if(!tpproc->running) return KERN_FAILURE;

    if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

    if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {   /* Is there an unreceived message already pending? */

        if(signal == SIGPwake) {                /* SIGPwake can merge into all others... */
            mpproc->hwCtr.numSIGPmwake++;       /* Account for merged wakes */
            return KERN_SUCCESS;
        }

        if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {   /* We can merge ASTs */
            mpproc->hwCtr.numSIGPmast++;        /* Account for merged ASTs */
            return KERN_SUCCESS;                /* Don't bother to send this one... */
        }

        if (tpproc->MPsigpParm0 == SIGPwake) {
            if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
                              (MPsigpBusy | MPsigpPass), MPsigpBusy, 0)) {
                busybitset = 1;
                mpproc->hwCtr.numSIGPmwake++;
            }
        }
    }

    if((busybitset == 0) &&
       (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
       (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {     /* Try to lock the message block with a .5ms timeout */
        mpproc->hwCtr.numSIGPtimo++;            /* Account for timeouts */
        return KERN_FAILURE;                    /* Timed out, take your ball and go home... */
    }

    holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number; /* Set up the signal status word */
    tpproc->MPsigpParm0 = signal;               /* Set message order */
    tpproc->MPsigpParm1 = p1;                   /* Set additional parm */
    tpproc->MPsigpParm2 = p2;                   /* Set additional parm */

    __asm__ volatile("sync");                   /* Make sure it's all there */

    tpproc->MPsigpStat = holdStat;              /* Set status and pass the lock */
    __asm__ volatile("eieio");                  /* I'm a paranoid freak */

    if (busybitset == 0)
        PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);  /* Kick the other processor */

    return KERN_SUCCESS;                        /* All is goodness and rainbows... */
}
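
/*
 * Illustrative sketch (added for readability, not part of the original file):
 * a caller asking a remote processor to run its AST check would do something
 * like
 *
 *    if (cpu_signal(target_cpu, SIGPast, 0, 0) != KERN_SUCCESS) {
 *        ... message lock timed out or target not ready; retry or back off ...
 *    }
 *
 * since, per the comment above cpu_signal(), recovering from a timeout is the
 * caller's responsibility.
 */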


/*
 * Routine: cpu_signal_handler
 * Function:
 *  Here is where we implement the receiver of the signaling protocol.
 *  We wait for the signal status area to be passed to us. Then we snarf
 *  up the status, the sender, and the 3 potential parms. Next we release
 *  the lock and signal the sender.
 */
void
cpu_signal_handler(
    void)
{
    unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
    unsigned int *parmAddr;
    struct per_proc_info *proc_info;
    int cpu;
    broadcastFunc xfunc;

    cpu = cpu_number();                         /* Get the CPU number */

    proc_info = getPerProc();

    /*
     * Since we've been signaled, wait about 31 ms for the signal lock to pass
     */
    if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
      (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
        panic("cpu_signal_handler: Lock pass timed out\n");
    }

    holdStat = proc_info->MPsigpStat;           /* Snarf stat word */
    holdParm0 = proc_info->MPsigpParm0;         /* Snarf parameter */
    holdParm1 = proc_info->MPsigpParm1;         /* Snarf parameter */
    holdParm2 = proc_info->MPsigpParm2;         /* Snarf parameter */

    __asm__ volatile("isync");                  /* Make sure we don't unlock until memory is in */

    proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);  /* Release lock */

    switch ((holdStat & MPsigpFunc) >> 8) {     /* Decode function code */

        case MPsigpIdle:                        /* Was function cancelled? */
            return;                             /* Yup... */

        case MPsigpSigp:                        /* Signal Processor message? */

            switch (holdParm0) {                /* Decode SIGP message order */

                case SIGPast:                   /* Should we do an AST? */
                    proc_info->hwCtr.numSIGPast++;      /* Count this one */
#if 0
                    kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
                    ast_check((processor_t)proc_info->processor);
                    return;                     /* All done... */

                case SIGPcpureq:                /* CPU specific function? */

                    proc_info->hwCtr.numSIGPcpureq++;   /* Count this one */
                    switch (holdParm1) {        /* Select specific function */

                        case CPRQtimebase:

                            cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
                            return;

                        case CPRQsegload:
                            return;

                        case CPRQchud:
                            parmAddr = (unsigned int *)holdParm2;   /* Get the destination address */
                            if(perfCpuSigHook) {
                                struct savearea *ssp = current_thread()->machine.pcb;
                                if(ssp) {
                                    (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
                                }
                            }
                            parmAddr[1] = 0;
                            parmAddr[0] = 0;    /* Show we're done */
                            return;

                        case CPRQscom:
                            if(((scomcomm *)holdParm2)->scomfunc) { /* Are we writing? */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata); /* Write scom */
                            }
                            else {              /* No, reading... */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata); /* Read scom */
                            }
                            return;

                        case CPRQsps:
                            {
                            ml_set_processor_speed_slave(holdParm2);
                            return;
                            }
                        default:
                            panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
                            return;
                    }

                case SIGPdebug:                 /* Enter the debugger? */

                    proc_info->hwCtr.numSIGPdebug++;    /* Count this one */
                    proc_info->debugger_is_slave++;     /* Bump up the count to show we're here */
                    hw_atomic_sub(&debugger_sync, 1);   /* Show we've received the 'rupt */
                    __asm__ volatile("tw 4,r3,r3");     /* Enter the debugger */
                    return;                     /* All done now... */

                case SIGPwake:                  /* Wake up CPU */
                    proc_info->hwCtr.numSIGPwake++;     /* Count this one */
                    return;                     /* No need to do anything, the interrupt does it all... */

                case SIGPcall:                  /* Call function on CPU */
                    proc_info->hwCtr.numSIGPcall++;     /* Count this one */
                    xfunc = (broadcastFunc)holdParm1;   /* Recover the function pointer from the parm */
                    xfunc(holdParm2);           /* Call the passed function */
                    return;                     /* Done... */

                default:
                    panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
                    return;

            }

        default:
            panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
            return;

    }
    panic("cpu_signal_handler: we should never get here\n");
}


/*
 * Routine: cpu_sync_timebase
 * Function:
 */
void
cpu_sync_timebase(
    void)
{
    natural_t tbu, tbl;
    boolean_t intr;
    struct SIGtimebase syncClkSpot;

    intr = ml_set_interrupts_enabled(FALSE);    /* No interruptions in here */

    syncClkSpot.avail = FALSE;
    syncClkSpot.ready = FALSE;
    syncClkSpot.done = FALSE;

    while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
                      (unsigned int)&syncClkSpot) != KERN_SUCCESS)
        continue;

    while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
        continue;

    isync();

    /*
     * We do the following to keep the compiler from generating extra code
     * in the timebase-setting sequence
     */
    tbu = syncClkSpot.abstime >> 32;
    tbl = (uint32_t)syncClkSpot.abstime;

    mttb(0);
    mttbu(tbu);
    mttb(tbl);

    syncClkSpot.ready = TRUE;

    while (*(volatile int *)&(syncClkSpot.done) == FALSE)
        continue;

    (void)ml_set_interrupts_enabled(intr);
}


/*
 * Routine: cpu_timebase_signal_handler
 * Function:
 */
void
cpu_timebase_signal_handler(
    struct per_proc_info *proc_info,
    struct SIGtimebase *timebaseAddr)
{
    unsigned int tbu, tbu2, tbl;

    if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
        proc_info->time_base_enable(proc_info->cpu_id, FALSE);

    timebaseAddr->abstime = 0;                  /* Touch to force into cache */
    sync();

    do {
        asm volatile("  mftbu %0" : "=r" (tbu));
        asm volatile("  mftb %0" : "=r" (tbl));
        asm volatile("  mftbu %0" : "=r" (tbu2));
    } while (tbu != tbu2);

    timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
    sync();                                     /* Force order */

    timebaseAddr->avail = TRUE;

    while (*(volatile int *)&(timebaseAddr->ready) == FALSE)
        continue;

    if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
        proc_info->time_base_enable(proc_info->cpu_id, TRUE);

    timebaseAddr->done = TRUE;
}


/*
 * Routine: cpu_control
 * Function:
 */
kern_return_t
cpu_control(
    int slot_num,
    processor_info_t info,
    unsigned int count)
{
    struct per_proc_info *proc_info;
    cpu_type_t tcpu_type;
    cpu_subtype_t tcpu_subtype;
    processor_pm_regs_t perf_regs;
    processor_control_cmd_t cmd;
    boolean_t oldlevel;
#define MMCR0_SUPPORT_MASK  0xf83f1fff
#define MMCR1_SUPPORT_MASK  0xffc00000
#define MMCR2_SUPPORT_MASK  0x80000000

    proc_info = PerProcTable[slot_num].ppe_vaddr;
    tcpu_type = proc_info->cpu_type;
    tcpu_subtype = proc_info->cpu_subtype;
    cmd = (processor_control_cmd_t) info;

    if (count < PROCESSOR_CONTROL_CMD_COUNT)
        return(KERN_FAILURE);

    if ( tcpu_type != cmd->cmd_cpu_type ||
         tcpu_subtype != cmd->cmd_cpu_subtype)
        return(KERN_FAILURE);

    if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
        return(KERN_RESOURCE_SHORTAGE);         /* cpu performance facility in use by another task */
    }

    switch (cmd->cmd_op)
    {
        case PROCESSOR_PM_CLR_PMC:              /* Clear Performance Monitor Counters */
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                {
                    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                    mtpmc1(0x0);
                    mtpmc2(0x0);
                    mtpmc3(0x0);
                    mtpmc4(0x0);
                    ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                    return(KERN_SUCCESS);
                }
                default:
                    return(KERN_FAILURE);
            } /* tcpu_subtype */
        case PROCESSOR_PM_SET_REGS:             /* Set Performance Monitor Registers */
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* switch tcpu_subtype */
        case PROCESSOR_PM_SET_MMCR:
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* tcpu_subtype */
        default:
            return(KERN_FAILURE);
    } /* switch cmd_op */
}
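
/*
 * Illustrative sketch (added for readability, not part of the original file):
 * a caller of cpu_control() would fill in a command block along these lines
 * to clear the performance counters (only the cmd_op, cmd_cpu_type, and
 * cmd_cpu_subtype fields are relied on by the code above; the exact command
 * structure type name is an assumption here):
 *
 *    struct processor_control_cmd cmd;
 *    cmd.cmd_op = PROCESSOR_PM_CLR_PMC;
 *    cmd.cmd_cpu_type = CPU_TYPE_POWERPC;
 *    cmd.cmd_cpu_subtype = CPU_SUBTYPE_POWERPC_7450;
 *    cpu_control(slot_num, (processor_info_t)&cmd, PROCESSOR_CONTROL_CMD_COUNT);
 */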


/*
 * Routine: cpu_info_count
 * Function:
 */
kern_return_t
cpu_info_count(
    processor_flavor_t flavor,
    unsigned int *count)
{
    cpu_subtype_t tcpu_subtype;

    /*
     * For now, we just assume that all CPUs are of the same type
     */
    tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:
            switch (tcpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    *count = 0;
                    return(KERN_INVALID_ARGUMENT);
            } /* switch tcpu_subtype */

        case PROCESSOR_TEMPERATURE:
            *count = PROCESSOR_TEMPERATURE_COUNT;
            return(KERN_SUCCESS);

        default:
            *count = 0;
            return(KERN_INVALID_ARGUMENT);

    }
}


/*
 * Routine: cpu_info
 * Function:
 */
kern_return_t
cpu_info(
    processor_flavor_t flavor,
    int slot_num,
    processor_info_t info,
    unsigned int *count)
{
    cpu_subtype_t tcpu_subtype;
    processor_pm_regs_t perf_regs;
    boolean_t oldlevel;

    tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:

            perf_regs = (processor_pm_regs_t) info;

            switch (tcpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs) = mfpmc1();
                    PERFMON_PMC2(perf_regs) = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs) = mfpmc3();
                    PERFMON_PMC4(perf_regs) = mfpmc4();
                    ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);    /* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs) = mfpmc1();
                    PERFMON_PMC2(perf_regs) = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs) = mfpmc3();
                    PERFMON_PMC4(perf_regs) = mfpmc4();
                    PERFMON_MMCR2(perf_regs) = mfmmcr2();
                    ml_set_interrupts_enabled(oldlevel);    /* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    return(KERN_FAILURE);
            } /* switch tcpu_subtype */

        case PROCESSOR_TEMPERATURE:             /* Get the temperature of a processor */

            *info = -1;                         /* Temperature reading is not supported */
            return(KERN_FAILURE);

        default:
            return(KERN_INVALID_ARGUMENT);

    } /* flavor */
}
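
/*
 * Illustrative sketch (added for readability, not part of the original file):
 * reading slot 0's performance-monitor registers with the two routines above
 * (error handling omitted):
 *
 *    unsigned int count;
 *    unsigned int regs[PROCESSOR_PM_REGS_COUNT_POWERPC_7400];
 *
 *    if (cpu_info_count(PROCESSOR_PM_REGS_INFO, &count) == KERN_SUCCESS &&
 *        count <= PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
 *        cpu_info(PROCESSOR_PM_REGS_INFO, 0, (processor_info_t)regs, &count);
 *
 * cpu_info_count() reports how many words the flavor needs; cpu_info() checks
 * that the supplied count is large enough and rewrites it on return.
 */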


/*
 * Routine: cpu_to_processor
 * Function:
 */
processor_t
cpu_to_processor(
    int cpu)
{
    return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}


/*
 * Routine: slot_type
 * Function:
 */
cpu_type_t
slot_type(
    int slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}


/*
 * Routine: slot_subtype
 * Function:
 */
cpu_subtype_t
slot_subtype(
    int slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}


/*
 * Routine: slot_threadtype
 * Function:
 */
cpu_threadtype_t
slot_threadtype(
    int slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}


/*
 * Routine: cpu_type
 * Function:
 */
cpu_type_t
cpu_type(void)
{
    return (getPerProc()->cpu_type);
}


/*
 * Routine: cpu_subtype
 * Function:
 */
cpu_subtype_t
cpu_subtype(void)
{
    return (getPerProc()->cpu_subtype);
}


/*
 * Routine: cpu_threadtype
 * Function:
 */
cpu_threadtype_t
cpu_threadtype(void)
{
    return (getPerProc()->cpu_threadtype);
}

/*
 * Call a function on all running processors
 *
 * Note that the synch parameter is used to wait until all functions are complete.
 * It is not passed to the other processor and must be known by the called function.
 * The called function must do a thread_wakeup on the synch if it decrements the
 * synch count to 0.
 */
int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm)
{
    int sigproc, cpu, ocpu;

    cpu = cpu_number();                         /* Who are we? */
    sigproc = 0;                                /* Clear called processor count */

    if(real_ncpus > 1) {                        /* Are we just a uni? */

        assert_wait((event_t)synch, THREAD_UNINT);  /* If more than one processor, we may have to wait */

        for(ocpu = 0; ocpu < real_ncpus; ocpu++) {  /* Tell everyone to call */
            if(ocpu == cpu) continue;           /* If we talk to ourselves, people will wonder... */
            hw_atomic_add(synch, 1);            /* Tentatively bump synchronizer */
            sigproc++;                          /* Tentatively bump signal sent count */
            if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {  /* Call the function on the other processor */
                hw_atomic_sub(synch, 1);        /* Other processor isn't really there, ignore it */
                sigproc--;                      /* and don't count it */
            }
        }

        if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
        else thread_block(THREAD_CONTINUE_NULL);    /* Wait for everyone to get into step... */
    }

    return sigproc;                             /* Return the number of processors actually signalled */
}
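
/*
 * Illustrative sketch (added for readability, not part of the original file):
 * a function run via cpu_broadcast() must itself decrement the synch word and
 * wake the broadcaster when the count reaches zero, along the lines of
 *
 *    static uint32_t bcast_synch;    (hypothetical synch word known to both sides)
 *
 *    static void remote_work(uint32_t parm)
 *    {
 *        ... do the per-processor work ...
 *        if (hw_atomic_sub(&bcast_synch, 1) == 0)
 *            thread_wakeup((event_t)&bcast_synch);
 *    }
 *
 *    bcast_synch = 0;
 *    (void) cpu_broadcast(&bcast_synch, remote_work, 0);
 *
 * This assumes hw_atomic_sub() returns the post-decrement value; the comment
 * above cpu_broadcast() requires the wakeup exactly when the count hits 0.
 */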