/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

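/*
 *	cpu.c - per-processor support for the PowerPC platform: bootstrap and
 *	startup, per_proc allocation and registration, sleep and shutdown,
 *	processor-to-processor signaling (SIGP), timebase synchronization,
 *	and performance monitor control.
 */
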
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/processor_info.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>

#include <vm/pmap.h>
#include <IOKit/IOHibernatePrivate.h>

#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>
#include <ppc/machine_cpu.h>
#include <ppc/pms.h>
#include <ppc/rtclock.h>

decl_mutex_data(static,ppt_lock);

unsigned int		real_ncpus = 1;
unsigned int		max_ncpus  = MAX_CPUS;

decl_simple_lock_data(static,rht_lock);

static unsigned int	rht_state = 0;
#define RHT_WAIT	0x01
#define RHT_BUSY	0x02

decl_simple_lock_data(static,SignalReadyLock);

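/*
 * Handshake area used to pass the master's timebase to a processor that is
 * synchronizing its clock (see cpu_sync_timebase() and
 * cpu_timebase_signal_handler() below): the master publishes "abstime" and
 * sets "avail"; the requester loads its timebase registers and sets "ready";
 * the master then sets "done" to complete the exchange.
 */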
struct SIGtimebase {
    boolean_t	avail;
    boolean_t	ready;
    boolean_t	done;
    uint64_t	abstime;
};

perfCallback	perfCpuSigHook = 0;		/* Pointer to CHUD cpu signal hook routine */

extern int debugger_sync;

/*
 * Forward definitions
 */

void	cpu_sync_timebase(
            void);

void	cpu_timebase_signal_handler(
            struct per_proc_info	*proc_info,
            struct SIGtimebase	*timebaseAddr);

/*
 *	Routine:	cpu_bootstrap
 *	Function:	Initialize the locks used by the per-processor code.
 */
void
cpu_bootstrap(
    void)
{
    simple_lock_init(&rht_lock,0);
    simple_lock_init(&SignalReadyLock,0);
    mutex_init(&ppt_lock,0);
}


/*
 *	Routine:	cpu_init
 *	Function:	Per-processor initialization: restore the timebase if one
 *			was saved across sleep, request the next timer pop, and fill
 *			in this processor's identification.
 */
void
cpu_init(
    void)
{
    struct per_proc_info *proc_info;

    proc_info = getPerProc();

    /*
     * Restore the TBR.
     */
    if (proc_info->save_tbu != 0 || proc_info->save_tbl != 0) {
        mttb(0);
        mttbu(proc_info->save_tbu);
        mttb(proc_info->save_tbl);
    }

    setTimerReq();				/* Now that the time base is sort of correct, request the next timer pop */

    proc_info->cpu_type = CPU_TYPE_POWERPC;
    proc_info->cpu_subtype = (cpu_subtype_t)proc_info->pf.rptdProc;
    proc_info->cpu_threadtype = CPU_THREADTYPE_NONE;
    proc_info->running = TRUE;

}

/*
 *	Routine:	cpu_machine_init
 *	Function:	Finish machine-level initialization of this processor:
 *			release the reset handler, handle hibernate wakeup, and
 *			synchronize the timebase with the master.
 */
void
cpu_machine_init(
    void)
{
    struct per_proc_info		*proc_info;
    volatile struct per_proc_info	*mproc_info;


    proc_info = getPerProc();
    mproc_info = PerProcTable[master_cpu].ppe_vaddr;

    if (proc_info != mproc_info) {
        simple_lock(&rht_lock);
        if (rht_state & RHT_WAIT)
            thread_wakeup(&rht_state);
        rht_state &= ~(RHT_BUSY|RHT_WAIT);
        simple_unlock(&rht_lock);
    }

    PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

    if (proc_info->hibernate) {
        uint32_t	tbu, tbl;

        do {
            tbu = mftbu();
            tbl = mftb();
        } while (mftbu() != tbu);

        proc_info->hibernate = 0;
        hibernate_machine_init();

        // hibernate_machine_init() could take minutes and we don't want timeouts
        // to fire as soon as scheduling starts. Reset timebase so it appears
        // no time has elapsed, as it would for regular sleep.
        mttb(0);
        mttbu(tbu);
        mttb(tbl);
    }

    if (proc_info != mproc_info) {
        while (!((mproc_info->cpu_flags) & SignalReady))
            continue;
        cpu_sync_timebase();
    }

    ml_init_interrupt();
    if (proc_info != mproc_info)
        simple_lock(&SignalReadyLock);
    proc_info->cpu_flags |= BootDone|SignalReady;
    if (proc_info != mproc_info) {
        if (proc_info->ppXFlags & SignalReadyWait) {
            hw_atomic_and(&proc_info->ppXFlags, ~SignalReadyWait);
            thread_wakeup(&proc_info->cpu_flags);
        }
        simple_unlock(&SignalReadyLock);
        pmsPark();				/* Timers should be cool now, park the power management stepper */
    }
}


/*
 *	Routine:	cpu_per_proc_alloc
 *	Function:	Allocate and initialize a per_proc_info structure together
 *			with its interrupt and debugger stacks.
 */
struct per_proc_info *
cpu_per_proc_alloc(
    void)
{
    struct per_proc_info	*proc_info = NULL;
    void			*interrupt_stack = NULL;
    void			*debugger_stack = NULL;

    if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
        return (struct per_proc_info *)NULL;
    if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
        kfree(proc_info, sizeof(struct per_proc_info));
        return (struct per_proc_info *)NULL;
    }

    if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
        kfree(proc_info, sizeof(struct per_proc_info));
        kfree(interrupt_stack, INTSTACK_SIZE);
        return (struct per_proc_info *)NULL;
    }

    bzero((void *)proc_info, sizeof(struct per_proc_info));

    proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info + 0x1000) << PAGE_SHIFT;	/* Set physical address of the second page */
    proc_info->next_savearea = (uint64_t)save_get_init();
    proc_info->pf = BootProcInfo.pf;
    proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
    proc_info->intstack_top_ss = proc_info->istackptr;
    proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
    proc_info->debstack_top_ss = proc_info->debstackptr;

    return proc_info;

}


/*
 *	Routine:	cpu_per_proc_free
 *	Function:	Release a per_proc_info structure and its stacks.  The
 *			master processor's per_proc is never freed.
 */
void
cpu_per_proc_free(
    struct per_proc_info	*proc_info
)
{
    if (proc_info->cpu_number == master_cpu)
        return;
    kfree((void *)(proc_info->intstack_top_ss - INTSTACK_SIZE + FM_SIZE), INTSTACK_SIZE);
    kfree((void *)(proc_info->debstack_top_ss - KERNEL_STACK_SIZE + FM_SIZE), KERNEL_STACK_SIZE);
    kfree((void *)proc_info, sizeof(struct per_proc_info));	/* Release the per_proc */
}


/*
 *	Routine:	cpu_per_proc_register
 *	Function:	Assign the next available CPU number to a per_proc and
 *			enter it into PerProcTable.
 */
kern_return_t
cpu_per_proc_register(
    struct per_proc_info	*proc_info
)
{
    int		cpu;

    mutex_lock(&ppt_lock);
    if (real_ncpus >= max_ncpus) {
        mutex_unlock(&ppt_lock);
        return KERN_FAILURE;
    }
    cpu = real_ncpus;
    proc_info->cpu_number = cpu;
    PerProcTable[cpu].ppe_vaddr = proc_info;
    PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)proc_info) << PAGE_SHIFT;
    eieio();
    real_ncpus++;
    mutex_unlock(&ppt_lock);
    return KERN_SUCCESS;
}

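/*
 * Illustrative sketch (not a call site in this file): platform bring-up
 * would pair allocation with registration before starting the new
 * processor, along these lines:
 *
 *	struct per_proc_info *pp = cpu_per_proc_alloc();
 *	if (pp == NULL || cpu_per_proc_register(pp) != KERN_SUCCESS)
 *		return KERN_FAILURE;		(out of memory, or table full)
 *	(void) cpu_start(pp->cpu_number);
 */
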
/*
 *	Routine:	cpu_start
 *	Function:	Start the processor in the given slot, resetting its
 *			per_proc state and waiting until it reports SignalReady.
 */
kern_return_t
cpu_start(
    int cpu)
{
    struct per_proc_info	*proc_info;
    kern_return_t		ret;
    mapping_t			*mp;

    proc_info = PerProcTable[cpu].ppe_vaddr;

    if (cpu == cpu_number()) {
        PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
        ml_init_interrupt();
        proc_info->cpu_flags |= BootDone|SignalReady;

        return KERN_SUCCESS;
    } else {
        proc_info->cpu_flags &= BootDone;
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;
        proc_info->istackptr = proc_info->intstack_top_ss;
        proc_info->rtcPop = EndOfAllTime;
        proc_info->FPU_owner = 0;
        proc_info->VMX_owner = 0;
        proc_info->pms.pmsStamp = 0;			/* Dummy transition time */
        proc_info->pms.pmsPop = EndOfAllTime;		/* Set the pop way into the future */
        proc_info->pms.pmsState = pmsParked;		/* Park the stepper */
        proc_info->pms.pmsCSetCmd = pmsCInit;		/* Set dummy initial hardware state */
        mp = (mapping_t *)(&proc_info->ppUMWmp);
        mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | 1;
        mp->mpSpace = invalSpace;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

            simple_lock(&rht_lock);
            while (rht_state & RHT_BUSY) {
                rht_state |= RHT_WAIT;
                thread_sleep_usimple_lock((event_t)&rht_state,
                                          &rht_lock, THREAD_UNINT);
            }
            rht_state |= RHT_BUSY;
            simple_unlock(&rht_lock);

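            /*
             * Arm the reset vector: the three words written below are a
             * start marker (RESET_HANDLER_START), the entry point
             * (_start_cpu), and the new processor's PerProcTable entry.
             */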
            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[cpu]);
        }
/*
 *		Note: we pass the current time to the other processor here. He will load it
 *		as early as possible so that there is a chance that it is close to accurate.
 *		After the machine is up a while, we will officially resync the clocks so
 *		that all processors are the same.  This is just to get close.
 */

        ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);

        __asm__ volatile("sync");			/* Commit to storage */
        __asm__ volatile("isync");			/* Wait a second */
        ret = PE_cpu_start(proc_info->cpu_id,
                           proc_info->start_paddr, (vm_offset_t)proc_info);

        if (ret != KERN_SUCCESS) {
            if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
                simple_lock(&rht_lock);
                if (rht_state & RHT_WAIT)
                    thread_wakeup(&rht_state);
                rht_state &= ~(RHT_BUSY|RHT_WAIT);
                simple_unlock(&rht_lock);
            }
        } else {
            simple_lock(&SignalReadyLock);

            if (!((*(volatile short *)&proc_info->cpu_flags) & SignalReady)) {
                hw_atomic_or(&proc_info->ppXFlags, SignalReadyWait);
                thread_sleep_simple_lock((event_t)&proc_info->cpu_flags,
                                         &SignalReadyLock, THREAD_UNINT);
            }
            simple_unlock(&SignalReadyLock);

        }
        return(ret);
    }
}

/*
 *	Routine:	cpu_exit_wait
 *	Function:	Spin until the given processor indicates it has reached
 *			its sleep state.
 */
void
cpu_exit_wait(
    int	cpu)
{
    struct per_proc_info	*tpproc;

    if ( cpu != master_cpu) {
        tpproc = PerProcTable[cpu].ppe_vaddr;
        while (!((*(volatile short *)&tpproc->cpu_flags) & SleepState)) {};
    }
}


/*
 *	Routine:	cpu_doshutdown
 *	Function:	Take the current processor offline.
 */
void
cpu_doshutdown(
    void)
{
    enable_preemption();
    processor_offline(current_processor());
}


/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare this processor for sleep: flush FPU and vector
 *			state, and, on the master, wait for all other processors to
 *			reach their sleep state before saving the timebase and
 *			quiescing.
 */
void
cpu_sleep(
    void)
{
    struct per_proc_info	*proc_info;
    unsigned int		i;
    unsigned int		wait_ncpus_sleep, ncpus_sleep;
    facility_context		*fowner;

    proc_info = getPerProc();

    proc_info->running = FALSE;

    fowner = proc_info->FPU_owner;		/* Cache this */
    if(fowner) fpu_save(fowner);		/* If anyone owns FPU, save it */
    proc_info->FPU_owner = 0;			/* Set no fpu owner now */

    fowner = proc_info->VMX_owner;		/* Cache this */
    if(fowner) vec_save(fowner);		/* If anyone owns vectors, save it */
    proc_info->VMX_owner = 0;			/* Set no vector owner now */

    if (proc_info->cpu_number == master_cpu) {
        proc_info->cpu_flags &= BootDone;
        proc_info->interrupts_enabled = 0;
        proc_info->pending_ast = AST_NONE;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          RESET_HANDLER_START);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          (vm_offset_t)_start_cpu);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          (vm_offset_t)&PerProcTable[master_cpu]);

            __asm__ volatile("sync");
            __asm__ volatile("isync");
        }

        wait_ncpus_sleep = real_ncpus-1;
        ncpus_sleep = 0;
        while (wait_ncpus_sleep != ncpus_sleep) {
            ncpus_sleep = 0;
            for(i=1; i < real_ncpus ; i++) {
                if ((*(volatile short *)&(PerProcTable[i].ppe_vaddr->cpu_flags)) & SleepState)
                    ncpus_sleep++;
            }
        }

    }

    /*
     * Save the TBR before stopping.
     */
    do {
        proc_info->save_tbu = mftbu();
        proc_info->save_tbl = mftb();
    } while (mftbu() != proc_info->save_tbu);

    PE_cpu_machine_quiesce(proc_info->cpu_id);
}


/*
 *	Routine:	cpu_signal
 *	Function:
 *		Here is where we send a message to another processor.  So far we only have two:
 *		SIGPast and SIGPdebug.  SIGPast is used to preempt and kick off threads (this is
 *		currently disabled).  SIGPdebug is used to enter the debugger.
 *
 *		We set up the SIGP function to indicate that this is a simple message and set the
 *		order code (MPsigpParm0) to the message order (SIGPast or SIGPdebug).  After
 *		finding the per_processor block for the target, we lock the message block.  Then
 *		we set the parameter(s).  Next we change the lock (also called "busy") to
 *		"passing" and finally signal the other processor.  Note that we only wait about
 *		half a millisecond to get the message lock.  If we time out, we return failure
 *		to our caller.  It is their responsibility to recover.
 */
kern_return_t
cpu_signal(
    int target,
    int signal,
    unsigned int p1,
    unsigned int p2)
{

    unsigned int		holdStat;
    struct per_proc_info	*tpproc, *mpproc;
    int				busybitset = 0;

#if DEBUG
    if(((unsigned int)target) >= MAX_CPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

    mpproc = getPerProc();				/* Point to our block */
    tpproc = PerProcTable[target].ppe_vaddr;		/* Point to the target's block */
    if(mpproc == tpproc) return KERN_FAILURE;		/* Cannot signal ourselves */

    if(!tpproc->running) return KERN_FAILURE;

    if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

    if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

        if(signal == SIGPwake) {			/* SIGPwake can merge into all others... */
            mpproc->hwCtr.numSIGPmwake++;		/* Account for merged wakes */
            return KERN_SUCCESS;
        }

        if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
            mpproc->hwCtr.numSIGPmast++;		/* Account for merged ASTs */
            return KERN_SUCCESS;			/* Don't bother to send this one... */
        }

        if (tpproc->MPsigpParm0 == SIGPwake) {
            if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
                              (MPsigpBusy | MPsigpPass ), MPsigpBusy, 0)) {
                busybitset = 1;
                mpproc->hwCtr.numSIGPmwake++;
            }
        }
    }

    if((busybitset == 0) &&
       (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
       (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
        mpproc->hwCtr.numSIGPtimo++;			/* Account for timeouts */
        return KERN_FAILURE;				/* Timed out, take your ball and go home... */
    }

    holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | mpproc->cpu_number;	/* Set up the signal status word */
    tpproc->MPsigpParm0 = signal;			/* Set message order */
    tpproc->MPsigpParm1 = p1;				/* Set additional parm */
    tpproc->MPsigpParm2 = p2;				/* Set additional parm */

    __asm__ volatile("sync");				/* Make sure it's all there */

    tpproc->MPsigpStat = holdStat;			/* Set status and pass the lock */
    __asm__ volatile("eieio");				/* I'm a paranoid freak */

    if (busybitset == 0)
        PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

    return KERN_SUCCESS;				/* All is goodness and rainbows... */
}

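/*
 * Illustrative sketch (hypothetical caller, not a call site in this file):
 * since the message lock can time out, senders typically retry, e.g. to
 * wake a napping processor with the merge-friendly SIGPwake order:
 *
 *	while (cpu_signal(target_cpu, SIGPwake, 0, 0) != KERN_SUCCESS)
 *		continue;
 */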

/*
 *	Routine:	cpu_signal_handler
 *	Function:
 *		Here is where we implement the receiver of the signaling protocol.
 *		We wait for the signal status area to be passed to us. Then we snarf
 *		up the status, the sender, and the 3 potential parms. Next we release
 *		the lock and signal the other guy.
 */
void
cpu_signal_handler(
    void)
{

    unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
    unsigned int *parmAddr;
    struct per_proc_info	*proc_info;
    int cpu;
    broadcastFunc xfunc;
    cpu = cpu_number();					/* Get the CPU number */

    proc_info = getPerProc();

/*
 *	Since we've been signaled, wait about 31 ms for the signal lock to pass
 */
    if(!hw_lock_mbits(&proc_info->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
      (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
        panic("cpu_signal_handler: Lock pass timed out\n");
    }

    holdStat = proc_info->MPsigpStat;			/* Snarf stat word */
    holdParm0 = proc_info->MPsigpParm0;			/* Snarf parameter */
    holdParm1 = proc_info->MPsigpParm1;			/* Snarf parameter */
    holdParm2 = proc_info->MPsigpParm2;			/* Snarf parameter */

    __asm__ volatile("isync");				/* Make sure we don't unlock until memory is in */

    proc_info->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

    switch ((holdStat & MPsigpFunc) >> 8) {		/* Decode function code */

        case MPsigpIdle:				/* Was function cancelled? */
            return;					/* Yup... */

        case MPsigpSigp:				/* Signal Processor message? */

            switch (holdParm0) {			/* Decode SIGP message order */

                case SIGPast:				/* Should we do an AST? */
                    proc_info->hwCtr.numSIGPast++;	/* Count this one */
#if 0
                    kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
                    ast_check((processor_t)proc_info->processor);
                    return;				/* All done... */

                case SIGPcpureq:			/* CPU specific function? */

                    proc_info->hwCtr.numSIGPcpureq++;	/* Count this one */
                    switch (holdParm1) {		/* Select specific function */

                        case CPRQtimebase:

                            cpu_timebase_signal_handler(proc_info, (struct SIGtimebase *)holdParm2);
                            return;

                        case CPRQsegload:
                            return;

                        case CPRQchud:
                            parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
                            if(perfCpuSigHook) {
                                struct savearea *ssp = current_thread()->machine.pcb;
                                if(ssp) {
                                    (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
                                }
                            }
                            parmAddr[1] = 0;
                            parmAddr[0] = 0;		/* Show we're done */
                            return;

                        case CPRQscom:
                            if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing? */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
                            }
                            else {			/* No, reading... */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
                            }
                            return;

                        case CPRQsps:
                            {
                                ml_set_processor_speed_slave(holdParm2);
                                return;
                            }
                        default:
                            panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
                            return;
                    }


                case SIGPdebug:				/* Enter the debugger? */

                    proc_info->hwCtr.numSIGPdebug++;	/* Count this one */
                    proc_info->debugger_is_slave++;	/* Bump up the count to show we're here */
                    hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
                    __asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
                    return;				/* All done now... */

                case SIGPwake:				/* Wake up CPU */
                    proc_info->hwCtr.numSIGPwake++;	/* Count this one */
                    return;				/* No need to do anything, the interrupt does it all... */

                case SIGPcall:				/* Call function on CPU */
                    proc_info->hwCtr.numSIGPcall++;	/* Count this one */
                    xfunc = holdParm1;			/* Do this since I can't seem to figure C out */
                    xfunc(holdParm2);			/* Call the passed function */
                    return;				/* Done... */

                default:
                    panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
                    return;

            }

        default:
            panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
            return;

    }
    panic("cpu_signal_handler: we should never get here\n");
}

/*
 *	Routine:	cpu_sync_timebase
 *	Function:	Synchronize this processor's timebase with the master's,
 *			using the SIGtimebase handshake described above.
 */
void
cpu_sync_timebase(
    void)
{
    natural_t tbu, tbl;
    boolean_t	intr;
    struct SIGtimebase	syncClkSpot;

    intr = ml_set_interrupts_enabled(FALSE);		/* No interruptions in here */

    syncClkSpot.avail = FALSE;
    syncClkSpot.ready = FALSE;
    syncClkSpot.done = FALSE;

    while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
                      (unsigned int)&syncClkSpot) != KERN_SUCCESS)
        continue;

    while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
        continue;

    isync();

    /*
     * We do the following to keep the compiler from generating extra stuff
     * in tb set part
     */
    tbu = syncClkSpot.abstime >> 32;
    tbl = (uint32_t)syncClkSpot.abstime;

    mttb(0);
    mttbu(tbu);
    mttb(tbl);

    syncClkSpot.ready = TRUE;

    while (*(volatile int *)&(syncClkSpot.done) == FALSE)
        continue;

    setTimerReq();					/* Start the timer */

    (void)ml_set_interrupts_enabled(intr);
}


/*
 *	Routine:	cpu_timebase_signal_handler
 *	Function:	Master-side handler for CPRQtimebase: publish the current
 *			timebase to the requesting processor and complete the
 *			handshake.
 */
void
cpu_timebase_signal_handler(
    struct per_proc_info	*proc_info,
    struct SIGtimebase		*timebaseAddr)
{
    unsigned int		tbu, tbu2, tbl;

    if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
        proc_info->time_base_enable(proc_info->cpu_id, FALSE);

    timebaseAddr->abstime = 0;				/* Touch to force into cache */
    sync();

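    /*
     * Read the 64-bit timebase as two 32-bit halves: sample TBU, then TBL,
     * then TBU again, and retry if the upper half changed so the pair is
     * consistent across a low-word carry.
     */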
    do {
        asm volatile("	mftbu %0" : "=r" (tbu));
        asm volatile("	mftb %0" : "=r" (tbl));
        asm volatile("	mftbu %0" : "=r" (tbu2));
    } while (tbu != tbu2);

    timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
    sync();						/* Force order */

    timebaseAddr->avail = TRUE;

    while (*(volatile int *)&(timebaseAddr->ready) == FALSE);

    if(proc_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
        proc_info->time_base_enable(proc_info->cpu_id, TRUE);

    timebaseAddr->done = TRUE;
}


/*
 *	Routine:	cpu_control
 *	Function:	Processor-specific control operations; currently used to
 *			clear and program the performance monitor registers.
 */
kern_return_t
cpu_control(
    int			slot_num,
    processor_info_t	info,
    unsigned int	count)
{
    struct per_proc_info	*proc_info;
    cpu_type_t			tcpu_type;
    cpu_subtype_t		tcpu_subtype;
    processor_pm_regs_t		perf_regs;
    processor_control_cmd_t	cmd;
    boolean_t			oldlevel;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000

    proc_info = PerProcTable[slot_num].ppe_vaddr;
    tcpu_type = proc_info->cpu_type;
    tcpu_subtype = proc_info->cpu_subtype;
    cmd = (processor_control_cmd_t) info;

    if (count < PROCESSOR_CONTROL_CMD_COUNT)
        return(KERN_FAILURE);

    if ( tcpu_type != cmd->cmd_cpu_type ||
         tcpu_subtype != cmd->cmd_cpu_subtype)
        return(KERN_FAILURE);

    if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
        return(KERN_RESOURCE_SHORTAGE);			/* cpu performance facility in use by another task */
    }

    switch (cmd->cmd_op)
    {
        case PROCESSOR_PM_CLR_PMC:			/* Clear Performance Monitor Counters */
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                {
                    oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                    mtpmc1(0x0);
                    mtpmc2(0x0);
                    mtpmc3(0x0);
                    mtpmc4(0x0);
                    ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
                    return(KERN_SUCCESS);
                }
                default:
                    return(KERN_FAILURE);
            } /* tcpu_subtype */
        case PROCESSOR_PM_SET_REGS:			/* Set Performance Monitor Registers */
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* switch tcpu_subtype */
        case PROCESSOR_PM_SET_MMCR:
            switch (tcpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                        PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* tcpu_subtype */
        default:
            return(KERN_FAILURE);
    } /* switch cmd_op */
}


/*
 *	Routine:	cpu_info_count
 *	Function:	Return the size of the info array for the given flavor.
 */
kern_return_t
cpu_info_count(
    processor_flavor_t	flavor,
    unsigned int	*count)
{
    cpu_subtype_t	tcpu_subtype;

    /*
     * For now, we just assume that all CPUs are of the same type
     */
    tcpu_subtype = PerProcTable[master_cpu].ppe_vaddr->cpu_subtype;
    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:
            switch (tcpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    *count = 0;
                    return(KERN_INVALID_ARGUMENT);
            } /* switch tcpu_subtype */

        case PROCESSOR_TEMPERATURE:
            *count = PROCESSOR_TEMPERATURE_COUNT;
            return (KERN_SUCCESS);

        default:
            *count = 0;
            return(KERN_INVALID_ARGUMENT);

    }
}


/*
 *	Routine:	cpu_info
 *	Function:	Return processor information for the given flavor.
 */
kern_return_t
cpu_info(
    processor_flavor_t	flavor,
    int			slot_num,
    processor_info_t	info,
    unsigned int	*count)
{
    cpu_subtype_t	tcpu_subtype;
    processor_pm_regs_t	perf_regs;
    boolean_t		oldlevel;

    tcpu_subtype = PerProcTable[slot_num].ppe_vaddr->cpu_subtype;

    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:

            perf_regs = (processor_pm_regs_t) info;

            switch (tcpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs)  = mfpmc1();
                    PERFMON_PMC2(perf_regs)  = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs)  = mfpmc3();
                    PERFMON_PMC4(perf_regs)  = mfpmc4();
                    ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs)  = mfpmc1();
                    PERFMON_PMC2(perf_regs)  = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs)  = mfpmc3();
                    PERFMON_PMC4(perf_regs)  = mfpmc4();
                    PERFMON_MMCR2(perf_regs) = mfmmcr2();
                    ml_set_interrupts_enabled(oldlevel);	/* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    return(KERN_FAILURE);
            } /* switch tcpu_subtype */

        case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */

            *info = -1;					/* Temperature reading is not supported */
            return(KERN_FAILURE);

        default:
            return(KERN_INVALID_ARGUMENT);

    } /* flavor */
}


/*
 *	Routine:	cpu_to_processor
 *	Function:	Return the processor_t for the given CPU number.
 */
processor_t
cpu_to_processor(
    int			cpu)
{
    return ((processor_t)PerProcTable[cpu].ppe_vaddr->processor);
}


/*
 *	Routine:	slot_type
 *	Function:	Return the CPU type of the processor in the given slot.
 */
cpu_type_t
slot_type(
    int		slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_type);
}


/*
 *	Routine:	slot_subtype
 *	Function:	Return the CPU subtype of the processor in the given slot.
 */
cpu_subtype_t
slot_subtype(
    int		slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_subtype);
}


/*
 *	Routine:	slot_threadtype
 *	Function:	Return the CPU thread type of the processor in the given slot.
 */
cpu_threadtype_t
slot_threadtype(
    int		slot_num)
{
    return (PerProcTable[slot_num].ppe_vaddr->cpu_threadtype);
}


/*
 *	Routine:	cpu_type
 *	Function:	Return the CPU type of the current processor.
 */
cpu_type_t
cpu_type(void)
{
    return (getPerProc()->cpu_type);
}


/*
 *	Routine:	cpu_subtype
 *	Function:	Return the CPU subtype of the current processor.
 */
cpu_subtype_t
cpu_subtype(void)
{
    return (getPerProc()->cpu_subtype);
}


/*
 *	Routine:	cpu_threadtype
 *	Function:	Return the CPU thread type of the current processor.
 */
cpu_threadtype_t
cpu_threadtype(void)
{
    return (getPerProc()->cpu_threadtype);
}

/*
 *	Call a function on all running processors
 *
 *	Note that the synch parameter is used to wait until all functions are complete.
 *	It is not passed to the other processor and must be known by the called function.
 *	The called function must do a thread_wakeup on the synch if it decrements the
 *	synch count to 0.  A sketch of such a function follows cpu_broadcast() below.
 */


int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {

    int sigproc, cpu, ocpu;

    cpu = cpu_number();					/* Who are we? */
    sigproc = 0;					/* Clear called processor count */

    if(real_ncpus > 1) {				/* Are we just a uni? */

        assert_wait((event_t)synch, THREAD_UNINT);	/* If more than one processor, we may have to wait */

        for(ocpu = 0; ocpu < real_ncpus; ocpu++) {	/* Tell everyone to call */
            if(ocpu == cpu) continue;			/* If we talk to ourselves, people will wonder... */
            hw_atomic_add(synch, 1);			/* Tentatively bump synchronizer */
            sigproc++;					/* Tentatively bump signal sent count */
            if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) {	/* Call the function on the other processor */
                hw_atomic_sub(synch, 1);		/* Other guy isn't really there, ignore it */
                sigproc--;				/* and don't count it */
            }
        }

        if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED);	/* Clear wait if we never signalled */
        else thread_block(THREAD_CONTINUE_NULL);	/* Wait for everyone to get into step... */
    }

    return sigproc;					/* Return the number of guys actually signalled */

}
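
/*
 * Illustrative sketch (not part of the original source): a function handed
 * to cpu_broadcast() must decrement the synch count itself and wake the
 * broadcaster when the count reaches zero, along these lines:
 *
 *	static uint32_t mySynch;
 *
 *	static void
 *	my_broadcast_func(uint32_t parm)
 *	{
 *		... per-processor work goes here ...
 *		if (hw_atomic_sub(&mySynch, 1) == 0)	... last one wakes the broadcaster ...
 *			thread_wakeup((event_t)&mySynch);
 *	}
 *
 * invoked as: cpu_broadcast(&mySynch, my_broadcast_func, 0);
 */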