]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/cpu.c
360c29c6efc7f6c38a57c78ea67edf39ae2f0128
[apple/xnu.git] / osfmk / ppc / cpu.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * File: ppc/cpu.c
24 *
25 * cpu specific routines
26 */
27
28 #include <kern/machine.h>
29 #include <kern/misc_protos.h>
30 #include <kern/thread.h>
31 #include <kern/processor.h>
32 #include <mach/machine.h>
33 #include <mach/processor_info.h>
34 #include <mach/mach_types.h>
35 #include <ppc/proc_reg.h>
36 #include <ppc/misc_protos.h>
37 #include <ppc/machine_routines.h>
38 #include <ppc/machine_cpu.h>
39 #include <ppc/exception.h>
40 #include <pexpert/pexpert.h>
41 //#include <pexpert/ppc/powermac.h>
42
/* TODO: BOGUS TO BE REMOVED */
int real_ncpus = 1;				/* Count of CPUs actually registered (bumped in cpu_register) */

int wncpu = NCPUS;				/* Number of CPU slots we are willing to hand out */
resethandler_t resethandler_target;		/* Staging area for the physical reset-vector handler */

/* Bits of the MMCRx performance-monitor control registers callers are allowed to set */
#define MMCR0_SUPPORT_MASK 0xf83f1fff
#define MMCR1_SUPPORT_MASK 0xffc00000
#define MMCR2_SUPPORT_MASK 0x80000000

/* Kernel-debugger rendezvous state, defined elsewhere */
extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;

/*
 * Handshake area used to copy the master CPU's timebase to a secondary
 * processor (see cpu_sync_timebase and the CPRQtimebase signal handler).
 */
struct SIGtimebase {
	boolean_t	avail;		/* Master has stored its timebase in abstime */
	boolean_t	ready;		/* Slave has loaded the timebase */
	boolean_t	done;		/* Master has re-enabled its timebase */
	AbsoluteTime	abstime;	/* The timebase value being passed */
};

extern struct SIGtimebase syncClkSpot;

void cpu_sync_timebase(void);
68
69 kern_return_t
70 cpu_control(
71 int slot_num,
72 processor_info_t info,
73 unsigned int count)
74 {
75 cpu_type_t cpu_type;
76 cpu_subtype_t cpu_subtype;
77 processor_pm_regs_t perf_regs;
78 processor_control_cmd_t cmd;
79 boolean_t oldlevel;
80
81 cpu_type = machine_slot[slot_num].cpu_type;
82 cpu_subtype = machine_slot[slot_num].cpu_subtype;
83 cmd = (processor_control_cmd_t) info;
84
85 if (count < PROCESSOR_CONTROL_CMD_COUNT)
86 return(KERN_FAILURE);
87
88 if ( cpu_type != cmd->cmd_cpu_type ||
89 cpu_subtype != cmd->cmd_cpu_subtype)
90 return(KERN_FAILURE);
91
92 switch (cmd->cmd_op)
93 {
94 case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */
95 switch (cpu_subtype)
96 {
97 case CPU_SUBTYPE_POWERPC_604:
98 {
99 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
100 mtpmc1(0x0);
101 mtpmc2(0x0);
102 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
103 return(KERN_SUCCESS);
104 }
105 case CPU_SUBTYPE_POWERPC_604e:
106 case CPU_SUBTYPE_POWERPC_750:
107 case CPU_SUBTYPE_POWERPC_7400:
108 case CPU_SUBTYPE_POWERPC_7450:
109 {
110 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
111 mtpmc1(0x0);
112 mtpmc2(0x0);
113 mtpmc3(0x0);
114 mtpmc4(0x0);
115 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
116 return(KERN_SUCCESS);
117 }
118 default:
119 return(KERN_FAILURE);
120 } /* cpu_subtype */
121 case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registors */
122 switch (cpu_subtype)
123 {
124 case CPU_SUBTYPE_POWERPC_604:
125 if (count < (PROCESSOR_CONTROL_CMD_COUNT
126 + PROCESSOR_PM_REGS_COUNT_POWERPC_604))
127 return(KERN_FAILURE);
128 else
129 {
130 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
131 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
132 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
133 mtpmc1(PERFMON_PMC1(perf_regs));
134 mtpmc2(PERFMON_PMC2(perf_regs));
135 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
136 return(KERN_SUCCESS);
137 }
138 case CPU_SUBTYPE_POWERPC_604e:
139 case CPU_SUBTYPE_POWERPC_750:
140 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
141 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
142 return(KERN_FAILURE);
143 else
144 {
145 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
146 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
147 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
148 mtpmc1(PERFMON_PMC1(perf_regs));
149 mtpmc2(PERFMON_PMC2(perf_regs));
150 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
151 mtpmc3(PERFMON_PMC3(perf_regs));
152 mtpmc4(PERFMON_PMC4(perf_regs));
153 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
154 return(KERN_SUCCESS);
155 }
156 case CPU_SUBTYPE_POWERPC_7400:
157 case CPU_SUBTYPE_POWERPC_7450:
158 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
159 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
160 return(KERN_FAILURE);
161 else
162 {
163 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
164 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
165 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
166 mtpmc1(PERFMON_PMC1(perf_regs));
167 mtpmc2(PERFMON_PMC2(perf_regs));
168 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
169 mtpmc3(PERFMON_PMC3(perf_regs));
170 mtpmc4(PERFMON_PMC4(perf_regs));
171 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
172 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
173 return(KERN_SUCCESS);
174 }
175 default:
176 return(KERN_FAILURE);
177 } /* switch cpu_subtype */
178 case PROCESSOR_PM_SET_MMCR:
179 switch (cpu_subtype)
180 {
181 case CPU_SUBTYPE_POWERPC_604:
182 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
183 PROCESSOR_PM_REGS_COUNT_POWERPC_604))
184 return(KERN_FAILURE);
185 else
186 {
187 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
188 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
189 return(KERN_SUCCESS);
190 }
191 case CPU_SUBTYPE_POWERPC_604e:
192 case CPU_SUBTYPE_POWERPC_750:
193 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
194 PROCESSOR_PM_REGS_COUNT_POWERPC_750))
195 return(KERN_FAILURE);
196 else
197 {
198 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
199 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
200 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
201 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
202 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
203 return(KERN_SUCCESS);
204 }
205 case CPU_SUBTYPE_POWERPC_7400:
206 case CPU_SUBTYPE_POWERPC_7450:
207 if (count < (PROCESSOR_CONTROL_CMD_COUNT +
208 PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
209 return(KERN_FAILURE);
210 else
211 {
212 perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
213 oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */
214 mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
215 mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
216 mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
217 ml_set_interrupts_enabled(oldlevel); /* enable interrupts */
218 return(KERN_SUCCESS);
219 }
220 default:
221 return(KERN_FAILURE);
222 } /* cpu_subtype */
223 default:
224 return(KERN_FAILURE);
225 } /* switch cmd_op */
226 }
227
228 kern_return_t
229 cpu_info_count(
230 processor_flavor_t flavor,
231 unsigned int *count)
232 {
233 cpu_subtype_t cpu_subtype;
234
235 /*
236 * For now, we just assume that all CPUs are of the same type
237 */
238 cpu_subtype = machine_slot[0].cpu_subtype;
239 switch (flavor) {
240 case PROCESSOR_PM_REGS_INFO:
241 switch (cpu_subtype) {
242 case CPU_SUBTYPE_POWERPC_604:
243 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
244 return(KERN_SUCCESS);
245
246 case CPU_SUBTYPE_POWERPC_604e:
247 case CPU_SUBTYPE_POWERPC_750:
248
249 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
250 return(KERN_SUCCESS);
251
252 case CPU_SUBTYPE_POWERPC_7400:
253 case CPU_SUBTYPE_POWERPC_7450:
254
255 *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
256 return(KERN_SUCCESS);
257
258 default:
259 *count = 0;
260 return(KERN_INVALID_ARGUMENT);
261 } /* switch cpu_subtype */
262
263 case PROCESSOR_TEMPERATURE:
264 *count = PROCESSOR_TEMPERATURE_COUNT;
265 return (KERN_SUCCESS);
266
267 default:
268 *count = 0;
269 return(KERN_INVALID_ARGUMENT);
270
271 }
272 }
273
/*
 *	cpu_info - Return per-cpu information for a given flavor.
 *
 *	PROCESSOR_PM_REGS_INFO reads the performance-monitor registers
 *	appropriate to the slot's cpu subtype, with interrupts disabled so
 *	the register set is sampled consistently.  PROCESSOR_TEMPERATURE
 *	reads the local die temperature directly, or signals the target cpu
 *	and waits for it to report (see CPRQtemp in cpu_signal_handler).
 */
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	boolean_t		oldlevel;
	unsigned int		temp[2];	/* [0] = sync flag, [1] = temperature (see CPRQtemp protocol) */

	cpu_subtype = machine_slot[slot_num].cpu_subtype;

	switch (flavor) {
	case PROCESSOR_PM_REGS_INFO:

		perf_regs = (processor_pm_regs_t) info;

		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:	/* 604 has MMCR0 and PMC1/PMC2 only */

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs)  = mfpmc1();
			PERFMON_PMC2(perf_regs)  = mfpmc2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:	/* adds MMCR1 and PMC3/PMC4 */

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs)  = mfpmc1();
			PERFMON_PMC2(perf_regs)  = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs)  = mfpmc3();
			PERFMON_PMC4(perf_regs)  = mfpmc4();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:	/* adds MMCR2 */

			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs)  = mfpmc1();
			PERFMON_PMC2(perf_regs)  = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs)  = mfpmc3();
			PERFMON_PMC4(perf_regs)  = mfpmc4();
			PERFMON_MMCR2(perf_regs) = mfmmcr2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

		default:
			return(KERN_FAILURE);
		} /* switch cpu_subtype */

	case PROCESSOR_TEMPERATURE:	/* Get the temperature of a processor */

		disable_preemption();	/* Don't move me now */

		if(slot_num == cpu_number()) {	/* Is this for the local CPU? */
			*info = ml_read_temp();	/* Get the temperature */
		}
		else {			/* For another CPU */
			temp[0] = -1;	/* Set sync flag */
			eieio();	/* Order the flag write before the signal */
			sync();		/* Force it to memory so the other cpu sees it */
			temp[1] = -1;	/* Set invalid temperature */
			(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp ,(unsigned int)&temp);	/* Ask him to take his temperature */
			(void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
			*info = temp[1];	/* Pass it back */
		}

		enable_preemption();	/* Ok to move now */
		return(KERN_SUCCESS);

	default:
		return(KERN_INVALID_ARGUMENT);

	} /* flavor */
}
374
375 void
376 cpu_init(
377 void)
378 {
379 int cpu;
380
381 cpu = cpu_number();
382
383 machine_slot[cpu].running = TRUE;
384 machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
385 machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;
386
387 }
388
389 void
390 cpu_machine_init(
391 void)
392 {
393 struct per_proc_info *proc_info;
394 int cpu;
395
396 /* TODO: realese mutex lock reset_handler_lock */
397
398 cpu = cpu_number();
399 proc_info = &per_proc_info[cpu];
400 PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
401 if (cpu != master_cpu)
402 cpu_sync_timebase();
403 ml_init_interrupt();
404 proc_info->cpu_flags |= BootDone;
405 }
406
407 kern_return_t
408 cpu_register(
409 int *target_cpu
410 )
411 {
412 int cpu;
413
414 /*
415 * TODO:
416 * - Run cpu_register() in exclusion mode
417 */
418
419 *target_cpu = -1;
420 for(cpu=0; cpu < wncpu; cpu++) {
421 if(!machine_slot[cpu].is_cpu) {
422 machine_slot[cpu].is_cpu = TRUE;
423 *target_cpu = cpu;
424 break;
425 }
426 }
427 if (*target_cpu != -1) {
428 real_ncpus++;
429 return KERN_SUCCESS;
430 } else
431 return KERN_FAILURE;
432 }
433
/*
 *	cpu_start - Bring a processor slot on line.
 *
 *	If the target is the calling cpu, just complete machine-level init
 *	(same steps as cpu_machine_init).  Otherwise, initialize the target's
 *	per_proc area (stacks, fake interrupt controls, per-cpu array
 *	pointers), stage the physical reset-vector handler when the cpu will
 *	come up through the reset vector, pass it our current timebase, and
 *	ask the platform layer to start it.
 */
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;

	proc_info = &per_proc_info[cpu];

	if (cpu == cpu_number()) {
		/* Starting ourself: just finish platform init */
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		ml_init_interrupt();
		proc_info->cpu_flags |= BootDone;

		return KERN_SUCCESS;
	} else {
		extern void _start_cpu(void);

		proc_info->cpu_number = cpu;
		proc_info->cpu_flags &= BootDone;	/* NOTE(review): &= keeps only the BootDone bit, clearing all others — presumably intended; confirm */
		/* Carve this cpu's interrupt stack out of the contiguous intstack area */
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif	/* MACH_KDP || MACH_KDB */
		/* Use the fake interrupt controls until the cpu is far enough along for real ones */
		proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
		proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
		proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
		proc_info->cpu_data = (unsigned int)&cpu_data[cpu];
		proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
		proc_info->need_ast = (unsigned int)&need_ast[cpu];
		proc_info->FPU_thread = 0;
		proc_info->FPU_vmmCtx = 0;
		proc_info->VMX_thread = 0;
		proc_info->VMX_vmmCtx = 0;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: get mutex lock reset_handler_lock */

			/* Stage a handler at the physical reset vector that calls _start_cpu(proc_info) */
			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);

		}
		/*
		 * Note: we pass the current time to the other processor here. He will load it
		 * as early as possible so that there is a chance that it is close to accurate.
		 * After the machine is up a while, we will officially resync the clocks so
		 * that all processors are the same. This is just to get close.
		 */

		ml_get_timebase(&proc_info->ruptStamp);	/* Pass our current time to the other guy */

		__asm__ volatile("sync");		/* Commit to storage */
		__asm__ volatile("isync");		/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS &&
		    proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: release mutex lock reset_handler_lock */
		}
		return(ret);
	}
}
513
514 /*
515 * Here is where we implement the receiver of the signaling protocol.
516 * We wait for the signal status area to be passed to us. Then we snarf
517 * up the status, the sender, and the 3 potential parms. Next we release
518 * the lock and signal the other guy.
519 */
520
/*
 *	cpu_signal_handler - Receive and dispatch an inter-processor signal
 *	(see the protocol description above).  Runs on the signaled cpu.
 */
void
cpu_signal_handler(
	void)
{

	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;	/* NOTE(review): mtype is never used */
	unsigned int *parmAddr;
	struct per_proc_info *pproc;		/* Area for my per_proc address */
	int cpu;
	struct SIGtimebase *timebaseAddr;
	natural_t tbu, tbu2, tbl;

	cpu = cpu_number();			/* Get the CPU number */
	pproc = &per_proc_info[cpu];		/* Point to our block */

	/*
	 * Since we've been signaled, wait just under 1ms for the signal lock to pass
	 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}

	holdStat = pproc->MPsigpStat;		/* Snarf stat word */
	holdParm0 = pproc->MPsigpParm0;		/* Snarf parameter */
	holdParm1 = pproc->MPsigpParm1;		/* Snarf parameter */
	holdParm2 = pproc->MPsigpParm2;		/* Snarf parameter */

	__asm__ volatile("isync");		/* Make sure we don't unlock until memory is in */

	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {	/* Decode function code */

		case MPsigpIdle:		/* Was function cancelled? */
			return;			/* Yup... */

		case MPsigpSigp:		/* Signal Processor message? */

			switch (holdParm0) {	/* Decode SIGP message order */

				case SIGPast:	/* Should we do an AST? */
					pproc->numSIGPast++;	/* Count this one */
#if 0
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
					ast_check();	/* Yes, do it */
					/* XXX: Should check if AST_URGENT is needed */
					ast_on(AST_URGENT);
					return;		/* All done... */

				case SIGPcpureq:	/* CPU specific function? */

					pproc->numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {	/* Select specific function */

						case CPRQtemp:	/* Get the temperature */
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							parmAddr[1] = ml_read_temp();	/* Get the core temperature */
							eieio();	/* Force order */
							sync();		/* Force to memory */
							parmAddr[0] = 0;	/* Show we're done */
							return;

						case CPRQtimebase:	/* Pass our timebase to a synchronizing cpu */

							timebaseAddr = (struct SIGtimebase *)holdParm2;

							if(pproc->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
								pproc->time_base_enable(pproc->cpu_id, FALSE);	/* Freeze our timebase while it's copied */

							timebaseAddr->abstime.hi = 0;	/* Touch to force into cache */
							sync();

							/* Standard PPC 64-bit timebase read: retry until the upper half is stable */
							do {
								asm volatile("	mftbu %0" : "=r" (tbu));
								asm volatile("	mftb %0" : "=r" (tbl));
								asm volatile("	mftbu %0" : "=r" (tbu2));
							} while (tbu != tbu2);

							timebaseAddr->abstime.lo = tbl;	/* Set low order */
							timebaseAddr->abstime.hi = tbu;	/* Set high order */
							sync();		/* Force order */

							timebaseAddr->avail = TRUE;	/* Tell the requester the value is posted */

							/* NOTE(review): spins on the global syncClkSpot rather than
							   timebaseAddr; equivalent only because the sole requester
							   (cpu_sync_timebase) passes &syncClkSpot — confirm */
							while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

							if(pproc->time_base_enable !=  (void(*)(cpu_id_t, boolean_t ))NULL)
								pproc->time_base_enable(pproc->cpu_id, TRUE);	/* Let our timebase run again */

							timebaseAddr->done = TRUE;	/* Complete the handshake */

							return;

						default:
							panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
							return;
					}


				case SIGPdebug:	/* Enter the debugger? */

					pproc->numSIGPdebug++;	/* Count this one */
					debugger_is_slave[cpu]++;	/* Bump up the count to show we're here */
					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
					return;		/* All done now... */

				case SIGPwake:	/* Wake up CPU */
					pproc->numSIGPwake++;	/* Count this one */
					return;		/* No need to do anything, the interrupt does it all... */

				default:
					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
					return;

			}

		default:
			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
			return;

	}
	panic("cpu_signal_handler: we should never get here\n");	/* Unreachable: every case above returns or panics */
}
647
648 /*
649 * Here is where we send a message to another processor. So far we only have two:
650 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
651 * currently disabled). SIGPdebug is used to enter the debugger.
652 *
653 * We set up the SIGP function to indicate that this is a simple message and set the
654 * order code (MPsigpParm0) to SIGPast or SIGPdebug). After finding the per_processor
655 * block for the target, we lock the message block. Then we set the parameter(s).
656 * Next we change the lock (also called "busy") to "passing" and finally signal
657 * the other processor. Note that we only wait about 1ms to get the message lock.
658 * If we time out, we return failure to our caller. It is their responsibility to
659 * recover.
660 */
661
662 kern_return_t
663 cpu_signal(
664 int target,
665 int signal,
666 unsigned int p1,
667 unsigned int p2)
668 {
669
670 unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
671 struct per_proc_info *tpproc, *mpproc; /* Area for per_proc addresses */
672 int cpu;
673
674 #if DEBUG
675 if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
676 #endif
677
678 cpu = cpu_number(); /* Get our CPU number */
679 if(target == cpu) return KERN_FAILURE; /* Don't play with ourselves */
680 if(!machine_slot[target].running) return KERN_FAILURE; /* These guys are too young */
681
682 mpproc = &per_proc_info[cpu]; /* Point to our block */
683 tpproc = &per_proc_info[target]; /* Point to the target's block */
684
685 if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
686 (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) { /* Try to lock the message block */
687 return KERN_FAILURE; /* Timed out, take your ball and go home... */
688 }
689
690 holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu; /* Set up the signal status word */
691 tpproc->MPsigpParm0 = signal; /* Set message order */
692 tpproc->MPsigpParm1 = p1; /* Set additional parm */
693 tpproc->MPsigpParm2 = p2; /* Set additional parm */
694
695 __asm__ volatile("sync"); /* Make sure it's all there */
696
697 tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */
698 __asm__ volatile("eieio"); /* I'm a paraniod freak */
699
700 PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */
701
702 return KERN_SUCCESS; /* All is goodness and rainbows... */
703 }
704
705 void
706 cpu_doshutdown(
707 void)
708 {
709 processor_doshutdown(current_processor());
710 }
711
/*
 *	cpu_sleep - Prepare the calling cpu for sleep and quiesce it.
 *
 *	For the boot processor (cpu_number 0) the per_proc area is reset to
 *	a pre-boot state (stacks recarved, fake interrupt controls), and if
 *	the cpu will come back through the reset vector, the physical reset
 *	handler is staged so the wake path restarts via _start_cpu.
 */
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int	cpu;
	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;
	extern void _restart_cpu(void);

	cpu = cpu_number();
#if 0
	kprintf("******* About to sleep cpu %d\n", cpu);
#endif

	proc_info = &per_proc_info[cpu];

	if (proc_info->cpu_number == 0)  {
		proc_info->cpu_flags &= BootDone;	/* NOTE(review): &= keeps only the BootDone bit — confirm intent */
		/* Recarve this cpu's interrupt stack, mirroring cpu_start() */
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif	/* MACH_KDP || MACH_KDB */
		proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
		proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
		proc_info->FPU_thread = 0;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			extern void _start_cpu(void);

			/* Stage the physical reset-vector handler for wakeup */
			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				 resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				 resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				 resethandler_target.arg__paddr);

			__asm__ volatile("sync");	/* Make sure the handler reaches memory before we sleep */
			__asm__ volatile("isync");
		}
	}

	PE_cpu_machine_quiesce(proc_info->cpu_id);	/* Platform layer performs the actual sleep */
}
763
/*
 *	cpu_sync_timebase - Slave side of timebase synchronization.
 *
 *	Called on a secondary cpu from cpu_machine_init.  Asks the master
 *	cpu (via the CPRQtimebase signal) for its timebase, spins until the
 *	value is available, loads it into our timebase registers, then
 *	completes the avail/ready/done handshake in syncClkSpot.
 */
void
cpu_sync_timebase(
	void)
{
	natural_t tbu, tbl;
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

	/* Note that syncClkSpot is in a cache aligned area */
	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	/* Keep signaling until the master accepts the request */
	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, (unsigned int)&syncClkSpot)
		!= KERN_SUCCESS);


	while (*(volatile int *)&(syncClkSpot.avail) == FALSE);	/* Wait for the master to post its timebase */
	isync();

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 * in tb set part
	 */
	tbu = syncClkSpot.abstime.hi;
	tbl = syncClkSpot.abstime.lo;

	mttb(0);	/* Zero the low half so writing the upper half cannot carry */
	mttbu(tbu);
	mttb(tbl);

	syncClkSpot.ready = TRUE;	/* Tell the master we have loaded the timebase */

	while (*(volatile int *)&(syncClkSpot.done) == FALSE);	/* Wait for the master to restart its timebase */

	(void)ml_set_interrupts_enabled(intr);
}