/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * File: ppc/cpu.c
 *
 * cpu specific routines
 */

#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/asm.h>
#include <ppc/hw_perfmon.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
#include <ppc/mappings.h>
#include <ppc/Diagnostics.h>
#include <ppc/trap.h>

/* TODO: BOGUS TO BE REMOVED */
int real_ncpus = 1;

int wncpu = NCPUS;
resethandler_t resethandler_target;

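/*
 * Only the bits set in these masks may be written to the performance monitor
 * control registers; cpu_control() ANDs caller-supplied MMCR values with the
 * appropriate mask before loading MMCR0/MMCR1/MMCR2.
 */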
#define MMCR0_SUPPORT_MASK 0xf83f1fff
#define MMCR1_SUPPORT_MASK 0xffc00000
#define MMCR2_SUPPORT_MASK 0x80000000

extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;

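/*
 * Handshake area used to synchronize time bases (see cpu_sync_timebase()
 * and the CPRQtimebase case in cpu_signal_handler()): the master fills in
 * abstime and sets avail; the requesting processor loads its time base and
 * sets ready; the master then sets done.
 */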
struct SIGtimebase {
    boolean_t avail;
    boolean_t ready;
    boolean_t done;
    uint64_t abstime;
};

struct per_proc_info *pper_proc_info = per_proc_info;

extern struct SIGtimebase syncClkSpot;

void cpu_sync_timebase(void);

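/*
 * cpu_control -- presumably reached through the Mach processor_control()
 * interface: validate the command against the slot's CPU type/subtype and
 * program the performance monitor registers on supported PowerPC models.
 */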
kern_return_t
cpu_control(
    int slot_num,
    processor_info_t info,
    unsigned int count)
{
    cpu_type_t cpu_type;
    cpu_subtype_t cpu_subtype;
    processor_pm_regs_t perf_regs;
    processor_control_cmd_t cmd;
    boolean_t oldlevel;

    cpu_type = machine_slot[slot_num].cpu_type;
    cpu_subtype = machine_slot[slot_num].cpu_subtype;
    cmd = (processor_control_cmd_t) info;

    if (count < PROCESSOR_CONTROL_CMD_COUNT)
        return(KERN_FAILURE);

    if ( cpu_type != cmd->cmd_cpu_type ||
         cpu_subtype != cmd->cmd_cpu_subtype)
        return(KERN_FAILURE);

    if (perfmon_acquire_facility(current_task()) != KERN_SUCCESS) {
        return(KERN_RESOURCE_SHORTAGE);		/* cpu performance facility in use by another task */
    }

    switch (cmd->cmd_op)
    {
        case PROCESSOR_PM_CLR_PMC:		/* Clear Performance Monitor Counters */
            switch (cpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                {
                    oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                    mtpmc1(0x0);
                    mtpmc2(0x0);
                    mtpmc3(0x0);
                    mtpmc4(0x0);
                    ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
                    return(KERN_SUCCESS);
                }
                default:
                    return(KERN_FAILURE);
            } /* cpu_subtype */
        case PROCESSOR_PM_SET_REGS:		/* Set Performance Monitor Registers */
            switch (cpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                            PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                            PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtpmc1(PERFMON_PMC1(perf_regs));
                        mtpmc2(PERFMON_PMC2(perf_regs));
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtpmc3(PERFMON_PMC3(perf_regs));
                        mtpmc4(PERFMON_PMC4(perf_regs));
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* switch cpu_subtype */
        case PROCESSOR_PM_SET_MMCR:
            switch (cpu_subtype)
            {
                case CPU_SUBTYPE_POWERPC_750:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                            PROCESSOR_PM_REGS_COUNT_POWERPC_750))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:
                    if (count < (PROCESSOR_CONTROL_CMD_COUNT +
                            PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
                        return(KERN_FAILURE);
                    else
                    {
                        perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
                        oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                        mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
                        mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
                        mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
                        ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
                        return(KERN_SUCCESS);
                    }
                default:
                    return(KERN_FAILURE);
            } /* cpu_subtype */
        default:
            return(KERN_FAILURE);
    } /* switch cmd_op */
}

kern_return_t
cpu_info_count(
    processor_flavor_t flavor,
    unsigned int *count)
{
    cpu_subtype_t cpu_subtype;

    /*
     * For now, we just assume that all CPUs are of the same type
     */
    cpu_subtype = machine_slot[0].cpu_subtype;
    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:
            switch (cpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    *count = 0;
                    return(KERN_INVALID_ARGUMENT);
            } /* switch cpu_subtype */

        case PROCESSOR_TEMPERATURE:
            *count = PROCESSOR_TEMPERATURE_COUNT;
            return (KERN_SUCCESS);

        default:
            *count = 0;
            return(KERN_INVALID_ARGUMENT);

    }
}

kern_return_t
cpu_info(
    processor_flavor_t flavor,
    int slot_num,
    processor_info_t info,
    unsigned int *count)
{
    cpu_subtype_t cpu_subtype;
    processor_pm_regs_t perf_regs;
    boolean_t oldlevel;
    unsigned int temp[2];

    cpu_subtype = machine_slot[slot_num].cpu_subtype;

    switch (flavor) {
        case PROCESSOR_PM_REGS_INFO:

            perf_regs = (processor_pm_regs_t) info;

            switch (cpu_subtype) {
                case CPU_SUBTYPE_POWERPC_750:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs) = mfpmc1();
                    PERFMON_PMC2(perf_regs) = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs) = mfpmc3();
                    PERFMON_PMC4(perf_regs) = mfpmc4();
                    ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
                    return(KERN_SUCCESS);

                case CPU_SUBTYPE_POWERPC_7400:
                case CPU_SUBTYPE_POWERPC_7450:

                    if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
                        return(KERN_FAILURE);

                    oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
                    PERFMON_MMCR0(perf_regs) = mfmmcr0();
                    PERFMON_PMC1(perf_regs) = mfpmc1();
                    PERFMON_PMC2(perf_regs) = mfpmc2();
                    PERFMON_MMCR1(perf_regs) = mfmmcr1();
                    PERFMON_PMC3(perf_regs) = mfpmc3();
                    PERFMON_PMC4(perf_regs) = mfpmc4();
                    PERFMON_MMCR2(perf_regs) = mfmmcr2();
                    ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

                    *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
                    return(KERN_SUCCESS);

                default:
                    return(KERN_FAILURE);
            } /* switch cpu_subtype */

        case PROCESSOR_TEMPERATURE:		/* Get the temperature of a processor */

            disable_preemption();		/* Don't move me now */

            if(slot_num == cpu_number()) {	/* Is this for the local CPU? */
                *info = ml_read_temp();		/* Get the temperature */
            }
            else {				/* For another CPU */
                temp[0] = -1;			/* Set sync flag */
                eieio();
                sync();
                temp[1] = -1;			/* Set invalid temperature */
                (void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);	/* Ask him to take his temperature */
                (void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
                *info = temp[1];		/* Pass it back */
            }

            enable_preemption();		/* Ok to move now */
            return(KERN_SUCCESS);

        default:
            return(KERN_INVALID_ARGUMENT);

    } /* flavor */
}

void
cpu_init(
    void)
{
    int cpu;

    cpu = cpu_number();

    machine_slot[cpu].running = TRUE;
    machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
    machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;

}

void
cpu_machine_init(
    void)
{
    struct per_proc_info *tproc_info;
    volatile struct per_proc_info *mproc_info;
    int cpu;

    /* TODO: release mutex lock reset_handler_lock */

    cpu = cpu_number();
    tproc_info = &per_proc_info[cpu];
    mproc_info = &per_proc_info[master_cpu];
    PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
    if (cpu != master_cpu) {
        while (!((mproc_info->cpu_flags) & SignalReady))
            continue;
        cpu_sync_timebase();
    }
    ml_init_interrupt();
    tproc_info->cpu_flags |= BootDone|SignalReady;
}

kern_return_t
cpu_register(
    int *target_cpu
)
{
    int cpu;

    /*
     * TODO:
     *  - Run cpu_register() in exclusion mode
     */

    *target_cpu = -1;
    for(cpu=0; cpu < wncpu; cpu++) {
        if(!machine_slot[cpu].is_cpu) {
            machine_slot[cpu].is_cpu = TRUE;
            *target_cpu = cpu;
            break;
        }
    }
    if (*target_cpu != -1) {
        real_ncpus++;
        return KERN_SUCCESS;
    } else
        return KERN_FAILURE;
}

kern_return_t
cpu_start(
    int cpu)
{
    struct per_proc_info *proc_info;
    kern_return_t ret;
    mapping *mp;

    extern vm_offset_t intstack;
    extern vm_offset_t debstack;

    proc_info = &per_proc_info[cpu];

    if (cpu == cpu_number()) {
        PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
        ml_init_interrupt();
        proc_info->cpu_flags |= BootDone|SignalReady;

        return KERN_SUCCESS;
    } else {
        extern void _start_cpu(void);

        proc_info->cpu_number = cpu;
        proc_info->cpu_flags &= BootDone;
        proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
        proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
        proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
        proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
        proc_info->interrupts_enabled = 0;
        proc_info->need_ast = (unsigned int)&need_ast[cpu];
        proc_info->FPU_owner = 0;
        proc_info->VMX_owner = 0;
        mp = (mapping *)(&proc_info->ppCIOmp);
        mp->mpFlags = 0x01000000 | mpSpecial | 1;
        mp->mpSpace = invalSpace;

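        /*
         * When the target is brought up through the reset vector, the
         * low-memory ResetHandler block is filled in below (type, start
         * routine, argument) so that the reset exception enters _start_cpu
         * with this per_proc as its argument.
         */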
        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

            /* TODO: get mutex lock reset_handler_lock */

            resethandler_target.type = RESET_HANDLER_START;
            resethandler_target.call_paddr = (vm_offset_t)_start_cpu;	/* Note: these routines are always V=R */
            resethandler_target.arg__paddr = (vm_offset_t)proc_info;	/* Note: these routines are always V=R */

            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          resethandler_target.type);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          resethandler_target.call_paddr);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          resethandler_target.arg__paddr);

        }
        /*
         * Note: we pass the current time to the other processor here. He will load it
         * as early as possible so that there is a chance that it is close to accurate.
         * After the machine is up a while, we will officially resync the clocks so
         * that all processors are the same. This is just to get close.
         */

        ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);	/* Pass our current time to the other guy */

        __asm__ volatile("sync");		/* Commit to storage */
        __asm__ volatile("isync");		/* Wait a second */
        ret = PE_cpu_start(proc_info->cpu_id,
                proc_info->start_paddr, (vm_offset_t)proc_info);

        if (ret != KERN_SUCCESS &&
                proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

            /* TODO: release mutex lock reset_handler_lock */
        }
        return(ret);
    }
}

perfTrap perfCpuSigHook = 0;			/* Pointer to CHUD cpu signal hook routine */

/*
 * Here is where we implement the receiver of the signaling protocol.
 * We wait for the signal status area to be passed to us. Then we snarf
 * up the status, the sender, and the 3 potential parms. Next we release
 * the lock and signal the other guy.
 */
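
/*
 * The SIGPcpureq order carries a sub-request in parm1; the cases handled
 * below are CPRQtemp, CPRQtimebase, CPRQsegload, CPRQchud and CPRQscom.
 */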

void
cpu_signal_handler(
    void)
{

    unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
    unsigned int *parmAddr;
    struct per_proc_info *pproc;		/* Area for my per_proc address */
    int cpu;
    struct SIGtimebase *timebaseAddr;
    natural_t tbu, tbu2, tbl;

    cpu = cpu_number();				/* Get the CPU number */
    pproc = &per_proc_info[cpu];		/* Point to our block */

    /*
     * Since we've been signaled, wait about 31 ms for the signal lock to pass
     */
    if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
            (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.timebase_frequency_hz >> 5))) {
        panic("cpu_signal_handler: Lock pass timed out\n");
    }

    holdStat = pproc->MPsigpStat;		/* Snarf stat word */
    holdParm0 = pproc->MPsigpParm0;		/* Snarf parameter */
    holdParm1 = pproc->MPsigpParm1;		/* Snarf parameter */
    holdParm2 = pproc->MPsigpParm2;		/* Snarf parameter */

    __asm__ volatile("isync");			/* Make sure we don't unlock until memory is in */

    pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */

    switch ((holdStat & MPsigpFunc) >> 8) {	/* Decode function code */

        case MPsigpIdle:			/* Was function cancelled? */
            return;				/* Yup... */

        case MPsigpSigp:			/* Signal Processor message? */

            switch (holdParm0) {		/* Decode SIGP message order */

                case SIGPast:			/* Should we do an AST? */
                    pproc->hwCtr.numSIGPast++;	/* Count this one */
#if 0
                    kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
#endif
                    ast_check(cpu_to_processor(cpu));
                    return;			/* All done... */

                case SIGPcpureq:		/* CPU specific function? */

                    pproc->hwCtr.numSIGPcpureq++;	/* Count this one */
                    switch (holdParm1) {	/* Select specific function */

                        case CPRQtemp:		/* Get the temperature */
                            parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
                            parmAddr[1] = ml_read_temp();	/* Get the core temperature */
                            eieio();		/* Force order */
                            sync();		/* Force to memory */
                            parmAddr[0] = 0;	/* Show we're done */
                            return;

                        case CPRQtimebase:

                            timebaseAddr = (struct SIGtimebase *)holdParm2;

                            if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
                                pproc->time_base_enable(pproc->cpu_id, FALSE);

                            timebaseAddr->abstime = 0;	/* Touch to force into cache */
                            sync();

                            do {
                                asm volatile(" mftbu %0" : "=r" (tbu));
                                asm volatile(" mftb %0" : "=r" (tbl));
                                asm volatile(" mftbu %0" : "=r" (tbu2));
                            } while (tbu != tbu2);

                            timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
                            sync();		/* Force order */

                            timebaseAddr->avail = TRUE;

                            while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

                            if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
                                pproc->time_base_enable(pproc->cpu_id, TRUE);

                            timebaseAddr->done = TRUE;

                            return;

                        case CPRQsegload:
                            return;

                        case CPRQchud:
                            parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
                            if(perfCpuSigHook) {
                                struct savearea *ssp = current_act()->mact.pcb;
                                if(ssp) {
                                    (perfCpuSigHook)(parmAddr[1] /* request */, ssp, 0, 0);
                                }
                            }
                            parmAddr[1] = 0;
                            parmAddr[0] = 0;	/* Show we're done */
                            return;

                        case CPRQscom:
                            if(((scomcomm *)holdParm2)->scomfunc) {	/* Are we writing */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_write(((scomcomm *)holdParm2)->scomreg, ((scomcomm *)holdParm2)->scomdata);	/* Write scom */
                            }
                            else {		/* No, reading... */
                                ((scomcomm *)holdParm2)->scomstat = ml_scom_read(((scomcomm *)holdParm2)->scomreg, &((scomcomm *)holdParm2)->scomdata);	/* Read scom */
                            }
                            return;

                        default:
                            panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
                            return;
                    }

                case SIGPdebug:			/* Enter the debugger? */

                    pproc->hwCtr.numSIGPdebug++;	/* Count this one */
                    debugger_is_slave[cpu]++;	/* Bump up the count to show we're here */
                    hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
                    __asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
                    return;			/* All done now... */

                case SIGPwake:			/* Wake up CPU */
                    pproc->hwCtr.numSIGPwake++;	/* Count this one */
                    return;			/* No need to do anything, the interrupt does it all... */

                default:
                    panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
                    return;

            }

        default:
            panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);
            return;

    }
    panic("cpu_signal_handler: we should never get here\n");
}

/*
 * Here is where we send a message to another processor. So far we only have two:
 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
 * currently disabled). SIGPdebug is used to enter the debugger.
 *
 * We set up the SIGP function to indicate that this is a simple message and set the
 * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_proc
 * block for the target, we lock the message block. Then we set the parameter(s).
 * Next we change the lock (also called "busy") to "passing" and finally signal
 * the other processor. Note that we only wait about half a millisecond to get the
 * message lock. If we time out, we return failure to our caller. It is their
 * responsibility to recover.
 */
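
/*
 * A minimal usage sketch (hypothetical caller, not taken from this file;
 * target_cpu is an assumed variable holding a valid slot number):
 *
 *	if (cpu_signal(target_cpu, SIGPast, 0, 0) != KERN_SUCCESS) {
 *		// The message lock timed out or the target was not ready;
 *		// the caller must retry or otherwise recover on its own.
 *	}
 */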

kern_return_t
cpu_signal(
    int target,
    int signal,
    unsigned int p1,
    unsigned int p2)
{

    unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
    struct per_proc_info *tpproc, *mpproc;	/* Area for per_proc addresses */
    int cpu;
    int busybitset = 0;

#if DEBUG
    if(target >= NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);
#endif

    cpu = cpu_number();				/* Get our CPU number */
    if(target == cpu) return KERN_FAILURE;	/* Don't play with ourselves */
    if(!machine_slot[target].running) return KERN_FAILURE;	/* These guys are too young */

    mpproc = &per_proc_info[cpu];		/* Point to our block */
    tpproc = &per_proc_info[target];		/* Point to the target's block */

    if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;

    if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

        if(signal == SIGPwake) {		/* SIGPwake can merge into all others... */
            mpproc->hwCtr.numSIGPmwake++;	/* Account for merged wakes */
            return KERN_SUCCESS;
        }

        if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
            mpproc->hwCtr.numSIGPmast++;	/* Account for merged ASTs */
            return KERN_SUCCESS;		/* Don't bother to send this one... */
        }

        if (tpproc->MPsigpParm0 == SIGPwake) {
            if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
                    (MPsigpBusy | MPsigpPass), MPsigpBusy, 0)) {
                busybitset = 1;
                mpproc->hwCtr.numSIGPmwake++;
            }
        }
    }

    if((busybitset == 0) &&
            (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
            (gPEClockFrequencyInfo.timebase_frequency_hz >> 11)))) {	/* Try to lock the message block with a .5ms timeout */
        mpproc->hwCtr.numSIGPtimo++;		/* Account for timeouts */
        return KERN_FAILURE;			/* Timed out, take your ball and go home... */
    }

    holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Set up the signal status word */
    tpproc->MPsigpParm0 = signal;		/* Set message order */
    tpproc->MPsigpParm1 = p1;			/* Set additional parm */
    tpproc->MPsigpParm2 = p2;			/* Set additional parm */

    __asm__ volatile("sync");			/* Make sure it's all there */

    tpproc->MPsigpStat = holdStat;		/* Set status and pass the lock */
    __asm__ volatile("eieio");			/* I'm a paranoid freak */

    if (busybitset == 0)
        PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

    return KERN_SUCCESS;			/* All is goodness and rainbows... */
}

void
cpu_doshutdown(
    void)
{
    enable_preemption();
    processor_offline(current_processor());
}

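/*
 * Prepare this processor for sleep: any live floating point or vector
 * context is saved first.  The boot processor (cpu_number 0) additionally
 * resets its stack pointers, re-arms the reset handler when starting through
 * the reset vector, and waits for all other processors to reach SleepState
 * before quiescing the hardware.
 */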
void
cpu_sleep(
    void)
{
    struct per_proc_info *proc_info;
    unsigned int cpu, i;
    unsigned int wait_ncpus_sleep, ncpus_sleep;
    facility_context *fowner;
    extern vm_offset_t intstack;
    extern vm_offset_t debstack;
    extern void _restart_cpu(void);

    cpu = cpu_number();

    proc_info = &per_proc_info[cpu];

    fowner = proc_info->FPU_owner;		/* Cache this */
    if(fowner) fpu_save(fowner);		/* If anyone owns FPU, save it */
    proc_info->FPU_owner = 0;			/* Set no fpu owner now */

    fowner = proc_info->VMX_owner;		/* Cache this */
    if(fowner) vec_save(fowner);		/* If anyone owns vectors, save it */
    proc_info->VMX_owner = 0;			/* Set no vector owner now */

    if (proc_info->cpu_number == 0) {
        proc_info->cpu_flags &= BootDone;
        proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
        proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
        proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
        proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
        proc_info->interrupts_enabled = 0;

        if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
            extern void _start_cpu(void);

            resethandler_target.type = RESET_HANDLER_START;
            resethandler_target.call_paddr = (vm_offset_t)_start_cpu;	/* Note: these routines are always V=R */
            resethandler_target.arg__paddr = (vm_offset_t)proc_info;	/* Note: these routines are always V=R */

            ml_phys_write((vm_offset_t)&ResetHandler + 0,
                          resethandler_target.type);
            ml_phys_write((vm_offset_t)&ResetHandler + 4,
                          resethandler_target.call_paddr);
            ml_phys_write((vm_offset_t)&ResetHandler + 8,
                          resethandler_target.arg__paddr);

            __asm__ volatile("sync");
            __asm__ volatile("isync");
        }

        wait_ncpus_sleep = real_ncpus-1;
        ncpus_sleep = 0;
        while (wait_ncpus_sleep != ncpus_sleep) {
            ncpus_sleep = 0;
            for(i=1; i < real_ncpus ; i++) {
                if ((*(volatile short *)&per_proc_info[i].cpu_flags) & SleepState)
                    ncpus_sleep++;
            }
        }
    }

    PE_cpu_machine_quiesce(proc_info->cpu_id);
}

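/*
 * cpu_sync_timebase runs on a processor other than the master (see
 * cpu_machine_init): it asks the master for its time base with a
 * CPRQtimebase signal and then copies the returned value into its own
 * TBU/TBL registers.
 */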
void
cpu_sync_timebase(
    void)
{
    natural_t tbu, tbl;
    boolean_t intr;

    intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

    /* Note that syncClkSpot is in a cache aligned area */
    syncClkSpot.avail = FALSE;
    syncClkSpot.ready = FALSE;
    syncClkSpot.done = FALSE;

    while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
            (unsigned int)&syncClkSpot) != KERN_SUCCESS)
        continue;

    while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
        continue;

    isync();

    /*
     * Split abstime into its halves up front to keep the compiler from
     * generating extra code in the time base set sequence below.
     */
    tbu = syncClkSpot.abstime >> 32;
    tbl = (uint32_t)syncClkSpot.abstime;

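    /*
     * Writing TBL as zero first is the usual 32-bit PowerPC sequence for
     * setting a 64-bit time base: it keeps a carry out of TBL from bumping
     * TBU between the two writes that follow.
     */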
    mttb(0);
    mttbu(tbu);
    mttb(tbl);

    syncClkSpot.ready = TRUE;

    while (*(volatile int *)&(syncClkSpot.done) == FALSE)
        continue;

    (void)ml_set_interrupts_enabled(intr);
}