/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * cpu specific routines
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <pexpert/pexpert.h>
//#include <pexpert/ppc/powermac.h>
/* TODO: BOGUS TO BE REMOVED */
resethandler_t	resethandler_target;
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000
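/*
 * The SUPPORT_MASK values above are ANDed with caller-supplied MMCR values
 * before they are written to the hardware (see the mtmmcr0/1/2 calls below),
 * so only the supported monitor-control bits can be modified.
 */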
extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;

extern struct SIGtimebase syncClkSpot;
void cpu_sync_timebase(void);
	processor_info_t	info,

	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	cpu_type	= machine_slot[slot_num].cpu_type;
	cpu_subtype	= machine_slot[slot_num].cpu_subtype;
	cmd = (processor_control_cmd_t) info;

	if (count < PROCESSOR_CONTROL_CMD_COUNT)

	if ( cpu_type != cmd->cmd_cpu_type ||
	     cpu_subtype != cmd->cmd_cpu_subtype)
	case PROCESSOR_PM_CLR_PMC:		/* Clear Performance Monitor Counters */
		case CPU_SUBTYPE_POWERPC_604:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

			return(KERN_FAILURE);
	case PROCESSOR_PM_SET_REGS:		/* Set Performance Monitor Registers */
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT
				+ PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);
			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);
			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);
			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

			return(KERN_FAILURE);
	} /* switch cpu_subtype */
	case PROCESSOR_PM_SET_MMCR:
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);
			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);
			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);
			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

			return(KERN_FAILURE);

		return(KERN_FAILURE);
	} /* switch cmd_op */
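/*
 * Illustrative sketch only (not part of the original source): callers of this
 * control path fill in a command block whose fields match the uses above
 * (cmd_op, cmd_cpu_type, cmd_cpu_subtype, cmd_pm_regs), for example:
 *
 *	cmd->cmd_op          = PROCESSOR_PM_SET_MMCR;
 *	cmd->cmd_cpu_type    = CPU_TYPE_POWERPC;
 *	cmd->cmd_cpu_subtype = CPU_SUBTYPE_POWERPC_750;
 *	PERFMON_MMCR0((processor_pm_regs_t)cmd->cmd_pm_regs) = <desired MMCR0 bits>;
 *
 * and pass it with count >= PROCESSOR_CONTROL_CMD_COUNT +
 * PROCESSOR_PM_REGS_COUNT_POWERPC_750.
 */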
	processor_flavor_t	flavor,

	cpu_subtype_t		cpu_subtype;

	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	cpu_subtype = machine_slot[0].cpu_subtype;

	case PROCESSOR_PM_REGS_INFO:
		switch (cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_604:
				*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
				return(KERN_SUCCESS);

			case CPU_SUBTYPE_POWERPC_604e:
			case CPU_SUBTYPE_POWERPC_750:
				*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
				return(KERN_SUCCESS);

			case CPU_SUBTYPE_POWERPC_7400:
			case CPU_SUBTYPE_POWERPC_7450:
				*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
				return(KERN_SUCCESS);

				return(KERN_INVALID_ARGUMENT);
		} /* switch cpu_subtype */

	case PROCESSOR_TEMPERATURE:
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return (KERN_SUCCESS);

	return(KERN_INVALID_ARGUMENT);
	processor_flavor_t	flavor,
	processor_info_t	info,

	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	unsigned int		temp[2];

	cpu_subtype = machine_slot[slot_num].cpu_subtype;

	case PROCESSOR_PM_REGS_INFO:

		perf_regs = (processor_pm_regs_t) info;

		switch (cpu_subtype) {
			case CPU_SUBTYPE_POWERPC_604:

				if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
					return(KERN_FAILURE);

				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				PERFMON_MMCR0(perf_regs) = mfmmcr0();
				PERFMON_PMC1(perf_regs)  = mfpmc1();
				PERFMON_PMC2(perf_regs)  = mfpmc2();
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

				*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
				return(KERN_SUCCESS);
			case CPU_SUBTYPE_POWERPC_604e:
			case CPU_SUBTYPE_POWERPC_750:

				if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
					return(KERN_FAILURE);

				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				PERFMON_MMCR0(perf_regs) = mfmmcr0();
				PERFMON_PMC1(perf_regs)  = mfpmc1();
				PERFMON_PMC2(perf_regs)  = mfpmc2();
				PERFMON_MMCR1(perf_regs) = mfmmcr1();
				PERFMON_PMC3(perf_regs)  = mfpmc3();
				PERFMON_PMC4(perf_regs)  = mfpmc4();
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

				*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
				return(KERN_SUCCESS);
			case CPU_SUBTYPE_POWERPC_7400:
			case CPU_SUBTYPE_POWERPC_7450:

				if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
					return(KERN_FAILURE);

				oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
				PERFMON_MMCR0(perf_regs) = mfmmcr0();
				PERFMON_PMC1(perf_regs)  = mfpmc1();
				PERFMON_PMC2(perf_regs)  = mfpmc2();
				PERFMON_MMCR1(perf_regs) = mfmmcr1();
				PERFMON_PMC3(perf_regs)  = mfpmc3();
				PERFMON_PMC4(perf_regs)  = mfpmc4();
				PERFMON_MMCR2(perf_regs) = mfmmcr2();
				ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

				*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
				return(KERN_SUCCESS);

				return(KERN_FAILURE);
		} /* switch cpu_subtype */
	case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */

		disable_preemption();			/* Don't move me now */

		if(slot_num == cpu_number()) {		/* Is this for the local CPU? */
			*info = ml_read_temp();		/* Get the temperature */
		else {					/* For another CPU */
			temp[0] = -1;			/* Set sync flag */
			temp[1] = -1;			/* Set invalid temperature */
			(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);	/* Ask him to take his temperature */
			(void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
			*info = temp[1];		/* Pass it back */
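			/*
			 * The target processor answers this request in the CPRQtemp
			 * case of the signal handler below: it stores its core
			 * temperature in temp[1] and then clears temp[0], which lets
			 * the hw_cpu_sync() wait above complete.
			 */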
		enable_preemption();			/* Ok to move now */
		return(KERN_SUCCESS);

	return(KERN_INVALID_ARGUMENT);
	machine_slot[cpu].running = TRUE;
	machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
	machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;
	struct per_proc_info	*proc_info;

	/* TODO: release mutex lock reset_handler_lock */

	proc_info = &per_proc_info[cpu];
	PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
	if (cpu != master_cpu)

	proc_info->cpu_flags |= BootDone;
	/*
	 * - Run cpu_register() in exclusion mode
	 */

	for(cpu=0; cpu < wncpu; cpu++) {
		if(!machine_slot[cpu].is_cpu) {
			machine_slot[cpu].is_cpu = TRUE;

	if (*target_cpu != -1) {
	struct per_proc_info	*proc_info;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;

	proc_info = &per_proc_info[cpu];

	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));

		proc_info->cpu_flags |= BootDone;
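	/*
	 * Otherwise we are bringing up another processor: the code below sets up
	 * its per_proc area and stacks and, when it starts through the reset
	 * vector, installs the reset handler before signaling it to start.
	 */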
		extern void _start_cpu(void);

		proc_info->cpu_number = cpu;
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
		proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
		proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
		proc_info->cpu_data = (unsigned int)&cpu_data[cpu];
		proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
		proc_info->need_ast = (unsigned int)&need_ast[cpu];
		proc_info->FPU_thread = 0;
		proc_info->FPU_vmmCtx = 0;
		proc_info->VMX_thread = 0;
		proc_info->VMX_vmmCtx = 0;
		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: get mutex lock reset_handler_lock */

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);
		/*
		 * Note: we pass the current time to the other processor here. He will load it
		 * as early as possible so that there is a chance that it is close to accurate.
		 * After the machine is up a while, we will officially resync the clocks so
		 * that all processors are the same. This is just to get close.
		 */

		ml_get_timebase(&proc_info->ruptStamp);	/* Pass our current time to the other guy */

		__asm__ volatile("sync");		/* Commit to storage */
		__asm__ volatile("isync");		/* Wait a second */
		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS &&
		    proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			/* TODO: release mutex lock reset_handler_lock */
/*
 * Here is where we implement the receiver of the signaling protocol.
 * We wait for the signal status area to be passed to us. Then we snarf
 * up the status, the sender, and the 3 potential parms. Next we release
 * the lock and signal the other guy.
 */

	unsigned int	holdStat, holdParm0, holdParm1, holdParm2, mtype;
	unsigned int	*parmAddr;
	struct per_proc_info	*pproc;		/* Area for my per_proc address */
	struct SIGtimebase	*timebaseAddr;
	natural_t	tbu, tbu2, tbl;
	cpu = cpu_number();				/* Get the CPU number */
	pproc = &per_proc_info[cpu];			/* Point to our block */

	/*
	 * Since we've been signaled, wait just under 1ms for the signal lock to pass
	 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	holdStat = pproc->MPsigpStat;			/* Snarf stat word */
	holdParm0 = pproc->MPsigpParm0;			/* Snarf parameter */
	holdParm1 = pproc->MPsigpParm1;			/* Snarf parameter */
	holdParm2 = pproc->MPsigpParm2;			/* Snarf parameter */

	__asm__ volatile("isync");			/* Make sure we don't unlock until memory is in */

	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc);	/* Release lock */

	switch ((holdStat & MPsigpFunc) >> 8) {		/* Decode function code */

		case MPsigpIdle:			/* Was function cancelled? */

		case MPsigpSigp:			/* Signal Processor message? */

			switch (holdParm0) {		/* Decode SIGP message order */

				case SIGPast:		/* Should we do an AST? */
					pproc->numSIGPast++;	/* Count this one */
					kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
					ast_check();	/* Yes, do it */
					/* XXX: Should check if AST_URGENT is needed */
					return;		/* All done... */
				case SIGPcpureq:	/* CPU specific function? */

					pproc->numSIGPcpureq++;	/* Count this one */
					switch (holdParm1) {	/* Select specific function */

						case CPRQtemp:	/* Get the temperature */
							parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
							parmAddr[1] = ml_read_temp();	/* Get the core temperature */
							eieio();			/* Force order */
							sync();				/* Force to memory */
							parmAddr[0] = 0;		/* Show we're done */
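						/*
						 * Timebase request: presumably the CPRQtimebase
						 * case (cf. the cpu_sync_timebase requester at the
						 * end of this file).  We publish our timebase in
						 * the SIGtimebase area passed to us, then handshake
						 * through its avail/ready/done flags.
						 */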
						timebaseAddr = (struct SIGtimebase *)holdParm2;

						if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
							pproc->time_base_enable(pproc->cpu_id, FALSE);

						timebaseAddr->abstime.hi = 0;	/* Touch to force into cache */

						do {
							asm volatile("	mftbu %0" : "=r" (tbu));
							asm volatile("	mftb %0" : "=r" (tbl));
							asm volatile("	mftbu %0" : "=r" (tbu2));
						} while (tbu != tbu2);

						timebaseAddr->abstime.lo = tbl;	/* Set low order */
						timebaseAddr->abstime.hi = tbu;	/* Set high order */
						sync();				/* Force order */

						timebaseAddr->avail = TRUE;

						while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

						if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
							pproc->time_base_enable(pproc->cpu_id, TRUE);

						timebaseAddr->done = TRUE;

					panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
				case SIGPdebug:		/* Enter the debugger? */

					pproc->numSIGPdebug++;			/* Count this one */
					debugger_is_slave[cpu]++;		/* Bump up the count to show we're here */
					hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
					__asm__ volatile("tw 4,r3,r3");		/* Enter the debugger */
					return;					/* All done now... */

				case SIGPwake:		/* Wake up CPU */
					pproc->numSIGPwake++;			/* Count this one */
					return;		/* No need to do anything, the interrupt does it all... */

					panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);

			panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);

	panic("cpu_signal_handler: we should never get here\n");
/*
 * Here is where we send a message to another processor. So far we only have two:
 * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
 * currently disabled). SIGPdebug is used to enter the debugger.
 *
 * We set up the SIGP function to indicate that this is a simple message and set the
 * order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 * block for the target, we lock the message block. Then we set the parameter(s).
 * Next we change the lock (also called "busy") to "passing" and finally signal
 * the other processor. Note that we only wait about 1ms to get the message lock.
 * If we time out, we return failure to our caller. It is their responsibility to
 * retry.
 */

	unsigned int	holdStat, holdParm0, holdParm1, holdParm2, mtype;
	struct per_proc_info	*tpproc, *mpproc;	/* Area for per_proc addresses */
	if(target >= NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);

	cpu = cpu_number();				/* Get our CPU number */
	if(target == cpu) return KERN_FAILURE;		/* Don't play with ourselves */
	if(!machine_slot[target].running) return KERN_FAILURE;	/* These guys are too young */

	mpproc = &per_proc_info[cpu];			/* Point to our block */
	tpproc = &per_proc_info[target];		/* Point to the target's block */

	if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	  (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {	/* Try to lock the message block */
		return KERN_FAILURE;			/* Timed out, take your ball and go home... */

	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;			/* Set message order */
	tpproc->MPsigpParm1 = p1;			/* Set additional parm */
	tpproc->MPsigpParm2 = p2;			/* Set additional parm */

	__asm__ volatile("sync");			/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;			/* Set status and pass the lock */
	__asm__ volatile("eieio");			/* I'm a paranoid freak */

	PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;				/* All is goodness and rainbows... */
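/*
 * Usage sketch (illustrative only, mirroring the calls made elsewhere in this
 * file): a requester asks another processor for its temperature or timebase
 * with, e.g.,
 *
 *	(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);
 *	(void)cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, (unsigned int)&syncClkSpot);
 *
 * and then spins on the shared area until the target fills it in.
 */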
	processor_doshutdown(current_processor());

	struct per_proc_info	*proc_info;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;
	extern void _restart_cpu(void);

	kprintf("******* About to sleep cpu %d\n", cpu);
	proc_info = &per_proc_info[cpu];

	if (proc_info->cpu_number == 0) {
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->get_interrupts_enabled = fake_get_interrupts_enabled;
		proc_info->set_interrupts_enabled = fake_set_interrupts_enabled;
		proc_info->FPU_thread = 0;
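		/*
		 * If the sleeping processor will come back in through the reset
		 * vector when it wakes, point the reset handler at _start_cpu with
		 * this per_proc area as its argument, just as when a processor is
		 * first started.
		 */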
		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {

			extern void _start_cpu(void);

			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);

			__asm__ volatile("sync");
			__asm__ volatile("isync");

	PE_cpu_machine_quiesce(proc_info->cpu_id);
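/*
 * Timebase synchronization, requester side (presumably the body of
 * cpu_sync_timebase, declared near the top of this file): signal the master
 * processor with CPRQtimebase, wait for it to publish its timebase in
 * syncClkSpot, copy the value out, then complete the ready/done handshake.
 */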
	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */

	/* Note that syncClkSpot is in a cache aligned area */
	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;

	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, (unsigned int)&syncClkSpot)

	while (*(volatile int *)&(syncClkSpot.avail) == FALSE);

	/*
	 * We do the following to keep the compiler from generating extra stuff
	 */
	tbu = syncClkSpot.abstime.hi;
	tbl = syncClkSpot.abstime.lo;

	syncClkSpot.ready = TRUE;

	while (*(volatile int *)&(syncClkSpot.done) == FALSE);

	(void)ml_set_interrupts_enabled(intr);