/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	cpu specific routines
 */
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <ppc/proc_reg.h>
#include <ppc/misc_protos.h>
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <pexpert/pexpert.h>
#include <kern/cpu_data.h>
/* TODO: BOGUS TO BE REMOVED */
resethandler_t	resethandler_target;
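/*
 * Support masks for the PowerPC performance monitor control registers
 * (MMCR0/MMCR1/MMCR2). cpu_control() below ANDs caller-supplied register
 * images with these masks so that only the supported bits ever reach the
 * hardware.
 */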
#define MMCR0_SUPPORT_MASK	0xf83f1fff
#define MMCR1_SUPPORT_MASK	0xffc00000
#define MMCR2_SUPPORT_MASK	0x80000000
extern int debugger_pending[NCPUS];
extern int debugger_is_slave[NCPUS];
extern int debugger_holdoff[NCPUS];
extern int debugger_sync;
struct per_proc_info	*pper_proc_info = per_proc_info;

extern struct SIGtimebase syncClkSpot;
void cpu_sync_timebase(void);
kern_return_t
cpu_control(
	int			slot_num,
	processor_info_t	info,
	unsigned int		count)
{
	cpu_type_t		cpu_type;
	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	processor_control_cmd_t	cmd;
	boolean_t		oldlevel;
	cpu_type = machine_slot[slot_num].cpu_type;
	cpu_subtype = machine_slot[slot_num].cpu_subtype;
	cmd = (processor_control_cmd_t) info;
	if (count < PROCESSOR_CONTROL_CMD_COUNT)
		return(KERN_FAILURE);
	if ( cpu_type != cmd->cmd_cpu_type ||
	     cpu_subtype != cmd->cmd_cpu_subtype)
		return(KERN_FAILURE);

	switch (cmd->cmd_op) {
	case PROCESSOR_PM_CLR_PMC:			/* Clear Performance Monitor Counters */
		case CPU_SUBTYPE_POWERPC_604:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

			return(KERN_FAILURE);
	case PROCESSOR_PM_SET_REGS:			/* Set Performance Monitor Registers */
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT
				+ PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtpmc1(PERFMON_PMC1(perf_regs));
			mtpmc2(PERFMON_PMC2(perf_regs));
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtpmc3(PERFMON_PMC3(perf_regs));
			mtpmc4(PERFMON_PMC4(perf_regs));
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);

			return(KERN_FAILURE);
		} /* switch cpu_subtype */
	case PROCESSOR_PM_SET_MMCR:
		case CPU_SUBTYPE_POWERPC_604:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_604))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_750))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (count < (PROCESSOR_CONTROL_CMD_COUNT +
				PROCESSOR_PM_REGS_COUNT_POWERPC_7400))
				return(KERN_FAILURE);

			perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs;
			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK);
			mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK);
			mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK);
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */
			return(KERN_SUCCESS);
			return(KERN_FAILURE);

		return(KERN_FAILURE);
	} /* switch cmd_op */
kern_return_t
cpu_info_count(
	processor_flavor_t	flavor,
	unsigned int		*count)
{
	cpu_subtype_t		cpu_subtype;
	/*
	 * For now, we just assume that all CPUs are of the same type
	 */
	cpu_subtype = machine_slot[0].cpu_subtype;
	case PROCESSOR_PM_REGS_INFO:
		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);

		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

			return(KERN_INVALID_ARGUMENT);
		} /* switch cpu_subtype */
	case PROCESSOR_TEMPERATURE:
		*count = PROCESSOR_TEMPERATURE_COUNT;
		return (KERN_SUCCESS);

		return(KERN_INVALID_ARGUMENT);
kern_return_t
cpu_info(
	processor_flavor_t	flavor,
	int			slot_num,
	processor_info_t	info,
	unsigned int		*count)
{
	cpu_subtype_t		cpu_subtype;
	processor_pm_regs_t	perf_regs;
	boolean_t		oldlevel;
	unsigned int		temp[2];
	cpu_subtype = machine_slot[slot_num].cpu_subtype;
	case PROCESSOR_PM_REGS_INFO:
		perf_regs = (processor_pm_regs_t) info;

		switch (cpu_subtype) {
		case CPU_SUBTYPE_POWERPC_604:
			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_604;
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_750:
			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs) = mfpmc3();
			PERFMON_PMC4(perf_regs) = mfpmc4();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_750;
			return(KERN_SUCCESS);
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_7450:
			if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400)
				return(KERN_FAILURE);

			oldlevel = ml_set_interrupts_enabled(FALSE);	/* disable interrupts */
			PERFMON_MMCR0(perf_regs) = mfmmcr0();
			PERFMON_PMC1(perf_regs) = mfpmc1();
			PERFMON_PMC2(perf_regs) = mfpmc2();
			PERFMON_MMCR1(perf_regs) = mfmmcr1();
			PERFMON_PMC3(perf_regs) = mfpmc3();
			PERFMON_PMC4(perf_regs) = mfpmc4();
			PERFMON_MMCR2(perf_regs) = mfmmcr2();
			ml_set_interrupts_enabled(oldlevel);		/* enable interrupts */

			*count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400;
			return(KERN_SUCCESS);

			return(KERN_FAILURE);
		} /* switch cpu_subtype */
	case PROCESSOR_TEMPERATURE:			/* Get the temperature of a processor */
		disable_preemption();			/* Don't move me now */

		if(slot_num == cpu_number()) {		/* Is this for the local CPU? */
			*info = ml_read_temp();		/* Get the temperature */
		}
		else {					/* For another CPU */
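			/*
			 * temp[] is a two-word handshake area: temp[0] is the sync
			 * flag (set to -1 here, cleared by the target when it has
			 * answered) and temp[1] receives the temperature reported
			 * by the target CPU.
			 */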
			temp[0] = -1;			/* Set sync flag */
			temp[1] = -1;			/* Set invalid temperature */
			(void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp, (unsigned int)&temp);	/* Ask him to take his temperature */
			(void)hw_cpu_sync(temp, LockTimeOut);	/* Wait for the other processor to get its temperature */
			*info = temp[1];		/* Pass it back */
		}

		enable_preemption();			/* Ok to move now */
		return(KERN_SUCCESS);

		return(KERN_INVALID_ARGUMENT);
void
cpu_init(
	void)
{
	int	cpu;

	cpu = cpu_number();

	machine_slot[cpu].running = TRUE;
	machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC;
	machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc;
}
void
cpu_machine_init(
	void)
{
	struct per_proc_info		*tproc_info;
	volatile struct per_proc_info	*mproc_info;
	int cpu;

	/* TODO: release mutex lock reset_handler_lock */

	cpu = cpu_number();
	tproc_info = &per_proc_info[cpu];
	mproc_info = &per_proc_info[master_cpu];
	PE_cpu_machine_init(tproc_info->cpu_id, !(tproc_info->cpu_flags & BootDone));
	if (cpu != master_cpu) {
		while (!((mproc_info->cpu_flags) & SignalReady))
			continue;

	tproc_info->cpu_flags |= BootDone|SignalReady;
/*
 * TODO:
 * - Run cpu_register() in exclusion mode
 */

void
cpu_register(
	int *target_cpu)
{
	int cpu;

	for(cpu=0; cpu < wncpu; cpu++) {
		if(!machine_slot[cpu].is_cpu) {
			machine_slot[cpu].is_cpu = TRUE;
			*target_cpu = cpu;
			break;
		}
	}

	if (*target_cpu != -1) {
kern_return_t
cpu_start(
	int cpu)
{
	struct per_proc_info	*proc_info;
	kern_return_t		ret;

	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;

	proc_info = &per_proc_info[cpu];
	if (cpu == cpu_number()) {
		PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone));
		proc_info->cpu_flags |= BootDone|SignalReady;
		extern void _start_cpu(void);
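		/*
		 * Fill in the per_proc area for the processor being started:
		 * its number, flags, interrupt and debugger stacks, and per-cpu
		 * pointers, before it is brought up.
		 */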
		proc_info->cpu_number = cpu;
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;
		proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu];
		proc_info->active_stacks = (unsigned int)&active_stacks[cpu];
		proc_info->need_ast = (unsigned int)&need_ast[cpu];
		proc_info->FPU_owner = 0;
		proc_info->VMX_owner = 0;
		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			/* TODO: get mutex lock reset_handler_lock */
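			/*
			 * Patch the reset handler so the newly started processor
			 * enters _start_cpu() with the physical address of its
			 * per_proc block as the argument.
			 */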
			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);
		/*
		 *	Note: we pass the current time to the other processor here. He will load it
		 *	as early as possible so that there is a chance that it is close to accurate.
		 *	After the machine is up a while, we will officially resync the clocks so
		 *	that all processors are the same.  This is just to get close.
		 */
		ml_get_timebase((unsigned long long *)&proc_info->ruptStamp);	/* Pass our current time to the other guy */

		__asm__ volatile("sync");		/* Commit to storage */
		__asm__ volatile("isync");		/* Wait a second */

		ret = PE_cpu_start(proc_info->cpu_id,
				   proc_info->start_paddr, (vm_offset_t)proc_info);

		if (ret != KERN_SUCCESS &&
		    proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			/* TODO: release mutex lock reset_handler_lock */
/*
 *	Here is where we implement the receiver of the signaling protocol.
 *	We wait for the signal status area to be passed to us. Then we snarf
 *	up the status, the sender, and the 3 potential parms. Next we release
 *	the lock and signal the other guy.
 */
void
cpu_signal_handler(
	void)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	unsigned int *parmAddr;
	struct per_proc_info	*pproc;		/* Area for my per_proc address */
	int cpu;
	struct SIGtimebase *timebaseAddr;
	natural_t tbu, tbu2, tbl;

	cpu = cpu_number();			/* Get the CPU number */
	pproc = &per_proc_info[cpu];		/* Point to our block */
	/*
	 *	Since we've been signaled, wait just under 1ms for the signal lock to pass
	 */
	if(!hw_lock_mbits(&pproc->MPsigpStat, (MPsigpMsgp | MPsigpAck), (MPsigpBusy | MPsigpPass),
	  (MPsigpBusy | MPsigpPass | MPsigpAck), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) {
		panic("cpu_signal_handler: Lock pass timed out\n");
	}
	holdStat = pproc->MPsigpStat;		/* Snarf stat word */
	holdParm0 = pproc->MPsigpParm0;		/* Snarf parameter */
	holdParm1 = pproc->MPsigpParm1;		/* Snarf parameter */
	holdParm2 = pproc->MPsigpParm2;		/* Snarf parameter */

	__asm__ volatile("isync");		/* Make sure we don't unlock until memory is in */

	pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpAck | MPsigpFunc);	/* Release lock */
	switch ((holdStat & MPsigpFunc) >> 8) {	/* Decode function code */

	case MPsigpIdle:			/* Was function cancelled? */

	case MPsigpSigp:			/* Signal Processor message? */

		switch (holdParm0) {		/* Decode SIGP message order */

		case SIGPast:			/* Should we do an AST? */
			pproc->numSIGPast++;	/* Count this one */
			kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number());
			ast_check(cpu_to_processor(cpu));
			return;			/* All done... */

		case SIGPcpureq:		/* CPU specific function? */
			pproc->numSIGPcpureq++;	/* Count this one */
			switch (holdParm1) {	/* Select specific function */
			case CPRQtemp:				/* Get the temperature */
				parmAddr = (unsigned int *)holdParm2;	/* Get the destination address */
				parmAddr[1] = ml_read_temp();	/* Get the core temperature */
				eieio();			/* Force order */
				sync();				/* Force to memory */
				parmAddr[0] = 0;		/* Show we're done */
			case CPRQtimebase:			/* Pass our timebase to a requestor */
				timebaseAddr = (struct SIGtimebase *)holdParm2;

				if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
					pproc->time_base_enable(pproc->cpu_id, FALSE);

				timebaseAddr->abstime = 0;	/* Touch to force into cache */
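				/*
				 * Standard 32-bit PowerPC idiom for an atomic read of
				 * the 64-bit timebase: read TBU, then TBL, then TBU
				 * again, and retry if the upper half changed in between.
				 */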
				do {
					asm volatile("	mftbu %0" : "=r" (tbu));
					asm volatile("	mftb %0" : "=r" (tbl));
					asm volatile("	mftbu %0" : "=r" (tbu2));
				} while (tbu != tbu2);
				timebaseAddr->abstime = ((uint64_t)tbu << 32) | tbl;
				sync();				/* Force order */

				timebaseAddr->avail = TRUE;

				while (*(volatile int *)&(syncClkSpot.ready) == FALSE);

				if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
					pproc->time_base_enable(pproc->cpu_id, TRUE);

				timebaseAddr->done = TRUE;

				panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
		case SIGPdebug:				/* Enter the debugger? */
			pproc->numSIGPdebug++;		/* Count this one */
			debugger_is_slave[cpu]++;	/* Bump up the count to show we're here */
			hw_atomic_sub(&debugger_sync, 1);	/* Show we've received the 'rupt */
			__asm__ volatile("tw 4,r3,r3");	/* Enter the debugger */
			return;				/* All done now... */

		case SIGPwake:				/* Wake up CPU */
			pproc->numSIGPwake++;		/* Count this one */
			return;				/* No need to do anything, the interrupt does it all... */

			panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);

		panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8);

	panic("cpu_signal_handler: we should never get here\n");
/*
 *	Here is where we send a message to another processor. So far we only have two:
 *	SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is
 *	currently disabled). SIGPdebug is used to enter the debugger.
 *
 *	We set up the SIGP function to indicate that this is a simple message and set the
 *	order code (MPsigpParm0) to SIGPast or SIGPdebug. After finding the per_processor
 *	block for the target, we lock the message block. Then we set the parameter(s).
 *	Next we change the lock (also called "busy") to "passing" and finally signal
 *	the other processor. Note that we only wait about 1ms to get the message lock.
 *	If we time out, we return failure to our caller. It is their responsibility to
 *	retry.
 */
kern_return_t
cpu_signal(
	int		target,
	int		signal,
	unsigned int	p1,
	unsigned int	p2)
{
	unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype;
	struct per_proc_info	*tpproc, *mpproc;	/* Area for per_proc addresses */
	int cpu;
	int busybitset = 0;
	if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target);

	cpu = cpu_number();				/* Get our CPU number */
	if(target == cpu) return KERN_FAILURE;		/* Don't play with ourselves */
	if(!machine_slot[target].running) return KERN_FAILURE;	/* These guys are too young */

	mpproc = &per_proc_info[cpu];			/* Point to our block */
	tpproc = &per_proc_info[target];		/* Point to the target's block */

	if (!(tpproc->cpu_flags & SignalReady)) return KERN_FAILURE;
	if((tpproc->MPsigpStat & MPsigpMsgp) == MPsigpMsgp) {	/* Is there an unreceived message already pending? */

		if(signal == SIGPwake) {		/* SIGPwake can merge into all others... */
			mpproc->numSIGPmwake++;		/* Account for merged wakes */

		if((signal == SIGPast) && (tpproc->MPsigpParm0 == SIGPast)) {	/* We can merge ASTs */
			mpproc->numSIGPmast++;		/* Account for merged ASTs */
			return KERN_SUCCESS;		/* Don't bother to send this one... */
		if (tpproc->MPsigpParm0 == SIGPwake) {
			if (hw_lock_mbits(&tpproc->MPsigpStat, (MPsigpMsgp | MPsigpAck),
					  (MPsigpBusy | MPsigpPass), MPsigpBusy, 0)) {
				mpproc->numSIGPmwake++;

	if((busybitset == 0) &&
	   (!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy,
	   (gPEClockFrequencyInfo.bus_clock_rate_hz >> 13)))) {	/* Try to lock the message block with a .5ms timeout */
		mpproc->numSIGPtimo++;			/* Account for timeouts */
		return KERN_FAILURE;			/* Timed out, take your ball and go home... */
	holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu;	/* Set up the signal status word */
	tpproc->MPsigpParm0 = signal;			/* Set message order */
	tpproc->MPsigpParm1 = p1;			/* Set additional parm */
	tpproc->MPsigpParm2 = p2;			/* Set additional parm */

	__asm__ volatile("sync");			/* Make sure it's all there */

	tpproc->MPsigpStat = holdStat;			/* Set status and pass the lock */
	__asm__ volatile("eieio");			/* I'm a paranoid freak */

	PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id);	/* Kick the other processor */

	return KERN_SUCCESS;				/* All is goodness and rainbows... */
}
void
cpu_doshutdown(
	void)
{
	processor_doshutdown(current_processor());
}
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int		cpu;
	facility_context	*fowner;
	extern void (*exception_handlers[])(void);
	extern vm_offset_t	intstack;
	extern vm_offset_t	debstack;
	extern void _restart_cpu(void);

	cpu = cpu_number();

	kprintf("******* About to sleep cpu %d\n", cpu);

	proc_info = &per_proc_info[cpu];
	fowner = proc_info->FPU_owner;			/* Cache this */
	if(fowner) fpu_save(fowner);			/* If anyone owns FPU, save it */
	proc_info->FPU_owner = 0;			/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;			/* Cache this */
	if(fowner) vec_save(fowner);			/* If anyone owns vectors, save it */
	proc_info->VMX_owner = 0;			/* Set no vector owner now */
	if (proc_info->cpu_number == 0) {
		proc_info->cpu_flags &= BootDone;
		proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->intstack_top_ss = proc_info->istackptr;
#if MACH_KDP || MACH_KDB
		proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - FM_SIZE;
		proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
		proc_info->interrupts_enabled = 0;
		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			extern void _start_cpu(void);
			resethandler_target.type = RESET_HANDLER_START;
			resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
			resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);

			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      resethandler_target.type);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      resethandler_target.call_paddr);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      resethandler_target.arg__paddr);
			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}
	}

	PE_cpu_machine_quiesce(proc_info->cpu_id);
}
void
cpu_sync_timebase(
	void)
{
	natural_t	tbu, tbl;
	boolean_t	intr;

	intr = ml_set_interrupts_enabled(FALSE);	/* No interruptions in here */
	/* Note that syncClkSpot is in a cache aligned area */
	syncClkSpot.avail = FALSE;
	syncClkSpot.ready = FALSE;
	syncClkSpot.done = FALSE;
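	/*
	 * Three-way handshake with the master: the master sets avail once it
	 * has published its timebase in syncClkSpot.abstime, we set ready after
	 * loading it, and the master sets done after re-enabling its timebase.
	 */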
	while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase,
			  (unsigned int)&syncClkSpot) != KERN_SUCCESS)
		continue;

	while (*(volatile int *)&(syncClkSpot.avail) == FALSE)
		continue;
	/*
	 * We do the following to keep the compiler from generating extra stuff
	 */
	tbu = syncClkSpot.abstime >> 32;
	tbl = (uint32_t)syncClkSpot.abstime;
	syncClkSpot.ready = TRUE;

	while (*(volatile int *)&(syncClkSpot.done) == FALSE)
		continue;

	(void)ml_set_interrupts_enabled(intr);
}