/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/chud/chud_spr.h>
#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_cpu_asm.h>
#include <kern/processor.h>
#include <ppc/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/Diagnostics.h>
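
/*
 * CPU support routines for the CHUD (Computer Hardware Understanding
 * Development) performance tools.
 */

/* Number of CPUs currently available, per host_info(); 0 if the query fails. */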
int chudxnu_avail_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.avail_cpus;
    } else {
        return 0;
    }
}
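
/* Number of physical CPUs present (max_cpus from host_info()); 0 if the query fails. */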
int chudxnu_phys_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.max_cpus;
    } else {
        return 0;
    }
}
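
/* Returns the number of the CPU the caller is currently executing on. */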
int chudxnu_cpu_number(void)
{
    return cpu_number();
}
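
/* Start (enable=TRUE) or stop a processor. The caller is first unbound from
   its current CPU; the master processor cannot be stopped. */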
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
    chudxnu_unbind_current_thread();

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu]!=PROCESSOR_NULL && processor_ptr[cpu]!=master_processor) {
        if(enable) {
            return processor_start(processor_ptr[cpu]);
        } else {
            return processor_exit(processor_ptr[cpu]);
        }
    }
    return KERN_FAILURE;
}
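
/* Enable or disable the nap (low-power idle) mode on the given CPU. */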
kern_return_t chudxnu_enable_cpu_nap(int cpu, boolean_t enable)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu]!=PROCESSOR_NULL) {
        ml_enable_nap(cpu, enable);
        return KERN_SUCCESS;
    }

    return KERN_FAILURE;
}
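
/* Query the current nap setting. ml_enable_nap() returns the previous value,
   so the setting is read by enabling nap and immediately restoring whatever
   was there before; there is no separate "get" routine. */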
boolean_t chudxnu_cpu_nap_enabled(int cpu)
{
    boolean_t prev;

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }

    prev = ml_enable_nap(cpu, TRUE);
    ml_enable_nap(cpu, prev);

    return prev;
}
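
/* Write a 32-bit shadowed SPR: the calling thread is bound to the target CPU,
   the register is written, and the shadow copy in per_proc_info is updated
   (presumably so the value can be restored after nap or cache
   reinitialization). Unsupported SPR/CPU-subtype combinations return
   KERN_INVALID_ARGUMENT. */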
kern_return_t chudxnu_set_shadowed_spr(int cpu, int spr, uint32_t val)
{
    cpu_subtype_t cpu_subtype;
    uint32_t available;
    kern_return_t retval = KERN_FAILURE;

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    available = per_proc_info[cpu].pf.Available;
    cpu_subtype = machine_slot[cpu].cpu_subtype;

    if(spr==chud_750_l2cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL2) {
//              int enable = (val & 0x80000000) ? TRUE : FALSE;
//              if(enable) {
//                  per_proc_info[cpu].pf.l2cr = val;
//              } else {
//                  per_proc_info[cpu].pf.l2cr = 0;
//              }
                per_proc_info[cpu].pf.l2cr = val;
                cacheInit();
//              mtspr(l2cr, per_proc_info[cpu].pf.l2cr); // XXXXXXX why is this necessary? XXXXXXX
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_7450_l3cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL3) {
                int enable = (val & 0x80000000) ? TRUE : FALSE;
                if(enable) {
                    per_proc_info[cpu].pf.l3cr = val;
                } else {
                    per_proc_info[cpu].pf.l3cr = 0;
                }
                cacheInit();
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
            cacheDisable(); /* disable caches */
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            cacheInit(); /* reenable caches */
            retval = KERN_SUCCESS;
            break;
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid1), "r" (val));
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750fx_hid2 && cpu_subtype==CPU_SUBTYPE_POWERPC_750) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750fx_hid2), "r" (val));
        per_proc_info[cpu].pf.pfHID2 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7400_msscr0 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr0), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR0 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7400_msscr1 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) { // called msssr0 on 7450
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr1), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR1 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7450_ldstcr && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ldstcr), "r" (val));
        per_proc_info[cpu].pf.pfLDSTCR = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7450_ictrl && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ictrl), "r" (val));
        per_proc_info[cpu].pf.pfICTRL = val;
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();

    return retval;
}
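
/* 64-bit counterpart for the 970: HID registers are written through the
   chudxnu_mthidN_64() assembly helpers instead of an inline mtspr. */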
kern_return_t chudxnu_set_shadowed_spr64(int cpu, int spr, uint64_t val)
{
    cpu_subtype_t cpu_subtype;
    kern_return_t retval = KERN_FAILURE;

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    cpu_subtype = machine_slot[cpu].cpu_subtype;

    if(spr==chud_970_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid0_64(&val);
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid1_64(&val);
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid4) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid4_64(&val);
            per_proc_info[cpu].pf.pfHID4 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid5) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid5_64(&val);
            per_proc_info[cpu].pf.pfHID5 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();

    return retval;
}
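
/* L2CR value as originally configured, before any later modifications. */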
uint32_t chudxnu_get_orig_cpu_l2cr(int cpu)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l2crOriginal;
}
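
/* L3CR value as originally configured, before any later modifications. */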
uint32_t chudxnu_get_orig_cpu_l3cr(int cpu)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l3crOriginal;
}
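
/* Flush (and reinitialize) the processor caches. */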
void chudxnu_flush_caches(void)
{
    cacheInit();
}
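
/* Enable or disable the caches; disabling flushes them first. */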
void chudxnu_enable_caches(boolean_t enable)
{
    if(!enable) {
        cacheInit();
        cacheDisable();
    } else {
        cacheInit();
    }
}
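
/* Acquire/release exclusive use of the performance monitor facility on
   behalf of a task; these simply wrap the kernel perfmon routines. */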
kern_return_t chudxnu_perfmon_acquire_facility(task_t task)
{
    return perfmon_acquire_facility(task);
}

kern_return_t chudxnu_perfmon_release_facility(task_t task)
{
    return perfmon_release_facility(task);
}
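
/* Return the kernel PC trace buffer; if entries is non-NULL it receives the
   buffer's capacity in entries. */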
uint32_t * chudxnu_get_branch_trace_buffer(uint32_t *entries)
{
    extern int pc_trace_buf[1024];
    if(entries) {
        *entries = sizeof(pc_trace_buf)/sizeof(int);
    }
    return (uint32_t *)pc_trace_buf;
}
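
/* Thin wrappers around the machine-layer interrupt state routines. */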
boolean_t chudxnu_get_interrupts_enabled(void)
{
    return ml_get_interrupts_enabled();
}

boolean_t chudxnu_set_interrupts_enabled(boolean_t enable)
{
    return ml_set_interrupts_enabled(enable);
}

boolean_t chudxnu_at_interrupt_context(void)
{
    return ml_at_interrupt_context();
}

void chudxnu_cause_interrupt(void)
{
    ml_cause_interrupt();
}
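
/* Snapshot the per-CPU hardware interrupt counters into *rupts, with
   interrupts disabled so the copy is consistent. */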
kern_return_t chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(rupts) {
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);

        rupts->hwResets = per_proc_info[cpu].hwCtr.hwResets;
        rupts->hwMachineChecks = per_proc_info[cpu].hwCtr.hwMachineChecks;
        rupts->hwDSIs = per_proc_info[cpu].hwCtr.hwDSIs;
        rupts->hwISIs = per_proc_info[cpu].hwCtr.hwISIs;
        rupts->hwExternals = per_proc_info[cpu].hwCtr.hwExternals;
        rupts->hwAlignments = per_proc_info[cpu].hwCtr.hwAlignments;
        rupts->hwPrograms = per_proc_info[cpu].hwCtr.hwPrograms;
        rupts->hwFloatPointUnavailable = per_proc_info[cpu].hwCtr.hwFloatPointUnavailable;
        rupts->hwDecrementers = per_proc_info[cpu].hwCtr.hwDecrementers;
        rupts->hwIOErrors = per_proc_info[cpu].hwCtr.hwIOErrors;
        rupts->hwSystemCalls = per_proc_info[cpu].hwCtr.hwSystemCalls;
        rupts->hwTraces = per_proc_info[cpu].hwCtr.hwTraces;
        rupts->hwFloatingPointAssists = per_proc_info[cpu].hwCtr.hwFloatingPointAssists;
        rupts->hwPerformanceMonitors = per_proc_info[cpu].hwCtr.hwPerformanceMonitors;
        rupts->hwAltivecs = per_proc_info[cpu].hwCtr.hwAltivecs;
        rupts->hwInstBreakpoints = per_proc_info[cpu].hwCtr.hwInstBreakpoints;
        rupts->hwSystemManagements = per_proc_info[cpu].hwCtr.hwSystemManagements;
        rupts->hwAltivecAssists = per_proc_info[cpu].hwCtr.hwAltivecAssists;
        rupts->hwThermal = per_proc_info[cpu].hwCtr.hwThermal;
        rupts->hwSoftPatches = per_proc_info[cpu].hwCtr.hwSoftPatches;
        rupts->hwMaintenances = per_proc_info[cpu].hwCtr.hwMaintenances;
        rupts->hwInstrumentations = per_proc_info[cpu].hwCtr.hwInstrumentations;

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
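
/* Reset the per-CPU hardware interrupt counters to zero. */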
kern_return_t chudxnu_clear_cpu_rupt_counters(int cpu)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    bzero(&(per_proc_info[cpu].hwCtr), sizeof(struct hwCtrs));
    return KERN_SUCCESS;
}
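
/* Set or clear the enaNotifyEM diagnostics flag, which controls whether
   alignment exceptions are passed up for notification (e.g. to CHUD) rather
   than being handled silently. */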
kern_return_t chudxnu_passup_alignment_exceptions(boolean_t enable)
{
    if(enable) {
        dgWork.dgFlags |= enaNotifyEM;
    } else {
        dgWork.dgFlags &= ~enaNotifyEM;
    }
    return KERN_SUCCESS;
}