/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/chud/chud_spr.h>
#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_cpu_asm.h>
#include <kern/processor.h>
#include <ppc/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/Diagnostics.h>
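
/*
 * CPU counts.  Both routines query host_info(HOST_BASIC_INFO):
 * chudxnu_avail_cpu_count() returns the number of cpus currently
 * available, chudxnu_phys_cpu_count() the number physically present.
 */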
int chudxnu_avail_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.avail_cpus;
    } else {
        return 0;
    }
}
int chudxnu_phys_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.max_cpus;
    } else {
        return 0;
    }
}
int chudxnu_cpu_number(void)
{
    return cpu_number();
}
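
/*
 * chudxnu_enable_cpu: bring a cpu online (processor_start) or take it
 * offline (processor_exit).  The current thread is unbound first, and
 * the master processor is never stopped.
 */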
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
    chudxnu_unbind_current_thread();

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu] != PROCESSOR_NULL && processor_ptr[cpu] != master_processor) {
        if(enable) {
            return processor_start(processor_ptr[cpu]);
        } else {
            return processor_exit(processor_ptr[cpu]);
        }
    }
    return KERN_FAILURE;
}
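
/*
 * Nap (low-power idle) control.  ml_enable_nap() applies the new
 * setting and returns the previous one.
 */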
kern_return_t chudxnu_enable_cpu_nap(int cpu, boolean_t enable)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu] != PROCESSOR_NULL) {
        ml_enable_nap(cpu, enable);
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}
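
/*
 * There is no direct "query nap" call, so the current setting is read
 * by writing TRUE, capturing the previous value that ml_enable_nap()
 * returns, and immediately restoring it.
 */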
boolean_t chudxnu_cpu_nap_enabled(int cpu)
{
    boolean_t prev;

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }

    prev = ml_enable_nap(cpu, TRUE);
    ml_enable_nap(cpu, prev);

    return prev;
}
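
/*
 * chudxnu_set_shadowed_spr: write a 32-bit SPR that the kernel shadows
 * in per_proc_info.  The calling thread is bound to the target cpu so
 * the mtspr executes there, and the shadow copy is updated to match;
 * unsupported spr/cpu_subtype combinations get KERN_INVALID_ARGUMENT.
 */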
kern_return_t chudxnu_set_shadowed_spr(int cpu, int spr, uint32_t val)
{
    cpu_subtype_t cpu_subtype;
    uint32_t available;
    kern_return_t retval = KERN_FAILURE;

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    available = per_proc_info[cpu].pf.Available;
    cpu_subtype = machine_slot[cpu].cpu_subtype;
    if(spr == chud_750_l2cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL2) {
//              int enable = (val & 0x80000000) ? TRUE : FALSE;
//              if(enable) {
//                  per_proc_info[cpu].pf.l2cr = val;
//              } else {
//                  per_proc_info[cpu].pf.l2cr = 0;
//              }
                per_proc_info[cpu].pf.l2cr = val;
                cacheInit();
//              mtspr(l2cr, per_proc_info[cpu].pf.l2cr); // XXXXXXX why is this necessary? XXXXXXX
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_7450_l3cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL3) {
                int enable = (val & 0x80000000) ? TRUE : FALSE;
                if(enable) {
                    per_proc_info[cpu].pf.l3cr = val;
                } else {
                    per_proc_info[cpu].pf.l3cr = 0;
                }
                cacheInit();
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_750_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
            cacheInit();
            cacheDisable(); /* disable caches */
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            cacheInit(); /* reenable caches */
            retval = KERN_SUCCESS;
            break;
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_750_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid1), "r" (val));
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_750fx_hid2 && cpu_subtype == CPU_SUBTYPE_POWERPC_750) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750fx_hid2), "r" (val));
        per_proc_info[cpu].pf.pfHID2 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr == chud_7400_msscr0 && (cpu_subtype == CPU_SUBTYPE_POWERPC_7400 || cpu_subtype == CPU_SUBTYPE_POWERPC_7450)) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr0), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR0 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr == chud_7400_msscr1 && (cpu_subtype == CPU_SUBTYPE_POWERPC_7400 || cpu_subtype == CPU_SUBTYPE_POWERPC_7450)) { // called msssr0 on 7450
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr1), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR1 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr == chud_7450_ldstcr && cpu_subtype == CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ldstcr), "r" (val));
        per_proc_info[cpu].pf.pfLDSTCR = val;
        retval = KERN_SUCCESS;
    }
    else if(spr == chud_7450_ictrl && cpu_subtype == CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ictrl), "r" (val));
        per_proc_info[cpu].pf.pfICTRL = val;
        retval = KERN_SUCCESS;
    }
    else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();

    return retval;
}
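
/*
 * chudxnu_set_shadowed_spr64: 64-bit variant for the 970.  The actual
 * mtspr is performed by the chudxnu_mthid*_64() assembly helpers (see
 * chud_cpu_asm.h), then the per_proc_info shadow is updated.
 */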
kern_return_t chudxnu_set_shadowed_spr64(int cpu, int spr, uint64_t val)
{
    cpu_subtype_t cpu_subtype;
    kern_return_t retval = KERN_FAILURE;

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    cpu_subtype = machine_slot[cpu].cpu_subtype;
    if(spr == chud_970_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid0_64(&val);
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_970_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid1_64(&val);
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_970_hid4) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid4_64(&val);
            per_proc_info[cpu].pf.pfHID4 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr == chud_970_hid5) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid5_64(&val);
            per_proc_info[cpu].pf.pfHID5 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();

    return retval;
}
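
/*
 * The per_proc_info l2crOriginal/l3crOriginal fields hold the L2CR/L3CR
 * values the kernel originally recorded for each cpu, so callers can
 * restore the cache configuration after modifying those registers.
 */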
uint32_t chudxnu_get_orig_cpu_l2cr(int cpu)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l2crOriginal;
}
uint32_t chudxnu_get_orig_cpu_l3cr(int cpu)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l3crOriginal;
}
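
/*
 * Cache control: cacheInit() flushes and re-initializes the caches;
 * following it with cacheDisable() leaves them off.
 */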
void chudxnu_flush_caches(void)
{
    cacheInit();
}
void chudxnu_enable_caches(boolean_t enable)
{
    if(!enable) {
        cacheInit();
        cacheDisable();
    } else {
        cacheInit();
    }
}
kern_return_t chudxnu_perfmon_acquire_facility(task_t task)
{
    return perfmon_acquire_facility(task);
}
kern_return_t chudxnu_perfmon_release_facility(task_t task)
{
    return perfmon_release_facility(task);
}
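
/*
 * pc_trace_buf is the kernel's low-level PC trace buffer; hand back its
 * address and, if requested, its capacity in entries.
 */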
uint32_t * chudxnu_get_branch_trace_buffer(uint32_t *entries)
{
    extern int pc_trace_buf[1024];

    if(entries) {
        *entries = sizeof(pc_trace_buf) / sizeof(int);
    }
    return (uint32_t *)pc_trace_buf;
}
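
/*
 * Thin wrappers around the machine-dependent interrupt routines, so
 * CHUD clients need not call the ml_*() interfaces directly.
 */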
boolean_t chudxnu_get_interrupts_enabled(void)
{
    return ml_get_interrupts_enabled();
}
boolean_t chudxnu_set_interrupts_enabled(boolean_t enable)
{
    return ml_set_interrupts_enabled(enable);
}
boolean_t chudxnu_at_interrupt_context(void)
{
    return ml_at_interrupt_context();
}
void chudxnu_cause_interrupt(void)
{
    ml_cause_interrupt();
}
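
/*
 * chudxnu_get_cpu_rupt_counters: copy the per-cpu interrupt counters
 * out to the caller.  Interrupts are disabled around the copy so the
 * snapshot is taken at a single consistent instant.
 */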
kern_return_t chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(rupts) {
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);

        rupts->hwResets = per_proc_info[cpu].hwCtr.hwResets;
        rupts->hwMachineChecks = per_proc_info[cpu].hwCtr.hwMachineChecks;
        rupts->hwDSIs = per_proc_info[cpu].hwCtr.hwDSIs;
        rupts->hwISIs = per_proc_info[cpu].hwCtr.hwISIs;
        rupts->hwExternals = per_proc_info[cpu].hwCtr.hwExternals;
        rupts->hwAlignments = per_proc_info[cpu].hwCtr.hwAlignments;
        rupts->hwPrograms = per_proc_info[cpu].hwCtr.hwPrograms;
        rupts->hwFloatPointUnavailable = per_proc_info[cpu].hwCtr.hwFloatPointUnavailable;
        rupts->hwDecrementers = per_proc_info[cpu].hwCtr.hwDecrementers;
        rupts->hwIOErrors = per_proc_info[cpu].hwCtr.hwIOErrors;
        rupts->hwSystemCalls = per_proc_info[cpu].hwCtr.hwSystemCalls;
        rupts->hwTraces = per_proc_info[cpu].hwCtr.hwTraces;
        rupts->hwFloatingPointAssists = per_proc_info[cpu].hwCtr.hwFloatingPointAssists;
        rupts->hwPerformanceMonitors = per_proc_info[cpu].hwCtr.hwPerformanceMonitors;
        rupts->hwAltivecs = per_proc_info[cpu].hwCtr.hwAltivecs;
        rupts->hwInstBreakpoints = per_proc_info[cpu].hwCtr.hwInstBreakpoints;
        rupts->hwSystemManagements = per_proc_info[cpu].hwCtr.hwSystemManagements;
        rupts->hwAltivecAssists = per_proc_info[cpu].hwCtr.hwAltivecAssists;
        rupts->hwThermal = per_proc_info[cpu].hwCtr.hwThermal;
        rupts->hwSoftPatches = per_proc_info[cpu].hwCtr.hwSoftPatches;
        rupts->hwMaintenances = per_proc_info[cpu].hwCtr.hwMaintenances;
        rupts->hwInstrumentations = per_proc_info[cpu].hwCtr.hwInstrumentations;

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
kern_return_t chudxnu_clear_cpu_rupt_counters(int cpu)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    bzero(&(per_proc_info[cpu].hwCtr), sizeof(struct hwCtrs));
    return KERN_SUCCESS;
}
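
/*
 * Alignment exceptions are normally handled within the kernel; setting
 * the enaNotifyEM diagnostic flag (ppc/Diagnostics.h) requests that
 * they be passed up instead.
 */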
kern_return_t chudxnu_passup_alignment_exceptions(boolean_t enable)
{
    if(enable) {
        dgWork.dgFlags |= enaNotifyEM;
    } else {
        dgWork.dgFlags &= ~enaNotifyEM;
    }
    return KERN_SUCCESS;
}