/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <ppc/chud/chud_spr.h>
#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_cpu_asm.h>
#include <kern/processor.h>
#include <ppc/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/Diagnostics.h>

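/*
 * Returns the number of CPUs currently available for scheduling, as
 * reported by host_info(HOST_BASIC_INFO), or 0 if the query fails.
 */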
__private_extern__
int chudxnu_avail_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.avail_cpus;
    } else {
        return 0;
    }
}

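/*
 * Returns the number of physical CPUs in the machine (max_cpus from
 * host_info(HOST_BASIC_INFO)), or 0 if the query fails.
 */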
__private_extern__
int chudxnu_phys_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.max_cpus;
    } else {
        return 0;
    }
}

__private_extern__
int chudxnu_cpu_number(void)
{
    return cpu_number();
}

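/*
 * Brings a processor online (enable=TRUE, via processor_start) or takes
 * it offline (enable=FALSE, via processor_exit). The calling thread is
 * unbound first so it is not left bound to a CPU that is going away.
 * Requests against an absent slot or the master processor fail.
 */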
__private_extern__
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
    chudxnu_unbind_current_thread();

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu]!=PROCESSOR_NULL && processor_ptr[cpu]!=master_processor) {
        if(enable) {
            return processor_start(processor_ptr[cpu]);
        } else {
            return processor_exit(processor_ptr[cpu]);
        }
    }
    return KERN_FAILURE;
}

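/*
 * Enables or disables nap (power-saving idle) mode on the given CPU via
 * ml_enable_nap(). Fails if the cpu index is out of range or the
 * processor slot is empty.
 */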
__private_extern__
kern_return_t chudxnu_enable_cpu_nap(int cpu, boolean_t enable)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu]!=PROCESSOR_NULL) {
        ml_enable_nap(cpu, enable);
        return KERN_SUCCESS;
    }

    return KERN_FAILURE;
}

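/*
 * Reports whether nap mode is currently enabled on the given CPU.
 * ml_enable_nap() returns the previous setting, so the state is read by
 * enabling nap and then immediately restoring the value returned.
 */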
__private_extern__
boolean_t chudxnu_cpu_nap_enabled(int cpu)
{
    boolean_t prev;

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }

    prev = ml_enable_nap(cpu, TRUE);
    ml_enable_nap(cpu, prev);

    return prev;
}

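/*
 * Writes a 32-bit "shadowed" special-purpose register (L2CR, L3CR,
 * HID0/1/2, MSSCR0/1, LDSTCR, ICTRL) on the given CPU and updates the
 * kernel's shadow copy in per_proc_info so the two stay consistent.
 * The calling thread is bound to the target CPU for the duration of the
 * write, and each register is gated on the CPU subtypes that actually
 * implement it. The cache control registers take effect through
 * cacheInit() rather than a direct mtspr.
 */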
__private_extern__
kern_return_t chudxnu_set_shadowed_spr(int cpu, int spr, uint32_t val)
{
    cpu_subtype_t cpu_subtype;
    uint32_t available;
    kern_return_t retval = KERN_FAILURE;

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    available = per_proc_info[cpu].pf.Available;
    cpu_subtype = machine_slot[cpu].cpu_subtype;

    if(spr==chud_750_l2cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL2) {
//              int enable = (val & 0x80000000) ? TRUE : FALSE;
//              if(enable) {
//                  per_proc_info[cpu].pf.l2cr = val;
//              } else {
//                  per_proc_info[cpu].pf.l2cr = 0;
//              }
                per_proc_info[cpu].pf.l2cr = val;
                cacheInit();
//              mtspr(l2cr, per_proc_info[cpu].pf.l2cr); // XXXXXXX why is this necessary? XXXXXXX
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_7450_l3cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL3) {
                int enable = (val & 0x80000000) ? TRUE : FALSE;
                if(enable) {
                    per_proc_info[cpu].pf.l3cr = val;
                } else {
                    per_proc_info[cpu].pf.l3cr = 0;
                }
                cacheInit();
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
            cacheInit();
            cacheDisable(); /* disable caches */
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            cacheInit(); /* reenable caches */
            retval = KERN_SUCCESS;
            break;
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid1), "r" (val));
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750fx_hid2 && cpu_subtype==CPU_SUBTYPE_POWERPC_750) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750fx_hid2), "r" (val));
        per_proc_info[cpu].pf.pfHID2 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7400_msscr0 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr0), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR0 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7400_msscr1 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) { // called msssr0 on 7450
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr1), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR1 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7450_ldstcr && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ldstcr), "r" (val));
        per_proc_info[cpu].pf.pfLDSTCR = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7450_ictrl && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ictrl), "r" (val));
        per_proc_info[cpu].pf.pfICTRL = val;
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();
    return retval;
}

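/*
 * 64-bit variant of chudxnu_set_shadowed_spr() for the PPC 970 HID
 * registers (HID0, HID1, HID4, HID5). The actual mtspr is done by the
 * chudxnu_mthidN_64() assembly helpers; the shadow copy in
 * per_proc_info is updated afterwards.
 */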
__private_extern__
kern_return_t chudxnu_set_shadowed_spr64(int cpu, int spr, uint64_t val)
{
    cpu_subtype_t cpu_subtype;
    kern_return_t retval = KERN_FAILURE;

    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    cpu_subtype = machine_slot[cpu].cpu_subtype;

    if(spr==chud_970_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid0_64(&val);
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid1_64(&val);
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid4) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid4_64(&val);
            per_proc_info[cpu].pf.pfHID4 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid5) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid5_64(&val);
            per_proc_info[cpu].pf.pfHID5 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();

    return retval;
}

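/*
 * The next two routines return the original (boot-time) L2CR and L3CR
 * values saved in per_proc_info, before any runtime modifications.
 * An out-of-range cpu argument falls back to cpu 0.
 */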
__private_extern__
uint32_t chudxnu_get_orig_cpu_l2cr(int cpu)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l2crOriginal;
}

__private_extern__
uint32_t chudxnu_get_orig_cpu_l3cr(int cpu)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l3crOriginal;
}

__private_extern__
void chudxnu_flush_caches(void)
{
    cacheInit();
}

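/*
 * Enables or disables the processor caches. cacheInit() reinitializes
 * the caches and leaves them enabled (it is also how flushes are done
 * above); disabling is done by initializing them and then calling
 * cacheDisable().
 */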
__private_extern__
void chudxnu_enable_caches(boolean_t enable)
{
    if(!enable) {
        cacheInit();
        cacheDisable();
    } else {
        cacheInit();
    }
}

__private_extern__
kern_return_t chudxnu_perfmon_acquire_facility(task_t task)
{
    return perfmon_acquire_facility(task);
}

__private_extern__
kern_return_t chudxnu_perfmon_release_facility(task_t task)
{
    return perfmon_release_facility(task);
}

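/*
 * Returns a pointer to the kernel's PC trace buffer and, if entries is
 * non-NULL, stores the number of entries in the buffer
 * (sizeof(pc_trace_buf)/sizeof(int), i.e. 1024).
 */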
__private_extern__
uint32_t *chudxnu_get_branch_trace_buffer(uint32_t *entries)
{
    extern int pc_trace_buf[1024];
    if(entries) {
        *entries = sizeof(pc_trace_buf)/sizeof(int);
    }
    return pc_trace_buf;
}

__private_extern__
boolean_t chudxnu_get_interrupts_enabled(void)
{
    return ml_get_interrupts_enabled();
}

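/*
 * chudxnu_set_interrupts_enabled() returns the previous enable state,
 * so callers can save and restore it around a critical section, e.g.:
 *
 *     boolean_t old = chudxnu_set_interrupts_enabled(FALSE);
 *     ...critical section...
 *     chudxnu_set_interrupts_enabled(old);
 *
 * (the same pattern used with ml_set_interrupts_enabled() in
 * chudxnu_get_cpu_rupt_counters() below).
 */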
__private_extern__
boolean_t chudxnu_set_interrupts_enabled(boolean_t enable)
{
    return ml_set_interrupts_enabled(enable);
}

__private_extern__
boolean_t chudxnu_at_interrupt_context(void)
{
    return ml_at_interrupt_context();
}

__private_extern__
void chudxnu_cause_interrupt(void)
{
    ml_cause_interrupt();
}

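/*
 * Copies the per-CPU hardware interrupt counters (resets, machine
 * checks, DSIs/ISIs, decrementers, system calls, etc.) into *rupts.
 * Interrupts are disabled while the counters are copied so the
 * snapshot is taken at a single point in time.
 */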
__private_extern__
kern_return_t chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(rupts) {
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);

        rupts->hwResets = per_proc_info[cpu].hwCtr.hwResets;
        rupts->hwMachineChecks = per_proc_info[cpu].hwCtr.hwMachineChecks;
        rupts->hwDSIs = per_proc_info[cpu].hwCtr.hwDSIs;
        rupts->hwISIs = per_proc_info[cpu].hwCtr.hwISIs;
        rupts->hwExternals = per_proc_info[cpu].hwCtr.hwExternals;
        rupts->hwAlignments = per_proc_info[cpu].hwCtr.hwAlignments;
        rupts->hwPrograms = per_proc_info[cpu].hwCtr.hwPrograms;
        rupts->hwFloatPointUnavailable = per_proc_info[cpu].hwCtr.hwFloatPointUnavailable;
        rupts->hwDecrementers = per_proc_info[cpu].hwCtr.hwDecrementers;
        rupts->hwIOErrors = per_proc_info[cpu].hwCtr.hwIOErrors;
        rupts->hwSystemCalls = per_proc_info[cpu].hwCtr.hwSystemCalls;
        rupts->hwTraces = per_proc_info[cpu].hwCtr.hwTraces;
        rupts->hwFloatingPointAssists = per_proc_info[cpu].hwCtr.hwFloatingPointAssists;
        rupts->hwPerformanceMonitors = per_proc_info[cpu].hwCtr.hwPerformanceMonitors;
        rupts->hwAltivecs = per_proc_info[cpu].hwCtr.hwAltivecs;
        rupts->hwInstBreakpoints = per_proc_info[cpu].hwCtr.hwInstBreakpoints;
        rupts->hwSystemManagements = per_proc_info[cpu].hwCtr.hwSystemManagements;
        rupts->hwAltivecAssists = per_proc_info[cpu].hwCtr.hwAltivecAssists;
        rupts->hwThermal = per_proc_info[cpu].hwCtr.hwThermal;
        rupts->hwSoftPatches = per_proc_info[cpu].hwCtr.hwSoftPatches;
        rupts->hwMaintenances = per_proc_info[cpu].hwCtr.hwMaintenances;
        rupts->hwInstrumentations = per_proc_info[cpu].hwCtr.hwInstrumentations;

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

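/*
 * Resets all of the per-CPU hardware interrupt counters to zero.
 */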
__private_extern__
kern_return_t chudxnu_clear_cpu_rupt_counters(int cpu)
{
    if(cpu<0 || cpu>=chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    bzero(&(per_proc_info[cpu].hwCtr), sizeof(struct hwCtrs));
    return KERN_SUCCESS;
}

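/*
 * Controls whether alignment exceptions are passed up to the CHUD
 * tools by setting or clearing the enaNotifyEM flag in dgWork.dgFlags
 * (the kernel diagnostics work area from ppc/Diagnostics.h).
 */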
__private_extern__
kern_return_t chudxnu_passup_alignment_exceptions(boolean_t enable)
{
    if(enable) {
        dgWork.dgFlags |= enaNotifyEM;
    } else {
        dgWork.dgFlags &= ~enaNotifyEM;
    }
    return KERN_SUCCESS;
}