/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <ppc/chud/chud_spr.h>
#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_cpu_asm.h>
#include <kern/processor.h>
#include <ppc/machine_routines.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <ppc/Diagnostics.h>

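/*
 * Return the number of CPUs currently available (online) as reported by
 * host_info(), or 0 if the query fails.
 */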
__private_extern__
int chudxnu_avail_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.avail_cpus;
    } else {
        return 0;
    }
}

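/*
 * Return the maximum (physical) CPU count as reported by host_info(), or 0
 * if the query fails.
 */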
__private_extern__
int chudxnu_phys_cpu_count(void)
{
    host_basic_info_data_t hinfo;
    kern_return_t kr;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    kr = host_info(host_self(), HOST_BASIC_INFO, (integer_t *)&hinfo, &count);
    if(kr == KERN_SUCCESS) {
        return hinfo.max_cpus;
    } else {
        return 0;
    }
}

__private_extern__
int chudxnu_cpu_number(void)
{
    return cpu_number();
}

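/*
 * Bring a processor online (enable==TRUE, via processor_start()) or take it
 * offline (enable==FALSE, via processor_exit()). The calling thread is
 * unbound first; the master processor cannot be disabled.
 */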
__private_extern__
kern_return_t chudxnu_enable_cpu(int cpu, boolean_t enable)
{
    chudxnu_unbind_current_thread();

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu] != PROCESSOR_NULL && processor_ptr[cpu] != master_processor) {
        if(enable) {
            return processor_start(processor_ptr[cpu]);
        } else {
            return processor_exit(processor_ptr[cpu]);
        }
    }
    return KERN_FAILURE;
}

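/*
 * Illustrative sketch only (not part of the original file): how a client
 * might cycle a CPU offline and back online. chud_example_cycle_cpu is a
 * hypothetical name; the chudxnu_enable_cpu() calls are the real API above.
 */
#if 0
static kern_return_t chud_example_cycle_cpu(int cpu)
{
    kern_return_t kr = chudxnu_enable_cpu(cpu, FALSE);  /* take offline */
    if(kr != KERN_SUCCESS) {
        return kr;
    }
    return chudxnu_enable_cpu(cpu, TRUE);               /* bring back online */
}
#endif
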
__private_extern__
kern_return_t chudxnu_enable_cpu_nap(int cpu, boolean_t enable)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(processor_ptr[cpu] != PROCESSOR_NULL) {
        ml_enable_nap(cpu, enable);
        return KERN_SUCCESS;
    }

    return KERN_FAILURE;
}

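/*
 * Report whether nap is enabled on a CPU. There is no read-only query:
 * ml_enable_nap() returns the previous setting, so the state is probed by
 * enabling nap and then immediately restoring the value that was read back.
 */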
__private_extern__
boolean_t chudxnu_cpu_nap_enabled(int cpu)
{
    boolean_t prev;

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }

    prev = ml_enable_nap(cpu, TRUE);
    ml_enable_nap(cpu, prev); /* restore the setting we just overwrote */

    return prev;
}

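/*
 * Write a 32-bit "shadowed" SPR: the hardware register is updated together
 * with the kernel's shadow copy in per_proc_info so the two stay consistent.
 * The calling thread is temporarily bound to the target CPU so that the
 * mtspr executes on the right processor.
 */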
__private_extern__
kern_return_t chudxnu_set_shadowed_spr(int cpu, int spr, uint32_t val)
{
    cpu_subtype_t cpu_subtype;
    uint32_t available;
    kern_return_t retval = KERN_FAILURE;

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    available = per_proc_info[cpu].pf.Available;
    cpu_subtype = machine_slot[cpu].cpu_subtype;

    if(spr==chud_750_l2cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL2) {
                // int enable = (val & 0x80000000) ? TRUE : FALSE;
                // if(enable) {
                //     per_proc_info[cpu].pf.l2cr = val;
                // } else {
                //     per_proc_info[cpu].pf.l2cr = 0;
                // }
                per_proc_info[cpu].pf.l2cr = val;
                cacheInit();
                // mtspr(l2cr, per_proc_info[cpu].pf.l2cr); // XXXXXXX why is this necessary? XXXXXXX
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_7450_l3cr) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_7450:
            if(available & pfL3) {
                int enable = (val & 0x80000000) ? TRUE : FALSE;
                if(enable) {
                    per_proc_info[cpu].pf.l3cr = val;
                } else {
                    per_proc_info[cpu].pf.l3cr = 0;
                }
                cacheInit();
                retval = KERN_SUCCESS;
            } else {
                retval = KERN_FAILURE;
            }
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
            cacheInit();
            cacheDisable(); /* disable caches */
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            cacheInit(); /* re-enable caches */
            retval = KERN_SUCCESS;
            break;
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid0), "r" (val));
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_750:
        case CPU_SUBTYPE_POWERPC_7400:
        case CPU_SUBTYPE_POWERPC_7450:
            __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750_hid1), "r" (val));
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_750fx_hid2 && cpu_subtype==CPU_SUBTYPE_POWERPC_750) { // the 750FX reports the plain 750 cpu_subtype
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_750fx_hid2), "r" (val));
        per_proc_info[cpu].pf.pfHID2 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7400_msscr0 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr0), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR0 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7400_msscr1 && (cpu_subtype==CPU_SUBTYPE_POWERPC_7400 || cpu_subtype==CPU_SUBTYPE_POWERPC_7450)) { // called MSSSR0 on the 7450
        /* The subtype test must be parenthesized: && binds tighter than ||,
           so without the parentheses any write on a 7450 matched this
           branch regardless of which spr was requested. */
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7400_msscr1), "r" (val));
        per_proc_info[cpu].pf.pfMSSCR1 = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7450_ldstcr && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ldstcr), "r" (val));
        per_proc_info[cpu].pf.pfLDSTCR = val;
        retval = KERN_SUCCESS;
    }
    else if(spr==chud_7450_ictrl && cpu_subtype==CPU_SUBTYPE_POWERPC_7450) {
        __asm__ volatile ("mtspr %0, %1" : : "n" (chud_7450_ictrl), "r" (val));
        per_proc_info[cpu].pf.pfICTRL = val;
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();
    return retval;
}

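/*
 * 64-bit analogue of chudxnu_set_shadowed_spr() for the PowerPC 970. The HID
 * writes go through the chudxnu_mthidN_64() assembly helpers, presumably
 * because the 970's HID registers require special mtspr sequencing, and the
 * per_proc_info shadow copy is updated to match.
 */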
__private_extern__
kern_return_t chudxnu_set_shadowed_spr64(int cpu, int spr, uint64_t val)
{
    cpu_subtype_t cpu_subtype;
    kern_return_t retval = KERN_FAILURE;

    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    chudxnu_bind_current_thread(cpu);

    cpu_subtype = machine_slot[cpu].cpu_subtype;

    if(spr==chud_970_hid0) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid0_64(&val);
            per_proc_info[cpu].pf.pfHID0 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid1) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid1_64(&val);
            per_proc_info[cpu].pf.pfHID1 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid4) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid4_64(&val);
            per_proc_info[cpu].pf.pfHID4 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    }
    else if(spr==chud_970_hid5) {
        switch(cpu_subtype) {
        case CPU_SUBTYPE_POWERPC_970:
            chudxnu_mthid5_64(&val);
            per_proc_info[cpu].pf.pfHID5 = val;
            retval = KERN_SUCCESS;
            break;
        default:
            retval = KERN_INVALID_ARGUMENT;
            break;
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_unbind_current_thread();

    return retval;
}

__private_extern__
uint32_t chudxnu_get_orig_cpu_l2cr(int cpu)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l2crOriginal;
}

__private_extern__
uint32_t chudxnu_get_orig_cpu_l3cr(int cpu)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        cpu = 0;
    }
    return per_proc_info[cpu].pf.l3crOriginal;
}

__private_extern__
void chudxnu_flush_caches(void)
{
    cacheInit();
}

__private_extern__
void chudxnu_enable_caches(boolean_t enable)
{
    if(!enable) {
        cacheInit();
        cacheDisable();
    } else {
        cacheInit();
    }
}

__private_extern__
kern_return_t chudxnu_perfmon_acquire_facility(task_t task)
{
    return perfmon_acquire_facility(task);
}

__private_extern__
kern_return_t chudxnu_perfmon_release_facility(task_t task)
{
    return perfmon_release_facility(task);
}

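/*
 * Expose the kernel's branch/PC trace buffer. On return, *entries (if
 * non-NULL) holds the number of slots; the buffer itself is pc_trace_buf.
 */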
__private_extern__
uint32_t * chudxnu_get_branch_trace_buffer(uint32_t *entries)
{
    extern int pc_trace_buf[1024];
    if(entries) {
        *entries = sizeof(pc_trace_buf)/sizeof(int);
    }
    return (uint32_t *)pc_trace_buf; /* cast: the buffer is declared as int[] */
}

__private_extern__
boolean_t chudxnu_get_interrupts_enabled(void)
{
    return ml_get_interrupts_enabled();
}

__private_extern__
boolean_t chudxnu_set_interrupts_enabled(boolean_t enable)
{
    return ml_set_interrupts_enabled(enable);
}

__private_extern__
boolean_t chudxnu_at_interrupt_context(void)
{
    return ml_at_interrupt_context();
}

__private_extern__
void chudxnu_cause_interrupt(void)
{
    ml_cause_interrupt();
}

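/*
 * Snapshot the per-CPU interrupt ("rupt") counters into *rupts. Interrupts
 * are disabled around the copy so the snapshot is internally consistent.
 */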
__private_extern__
kern_return_t chudxnu_get_cpu_rupt_counters(int cpu, rupt_counters_t *rupts)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    if(rupts) {
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);

        rupts->hwResets = per_proc_info[cpu].hwCtr.hwResets;
        rupts->hwMachineChecks = per_proc_info[cpu].hwCtr.hwMachineChecks;
        rupts->hwDSIs = per_proc_info[cpu].hwCtr.hwDSIs;
        rupts->hwISIs = per_proc_info[cpu].hwCtr.hwISIs;
        rupts->hwExternals = per_proc_info[cpu].hwCtr.hwExternals;
        rupts->hwAlignments = per_proc_info[cpu].hwCtr.hwAlignments;
        rupts->hwPrograms = per_proc_info[cpu].hwCtr.hwPrograms;
        rupts->hwFloatPointUnavailable = per_proc_info[cpu].hwCtr.hwFloatPointUnavailable;
        rupts->hwDecrementers = per_proc_info[cpu].hwCtr.hwDecrementers;
        rupts->hwIOErrors = per_proc_info[cpu].hwCtr.hwIOErrors;
        rupts->hwSystemCalls = per_proc_info[cpu].hwCtr.hwSystemCalls;
        rupts->hwTraces = per_proc_info[cpu].hwCtr.hwTraces;
        rupts->hwFloatingPointAssists = per_proc_info[cpu].hwCtr.hwFloatingPointAssists;
        rupts->hwPerformanceMonitors = per_proc_info[cpu].hwCtr.hwPerformanceMonitors;
        rupts->hwAltivecs = per_proc_info[cpu].hwCtr.hwAltivecs;
        rupts->hwInstBreakpoints = per_proc_info[cpu].hwCtr.hwInstBreakpoints;
        rupts->hwSystemManagements = per_proc_info[cpu].hwCtr.hwSystemManagements;
        rupts->hwAltivecAssists = per_proc_info[cpu].hwCtr.hwAltivecAssists;
        rupts->hwThermal = per_proc_info[cpu].hwCtr.hwThermal;
        rupts->hwSoftPatches = per_proc_info[cpu].hwCtr.hwSoftPatches;
        rupts->hwMaintenances = per_proc_info[cpu].hwCtr.hwMaintenances;
        rupts->hwInstrumentations = per_proc_info[cpu].hwCtr.hwInstrumentations;

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

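/*
 * Illustrative sketch only (not part of the original file): measuring how
 * many decrementer interrupts a CPU takes across an interval by diffing two
 * snapshots. chud_example_decrementer_delta is a hypothetical name, and the
 * counter fields are assumed to be 32-bit wrapping counts.
 */
#if 0
static uint32_t chud_example_decrementer_delta(int cpu)
{
    rupt_counters_t before, after;

    if(chudxnu_get_cpu_rupt_counters(cpu, &before) != KERN_SUCCESS) {
        return 0;
    }
    /* ... run the workload being measured ... */
    if(chudxnu_get_cpu_rupt_counters(cpu, &after) != KERN_SUCCESS) {
        return 0;
    }
    return after.hwDecrementers - before.hwDecrementers;
}
#endif
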
__private_extern__
kern_return_t chudxnu_clear_cpu_rupt_counters(int cpu)
{
    if(cpu < 0 || cpu >= chudxnu_phys_cpu_count()) { // check sanity of cpu argument
        return KERN_FAILURE;
    }

    bzero(&(per_proc_info[cpu].hwCtr), sizeof(struct hwCtrs));
    return KERN_SUCCESS;
}

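/*
 * Toggle the enaNotifyEM diagnostic flag; as the name of this function
 * indicates, setting it makes the kernel pass alignment exceptions up
 * rather than handling them transparently.
 */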
__private_extern__
kern_return_t chudxnu_passup_alignment_exceptions(boolean_t enable)
{
    if(enable) {
        dgWork.dgFlags |= enaNotifyEM;
    } else {
        dgWork.dgFlags &= ~enaNotifyEM;
    }
    return KERN_SUCCESS;
}