/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

boolean_t get_interrupts_enabled(void);

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size));
}

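/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * could map one page of device registers. UART_PHYS_BASE is a
 * hypothetical physical address, not a real XNU symbol.
 *
 *	vm_offset_t regs = ml_io_map(UART_PHYS_BASE, PAGE_SIZE);
 *	if (regs != 0)
 *		... access the device registers through regs ...
 */
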
/* Static memory allocation; only valid before the pmap is initialized */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	extern vm_offset_t static_memory_end;
	extern boolean_t pmap_initialized;
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}

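/*
 * Usage sketch (illustrative): carve out an early boot-time table before
 * pmap_init() runs; after that point ml_static_malloc() returns NULL.
 * The table name and size are hypothetical.
 *
 *	vm_offset_t table = ml_static_malloc(table_size);
 *	if (table == (vm_offset_t)NULL)
 *		panic("static allocation attempted after pmap init");
 */
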
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	extern vm_offset_t static_memory_end;
	vm_offset_t vaddr;

	/* Static memory is mapped V=R (virtual equals real) */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/* Return statically allocated (wired) pages to the VM free list */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page(vaddr);
	     vaddr_cur < trunc_page(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
			vm_page_create(paddr_cur, paddr_cur+PAGE_SIZE);
		}
	}
}

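/*
 * Usage sketch (illustrative): release a boot-time region once it is no
 * longer needed. Each reclaimed page is unwired, unmapped, and handed
 * back to the VM system via vm_page_create(). The names bootstrap_base
 * and bootstrap_size are hypothetical.
 *
 *	ml_static_mfree(bootstrap_base, bootstrap_size);
 */
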
/* Virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/* Initialize Interrupt Handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	int current_cpu;
	boolean_t current_state;

	current_cpu = cpu_number();
	current_state = ml_get_interrupts_enabled();

	per_proc_info[current_cpu].interrupt_nub = nub;
	per_proc_info[current_cpu].interrupt_source = source;
	per_proc_info[current_cpu].interrupt_target = target;
	per_proc_info[current_cpu].interrupt_handler = handler;
	per_proc_info[current_cpu].interrupt_refCon = refCon;

	per_proc_info[current_cpu].interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

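/*
 * Usage sketch (illustrative): a platform interrupt controller ("nub")
 * registering its dispatch routine for the current CPU. The names
 * pic_nub, PIC_SOURCE_EXT, and pic_dispatch are hypothetical; only the
 * ml_install_interrupt_handler() call itself is real.
 *
 *	ml_install_interrupt_handler(pic_nub, PIC_SOURCE_EXT, pic_nub,
 *				     (IOInterruptHandler)pic_dispatch, NULL);
 */
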
/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	int current_cpu;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	current_cpu = cpu_number();
	per_proc_info[current_cpu].interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

boolean_t fake_get_interrupts_enabled(void)
{
	/*
	 * The scheduler is not active on this cpu. There is no need to disable
	 * preemption. The current thread won't be dispatched on another cpu.
	 */
	return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0);
}

boolean_t fake_set_interrupts_enabled(boolean_t enable)
{
	boolean_t interrupt_state_prev;

	/*
	 * The scheduler is not active on this cpu. There is no need to disable
	 * preemption. The current thread won't be dispatched on another cpu.
	 */
	interrupt_state_prev =
		(per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0;
	if (interrupt_state_prev != enable)
		per_proc_info[cpu_number()].cpu_flags ^= turnEEon;
	return(interrupt_state_prev);
}

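/*
 * Note (added commentary): until a CPU's interrupt machinery is marked
 * enabled in per_proc_info, the turnEEon bit in cpu_flags acts as a
 * software shadow of the intended MSR[EE] state; the fake_* routines
 * above read and toggle that shadow instead of touching the MSR.
 */
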
/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	if (per_proc_info[cpu_number()].interrupts_enabled == TRUE)
		return(get_interrupts_enabled());
	else
		return(fake_get_interrupts_enabled());
}

boolean_t get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

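/*
 * Usage sketch (illustrative): the save/disable/restore pattern used
 * throughout this file for short critical sections, e.g. in
 * set_be_bit() below.
 *
 *	boolean_t state = ml_set_interrupts_enabled(FALSE);
 *	... touch per-processor state ...
 *	(void) ml_set_interrupts_enabled(state);
 */
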
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (per_proc_info[cpu_number()].istackptr == 0);	/* istackptr is zeroed while the interrupt stack is in use */
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/* On SMP-capable machines, bind MACHINE_GROUP threads to the master processor */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if ((policy_id == MACHINE_GROUP) &&
	    ((per_proc_info[0].pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);
}

void machine_idle(void)
{
	if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
		int cur_decr;

		machine_idle_ppc();

		/*
		 * Protect against a lost decrementer trap:
		 * if the current decrementer value is negative
		 * by more than 10 ticks, re-arm it since it's
		 * unlikely to fire at this point... a hardware
		 * interrupt got us out of machine_idle and may
		 * also be contributing to this state.
		 */
		cur_decr = isync_mfdec();

		if (cur_decr < -10) {
			mtdec(1);
		}
	}
}

void
machine_signal_idle(
	processor_t processor)
{
	(void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
}

kern_return_t
ml_processor_register(
	ml_processor_info_t *processor_info,
	processor_t *processor,
	ipi_handler_t *ipi_handler)
{
	int target_cpu;

	if (processor_info->boot_cpu == FALSE) {
		if (cpu_register(&target_cpu) != KERN_SUCCESS)
			return KERN_FAILURE;
	} else {
		/* boot_cpu is always 0 */
		target_cpu = 0;
	}

	per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
	per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;

	if(per_proc_info[target_cpu].pf.Available & pfCanNap)
		if(processor_info->supports_nap)
			per_proc_info[target_cpu].pf.Available |= pfWillNap;

	if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
	else
		per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

	if(target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

	*processor = cpu_to_processor(target_cpu);
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;
}

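/*
 * Usage sketch (illustrative): a platform layer registering a secondary
 * CPU. Only the fields shown above (boot_cpu, cpu_id, start_paddr,
 * supports_nap, time_base_enable) are consumed by this routine; the
 * concrete values are hypothetical.
 *
 *	ml_processor_info_t info;
 *	processor_t proc;
 *	ipi_handler_t ipi;
 *
 *	info.boot_cpu = FALSE;
 *	info.supports_nap = TRUE;
 *	info.time_base_enable = NULL;
 *	... fill in info.cpu_id and info.start_paddr ...
 *
 *	if (ml_processor_register(&info, &proc, &ipi) == KERN_SUCCESS)
 *		... the new processor may then be started ...
 */
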
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);

	if(per_proc_info[target_cpu].pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap;	/* Set nap mode if requested */
		else per_proc_info[target_cpu].pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	if(target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

	return (prev_value);
}

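/*
 * Usage sketch (illustrative): inhibit napping on the current CPU around
 * a latency-sensitive region, then restore the previous setting from
 * ml_enable_nap()'s return value.
 *
 *	boolean_t prev = ml_enable_nap(cpu_number(), FALSE);
 *	... latency-sensitive work ...
 *	(void) ml_enable_nap(cpu_number(), prev);
 */
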
void
ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info)
{
	if (cpu_info == 0) return;

	cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
	cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
	cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
	cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;

	if (per_proc_info[0].pf.Available & pfL2) {
		cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
		cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
	} else {
		cpu_info->l2_settings = 0;
		cpu_info->l2_cache_size = 0xFFFFFFFF;	/* 0xFFFFFFFF marks "no cache at this level" */
	}
	if (per_proc_info[0].pf.Available & pfL3) {
		cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
		cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
	} else {
		cpu_info->l3_settings = 0;
		cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

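/*
 * Usage sketch (illustrative): query the boot processor's cache layout,
 * treating the 0xFFFFFFFF sentinel as "cache absent".
 *
 *	ml_ppc_cpu_info_t info;
 *
 *	ml_ppc_get_info(&info);
 *	if (info.l2_cache_size != 0xFFFFFFFF)
 *		... tune buffer sizes based on info.l2_cache_size ...
 */
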
/* Enable bits (L2E/L3E) of the L2CR/L3CR cache control registers */
#define l2em 0x80000000
#define l3em 0x80000000

extern int real_ncpus;

int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;

	if (real_ncpus != 1) return -1;

	available = per_proc_info[0].pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = per_proc_info[0].pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
			else ccr = 0;
			per_proc_info[0].pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = per_proc_info[0].pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
			else ccr = 0;
			per_proc_info[0].pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}

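/*
 * Usage sketch (illustrative): disable the L2 cache and later restore
 * whatever state it was in; -1 means the level is absent or the system
 * is multiprocessor.
 *
 *	int prev = ml_enable_cache_level(2, FALSE);
 *	... run with L2 disabled ...
 *	if (prev >= 0)
 *		(void) ml_enable_cache_level(2, prev);
 */
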
void
init_ast_check(processor_t processor)
{}

void
cause_ast_check(processor_t processor)
{
	if ((processor != current_processor())
	    && (per_proc_info[processor->slot_num].interrupts_enabled == TRUE))
		cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
}

thread_t
switch_to_shutdown_context(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	disable_preemption();
	CreateShutdownCTX();
	enable_preemption();
	return((thread_t)(per_proc_info[cpu_number()].old_thread));
}

int
set_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

int
clr_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

int
be_tracing(void)
{
	int mycpu = cpu_number();
	return(per_proc_info[mycpu].cpu_flags & traceBE);
}
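
/*
 * Usage sketch (illustrative): bracket a region of interest with the
 * branch-trace flag and test it from the trace path; the helpers above
 * serialize against interrupts internally.
 *
 *	set_be_bit();
 *	... region of interest ...
 *	if (be_tracing())
 *		... emit branch-trace records ...
 *	clr_be_bit();
 */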