/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

boolean_t get_interrupts_enabled(void);

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return(io_map(phys_addr, size));
}
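
/*
 * Usage sketch (the physical address below is hypothetical, for
 * illustration only): a driver maps a device register block into
 * kernel virtual space before touching it.
 *
 *	vm_offset_t regs = ml_io_map(0xF3000000, PAGE_SIZE);
 *	if (regs == 0)
 *		panic("ml_io_map failed");
 */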

/* static memory allocation */
vm_offset_t
ml_static_malloc(
    vm_size_t size)
{
    extern vm_offset_t static_memory_end;
    extern boolean_t pmap_initialized;
    vm_offset_t vaddr;

    if (pmap_initialized)
        return((vm_offset_t)NULL);
    else {
        vaddr = static_memory_end;
        static_memory_end = round_page(vaddr + size);
        return(vaddr);
    }
}
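
/*
 * Note: this is a boot-time bump allocator. It returns the current
 * static_memory_end and advances it, rounded up to a page boundary;
 * there is no corresponding free. Once pmap_init() has run and set
 * pmap_initialized, further requests fail by returning NULL, so all
 * callers must allocate before the VM system comes up.
 */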

vm_offset_t
ml_static_ptovirt(
    vm_offset_t paddr)
{
    extern vm_offset_t static_memory_end;
    vm_offset_t vaddr;

    /* Static memory is mapped V=R (virtual equals real) */
    vaddr = paddr;
    if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
        return(vaddr);
    else
        return((vm_offset_t)NULL);
}

void
ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size)
{
    vm_offset_t paddr_cur, vaddr_cur;

    for (vaddr_cur = round_page(vaddr);
         vaddr_cur < trunc_page(vaddr + size);
         vaddr_cur += PAGE_SIZE) {
        paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
        if (paddr_cur != (vm_offset_t)NULL) {
            vm_page_wire_count--;
            pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur + PAGE_SIZE);
            vm_page_create(paddr_cur, paddr_cur + PAGE_SIZE);
        }
    }
}
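
/*
 * Note the rounding direction: round_page()/trunc_page() shrink the
 * range inward, so only pages wholly contained in [vaddr, vaddr+size)
 * are released. Each such page is unmapped from the kernel pmap, the
 * wire count is dropped, and vm_page_create() hands the physical page
 * back to the VM free list.
 */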

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr)
{
    return(pmap_extract(kernel_pmap, vaddr));
}

/* Initialize Interrupt Handler */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    int current_cpu;
    boolean_t current_state;

    current_cpu = cpu_number();
    current_state = ml_get_interrupts_enabled();

    per_proc_info[current_cpu].interrupt_nub     = nub;
    per_proc_info[current_cpu].interrupt_source  = source;
    per_proc_info[current_cpu].interrupt_target  = target;
    per_proc_info[current_cpu].interrupt_handler = handler;
    per_proc_info[current_cpu].interrupt_refCon  = refCon;

    per_proc_info[current_cpu].interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);
}
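
/*
 * Note: the handler is recorded only in the calling CPU's per_proc
 * slot, so each processor dispatches through its own (nub, source,
 * target, handler, refCon) tuple. Registering a handler also marks
 * interrupts_enabled for this CPU, which switches
 * ml_get/set_interrupts_enabled() from the "fake" software flag over
 * to the real MSR[EE] bit.
 */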

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
    int current_cpu;
    boolean_t current_state;

    current_state = ml_get_interrupts_enabled();

    current_cpu = cpu_number();
    per_proc_info[current_cpu].interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);
}

boolean_t fake_get_interrupts_enabled(void)
{
    /*
     * The scheduler is not active on this cpu. There is no need to disable
     * preemption. The current thread won't be dispatched on another cpu.
     */
    return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0);
}

boolean_t fake_set_interrupts_enabled(boolean_t enable)
{
    boolean_t interrupt_state_prev;

    /*
     * The scheduler is not active on this cpu. There is no need to disable
     * preemption. The current thread won't be dispatched on another cpu.
     */
    interrupt_state_prev =
        (per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0;
    if (interrupt_state_prev != enable)
        per_proc_info[cpu_number()].cpu_flags ^= turnEEon;
    return(interrupt_state_prev);
}
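
/*
 * The XOR toggle above works because turnEEon is a single flag bit:
 * it is flipped only when the requested state differs from the current
 * one, so the bit always ends up equal to 'enable' and the previous
 * state is returned, mirroring ml_set_interrupts_enabled().
 */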

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
    if (per_proc_info[cpu_number()].interrupts_enabled == TRUE)
        return(get_interrupts_enabled());
    else
        return(fake_get_interrupts_enabled());
}

boolean_t get_interrupts_enabled(void)
{
    return((mfmsr() & MASK(MSR_EE)) != 0);
}
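
/*
 * MSR[EE] is the PowerPC external-interrupt-enable bit, so the real
 * state is read straight from the Machine State Register. The usual
 * pattern for a short critical section, as used throughout this file:
 *
 *	boolean_t state = ml_set_interrupts_enabled(FALSE);
 *	... touch per_proc state ...
 *	(void) ml_set_interrupts_enabled(state);
 */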

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
    boolean_t ret;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    ret = (per_proc_info[cpu_number()].istackptr == 0);
    ml_set_interrupts_enabled(current_state);
    return(ret);
}
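
/*
 * istackptr is cleared while this CPU is running on its interrupt
 * stack, so a zero value means we are in interrupt context. Interrupts
 * are disabled around the test so the answer cannot change mid-check.
 */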

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
    CreateFakeIO();
}

void machine_idle(void)
{
    if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
        int cur_decr;

        machine_idle_ppc();

        /*
         * Protect against a lost decrementer trap: if the current
         * decrementer value is negative by more than 10 ticks,
         * re-arm it, since it's unlikely to fire at this point.
         * A hardware interrupt got us out of machine_idle and may
         * also be contributing to this state.
         */
        cur_decr = isync_mfdec();

        if (cur_decr < -10) {
            mtdec(1);
        }
    }
}

void
machine_signal_idle(
    processor_t processor)
{
    (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
}

kern_return_t
ml_processor_register(
    ml_processor_info_t *processor_info,
    processor_t *processor,
    ipi_handler_t *ipi_handler)
{
    kern_return_t ret;
    int target_cpu;

    if (processor_info->boot_cpu == FALSE) {
        if (cpu_register(&target_cpu) != KERN_SUCCESS)
            return KERN_FAILURE;
    } else {
        /* boot_cpu is always 0 */
        target_cpu = 0;
    }

    per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
    per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;

    if (per_proc_info[target_cpu].pf.Available & pfCanNap)
        if (processor_info->supports_nap)
            per_proc_info[target_cpu].pf.Available |= pfWillNap;

    if (processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
        per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
    else
        per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

    if (target_cpu == cpu_number())
        __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

    *processor = cpu_to_processor(target_cpu);
    *ipi_handler = cpu_signal_handler;

    return KERN_SUCCESS;
}
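
/*
 * Registration flow: platform code calls this once per CPU. The boot
 * processor always occupies slot 0; secondaries get a slot from
 * cpu_register(). SPRG2 carries a live copy of the feature-available
 * bits, presumably so low-level exception paths can test them without
 * a memory load; it is refreshed here only when the CPU being
 * registered is the one we are running on.
 */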

boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
    boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);

    if (per_proc_info[target_cpu].pf.Available & pfCanNap) {		/* Can the processor nap? */
        if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap;	/* Set nap enabled if requested */
        else per_proc_info[target_cpu].pf.Available &= ~pfWillNap;	/* Clear if not */
    }

    if (target_cpu == cpu_number())
        __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

    return (prev_value);
}
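
/*
 * One plausible caller pattern (an assumption for illustration, not
 * taken from a caller in this file): suppress napping around
 * timing-sensitive work, then restore the previous setting.
 *
 *	boolean_t was_napping = ml_enable_nap(cpu_number(), FALSE);
 *	... timing-sensitive work ...
 *	(void) ml_enable_nap(cpu_number(), was_napping);
 */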

void
ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info)
{
    if (cpu_info == 0) return;

    cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
    cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
    cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
    cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;

    if (per_proc_info[0].pf.Available & pfL2) {
        cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
        cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
    } else {
        cpu_info->l2_settings = 0;
        cpu_info->l2_cache_size = 0xFFFFFFFF;
    }
    if (per_proc_info[0].pf.Available & pfL3) {
        cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
        cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
    } else {
        cpu_info->l3_settings = 0;
        cpu_info->l3_cache_size = 0xFFFFFFFF;
    }
}
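
/*
 * All fields are read from per_proc_info[0], i.e. the boot CPU's
 * feature block; a missing cache level is reported as size 0xFFFFFFFF.
 * Usage sketch:
 *
 *	ml_ppc_cpu_info_t info;
 *	ml_ppc_get_info(&info);
 *	if (info.vector_unit)
 *		... AltiVec is available ...
 */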

void
init_ast_check(processor_t processor)
{}

void
cause_ast_check(processor_t processor)
{
    if ((processor != current_processor())
        && (per_proc_info[processor->slot_num].interrupts_enabled == TRUE))
        cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
}

thread_t
switch_to_shutdown_context(
    thread_t thread,
    void (*doshutdown)(processor_t),
    processor_t processor)
{
    disable_preemption();
    CreateShutdownCTX();
    enable_preemption();
    return((thread_t)(per_proc_info[cpu_number()].old_thread));
}

int
set_be_bit()
{
    int mycpu;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
    mycpu = cpu_number();
    per_proc_info[mycpu].cpu_flags |= traceBE;
    (void) ml_set_interrupts_enabled(current_state);
    return(1);
}

int
clr_be_bit()
{
    int mycpu;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
    mycpu = cpu_number();
    per_proc_info[mycpu].cpu_flags &= ~traceBE;
    (void) ml_set_interrupts_enabled(current_state);
    return(1);
}

int
be_tracing()
{
    int mycpu = cpu_number();
    return(per_proc_info[mycpu].cpu_flags & traceBE);
}
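
/*
 * set_be_bit()/clr_be_bit()/be_tracing() manage the branch-trace
 * enable flag for the current CPU. The same disable-interrupts
 * pattern shown earlier protects cpu_flags: without it, an interrupt
 * between cpu_number() and the flag update could leave the
 * read-modify-write racing against the interrupt path on this CPU.
 */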