/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

unsigned int max_cpus_initialized = 0;
extern int forcenap;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

boolean_t get_interrupts_enabled(void);

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size));
}

/*
 * Static memory allocation: carve permanent memory off the end of the
 * static (V=R) region.  Only valid before pmap initialization; once
 * pmap_initialized is set, this returns NULL.
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	extern vm_offset_t static_memory_end;
	extern boolean_t pmap_initialized;
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page_32(vaddr + size);
		return(vaddr);
	}
}
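
/*
 * Usage sketch (illustrative, not part of the original file): callers can
 * only draw from this pool before pmap initialization, so the result must
 * be checked.  The table name here is hypothetical.
 *
 *	vm_offset_t table = ml_static_malloc(16 * 1024);
 *	if (table == (vm_offset_t)NULL)
 *		panic("static pool already closed");
 */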

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	extern vm_offset_t static_memory_end;
	vm_offset_t vaddr;

	/* Static memory is mapped V=R (virtual equals real) */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/* Return statically allocated, wired pages to the free list */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
		}
	}
}

/* Virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/* Install the external interrupt handler for the current CPU */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	int current_cpu;
	boolean_t current_state;

	current_cpu = cpu_number();
	current_state = ml_get_interrupts_enabled();

	per_proc_info[current_cpu].interrupt_nub     = nub;
	per_proc_info[current_cpu].interrupt_source  = source;
	per_proc_info[current_cpu].interrupt_target  = target;
	per_proc_info[current_cpu].interrupt_handler = handler;
	per_proc_info[current_cpu].interrupt_refCon  = refCon;

	per_proc_info[current_cpu].interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

/* Initialize interrupts for the current CPU */
void ml_init_interrupt(void)
{
	int current_cpu;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	current_cpu = cpu_number();
	per_proc_info[current_cpu].interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/* Return TRUE if external interrupts are enabled (MSR[EE] set) */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}
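
/*
 * Usage sketch (illustrative, not part of the original file): the
 * save/disable/restore idiom used throughout this file.  Because
 * ml_set_interrupts_enabled() returns the previous state, a critical
 * section over per-CPU data is typically written as:
 *
 *	boolean_t state = ml_set_interrupts_enabled(FALSE);
 *	... touch per_proc_info or other per-CPU state ...
 *	(void) ml_set_interrupts_enabled(state);
 */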

/* Check whether we are running in interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (per_proc_info[cpu_number()].istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	extern int srv;

	if ((policy_id == MACHINE_GROUP) &&
	    ((per_proc_info[0].pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		if (srv == 0)
			thread->sched_mode |= TH_MODE_FORCEDPREEMPT;
		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

void machine_idle(void)
{
	if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
		int cur_decr;

		machine_idle_ppc();

		/*
		 * Protect against a lost decrementer trap:
		 * if the current decrementer value is negative
		 * by more than 10 ticks, re-arm it, since it is
		 * unlikely to fire at this point.  A hardware
		 * interrupt got us out of machine_idle and may
		 * also be contributing to this state.
		 */
		cur_decr = isync_mfdec();

		if (cur_decr < -10) {
			mtdec(1);
		}
	}
}

void
machine_signal_idle(
	processor_t processor)
{
	if (per_proc_info[processor->slot_num].pf.Available & (pfCanDoze | pfWillNap))
		(void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
}

kern_return_t
ml_processor_register(
	ml_processor_info_t *processor_info,
	processor_t *processor,
	ipi_handler_t *ipi_handler)
{
	kern_return_t ret;
	int target_cpu, cpu;
	int donap;

	if (processor_info->boot_cpu == FALSE) {
		if (cpu_register(&target_cpu) != KERN_SUCCESS)
			return KERN_FAILURE;
	} else {
		/* boot_cpu is always 0 */
		target_cpu = 0;
	}

	per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
	per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;

	donap = processor_info->supports_nap;	/* Assume we use the requested nap setting */
	if(forcenap) donap = forcenap - 1;	/* If there was an override (1 = off, 2 = on), use that instead */

	if(per_proc_info[target_cpu].pf.Available & pfCanNap)
		if(donap)
			per_proc_info[target_cpu].pf.Available |= pfWillNap;

	if(processor_info->time_base_enable != (void (*)(cpu_id_t, boolean_t))NULL)
		per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
	else
		per_proc_info[target_cpu].time_base_enable = (void (*)(cpu_id_t, boolean_t))NULL;

	if(target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set the live value */

	*processor = cpu_to_processor(target_cpu);
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;
}

boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);

	if(forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if(per_proc_info[target_cpu].pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap;	/* Set nap enabled */
		else per_proc_info[target_cpu].pf.Available &= ~pfWillNap;	/* Clear it if not */
	}

	if(target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set the live value */

	return (prev_value);
}
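
/*
 * Usage sketch (illustrative, not part of the original file): since
 * ml_enable_nap() returns the previous setting, nap can be disabled
 * around a timing-sensitive region and then restored:
 *
 *	boolean_t was_napping = ml_enable_nap(cpu_number(), FALSE);
 *	... timing-sensitive work ...
 *	(void) ml_enable_nap(cpu_number(), was_napping);
 */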

/*
 * Record the maximum number of CPUs and wake anyone blocked in
 * ml_get_max_cpus() waiting for the value to be set.
 */
void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus < NCPUS)
			machine_info.max_cpus = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

/* Block until ml_init_max_cpus() has run, then return the CPU count */
int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}
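
/*
 * Handshake sketch (illustrative, not part of the original file): one
 * thread publishes the count once during boot; any thread that asks
 * earlier sleeps on &max_cpus_initialized until the wakeup() fires.
 *
 *	ml_init_max_cpus(2);		producer, runs once at boot
 *	int n = ml_get_max_cpus();	consumer; blocks if it gets here first
 */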

void
ml_cpu_get_info(ml_cpu_info_t *cpu_info)
{
	if (cpu_info == 0) return;

	cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
	cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
	cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
	cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;

	if (per_proc_info[0].pf.Available & pfL2) {
		cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
		cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
	} else {
		/* No L2: zero the settings and flag the size as invalid */
		cpu_info->l2_settings = 0;
		cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (per_proc_info[0].pf.Available & pfL3) {
		cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
		cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
	} else {
		/* No L3: zero the settings and flag the size as invalid */
		cpu_info->l3_settings = 0;
		cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}
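
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * should treat an l2/l3 size of 0xFFFFFFFF as "no cache at this level"
 * rather than as a real size:
 *
 *	ml_cpu_info_t info;
 *	ml_cpu_get_info(&info);
 *	if (info.l2_cache_size != 0xFFFFFFFF)
 *		... size buffers from info.l2_cache_size ...
 */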

/* Enable bit (high-order bit) of the l2cr/l3cr cache control registers */
#define l2em 0x80000000
#define l3em 0x80000000

extern int real_ncpus;

/*
 * Enable or disable the L2 or L3 cache.  Returns the previous enable
 * state, or -1 if that cache level is not present or more than one
 * CPU is running.
 */
int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;

	if (real_ncpus != 1) return -1;

	available = per_proc_info[0].pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = per_proc_info[0].pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
			else ccr = 0;
			per_proc_info[0].pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = per_proc_info[0].pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
			else ccr = 0;
			per_proc_info[0].pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}
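
/*
 * Usage sketch (illustrative, not part of the original file): save the
 * returned mode and restore it afterwards, checking for the -1
 * "unsupported or multiprocessor" case:
 *
 *	int old = ml_enable_cache_level(2, FALSE);
 *	if (old >= 0) {
 *		... run with the L2 disabled ...
 *		(void) ml_enable_cache_level(2, old);
 *	}
 */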

void
init_ast_check(processor_t processor)
{}

void
cause_ast_check(
	processor_t processor)
{
	if ( processor != current_processor() &&
	     per_proc_info[processor->slot_num].interrupts_enabled == TRUE )
		cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
}

thread_t
switch_to_shutdown_context(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(per_proc_info[cpu_number()].old_thread));
}

/* Set the branch-trace enable (BE) flag for the current CPU */
int
set_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/* Clear the branch-trace enable (BE) flag for the current CPU */
int
clr_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/* Report whether branch tracing is enabled on the current CPU */
int
be_tracing(void)
{
	int mycpu = cpu_number();
	return(per_proc_info[mycpu].cpu_flags & traceBE);
}
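
/*
 * Usage sketch (illustrative, not part of the original file): a tracing
 * client brackets the region of interest and can poll the flag from
 * elsewhere on the same CPU:
 *
 *	set_be_bit();
 *	... code to be traced on this CPU ...
 *	if (be_tracing())
 *		clr_be_bit();
 */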