/* osfmk/ppc/machine_routines.c — Apple xnu-344.32 (recovered from a git-blame export) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

boolean_t get_interrupts_enabled(void);
1c79356b
A
33
34/* Map memory map IO space */
35vm_offset_t
36ml_io_map(
37 vm_offset_t phys_addr,
38 vm_size_t size)
39{
40 return(io_map(phys_addr,size));
41}
42
43/* static memory allocation */
44vm_offset_t
45ml_static_malloc(
46 vm_size_t size)
47{
48 extern vm_offset_t static_memory_end;
49 extern boolean_t pmap_initialized;
50 vm_offset_t vaddr;
51
52 if (pmap_initialized)
53 return((vm_offset_t)NULL);
54 else {
55 vaddr = static_memory_end;
de355530 56 static_memory_end = round_page(vaddr+size);
1c79356b
A
57 return(vaddr);
58 }
59}
60
61vm_offset_t
62ml_static_ptovirt(
63 vm_offset_t paddr)
64{
65 extern vm_offset_t static_memory_end;
66 vm_offset_t vaddr;
67
68 /* Static memory is map V=R */
69 vaddr = paddr;
70 if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
71 return(vaddr);
72 else
73 return((vm_offset_t)NULL);
74}
75
76void
77ml_static_mfree(
78 vm_offset_t vaddr,
79 vm_size_t size)
80{
81 vm_offset_t paddr_cur, vaddr_cur;
82
de355530
A
83 for (vaddr_cur = round_page(vaddr);
84 vaddr_cur < trunc_page(vaddr+size);
1c79356b
A
85 vaddr_cur += PAGE_SIZE) {
86 paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
87 if (paddr_cur != (vm_offset_t)NULL) {
88 vm_page_wire_count--;
de355530
A
89 pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
90 vm_page_create(paddr_cur,paddr_cur+PAGE_SIZE);
1c79356b
A
91 }
92 }
93}
94
95/* virtual to physical on wired pages */
96vm_offset_t ml_vtophys(
97 vm_offset_t vaddr)
98{
99 return(pmap_extract(kernel_pmap, vaddr));
100}
101
102/* Initialize Interrupt Handler */
103void ml_install_interrupt_handler(
104 void *nub,
105 int source,
106 void *target,
107 IOInterruptHandler handler,
108 void *refCon)
109{
110 int current_cpu;
111 boolean_t current_state;
112
113 current_cpu = cpu_number();
114 current_state = ml_get_interrupts_enabled();
115
116 per_proc_info[current_cpu].interrupt_nub = nub;
117 per_proc_info[current_cpu].interrupt_source = source;
118 per_proc_info[current_cpu].interrupt_target = target;
119 per_proc_info[current_cpu].interrupt_handler = handler;
120 per_proc_info[current_cpu].interrupt_refCon = refCon;
121
0b4e3aa0 122 per_proc_info[current_cpu].interrupts_enabled = TRUE;
1c79356b 123 (void) ml_set_interrupts_enabled(current_state);
9bccf70c
A
124
125 initialize_screen(0, kPEAcquireScreen);
1c79356b
A
126}
127
128/* Initialize Interrupts */
129void ml_init_interrupt(void)
130{
131 int current_cpu;
132 boolean_t current_state;
133
134 current_state = ml_get_interrupts_enabled();
135
136 current_cpu = cpu_number();
0b4e3aa0 137 per_proc_info[current_cpu].interrupts_enabled = TRUE;
1c79356b
A
138 (void) ml_set_interrupts_enabled(current_state);
139}
140
de355530
A
141boolean_t fake_get_interrupts_enabled(void)
142{
143 /*
144 * The scheduler is not active on this cpu. There is no need to disable
145 * preemption. The current thread wont be dispatched on anhother cpu.
146 */
147 return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0);
148}
149
150boolean_t fake_set_interrupts_enabled(boolean_t enable)
151{
152 boolean_t interrupt_state_prev;
153
154 /*
155 * The scheduler is not active on this cpu. There is no need to disable
156 * preemption. The current thread wont be dispatched on anhother cpu.
157 */
158 interrupt_state_prev =
159 (per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0;
160 if (interrupt_state_prev != enable)
161 per_proc_info[cpu_number()].cpu_flags ^= turnEEon;
162 return(interrupt_state_prev);
163}
164
1c79356b
A
165/* Get Interrupts Enabled */
166boolean_t ml_get_interrupts_enabled(void)
de355530
A
167{
168 if (per_proc_info[cpu_number()].interrupts_enabled == TRUE)
169 return(get_interrupts_enabled());
170 else
171 return(fake_get_interrupts_enabled());
172}
173
174boolean_t get_interrupts_enabled(void)
1c79356b
A
175{
176 return((mfmsr() & MASK(MSR_EE)) != 0);
177}
178
1c79356b
A
179/* Check if running at interrupt context */
180boolean_t ml_at_interrupt_context(void)
181{
0b4e3aa0
A
182 boolean_t ret;
183 boolean_t current_state;
184
185 current_state = ml_set_interrupts_enabled(FALSE);
186 ret = (per_proc_info[cpu_number()].istackptr == 0);
187 ml_set_interrupts_enabled(current_state);
188 return(ret);
1c79356b
A
189}
190
/* ml_cause_interrupt - post a fake (software-generated) interrupt. */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

9bccf70c 197void ml_thread_policy(
d52fe63f
A
198 thread_t thread,
199 unsigned policy_id,
200 unsigned policy_info)
201{
202 if ((policy_id == MACHINE_GROUP) &&
9bccf70c
A
203 ((per_proc_info[0].pf.Available) & pfSMPcap))
204 thread_bind(thread, master_processor);
205
206 if (policy_info & MACHINE_NETWORK_WORKLOOP) {
207 spl_t s = splsched();
208
209 thread_lock(thread);
210
211 thread->sched_mode |= TH_MODE_FORCEDPREEMPT;
212 set_priority(thread, thread->priority + 1);
213
214 thread_unlock(thread);
215 splx(s);
216 }
d52fe63f
A
217}
218
1c79356b
A
219void machine_idle(void)
220{
0b4e3aa0 221 if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
1c79356b
A
222 int cur_decr;
223
224 machine_idle_ppc();
225
226 /*
227 * protect against a lost decrementer trap
228 * if the current decrementer value is negative
229 * by more than 10 ticks, re-arm it since it's
230 * unlikely to fire at this point... a hardware
231 * interrupt got us out of machine_idle and may
232 * also be contributing to this state
233 */
234 cur_decr = isync_mfdec();
235
236 if (cur_decr < -10) {
237 mtdec(1);
238 }
239 }
240}
241
242void
243machine_signal_idle(
244 processor_t processor)
245{
246 (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
247}
248
249kern_return_t
250ml_processor_register(
251 ml_processor_info_t *processor_info,
252 processor_t *processor,
253 ipi_handler_t *ipi_handler)
254{
255 kern_return_t ret;
de355530 256 int target_cpu;
1c79356b
A
257
258 if (processor_info->boot_cpu == FALSE) {
259 if (cpu_register(&target_cpu) != KERN_SUCCESS)
260 return KERN_FAILURE;
261 } else {
262 /* boot_cpu is always 0 */
de355530 263 target_cpu= 0;
1c79356b
A
264 }
265
266 per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
267 per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;
268
269 if(per_proc_info[target_cpu].pf.Available & pfCanNap)
de355530 270 if(processor_info->supports_nap)
1c79356b
A
271 per_proc_info[target_cpu].pf.Available |= pfWillNap;
272
273 if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
274 per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
275 else
276 per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;
277
278 if(target_cpu == cpu_number())
279 __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */
280
281 *processor = cpu_to_processor(target_cpu);
282 *ipi_handler = cpu_signal_handler;
283
284 return KERN_SUCCESS;
285}
286
287boolean_t
288ml_enable_nap(int target_cpu, boolean_t nap_enabled)
289{
290 boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);
291
292 if(per_proc_info[target_cpu].pf.Available & pfCanNap) { /* Can the processor nap? */
293 if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap; /* Is nap supported on this machine? */
294 else per_proc_info[target_cpu].pf.Available &= ~pfWillNap; /* Clear if not */
295 }
296
297 if(target_cpu == cpu_number())
298 __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */
1c79356b 299
de355530 300 return (prev_value);
d7e50217
A
301}
302
303void
de355530 304ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info)
1c79356b
A
305{
306 if (cpu_info == 0) return;
307
308 cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
309 cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
310 cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
311 cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;
312
313 if (per_proc_info[0].pf.Available & pfL2) {
314 cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
315 cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
316 } else {
317 cpu_info->l2_settings = 0;
318 cpu_info->l2_cache_size = 0xFFFFFFFF;
319 }
320 if (per_proc_info[0].pf.Available & pfL3) {
321 cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
322 cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
323 } else {
324 cpu_info->l3_settings = 0;
325 cpu_info->l3_cache_size = 0xFFFFFFFF;
326 }
327}
328
d52fe63f
A
329#define l2em 0x80000000
330#define l3em 0x80000000
331
332extern int real_ncpus;
333
334int
335ml_enable_cache_level(int cache_level, int enable)
336{
337 int old_mode;
338 unsigned long available, ccr;
339
340 if (real_ncpus != 1) return -1;
341
342 available = per_proc_info[0].pf.Available;
343
344 if ((cache_level == 2) && (available & pfL2)) {
345 ccr = per_proc_info[0].pf.l2cr;
346 old_mode = (ccr & l2em) ? TRUE : FALSE;
347 if (old_mode != enable) {
348 if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
349 else ccr = 0;
350 per_proc_info[0].pf.l2cr = ccr;
351 cacheInit();
352 }
353
354 return old_mode;
355 }
356
357 if ((cache_level == 3) && (available & pfL3)) {
358 ccr = per_proc_info[0].pf.l3cr;
359 old_mode = (ccr & l3em) ? TRUE : FALSE;
360 if (old_mode != enable) {
361 if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
362 else ccr = 0;
363 per_proc_info[0].pf.l3cr = ccr;
364 cacheInit();
365 }
366
367 return old_mode;
368 }
369
370 return -1;
371}
372
1c79356b
A
373void
374init_ast_check(processor_t processor)
375{}
376
377void
9bccf70c
A
378cause_ast_check(
379 processor_t processor)
1c79356b 380{
9bccf70c
A
381 if ( processor != current_processor() &&
382 per_proc_info[processor->slot_num].interrupts_enabled == TRUE )
1c79356b
A
383 cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
384}
385
386thread_t
387switch_to_shutdown_context(
388 thread_t thread,
389 void (*doshutdown)(processor_t),
390 processor_t processor)
391{
1c79356b 392 CreateShutdownCTX();
1c79356b
A
393 return((thread_t)(per_proc_info[cpu_number()].old_thread));
394}
395
396int
397set_be_bit()
398{
399
400 int mycpu;
401 boolean_t current_state;
402
403 current_state = ml_set_interrupts_enabled(FALSE); /* Can't allow interruptions when mucking with per_proc flags */
404 mycpu = cpu_number();
405 per_proc_info[mycpu].cpu_flags |= traceBE;
406 (void) ml_set_interrupts_enabled(current_state);
407 return(1);
408}
409
410int
411clr_be_bit()
412{
413 int mycpu;
414 boolean_t current_state;
415
416 current_state = ml_set_interrupts_enabled(FALSE); /* Can't allow interruptions when mucking with per_proc flags */
417 mycpu = cpu_number();
418 per_proc_info[mycpu].cpu_flags &= ~traceBE;
419 (void) ml_set_interrupts_enabled(current_state);
420 return(1);
421}
422
423int
424be_tracing()
425{
426 int mycpu = cpu_number();
427 return(per_proc_info[mycpu].cpu_flags & traceBE);
428}
0b4e3aa0 429