/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

unsigned int max_cpus_initialized = 0;
unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;
extern int forcenap;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

boolean_t get_interrupts_enabled(void);

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr,size));
}

/* static memory allocation */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	extern vm_offset_t static_memory_end;
	extern boolean_t pmap_initialized;
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page_32(vaddr+size);
		return(vaddr);
	}
}

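/*
 * Illustrative use (hypothetical caller): early bootstrap code could
 * carve out a scratch region before pmap_init() runs, e.g.
 *
 *	vm_offset_t tbl = ml_static_malloc(16*1024);
 *	if (tbl == (vm_offset_t)NULL)
 *		panic("static pool exhausted or pmap already initialized");
 *
 * There is no corresponding free; static_memory_end only moves forward,
 * rounded up to a page boundary after each allocation.
 */
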
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	extern vm_offset_t static_memory_end;
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(paddr_cur>>12,(paddr_cur+PAGE_SIZE)>>12);
		}
	}
}

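/*
 * Note on ml_static_mfree above: the >>12 shifts convert physical byte
 * addresses into 4KB page-frame numbers for vm_page_create(), which
 * hands the reclaimed frames back to the VM system as managed pages;
 * vm_page_wire_count is dropped once per page to balance the books.
 */
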
/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/* Install an interrupt handler for the current CPU */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	int current_cpu;
	boolean_t current_state;

	current_cpu = cpu_number();
	current_state = ml_get_interrupts_enabled();

	per_proc_info[current_cpu].interrupt_nub     = nub;
	per_proc_info[current_cpu].interrupt_source  = source;
	per_proc_info[current_cpu].interrupt_target  = target;
	per_proc_info[current_cpu].interrupt_handler = handler;
	per_proc_info[current_cpu].interrupt_refCon  = refCon;

	per_proc_info[current_cpu].interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

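/*
 * Note that registration is per processor: the nub/source/handler tuple
 * lands in the calling CPU's per_proc_info slot, and interrupts_enabled
 * is the flag that machine_idle() and cause_ast_check() later test
 * before napping or signalling that CPU.
 */
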
/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	int current_cpu;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	current_cpu = cpu_number();
	per_proc_info[current_cpu].interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

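/*
 * MSR[EE] is the PowerPC external-interrupt enable bit, so this simply
 * reports the live state on the current CPU. The usual pairing with
 * ml_set_interrupts_enabled(), seen throughout this file, is:
 *
 *	boolean_t s = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(s);
 */
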
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (per_proc_info[cpu_number()].istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	extern int srv;

	if ((policy_id == MACHINE_GROUP) &&
	    ((per_proc_info[0].pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		if (srv == 0)
			thread->sched_mode |= TH_MODE_FORCEDPREEMPT;
		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

void machine_idle(void)
{
	if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
		int cur_decr;

		machine_idle_ppc();

		/*
		 * Protect against a lost decrementer trap:
		 * if the current decrementer value is negative
		 * by more than 10 ticks, re-arm it, since it's
		 * unlikely to fire at this point... a hardware
		 * interrupt got us out of machine_idle and may
		 * also be contributing to this state.
		 */
		cur_decr = isync_mfdec();

		if (cur_decr < -10) {
			mtdec(1);
		}
	}
}

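/*
 * mtdec(1) sets the decrementer to one tick, so it fires (and restores
 * normal timekeeping) almost immediately once interrupts are taken
 * again; without the re-arm, the next decrementer exception would
 * presumably not arrive until the register counted all the way around.
 */
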
void
machine_signal_idle(
	processor_t processor)
{
	if (per_proc_info[processor->slot_num].pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
}

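/*
 * The SIGPwake signal is only worth sending when the target can doze or
 * nap; a processor with neither capability never sleeps in its idle
 * loop, so presumably it picks up new work without being prodded.
 */
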
kern_return_t
ml_processor_register(
	ml_processor_info_t *processor_info,
	processor_t *processor,
	ipi_handler_t *ipi_handler)
{
	kern_return_t ret;
	int target_cpu, cpu;
	int donap;

	if (processor_info->boot_cpu == FALSE) {
		if (cpu_register(&target_cpu) != KERN_SUCCESS)
			return KERN_FAILURE;
	} else {
		/* boot_cpu is always 0 */
		target_cpu = 0;
	}

	per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
	per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;

	if (per_proc_info[target_cpu].pf.pfPowerModes & pmPowerTune) {
		per_proc_info[target_cpu].pf.pfPowerTune0 = processor_info->power_mode_0;
		per_proc_info[target_cpu].pf.pfPowerTune1 = processor_info->power_mode_1;
	}

	donap = processor_info->supports_nap;	/* Assume we use requested nap */
	if(forcenap) donap = forcenap - 1;	/* If there was an override, use that */

	if(per_proc_info[target_cpu].pf.Available & pfCanNap)
		if(donap)
			per_proc_info[target_cpu].pf.Available |= pfWillNap;

	if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
	else
		per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

	if(target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

	*processor = cpu_to_processor(target_cpu);
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;
}

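/*
 * forcenap is a boot-time override shared with ml_enable_nap() below:
 * zero means "no override", and a nonzero value encodes the forced
 * setting plus one, so forcenap - 1 yields 0 (nap off) or 1 (nap on).
 */
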
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);

	if(forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if(per_proc_info[target_cpu].pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap;	/* Turn nap mode on */
		else per_proc_info[target_cpu].pf.Available &= ~pfWillNap;	/* Turn nap mode off */
	}

	if(target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

	return (prev_value);
}

void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus < NCPUS)
			machine_info.max_cpus = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}

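/*
 * ml_init_max_cpus() and ml_get_max_cpus() form a small handshake:
 * a caller asking for the CPU count before it has been established
 * parks on &max_cpus_initialized (MAX_CPUS_WAIT) and is woken once
 * ml_init_max_cpus() publishes the value and moves the state to
 * MAX_CPUS_SET.
 */
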
void
ml_cpu_get_info(ml_cpu_info_t *cpu_info)
{
	if (cpu_info == 0) return;

	cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
	cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
	cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
	cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;

	if (per_proc_info[0].pf.Available & pfL2) {
		cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
		cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
	} else {
		cpu_info->l2_settings = 0;
		cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (per_proc_info[0].pf.Available & pfL3) {
		cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
		cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
	} else {
		cpu_info->l3_settings = 0;
		cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

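/*
 * A cache size of 0xFFFFFFFF above means "no such cache". The masks
 * below select the enable bit of the L2CR/L3CR configuration registers
 * (bit 0 in PowerPC numbering), which is how ml_enable_cache_level()
 * tells whether a cache is currently switched on.
 */
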
#define l2em 0x80000000
#define l3em 0x80000000

extern int real_ncpus;

int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;

	if (real_ncpus != 1) return -1;

	available = per_proc_info[0].pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = per_proc_info[0].pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
			else ccr = 0;
			per_proc_info[0].pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = per_proc_info[0].pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
			else ccr = 0;
			per_proc_info[0].pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}

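/*
 * Lock timeout and mutex spin defaults, worked out below:
 * LockTimeOut covers NSEC_PER_SEC>>2 = 250ms of absolute time, and the
 * "mtxspin" boot-arg (in microseconds) is clamped to USEC_PER_SEC>>4 =
 * 62500us, with a 20us spin when no argument is given.
 */
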
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(20*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

void
init_ast_check(processor_t processor)
{}

void
cause_ast_check(
	processor_t processor)
{
	if (processor != current_processor() &&
	    per_proc_info[processor->slot_num].interrupts_enabled == TRUE)
		cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
}

thread_t
switch_to_shutdown_context(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(per_proc_info[cpu_number()].old_thread));
}

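/*
 * The set/clr/test trio below manages the branch-trace-enable (traceBE)
 * flag in the current CPU's cpu_flags. Updates briefly disable
 * interrupts so the read-modify-write of the per_proc flags cannot be
 * torn by an interrupt taken on the same processor.
 */
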
int
set_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

int
clr_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

int
be_tracing(void)
{
	int mycpu = cpu_number();
	return(per_proc_info[mycpu].cpu_flags & traceBE);
}