/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

unsigned int max_cpus_initialized = 0;
unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;
extern int forcenap;

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

boolean_t get_interrupts_enabled(void);

/* Map memory map IO space */
vm_offset_t
ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return(io_map(phys_addr, size));
}

/* static memory allocation */
vm_offset_t
ml_static_malloc(
    vm_size_t size)
{
    extern vm_offset_t static_memory_end;
    extern boolean_t pmap_initialized;
    vm_offset_t vaddr;

    if (pmap_initialized)
        return((vm_offset_t)NULL);
    else {
        vaddr = static_memory_end;
        static_memory_end = round_page_32(vaddr + size);
        return(vaddr);
    }
}

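/* Translate a physical address in the static (V=R mapped) region to its virtual address */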
vm_offset_t
ml_static_ptovirt(
    vm_offset_t paddr)
{
    extern vm_offset_t static_memory_end;
    vm_offset_t vaddr;

    /* Static memory is mapped V=R */
    vaddr = paddr;
    if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
        return(vaddr);
    else
        return((vm_offset_t)NULL);
}

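/* Return statically allocated pages to the VM free list */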
void
ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size)
{
    vm_offset_t paddr_cur, vaddr_cur;

    for (vaddr_cur = round_page_32(vaddr);
         vaddr_cur < trunc_page_32(vaddr + size);
         vaddr_cur += PAGE_SIZE) {
        paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
        if (paddr_cur != (vm_offset_t)NULL) {
            vm_page_wire_count--;
            pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
            vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
        }
    }
}

/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr)
{
    return(pmap_extract(kernel_pmap, vaddr));
}

/* Initialize Interrupt Handler */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    int current_cpu;
    boolean_t current_state;

    current_cpu = cpu_number();
    current_state = ml_get_interrupts_enabled();

    per_proc_info[current_cpu].interrupt_nub = nub;
    per_proc_info[current_cpu].interrupt_source = source;
    per_proc_info[current_cpu].interrupt_target = target;
    per_proc_info[current_cpu].interrupt_handler = handler;
    per_proc_info[current_cpu].interrupt_refCon = refCon;

    per_proc_info[current_cpu].interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(0, kPEAcquireScreen);
}

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
    int current_cpu;
    boolean_t current_state;

    current_state = ml_get_interrupts_enabled();

    current_cpu = cpu_number();
    per_proc_info[current_cpu].interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
    return((mfmsr() & MASK(MSR_EE)) != 0);
}

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
    boolean_t ret;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    ret = (per_proc_info[cpu_number()].istackptr == 0);
    ml_set_interrupts_enabled(current_state);
    return(ret);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
    CreateFakeIO();
}

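/*
 * Apply machine-specific thread policy: bind MACHINE_GROUP threads to the
 * master processor on SMP-capable systems and give network workloop threads
 * a priority boost.
 */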
void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info)
{
    extern int srv;

    if ((policy_id == MACHINE_GROUP) &&
        ((per_proc_info[0].pf.Available) & pfSMPcap))
        thread_bind(thread, master_processor);

    if (policy_info & MACHINE_NETWORK_WORKLOOP) {
        spl_t s = splsched();

        thread_lock(thread);

        if (srv == 0)
            thread->sched_mode |= TH_MODE_FORCEDPREEMPT;
        set_priority(thread, thread->priority + 1);

        thread_unlock(thread);
        splx(s);
    }
}

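/* Idle the current processor; re-arm the decrementer afterwards if it went stale while idling */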
void machine_idle(void)
{
    if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
        int cur_decr;

        machine_idle_ppc();

        /*
         * protect against a lost decrementer trap
         * if the current decrementer value is negative
         * by more than 10 ticks, re-arm it since it's
         * unlikely to fire at this point... a hardware
         * interrupt got us out of machine_idle and may
         * also be contributing to this state
         */
        cur_decr = isync_mfdec();

        if (cur_decr < -10) {
            mtdec(1);
        }
    }
}

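/* Send a wake signal to an idle processor that can doze or nap */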
void
machine_signal_idle(
    processor_t processor)
{
    if (per_proc_info[processor->slot_num].pf.Available & (pfCanDoze|pfWillNap))
        (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
}

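/*
 * Register a processor: assign it a cpu slot, record its start address,
 * power-tune settings, and nap capability, and hand back its processor
 * structure and IPI handler.
 */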
kern_return_t
ml_processor_register(
    ml_processor_info_t *processor_info,
    processor_t *processor,
    ipi_handler_t *ipi_handler)
{
    kern_return_t ret;
    int target_cpu, cpu;
    int donap;

    if (processor_info->boot_cpu == FALSE) {
        if (spsLockInit == 0) {
            spsLockInit = 1;
            simple_lock_init(&spsLock, 0);
        }
        if (cpu_register(&target_cpu) != KERN_SUCCESS)
            return KERN_FAILURE;
    } else {
        /* boot_cpu is always 0 */
        target_cpu = 0;
    }

    per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
    per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;

    if (per_proc_info[target_cpu].pf.pfPowerModes & pmPowerTune) {
        per_proc_info[target_cpu].pf.pfPowerTune0 = processor_info->power_mode_0;
        per_proc_info[target_cpu].pf.pfPowerTune1 = processor_info->power_mode_1;
    }

    donap = processor_info->supports_nap;       /* Assume we use requested nap */
    if (forcenap) donap = forcenap - 1;         /* If there was an override, use that */

    if (per_proc_info[target_cpu].pf.Available & pfCanNap)
        if (donap)
            per_proc_info[target_cpu].pf.Available |= pfWillNap;

    if (processor_info->time_base_enable != (void (*)(cpu_id_t, boolean_t))NULL)
        per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
    else
        per_proc_info[target_cpu].time_base_enable = (void (*)(cpu_id_t, boolean_t))NULL;

    if (target_cpu == cpu_number())
        __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));   /* Set live value */

    *processor = cpu_to_processor(target_cpu);
    *ipi_handler = cpu_signal_handler;

    return KERN_SUCCESS;
}

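/* Enable or disable nap mode on a processor; returns the previous nap setting */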
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
    boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);

    if (forcenap) nap_enabled = forcenap - 1;   /* If we are to force nap on or off, do it */

    if (per_proc_info[target_cpu].pf.Available & pfCanNap) {    /* Can the processor nap? */
        if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap;   /* Set nap mode if requested */
        else per_proc_info[target_cpu].pf.Available &= ~pfWillNap;              /* Clear it if not */
    }

    if (target_cpu == cpu_number())
        __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));   /* Set live value */

    return (prev_value);
}

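/* Record the maximum cpu count and wake anyone waiting for it in ml_get_max_cpus() */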
void
ml_init_max_cpus(unsigned long max_cpus)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        if (max_cpus > 0 && max_cpus < NCPUS)
            machine_info.max_cpus = max_cpus;
        if (max_cpus_initialized == MAX_CPUS_WAIT)
            wakeup((event_t)&max_cpus_initialized);
        max_cpus_initialized = MAX_CPUS_SET;
    }
    (void) ml_set_interrupts_enabled(current_state);
}

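/* Return the maximum cpu count, blocking until ml_init_max_cpus() has set it */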
int
ml_get_max_cpus(void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    if (max_cpus_initialized != MAX_CPUS_SET) {
        max_cpus_initialized = MAX_CPUS_WAIT;
        assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
        (void)thread_block(THREAD_CONTINUE_NULL);
    }
    (void) ml_set_interrupts_enabled(current_state);
    return(machine_info.max_cpus);
}

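/* Report cache geometry and vector-unit availability for the boot processor */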
void
ml_cpu_get_info(ml_cpu_info_t *cpu_info)
{
    if (cpu_info == 0) return;

    cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
    cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
    cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
    cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;

    if (per_proc_info[0].pf.Available & pfL2) {
        cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
        cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
    } else {
        cpu_info->l2_settings = 0;
        cpu_info->l2_cache_size = 0xFFFFFFFF;
    }
    if (per_proc_info[0].pf.Available & pfL3) {
        cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
        cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
    } else {
        cpu_info->l3_settings = 0;
        cpu_info->l3_cache_size = 0xFFFFFFFF;
    }
}

#define l2em 0x80000000
#define l3em 0x80000000

extern int real_ncpus;

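/* Enable or disable the L2 or L3 cache (uniprocessor only); returns the previous state, or -1 if unsupported */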
int
ml_enable_cache_level(int cache_level, int enable)
{
    int old_mode;
    unsigned long available, ccr;

    if (real_ncpus != 1) return -1;

    available = per_proc_info[0].pf.Available;

    if ((cache_level == 2) && (available & pfL2)) {
        ccr = per_proc_info[0].pf.l2cr;
        old_mode = (ccr & l2em) ? TRUE : FALSE;
        if (old_mode != enable) {
            if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
            else ccr = 0;
            per_proc_info[0].pf.l2cr = ccr;
            cacheInit();
        }

        return old_mode;
    }

    if ((cache_level == 3) && (available & pfL3)) {
        ccr = per_proc_info[0].pf.l3cr;
        old_mode = (ccr & l3em) ? TRUE : FALSE;
        if (old_mode != enable) {
            if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
            else ccr = 0;
            per_proc_info[0].pf.l3cr = ccr;
            cacheInit();
        }

        return old_mode;
    }

    return -1;
}

/*
 * Routine: ml_set_processor_speed
 * Function: Change processor speed using whichever power-management
 *           scheme the platform supports (dual PLL, DFS, or PowerTune).
 */
void
ml_set_processor_speed(unsigned long speed)
{
    struct per_proc_info *proc_info;
    uint32_t powerModes, cpu;
    kern_return_t result;
    boolean_t current_state;
    unsigned int i;

    extern void ml_set_processor_speed_slave(unsigned long speed);
    extern void ml_set_processor_speed_dpll(unsigned long speed);
    extern void ml_set_processor_speed_dfs(unsigned long speed);
    extern void ml_set_processor_speed_powertune(unsigned long speed);

    powerModes = per_proc_info[0].pf.pfPowerModes;

    if (powerModes & pmDualPLL) {

        ml_set_processor_speed_dpll(speed);

    } else if (powerModes & pmDFS) {

        for (cpu = 0; cpu < real_ncpus; cpu++) {
            /*
             * cpu_signal() returns after .5ms if it fails to signal a running cpu;
             * retry cpu_signal() for .1s to deal with long interrupt latency at boot
             */
            for (i = 200; i > 0; i--) {
                current_state = ml_set_interrupts_enabled(FALSE);
                if (cpu != cpu_number()) {
                    if (!((machine_slot[cpu].running) &&
                          (per_proc_info[cpu].cpu_flags & SignalReady)))
                        /*
                         * Target cpu is off-line, skip
                         */
                        result = KERN_SUCCESS;
                    else {
                        simple_lock(&spsLock);
                        result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
                        if (result == KERN_SUCCESS)
                            thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
                        simple_unlock(&spsLock);
                    }
                } else {
                    ml_set_processor_speed_dfs(speed);
                    result = KERN_SUCCESS;
                }
                (void) ml_set_interrupts_enabled(current_state);
                if (result == KERN_SUCCESS)
                    break;
            }
            if (result != KERN_SUCCESS)
                panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
        }

    } else if (powerModes & pmPowerTune) {

        ml_set_processor_speed_powertune(speed);

    }
}

/*
 * Routine: ml_set_processor_speed_slave
 * Function: Handle a speed-change request on a non-boot processor:
 *           switch the local DFS speed, then wake the requesting thread.
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
    extern void ml_set_processor_speed_dfs(unsigned long speed);

    ml_set_processor_speed_dfs(speed);

    simple_lock(&spsLock);
    thread_wakeup(&spsLock);
    simple_unlock(&spsLock);
}

/*
 * Routine: ml_init_lock_timeout
 * Function: Initialize the lock timeout (1/4 second) and the mutex spin
 *           interval, honoring the "mtxspin" boot argument when present.
 */
void
ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint32_t mtxspin;

    nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
    LockTimeOut = (unsigned int)abstime;

    if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
        if (mtxspin > USEC_PER_SEC>>4)
            mtxspin = USEC_PER_SEC>>4;
        nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(20*NSEC_PER_USEC, &abstime);
    }
    MutexSpin = (unsigned int)abstime;
}

void
init_ast_check(processor_t processor)
{}

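/* Signal another processor to check for pending ASTs */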
void
cause_ast_check(
    processor_t processor)
{
    if ( processor != current_processor() &&
         per_proc_info[processor->slot_num].interrupts_enabled == TRUE )
        cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
}

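/* Build a shutdown context for this processor and return the thread that was running */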
thread_t
switch_to_shutdown_context(
    thread_t thread,
    void (*doshutdown)(processor_t),
    processor_t processor)
{
    CreateShutdownCTX();
    return((thread_t)(per_proc_info[cpu_number()].old_thread));
}

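/* set_be_bit/clr_be_bit/be_tracing: set, clear, and test the per-cpu traceBE flag used for branch tracing */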
int
set_be_bit()
{
    int mycpu;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);   /* Can't allow interruptions when mucking with per_proc flags */
    mycpu = cpu_number();
    per_proc_info[mycpu].cpu_flags |= traceBE;
    (void) ml_set_interrupts_enabled(current_state);
    return(1);
}

int
clr_be_bit()
{
    int mycpu;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);   /* Can't allow interruptions when mucking with per_proc flags */
    mycpu = cpu_number();
    per_proc_info[mycpu].cpu_flags &= ~traceBE;
    (void) ml_set_interrupts_enabled(current_state);
    return(1);
}

int
be_tracing()
{
    int mycpu = cpu_number();
    return(per_proc_info[mycpu].cpu_flags & traceBE);
}