[apple/xnu.git] / osfmk / ppc / machine_routines.c (xnu-1228.3.13)
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_cpu.h>	/* for cpu_signal_handler() */
#include <ppc/fpu_protos.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

unsigned int LockTimeOut = 1250000000;
unsigned int MutexSpin = 0;

decl_mutex_data(static,mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;

uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;

struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
	{NULL,				0x00000000}
};
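
/*
 * Note: 0x60000000 is the PowerPC no-op instruction (ori 0,0,0).  On a
 * uniprocessor configuration, ml_init_max_cpus() copies this value over the
 * isync/eieio barrier instructions listed above, removing lock-path barriers
 * that only matter when more than one cpu is present.
 */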

extern int forcenap;
extern boolean_t pmap_initialized;

/* Map memory-mapped I/O space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}
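
/*
 * Illustrative use (the address is hypothetical, not from this file): a
 * platform driver wanting an uncached kernel mapping of a device register
 * block might do
 *
 *	vm_offset_t regs = ml_io_map(0xF8000000, PAGE_SIZE);
 *
 * The mapping is created with the VM_WIMG_IO attributes (cache-inhibited,
 * guarded), which is what device registers typically require.
 */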

void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 *	Routine:	ml_static_malloc
 *	Function:	static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}

/*
 *	Routine:	ml_static_ptovirt
 *	Function:	Return the kernel virtual address for a physical
 *			address in the V=R static region, or NULL if it
 *			is not static
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/*
 *	Routine:	ml_static_mfree
 *	Function:	Release a range of wired static memory pages back
 *			to the VM free list
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(paddr_cur>>12, (paddr_cur+PAGE_SIZE)>>12);
		}
	}
}

/*
 *	Routine:	ml_vtophys
 *	Function:	virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/*
 *	Routine:	ml_install_interrupt_handler
 *	Function:	Install the interrupt handler for the current processor
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info	*proc_info;
	boolean_t		current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

/*
 *	Routine:	ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to be
 *			wired; i.e. no attempt is made to guarantee that the
 *			translations obtained remain valid for the duration
 *			of their use.
 */
vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, pindex, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!mapping_phys_lookup((cur_phys_src>>12), &pindex) ||
		    !mapping_phys_lookup((cur_phys_dst>>12), &pindex))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
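
/*
 * Illustrative call (names are hypothetical): a debugger-style reader that
 * must not fault could copy between two wired kernel mappings with
 *
 *	vm_size_t done = ml_nofault_copy(src_kva, dst_kva, len);
 *
 * and treat done != len as "one of the addresses had no valid translation".
 * The copy proceeds page by page, so a partial result is possible.
 */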

/*
 *	Routine:	ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 *	Routine:	ml_get_interrupts_enabled
 *	Function:	Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

/*
 *	Routine:	ml_at_interrupt_context
 *	Function:	Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t	ret;
	boolean_t	current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}
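
/*
 * Note: this relies on the convention that istackptr in the per_proc is
 * zeroed while the interrupt stack is active, so a value of zero indicates
 * interrupt context.  Interrupts are disabled around the test so the
 * per_proc being examined cannot change underneath us.
 */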

/*
 *	Routine:	ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/*
 *	Routine:	ml_thread_policy
 *	Function:	Apply machine-specific thread policy hints; network
 *			workloop threads get a one-step priority boost
 */
void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/*
 *	Routine:	machine_signal_idle
 *	Function:	Wake an idle processor that may be napping or dozing
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 *	Routine:	ml_processor_register
 *	Function:	Register a processor with the machine-dependent layer,
 *			allocating its per_proc structures as needed
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t	*in_processor_info,
	processor_t		*processor_out,
	ipi_handler_t		*ipi_handler)
{
	struct per_proc_info	*proc_info;
	int			donap;
	boolean_t		current_state;
	boolean_t		boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if (in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

	if ((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if (forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if ((proc_info->pf.Available & pfCanNap)
	    && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if (proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add(&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor,
			       proc_info->cpu_number, processor_pset(master_processor));
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}

/*
 *	Routine:	ml_enable_nap
 *	Function:	Enable or disable nap mode on the target cpu;
 *			returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info	*proc_info;
	boolean_t		prev_value;
	boolean_t		current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if (forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if (proc_info->pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Yes, enable it if requested */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if (proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}

/*
 *	Routine:	ml_init_max_cpus
 *	Function:	Record the maximum number of cpus; on a uniprocessor
 *			configuration, also patch lock-path barriers to no-ops
 */
void
ml_init_max_cpus(unsigned int mcpus)
{

	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}

/*
 *	Routine:	ml_get_max_cpus
 *	Function:	Return the maximum number of cpus, blocking until
 *			ml_init_max_cpus() has set it
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	}
	mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	(void)hw_atomic_add(&machine_info.physical_cpu, 1);
	(void)hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	(void)hw_atomic_sub(&machine_info.physical_cpu, 1);
	(void)hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 *	Routine:	ml_cpu_get_info
 *	Function:	Report cache and vector-unit characteristics of the
 *			boot processor
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info	*proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

/*
 *	Routine:	ml_enable_cache_level
 *	Function:	Enable or disable the L2 or L3 cache; returns the
 *			previous mode, or -1 if the request cannot be honored
 */
#define l2em 0x80000000
#define l3em 0x80000000
int
ml_enable_cache_level(int cache_level, int enable)
{
	int			old_mode;
	unsigned long		available, ccr;
	struct per_proc_info	*proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}

/*
 *	Routine:	ml_set_processor_speed
 *	Function:	Change processor speed using the platform's
 *			power-management scheme (dual PLL, DFS, or PowerTune)
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info	*proc_info;
	uint32_t		cpu;
	kern_return_t		result;
	boolean_t		current_state;
	unsigned int		i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu;
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != (unsigned)cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;

	}
	return;
}

/*
 *	Routine:	ml_set_processor_speed_slave
 *	Function:	Apply a DFS speed change on a secondary processor and
 *			wake the thread that requested it
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}

/*
 *	Routine:	ml_init_lock_timeout
 *	Function:	Initialize the lock timeout and mutex spin interval,
 *			honoring the mtxspin boot argument
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
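
/*
 * Worked values: NSEC_PER_SEC>>2 is 250,000,000 ns, so LockTimeOut defaults
 * to 250 ms expressed in absolute-time units (superseding the raw 1250000000
 * initializer above).  MutexSpin defaults to 10 us; an mtxspin= boot argument
 * overrides it, clamped to USEC_PER_SEC>>4 = 62,500 us.
 */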

/*
 *	Routine:	init_ast_check
 *	Function:	No-op on PowerPC
 */
void
init_ast_check(
	__unused processor_t processor)
{}

/*
 *	Routine:	cause_ast_check
 *	Function:	Signal another processor to check for pending ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 *	Routine:	machine_processor_shutdown
 *	Function:	Switch to the shutdown context and return the thread
 *			that was running
 */
thread_t
machine_processor_shutdown(
	__unused thread_t	thread,
	__unused void		(*doshutdown)(processor_t),
	__unused processor_t	processor)
{
	CreateShutdownCTX();
	return((thread_t)(getPerProc()->old_thread));
}

void ml_mem_backoff(void) {

	if (warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}
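
/*
 * Note on ml_mem_backoff(): the sync/isync pair acts as a strong barrier
 * that stalls until prior storage accesses complete; it is used as a memory
 * back-off workaround and is skipped entirely when the warDisMBpoff
 * workaround flag is set in warFlags.
 */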