/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_cpu.h>	/* for cpu_signal_handler() */
#include <ppc/fpu_protos.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

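/* Lock spin timeouts, in absolute-time units; the boot-time values are computed by ml_init_lock_timeout() below. */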
unsigned int LockTimeOut = 1250000000;
unsigned int MutexSpin = 0;

static int max_cpus_initialized = 0;

uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;

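/*
 * Uniprocessor patch table.  Each entry names a patch site inside the lock
 * and atomic primitives (the isync/eieio memory barriers declared above) and
 * the instruction to write there; 0x60000000 is the PowerPC no-op (ori 0,0,0).
 * ml_init_max_cpus() walks this table when the machine has a single logical
 * CPU and replaces the barriers with no-ops, since they only matter for
 * multiprocessor memory ordering.
 */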
struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
	{NULL,				0x00000000}
};

extern int forcenap;
extern boolean_t pmap_initialized;

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}


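/*
 * No bounce-buffer pool is maintained on this platform; report an empty
 * range to the caller.
 */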
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = 0;
	*size = 0;
}


/*
 * Routine: ml_static_malloc
 * Function: static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}

/*
 * Routine: ml_static_ptovirt
 * Function: Return the virtual address for a physical address within
 *           statically mapped kernel memory.
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/*
 * Routine: ml_static_mfree
 * Function: Return statically allocated kernel memory to the VM system.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(paddr_cur>>12, (paddr_cur+PAGE_SIZE)>>12);	/* 4K page numbers */
		}
	}
}

/*
 * Routine: ml_vtophys
 * Function: virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info *proc_info;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 *	valid translations in the kernel pmap.  If translations are present,
 *	they are assumed to be wired; i.e., no attempt is made to guarantee
 *	that the translations obtained remain valid for the duration of
 *	their use.
 */

vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, pindex, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!mapping_phys_lookup((cur_phys_src>>12), &pindex) ||
		    !mapping_phys_lookup((cur_phys_dst>>12), &pindex))
			break;
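		/*
		 * Copy no more than the bytes remaining in the current source
		 * page, the current destination page, or the request.
		 */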
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_interrupts_enabled
 * Function: Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
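	/* istackptr is zero while this CPU is running on its interrupt stack. */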
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/*
 * Routine: ml_thread_policy
 * Function:
 */
void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/*
 * Routine: machine_signal_idle
 * Function:
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

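	/* Only processors that may be dozing or napping need an explicit wake-up signal. */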
	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 * Routine: ml_processor_register
 * Function:
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t *in_processor_info,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler)
{
	struct per_proc_info *proc_info;
	int donap;
	boolean_t current_state;
	boolean_t boot_processor;

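	/*
	 * Secondary processors get a freshly allocated per_proc area and
	 * console buffer; the boot processor reuses the master entry.
	 */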
	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

	if((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if(forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if((proc_info->pf.Available & pfCanNap)
	   && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if(proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add(&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor,
		    proc_info->cpu_number, processor_pset(master_processor));
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}

/*
 * Routine: ml_enable_nap
 * Function:
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info *proc_info;
	boolean_t prev_value;
	boolean_t current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if(forcenap) nap_enabled = forcenap - 1;		/* If we are to force nap on or off, do it */

	if(proc_info->pf.Available & pfCanNap) {		/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Yes, enable nap if requested */
		else proc_info->pf.Available &= ~pfWillNap;	/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if(proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}

/*
 * Routine: ml_init_max_cpus
 * Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set.  Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
			machine_info.physical_cpu_max = max_cpus;
			machine_info.logical_cpu_max = max_cpus;
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}

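	/*
	 * On a single-CPU configuration, rewrite the multiprocessor memory
	 * barriers in the lock paths (see patch_up_table above) to no-ops.
	 */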
	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr = &patch_up_table[0];

		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
	}

	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_max_cpus
 * Function:
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	(void)hw_atomic_add(&machine_info.physical_cpu, 1);
	(void)hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	(void)hw_atomic_sub(&machine_info.physical_cpu, 1);
	(void)hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 * Routine: ml_cpu_get_info
 * Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info *proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

/*
 * Routine: ml_enable_cache_level
 * Function:
 */
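/* High-order bit of L2CR/L3CR: the cache-enable (L2E/L3E) bit. */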
#define l2em 0x80000000
#define l3em 0x80000000
int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}


/*
 * Routine: ml_set_processor_speed
 * Function:
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info *proc_info;
	uint32_t cpu;
	kern_return_t result;
	boolean_t current_state;
	unsigned int i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != (unsigned)cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;

	}
	return;
}

/*
 * Routine: ml_set_processor_speed_slave
 * Function:
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}

/*
 * Routine: ml_init_lock_timeout
 * Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;

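	/* Default lock timeout is a quarter of a second, converted to absolute-time (timebase) units. */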
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

/*
 * Routine: init_ast_check
 * Function:
 */
void
init_ast_check(
	__unused processor_t processor)
{}

/*
 * Routine: cause_ast_check
 * Function:
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 * Routine: machine_processor_shutdown
 * Function:
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(getPerProc()->old_thread));
}


void ml_mem_backoff(void) {

	if(warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}



/*
 * Stubs for CPU Stepper
 */
void
machine_run_count(__unused uint32_t count)
{
}

boolean_t
machine_cpu_is_inactive(__unused int num)
{
	return(FALSE);
}

vm_offset_t ml_stack_remaining(void)
{
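	/* The address of a stack local approximates the current stack pointer. */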
	uintptr_t local = (uintptr_t) &local;

	if (ml_at_interrupt_context()) {
		return (local - (getPerProc()->intstack_top_ss - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}