/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

unsigned int LockTimeOut = 1250000000;
unsigned int MutexSpin = 0;

decl_mutex_data(static,mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;

uint32_t warFlags = 0;
#define warDisMBpoff 0x80000000
#define MAX_CPUS_SET 0x01
#define MAX_CPUS_WAIT 0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
#if !MACH_LDEBUG
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
#endif

struct patch_up {
    unsigned int *addr;
    unsigned int data;
};

typedef struct patch_up patch_up_t;

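/*
 * Each entry below names a patch point inside one of the low-level lock
 * paths and the instruction to write there.  0x60000000 is the PowerPC
 * no-op encoding (ori 0,0,0); when ml_init_max_cpus() finds that only a
 * single logical CPU is configured, it overwrites the isync/eieio barriers
 * at these addresses with no-ops, since those barriers are only needed for
 * multiprocessor memory ordering.
 */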
patch_up_t patch_up_table[] = {
    {&hwllckPatch_isync, 0x60000000},
    {&hwulckPatch_isync, 0x60000000},
    {&hwulckbPatch_isync, 0x60000000},
    {&hwlmlckPatch_isync, 0x60000000},
    {&hwltlckPatch_isync, 0x60000000},
    {&hwcsatomicPatch_isync, 0x60000000},
    {&mlckePatch_isync, 0x60000000},
    {&mlckPatch_isync, 0x60000000},
    {&mltelckPatch_isync, 0x60000000},
    {&mltlckPatch_isync, 0x60000000},
    {&mulckePatch_isync, 0x60000000},
    {&mulckPatch_isync, 0x60000000},
    {&slckPatch_isync, 0x60000000},
    {&stlckPatch_isync, 0x60000000},
    {&sulckPatch_isync, 0x60000000},
    {&rwlePatch_isync, 0x60000000},
    {&rwlsPatch_isync, 0x60000000},
    {&rwlsePatch_isync, 0x60000000},
    {&rwlesPatch_isync, 0x60000000},
    {&rwtlePatch_isync, 0x60000000},
    {&rwtlsPatch_isync, 0x60000000},
    {&rwldPatch_isync, 0x60000000},
    {&hwulckPatch_eieio, 0x60000000},
    {&mulckPatch_eieio, 0x60000000},
    {&mulckePatch_eieio, 0x60000000},
    {&sulckPatch_eieio, 0x60000000},
    {&rwlesPatch_eieio, 0x60000000},
    {&rwldPatch_eieio, 0x60000000},
#if !MACH_LDEBUG
    {&entfsectPatch_isync, 0x60000000},
    {&retfsectPatch_isync, 0x60000000},
    {&retfsectPatch_eieio, 0x60000000},
#endif
    {NULL, 0x00000000}
};

extern int forcenap;
extern boolean_t pmap_initialized;

/* Map memory-mapped I/O space */
vm_offset_t
ml_io_map(
    vm_offset_t phys_addr,
    vm_size_t size)
{
    return(io_map(phys_addr, size, VM_WIMG_IO));
}
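/*
 * Typical use (addresses hypothetical): a driver maps a page of device
 * registers and then accesses them through the returned virtual address:
 *
 *	vm_offset_t regs = ml_io_map(0xf8000000, PAGE_SIZE);
 *	volatile uint32_t *status = (volatile uint32_t *)(regs + 0x10);
 *
 * The mapping is created with VM_WIMG_IO attributes, as appropriate for
 * device memory.
 */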


void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
    *phys_addr = 0;
    *size = 0;
}


/*
 * Routine: ml_static_malloc
 * Function: static memory allocation
 */
vm_offset_t
ml_static_malloc(
    vm_size_t size)
{
    vm_offset_t vaddr;

    if (pmap_initialized)
        return((vm_offset_t)NULL);
    else {
        vaddr = static_memory_end;
        static_memory_end = round_page(vaddr+size);
        return(vaddr);
    }
}
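/*
 * Note: this allocator is only usable before the VM system comes up
 * (while pmap_initialized is still FALSE); it simply bumps static_memory_end
 * by a page-rounded amount and there is no per-allocation free.  Once the
 * pmap is initialized it always fails by returning NULL.
 */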

/*
 * Routine: ml_static_ptovirt
 * Function: Convert a physical address within the static (V=R) region
 *           to its kernel virtual address
 */
vm_offset_t
ml_static_ptovirt(
    vm_offset_t paddr)
{
    vm_offset_t vaddr;

    /* Static memory is mapped V=R */
    vaddr = paddr;
    if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
        return(vaddr);
    else
        return((vm_offset_t)NULL);
}

/*
 * Routine: ml_static_mfree
 * Function: Release a range of boot-time static memory back to the
 *           VM free list
 */
void
ml_static_mfree(
    vm_offset_t vaddr,
    vm_size_t size)
{
    vm_offset_t paddr_cur, vaddr_cur;

    for (vaddr_cur = round_page_32(vaddr);
         vaddr_cur < trunc_page_32(vaddr+size);
         vaddr_cur += PAGE_SIZE) {
        paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
        if (paddr_cur != (vm_offset_t)NULL) {
            vm_page_wire_count--;
            pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
            vm_page_create(paddr_cur>>12, (paddr_cur+PAGE_SIZE)>>12);
        }
    }
}
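/*
 * The shifts by 12 convert byte addresses to 4 KB page numbers, which is the
 * form vm_page_create() expects; each released page is also removed from the
 * kernel pmap and deducted from the wired-page count.
 */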

/*
 * Routine: ml_vtophys
 * Function: virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
    vm_offset_t vaddr)
{
    return(pmap_extract(kernel_pmap, vaddr));
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Install an interrupt handler for the current processor
 */
void ml_install_interrupt_handler(
    void *nub,
    int source,
    void *target,
    IOInterruptHandler handler,
    void *refCon)
{
    struct per_proc_info *proc_info;
    boolean_t current_state;

    current_state = ml_get_interrupts_enabled();
    proc_info = getPerProc();

    proc_info->interrupt_nub = nub;
    proc_info->interrupt_source = source;
    proc_info->interrupt_target = target;
    proc_info->interrupt_handler = handler;
    proc_info->interrupt_refCon = refCon;

    proc_info->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);

    initialize_screen(0, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void ml_init_interrupt(void)
{
    boolean_t current_state;

    current_state = ml_get_interrupts_enabled();

    getPerProc()->interrupts_enabled = TRUE;
    (void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_interrupts_enabled
 * Function: Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
    return((mfmsr() & MASK(MSR_EE)) != 0);
}
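/*
 * Interrupt state is simply the MSR[EE] (external interrupt enable) bit.
 * Callers throughout this file use the save/disable/restore idiom:
 *
 *	boolean_t s = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(s);
 *
 * which restores whatever interrupt state the caller originally had.
 */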

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
    boolean_t ret;
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    ret = (getPerProc()->istackptr == 0);
    ml_set_interrupts_enabled(current_state);
    return(ret);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
    CreateFakeIO();
}

/*
 * Routine: ml_thread_policy
 * Function: Apply machine-dependent thread placement and priority hints
 */
void ml_thread_policy(
    thread_t thread,
    unsigned policy_id,
    unsigned policy_info)
{

    if ((policy_id == MACHINE_GROUP) &&
        ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
        thread_bind(thread, master_processor);

    if (policy_info & MACHINE_NETWORK_WORKLOOP) {
        spl_t s = splsched();

        thread_lock(thread);

        set_priority(thread, thread->priority + 1);

        thread_unlock(thread);
        splx(s);
    }
}

/*
 * Routine: machine_signal_idle
 * Function: Wake an idle processor that may be napping or dozing
 */
void
machine_signal_idle(
    processor_t processor)
{
    struct per_proc_info *proc_info;

    proc_info = PROCESSOR_TO_PER_PROC(processor);

    if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
        (void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 * Routine: ml_processor_register
 * Function: Register a processor with the machine-dependent layer,
 *           returning its processor structure and IPI handler
 */
kern_return_t
ml_processor_register(
    ml_processor_info_t *in_processor_info,
    processor_t *processor_out,
    ipi_handler_t *ipi_handler)
{
    struct per_proc_info *proc_info;
    int donap;
    boolean_t current_state;
    boolean_t boot_processor;

    if (in_processor_info->boot_cpu == FALSE) {
        if (spsLockInit == 0) {
            spsLockInit = 1;
            simple_lock_init(&spsLock, 0);
        }
        boot_processor = FALSE;
        proc_info = cpu_per_proc_alloc();
        if (proc_info == (struct per_proc_info *)NULL)
            return KERN_FAILURE;
        proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
        if (proc_info->pp_cbfr == (void *)NULL)
            goto processor_register_error;
    } else {
        boot_processor = TRUE;
        proc_info = PerProcTable[master_cpu].ppe_vaddr;
    }

    proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
    if (proc_info->pp_chud == (void *)NULL)
        goto processor_register_error;

    if (!boot_processor)
        if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
            goto processor_register_error;

    proc_info->cpu_id = in_processor_info->cpu_id;
    proc_info->start_paddr = in_processor_info->start_paddr;
    if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
        proc_info->time_base_enable = in_processor_info->time_base_enable;
    else
        proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

    if((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
        proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
        proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
    }

    donap = in_processor_info->supports_nap;  /* Assume we use requested nap */
    if(forcenap) donap = forcenap - 1;        /* If there was an override, use that */

    if((proc_info->pf.Available & pfCanNap)
       && (donap)) {
        proc_info->pf.Available |= pfWillNap;
        current_state = ml_set_interrupts_enabled(FALSE);
        if(proc_info == getPerProc())
            __asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));  /* Set live value */
        (void) ml_set_interrupts_enabled(current_state);
    }

    if (!boot_processor) {
        (void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);  /* saveareas for this processor */
        processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
    }

    *processor_out = (struct processor *)proc_info->processor;
    *ipi_handler = cpu_signal_handler;

    return KERN_SUCCESS;

processor_register_error:
    if (proc_info->pp_cbfr != (void *)NULL)
        console_per_proc_free(proc_info->pp_cbfr);
    if (proc_info->pp_chud != (void *)NULL)
        chudxnu_per_proc_free(proc_info->pp_chud);
    if (!boot_processor)
        cpu_per_proc_free(proc_info);
    return KERN_FAILURE;
}

/*
 * Routine: ml_enable_nap
 * Function: Enable or disable nap mode on the target processor;
 *           returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
    struct per_proc_info *proc_info;
    boolean_t prev_value;
    boolean_t current_state;

    proc_info = PerProcTable[target_cpu].ppe_vaddr;

    prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

    if(forcenap) nap_enabled = forcenap - 1;   /* If we are to force nap on or off, do it */

    if(proc_info->pf.Available & pfCanNap) {   /* Can the processor nap? */
        if (nap_enabled) proc_info->pf.Available |= pfWillNap;  /* Yes, enable it if requested */
        else proc_info->pf.Available &= ~pfWillNap;             /* Clear if not */
    }

    current_state = ml_set_interrupts_enabled(FALSE);
    if(proc_info == getPerProc())
        __asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));  /* Set live value */
    (void) ml_set_interrupts_enabled(current_state);

    return (prev_value);
}

/*
 * Routine: ml_init_max_cpus
 * Function: Record the maximum processor count; on a uniprocessor
 *           configuration, patch out SMP synchronization instructions
 */
void
ml_init_max_cpus(unsigned int mcpus)
{

    if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
        mutex_init(&mcpus_lock, 0);
    mutex_lock(&mcpus_lock);
    if ((mcpus_state & MAX_CPUS_SET)
        || (mcpus == 0)
        || (mcpus > MAX_CPUS))
        panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

    machine_info.max_cpus = mcpus;
    machine_info.physical_cpu_max = mcpus;
    machine_info.logical_cpu_max = mcpus;
    mcpus_state |= MAX_CPUS_SET;

    if (mcpus_state & MAX_CPUS_WAIT) {
        mcpus_state &= ~MAX_CPUS_WAIT;
        thread_wakeup((event_t)&mcpus_state);
    }
    mutex_unlock(&mcpus_lock);

    if (machine_info.logical_cpu_max == 1) {
        struct patch_up *patch_up_ptr;
        boolean_t current_state;

        patch_up_ptr = &patch_up_table[0];

        current_state = ml_set_interrupts_enabled(FALSE);
        while (patch_up_ptr->addr != NULL) {
            /*
             * Patch for V=R kernel text section
             */
            bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
                       (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
            sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
            patch_up_ptr++;
        }
        (void) ml_set_interrupts_enabled(current_state);
    }
}

/*
 * Routine: ml_get_max_cpus
 * Function: Return the maximum processor count, waiting for
 *           ml_init_max_cpus() to be called if necessary
 */
unsigned int
ml_get_max_cpus(void)
{
    if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
        mutex_init(&mcpus_lock, 0);
    mutex_lock(&mcpus_lock);
    if (!(mcpus_state & MAX_CPUS_SET)) {
        mcpus_state |= MAX_CPUS_WAIT;
        thread_sleep_mutex((event_t)&mcpus_state,
                           &mcpus_lock, THREAD_UNINT);
    } else
        mutex_unlock(&mcpus_lock);
    return(machine_info.max_cpus);
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
    hw_atomic_add(&machine_info.physical_cpu, 1);
    hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
    hw_atomic_sub(&machine_info.physical_cpu, 1);
    hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 * Routine: ml_cpu_get_info
 * Function: Report cache and vector-unit characteristics of the
 *           boot processor
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
    struct per_proc_info *proc_info;

    if (ml_cpu_info == 0) return;

    proc_info = PerProcTable[master_cpu].ppe_vaddr;
    ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
    ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
    ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
    ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

    if (proc_info->pf.Available & pfL2) {
        ml_cpu_info->l2_settings = proc_info->pf.l2cr;
        ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
    } else {
        ml_cpu_info->l2_settings = 0;
        ml_cpu_info->l2_cache_size = 0xFFFFFFFF;  /* No L2 cache present */
    }
    if (proc_info->pf.Available & pfL3) {
        ml_cpu_info->l3_settings = proc_info->pf.l3cr;
        ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
    } else {
        ml_cpu_info->l3_settings = 0;
        ml_cpu_info->l3_cache_size = 0xFFFFFFFF;  /* No L3 cache present */
    }
}

/*
 * Routine: ml_enable_cache_level
 * Function: Enable or disable the L2 or L3 cache; returns the previous
 *           enable state, or -1 if the request cannot be honored
 */
#define l2em 0x80000000   /* L2CR enable (L2E) bit */
#define l3em 0x80000000   /* L3CR enable (L3E) bit */
int
ml_enable_cache_level(int cache_level, int enable)
{
    int old_mode;
    unsigned long available, ccr;
    struct per_proc_info *proc_info;

    if (real_ncpus != 1) return -1;   /* XXX: This test is not safe */

    proc_info = PerProcTable[master_cpu].ppe_vaddr;
    available = proc_info->pf.Available;

    if ((cache_level == 2) && (available & pfL2)) {
        ccr = proc_info->pf.l2cr;
        old_mode = (ccr & l2em) ? TRUE : FALSE;
        if (old_mode != enable) {
            if (enable) ccr = proc_info->pf.l2crOriginal;
            else ccr = 0;
            proc_info->pf.l2cr = ccr;
            cacheInit();
        }

        return old_mode;
    }

    if ((cache_level == 3) && (available & pfL3)) {
        ccr = proc_info->pf.l3cr;
        old_mode = (ccr & l3em) ? TRUE : FALSE;
        if (old_mode != enable) {
            if (enable) ccr = proc_info->pf.l3crOriginal;
            else ccr = 0;
            proc_info->pf.l3cr = ccr;
            cacheInit();
        }

        return old_mode;
    }

    return -1;
}
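/*
 * Sketch of intended use (caller and timing hypothetical): disable a cache
 * level, do some work, then restore the previous state:
 *
 *	int was_enabled = ml_enable_cache_level(3, FALSE);
 *	if (was_enabled >= 0) {
 *		...
 *		(void) ml_enable_cache_level(3, was_enabled);
 *	}
 *
 * A negative return means the level is not present, or the call was made on
 * a multiprocessor configuration, where this interface refuses to act.
 */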
/*
 * Routine: ml_set_processor_speed
 * Function: Change processor speed using the platform's power
 *           management mechanism (dual PLL, DFS, or PowerTune)
 */
void
ml_set_processor_speed(unsigned long speed)
{
    struct per_proc_info *proc_info;
    uint32_t cpu;
    kern_return_t result;
    boolean_t current_state;
    unsigned int i;

    proc_info = PerProcTable[master_cpu].ppe_vaddr;

    switch (proc_info->pf.pfPowerModes & pmType) {  /* Figure specific type */
    case pmDualPLL:

        ml_set_processor_speed_dpll(speed);
        break;

    case pmDFS:

        for (cpu = 0; cpu < real_ncpus; cpu++) {
            /*
             * cpu_signal() returns after .5ms if it fails to signal a running cpu
             * retry cpu_signal() for .1s to deal with long interrupt latency at boot
             */
            for (i = 200; i > 0; i--) {
                current_state = ml_set_interrupts_enabled(FALSE);
                if (cpu != cpu_number()) {
                    if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
                        /*
                         * Target cpu is off-line, skip
                         */
                        result = KERN_SUCCESS;
                    else {
                        simple_lock(&spsLock);
                        result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
                        if (result == KERN_SUCCESS)
                            thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
                        simple_unlock(&spsLock);
                    }
                } else {
                    ml_set_processor_speed_dfs(speed);
                    result = KERN_SUCCESS;
                }
                (void) ml_set_interrupts_enabled(current_state);
                if (result == KERN_SUCCESS)
                    break;
            }
            if (result != KERN_SUCCESS)
                panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
        }
        break;

    case pmPowerTune:

        ml_set_processor_speed_powertune(speed);
        break;

    default:
        break;

    }
    return;
}

/*
 * Routine: ml_set_processor_speed_slave
 * Function: Apply a DFS speed change on a secondary processor and wake
 *           the thread that requested it
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
    ml_set_processor_speed_dfs(speed);

    simple_lock(&spsLock);
    thread_wakeup(&spsLock);
    simple_unlock(&spsLock);
}

/*
 * Routine: ml_init_lock_timeout
 * Function: Initialize the spin-lock timeout and mutex spin durations
 */
void
ml_init_lock_timeout(void)
{
    uint64_t abstime;
    uint32_t mtxspin;

    nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
    LockTimeOut = (unsigned int)abstime;

    if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
        if (mtxspin > USEC_PER_SEC>>4)
            mtxspin = USEC_PER_SEC>>4;
        nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
    } else {
        nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
    }
    MutexSpin = (unsigned int)abstime;
}
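/*
 * In concrete terms: LockTimeOut is set to NSEC_PER_SEC>>2 = 250 ms worth of
 * timebase ticks, and MutexSpin defaults to 10 us unless overridden by the
 * "mtxspin" boot argument, which is interpreted in microseconds and capped
 * at USEC_PER_SEC>>4 = 62,500 us.
 */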

/*
 * Routine: init_ast_check
 * Function: No-op on PowerPC
 */
void
init_ast_check(
    __unused processor_t processor)
{}

/*
 * Routine: cause_ast_check
 * Function: Signal another processor to check for pending ASTs
 */
void
cause_ast_check(
    processor_t processor)
{
    struct per_proc_info *proc_info;

    proc_info = PROCESSOR_TO_PER_PROC(processor);

    if (proc_info != getPerProc()
        && proc_info->interrupts_enabled == TRUE)
        cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 * Routine: machine_processor_shutdown
 * Function: Switch to the shutdown context and return the previously
 *           running thread
 */
thread_t
machine_processor_shutdown(
    __unused thread_t thread,
    __unused void (*doshutdown)(processor_t),
    __unused processor_t processor)
{
    CreateShutdownCTX();
    return((thread_t)(getPerProc()->old_thread));
}

/*
 * Routine: set_be_bit
 * Function: Set the branch-trace (traceBE) flag on the current processor
 */
int
set_be_bit(
    void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    getPerProc()->cpu_flags |= traceBE;
    (void) ml_set_interrupts_enabled(current_state);
    return(1);
}

/*
 * Routine: clr_be_bit
 * Function: Clear the branch-trace (traceBE) flag on the current processor
 */
int
clr_be_bit(
    void)
{
    boolean_t current_state;

    current_state = ml_set_interrupts_enabled(FALSE);
    getPerProc()->cpu_flags &= ~traceBE;
    (void) ml_set_interrupts_enabled(current_state);
    return(1);
}

/*
 * Routine: be_tracing
 * Function: Test whether branch tracing is enabled on the current processor
 */
int
be_tracing(
    void)
{
    return(getPerProc()->cpu_flags & traceBE);
}


void ml_mem_backoff(void) {

    if(warFlags & warDisMBpoff) return;   /* If backoff disabled, exit */

    __asm__ volatile("sync");    /* Wait for all prior memory accesses to complete */
    __asm__ volatile("isync");   /* Discard any prefetched instructions */

    return;
}