/* apple/xnu (xnu-792.2.4): osfmk/ppc/machine_routines.c */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;

decl_mutex_data(static,mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;

uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
#if !MACH_LDEBUG
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
#endif

struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
#if !MACH_LDEBUG
	{&entfsectPatch_isync,		0x60000000},
	{&retfsectPatch_isync,		0x60000000},
	{&retfsectPatch_eieio,		0x60000000},
#endif
	{NULL,				0x00000000}
};
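
/*
 * 0x60000000 is the PowerPC no-op encoding ("ori 0,0,0").  When
 * ml_init_max_cpus() below finds that only one logical CPU is configured,
 * it walks this table and overwrites each listed isync/eieio barrier in
 * the lock paths with a nop, since those barriers only matter for
 * multiprocessor ordering.
 */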

extern int forcenap;
extern boolean_t pmap_initialized;

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr,size));
}
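
/*
 * Usage sketch (illustrative only; the physical base address below is a
 * hypothetical device register range, not taken from this file):
 *
 *	vm_offset_t regs = ml_io_map(0xF8000000, PAGE_SIZE);
 *
 * The returned value is a kernel virtual address through which the device
 * registers in that physical range can be accessed.
 */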

/*
 * Routine: ml_static_malloc
 * Function: static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}
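
/*
 * Usage sketch (illustrative).  This allocator only works during early
 * boot; once pmap_initialized is set it returns NULL and the caller must
 * use the normal kernel allocators instead:
 *
 *	vm_offset_t early_buf = ml_static_malloc(PAGE_SIZE);
 *	if (early_buf == (vm_offset_t)NULL)
 *		... too late, fall back to a regular allocation ...
 */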

/*
 * Routine: ml_static_ptovirt
 * Function: Return the virtual address of a physical address in the static (V=R) region, or NULL if it is not mapped there
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/*
 * Routine: ml_static_mfree
 * Function: Return whole pages of a static (V=R) mapping to the VM free list
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(paddr_cur>>12,(paddr_cur+PAGE_SIZE)>>12);
		}
	}
}
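
/*
 * Note on the shifts above: PAGE_SIZE here is 4 KB, so paddr_cur >> 12
 * converts a physical byte address into a physical page number.
 * vm_page_create() is handed that page range so the freed static pages
 * land on the VM free list, and vm_page_wire_count is decremented because
 * the static mapping had counted them as wired.
 */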

/*
 * Routine: ml_vtophys
 * Function: virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Install an interrupt handler on the current processor
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info *proc_info;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_interrupts_enabled
 * Function: Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/*
 * Routine: ml_thread_policy
 * Function: Apply a machine-specific scheduling policy hint to a thread
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{

	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
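
/*
 * Usage sketch (illustrative; a typical caller is a network driver work
 * loop asking for a priority boost and, on SMP-capable hardware, binding
 * to the master processor):
 *
 *	ml_thread_policy(current_thread(), MACHINE_GROUP,
 *			 MACHINE_NETWORK_WORKLOOP);
 *
 * current_thread() is assumed here for illustration; the policy constants
 * are the ones tested above.
 */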

/*
 * Routine: machine_signal_idle
 * Function: Wake an idle processor that may be napping or dozing
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 * Routine: ml_processor_register
 * Function: Register a processor and set up its per-processor data
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t *in_processor_info,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler)
{
	struct per_proc_info *proc_info;
	int donap;
	boolean_t current_state;
	boolean_t boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

	if (proc_info->pf.pfPowerModes & pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if(forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if((proc_info->pf.Available & pfCanNap)
	   && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if(proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}
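
/*
 * Registration flow in brief: a non-boot processor gets a freshly allocated
 * per_proc area and per-processor console buffer, while the boot processor
 * reuses the master_cpu entry in PerProcTable; a CHUD per-processor area is
 * allocated in both cases.  Failures after the per_proc allocation unwind
 * through processor_register_error, freeing whatever had been set up, and
 * return KERN_FAILURE.
 */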

/*
 * Routine: ml_enable_nap
 * Function: Enable or disable nap on a processor; returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info *proc_info;
	boolean_t prev_value;
	boolean_t current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if(forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if(proc_info->pf.Available & pfCanNap) {		/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Is nap supported on this machine? */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if(proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}
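
/*
 * Usage sketch (illustrative):
 *
 *	boolean_t was_enabled = ml_enable_nap(master_cpu, FALSE);
 *	... section that must not run with nap enabled ...
 *	(void) ml_enable_nap(master_cpu, was_enabled);
 *
 * The return value is the previous nap state, so callers can restore it;
 * the forcenap boot override still wins over the requested setting.
 */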

/*
 * Routine: ml_init_max_cpus
 * Function: Record the maximum number of CPUs; on uniprocessor configurations, patch out the SMP barriers
 */
void
ml_init_max_cpus(unsigned int mcpus)
{

	if (hw_compare_and_store(0,1,&mcpus_lock_initialized))
		mutex_init(&mcpus_lock,0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;	/* Clear the wait flag before waking the waiter */
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)),4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}

/*
 * Routine: ml_get_max_cpus
 * Function: Return the maximum number of CPUs, waiting until it has been set
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0,1,&mcpus_lock_initialized))
		mutex_init(&mcpus_lock,0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}
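
/*
 * ml_get_max_cpus() pairs with ml_init_max_cpus() above: if the maximum has
 * not been published yet, the caller marks MAX_CPUS_WAIT and sleeps on
 * mcpus_state; ml_init_max_cpus() publishes the value, clears the wait
 * flag, and issues the thread_wakeup() that lets this routine return
 * machine_info.max_cpus.
 */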

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 * Routine: ml_cpu_get_info
 * Function: Report the master processor's cache and vector-unit characteristics
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info *proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}
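
/*
 * Usage sketch (illustrative):
 *
 *	ml_cpu_info_t info;
 *
 *	ml_cpu_get_info(&info);
 *	if (info.vector_unit)
 *		... AltiVec is available ...
 *
 * Note that a cache level that is not present reports a size of 0xFFFFFFFF
 * rather than 0.
 */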

/*
 * Routine: ml_enable_cache_level
 * Function: Enable or disable the L2 or L3 cache; returns the previous state or -1
 */
#define l2em 0x80000000
#define l3em 0x80000000
int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}
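
/*
 * Usage sketch (illustrative).  The return value is the previous enable
 * state, or -1 if the request could not be honored, so a caller can
 * restore the old setting:
 *
 *	int old_l2 = ml_enable_cache_level(2, FALSE);	(disable the L2)
 *	...
 *	if (old_l2 >= 0)
 *		(void) ml_enable_cache_level(2, old_l2);	(restore it)
 */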


decl_simple_lock_data(, spsLock);

/*
 * Routine: ml_set_processor_speed
 * Function: Set the processor speed using dual PLL, DFS, or PowerTune as supported
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info *proc_info;
	uint32_t powerModes, cpu;
	kern_return_t result;
	boolean_t current_state;
	unsigned int i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	powerModes = proc_info->pf.pfPowerModes;

	if (powerModes & pmDualPLL) {

		ml_set_processor_speed_dpll(speed);

	} else if (powerModes & pmDFS) {

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i=200; i>0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
		}

	} else if (powerModes & pmPowerTune) {

		ml_set_processor_speed_powertune(speed);

	}
}

/*
 * Routine: ml_set_processor_speed_slave
 * Function: Set this processor's speed via DFS and wake the requesting processor
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}
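
/*
 * ml_set_processor_speed_slave() is the target-side half of the pmDFS
 * handshake in ml_set_processor_speed(): the requesting cpu sends
 * SIGPcpureq/CPRQsps and sleeps on spsLock; the target switches its own
 * frequency with ml_set_processor_speed_dfs() and then wakes the requester.
 */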

/*
 * Routine: ml_init_lock_timeout
 * Function: Compute the lock timeout and mutex spin intervals in absolute time
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
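
/*
 * Worked out: NSEC_PER_SEC >> 2 is 250,000,000 ns, so LockTimeOut is the
 * absolute-time equivalent of 250 ms.  The mtxspin boot argument is given
 * in microseconds and is capped at USEC_PER_SEC >> 4 = 62,500 us; without
 * the boot argument, MutexSpin defaults to the equivalent of 10 us.
 */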

/*
 * Routine: init_ast_check
 * Function: Nothing to do on PowerPC; kept for the machine-independent interface
 */
void
init_ast_check(
	__unused processor_t processor)
{}

/*
 * Routine: cause_ast_check
 * Function: Signal another processor to check for pending ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 * Routine: machine_processor_shutdown
 * Function: Switch to the shutdown context and return the previously running thread
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(getPerProc()->old_thread));
}

/*
 * Routine: set_be_bit
 * Function: Set the traceBE flag in the current processor's cpu_flags
 */
int
set_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/*
 * Routine: clr_be_bit
 * Function: Clear the traceBE flag in the current processor's cpu_flags
 */
int
clr_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/*
 * Routine: be_tracing
 * Function: Report whether the traceBE flag is set on the current processor
 */
int
be_tracing(
	void)
{
	return(getPerProc()->cpu_flags & traceBE);
}


/*
 * Routine: ml_mem_backoff
 * Function: Issue sync/isync memory barriers unless the backoff workaround is disabled
 */
void ml_mem_backoff(void) {

	if(warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}
