/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_cpu.h>	/* for cpu_signal_handler() */
#include <ppc/fpu_protos.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>
unsigned int	LockTimeOut = 1250000000;
unsigned int	MutexSpin = 0;

decl_mutex_data(static, mcpus_lock);
unsigned int	mcpus_lock_initialized = 0;
unsigned int	mcpus_state = 0;

uint32_t	warFlags = 0;
#define warDisMBpoff	0x80000000
#define	MAX_CPUS_SET	0x01
#define	MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int	spsLockInit = 0;
extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
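
/*
 * Uniprocessor patch table: when ml_init_max_cpus() finds that only a single
 * logical CPU is configured, each isync/eieio barrier listed below (located
 * in the lock and mutex primitives) is overwritten with 0x60000000, the
 * PowerPC no-op, since those ordering barriers only matter on multiprocessor
 * systems.
 */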
struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
	{NULL,				0x00000000}
};
extern boolean_t pmap_initialized;
/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
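/*
 * A minimal sketch, assuming this PPC platform keeps no bounce-buffer pool
 * and therefore reports an empty range.
 */
{
	*phys_addr = 0;
	*size = 0;
}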
/*
 *	Routine:	ml_static_malloc
 *	Function:	static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr + size);

		return(vaddr);
	}
}
/*
 *	Routine:	ml_static_ptovirt
 *	Function:	physical to virtual on static (V=R) pages
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is map V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}
/*
 *	Routine:	ml_static_mfree
 *	Function:	release static (V=R) pages back to the VM system
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
		}
	}
}
/*
 *	Routine:	ml_vtophys
 *	Function:	virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}
/*
 *	Routine:	ml_install_interrupt_handler
 *	Function:	Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info	*proc_info;
	boolean_t		current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub     = nub;
	proc_info->interrupt_source  = source;
	proc_info->interrupt_target  = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon  = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}
/*
 *	Routine:	ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to
 *			be wired; i.e. no attempt is made to guarantee that the
 *			translations obtained remained valid for
 *			the duration of their use.
 */
vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, pindex, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!mapping_phys_lookup((cur_phys_src >> 12), &pindex) ||
		    !mapping_phys_lookup((cur_phys_dst >> 12), &pindex))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);
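		/*
		 * Tail of the copy loop, reconstructed as a sketch consistent
		 * with the declarations above (advance both addresses, shrink
		 * the remaining size, and accumulate the byte count returned
		 * to the caller); it is an assumed completion, not verbatim.
		 */
		nbytes  += count;
		virtsrc += count;
		virtdst += count;
		size    -= count;
	}

	return nbytes;
}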
/*
 *	Routine:	ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:	ml_get_interrupts_enabled
 *	Function:	Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}
/*
 *	Routine:	ml_at_interrupt_context
 *	Function:	Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t	ret;
	boolean_t	current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}
/*
 *	Routine:	ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void ml_cause_interrupt(void)
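{
	/*
	 * Assumed body: the fake interrupt is presumably generated through
	 * the PPC firmware helper CreateFakeIO(); this is an assumption,
	 * not verified against the original file.
	 */
	CreateFakeIO();
}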
/*
 *	Routine:	ml_thread_policy
 *	Function:	Boost the priority of network workloop threads
 */
void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
/*
 *	Routine:	machine_signal_idle
 *	Function:	Wake an idle (napping or dozing) processor
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze | pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}
/*
 *	Routine:	ml_processor_register
 */
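/*
 * Registration flow: secondary (non-boot) processors get a freshly allocated
 * per_proc area and console buffer, while the boot processor reuses the
 * master entry in PerProcTable; timebase-enable and PowerTune parameters are
 * copied from the caller, nap mode is enabled if supported, and the caller
 * receives the processor structure plus cpu_signal_handler as the IPI entry
 * point.
 */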
kern_return_t
ml_processor_register(
	ml_processor_info_t	*in_processor_info,
	processor_t		*processor_out,
	ipi_handler_t		*ipi_handler)
{
	struct per_proc_info	*proc_info;
	int			donap;
	boolean_t		current_state;
	boolean_t		boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
		goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if (in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

	if ((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if (forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if ((proc_info->pf.Available & pfCanNap)
	    && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if (proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add(&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor,
		               proc_info->cpu_number, processor_pset(master_processor));
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}
/*
 *	Routine:	ml_enable_nap
 *	Function:	Enable or disable nap mode; returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info	*proc_info;
	boolean_t		prev_value;
	boolean_t		current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if (forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if (proc_info->pf.Available & pfCanNap) {			/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Is nap supported on this machine? */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if (proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}
/*
 *	Routine:	ml_init_max_cpus
 *	Function:	Record the maximum number of CPUs and wake any waiters
 */
void
ml_init_max_cpus(unsigned int mcpus)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;
		thread_wakeup((event_t)&mcpus_state);
	}

	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
			           (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}
/*
 *	Routine:	ml_get_max_cpus
 *	Function:	Return the maximum number of CPUs, blocking until it is known
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
		                   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}
/*
 *	This is called from the machine-independent routine cpu_up()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	(void)hw_atomic_add(&machine_info.physical_cpu, 1);
	(void)hw_atomic_add(&machine_info.logical_cpu, 1);
}
/*
 *	This is called from the machine-independent routine cpu_down()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	(void)hw_atomic_sub(&machine_info.physical_cpu, 1);
	(void)hw_atomic_sub(&machine_info.logical_cpu, 1);
}
/*
 *	Routine:	ml_cpu_get_info
 *	Function:	Fill in the caller's ml_cpu_info_t from the master CPU's features
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info	*proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}
/*
 *	Routine:	ml_enable_cache_level
 */
#define l2em 0x80000000
#define l3em 0x80000000
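/*
 * l2em/l3em are the enable bits of L2CR/L3CR (the high-order bit,
 * 0x80000000); the code below tests them to determine whether the cache is
 * currently enabled before deciding to reprogram the control register.
 */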
int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
		}

		return old_mode;
	}

	return -1;
}
/*
 *	Routine:	ml_set_processor_speed
 */
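/*
 * Speed changes are dispatched by power-management type: DPLL parts switch
 * the PLL directly, DFS parts require every CPU to change its own divisor
 * (hence the cross-CPU signalling loop below), and PowerTune parts go
 * through the PowerTune sequence.
 */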
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info	*proc_info;
	kern_return_t		result;
	boolean_t		current_state;
	unsigned int		cpu;
	int			i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != (unsigned)cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;
	}
}
/*
 *	Routine:	ml_set_processor_speed_slave
 *	Function:	Change this CPU's DFS speed and wake the initiating thread
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}
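
/*
 * Timeout values set up below: LockTimeOut corresponds to roughly a quarter
 * of a second expressed in absolute-time units, and MutexSpin defaults to
 * 10us unless overridden by the "mtxspin" boot-arg, which is capped at
 * USEC_PER_SEC>>4.
 */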
/*
 *	Routine:	ml_init_lock_timeout
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC >> 4)
			mtxspin = USEC_PER_SEC >> 4;
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
/*
 *	Routine:	init_ast_check
 */
void
init_ast_check(
	__unused processor_t processor)
{
}
/*
 *	Routine:	cause_ast_check
 *	Function:	Signal the target processor to check for ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}
/*
 *	Routine:	machine_processor_shutdown
 */
thread_t
machine_processor_shutdown(
	__unused thread_t	thread,
	__unused void		(*doshutdown)(processor_t),
	__unused processor_t	processor)
{
	return((thread_t)(getPerProc()->old_thread));
}
void ml_mem_backoff(void) {

	if (warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");
}