/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>		/* kernel_pmap, pmap_extract() */
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>
unsigned int		LockTimeOut = 1250000000;
unsigned int		MutexSpin = 0;

decl_mutex_data(static, mcpus_lock);
unsigned int		mcpus_lock_initialized = 0;
unsigned int		mcpus_state = 0;

uint32_t		warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int		spsLockInit = 0;
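
/*
 * LockTimeOut and MutexSpin are kept in absolute-time (timebase tick)
 * units; ml_init_lock_timeout() below derives the production values from
 * nanosecond constants. mcpus_lock serializes ml_init_max_cpus() against
 * ml_get_max_cpus(), with mcpus_state carrying the MAX_CPUS_SET and
 * MAX_CPUS_WAIT flags.
 */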
extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
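
/*
 * Each extern above labels an isync or eieio barrier instruction inside the
 * low-level lock paths. On a uniprocessor those barriers are unnecessary,
 * so ml_init_max_cpus() overwrites each one with 0x60000000 (the PowerPC
 * nop encoding, ori r0,r0,0) using the table below.
 */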
struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t	patch_up_table[] = {
107 {&hwllckPatch_isync
, 0x60000000},
108 {&hwulckPatch_isync
, 0x60000000},
109 {&hwulckbPatch_isync
, 0x60000000},
110 {&hwlmlckPatch_isync
, 0x60000000},
111 {&hwltlckPatch_isync
, 0x60000000},
112 {&hwcsatomicPatch_isync
, 0x60000000},
113 {&mlckePatch_isync
, 0x60000000},
114 {&mlckPatch_isync
, 0x60000000},
115 {&mltelckPatch_isync
, 0x60000000},
116 {&mltlckPatch_isync
, 0x60000000},
117 {&mulckePatch_isync
, 0x60000000},
118 {&mulckPatch_isync
, 0x60000000},
119 {&slckPatch_isync
, 0x60000000},
120 {&stlckPatch_isync
, 0x60000000},
121 {&sulckPatch_isync
, 0x60000000},
122 {&rwlePatch_isync
, 0x60000000},
123 {&rwlsPatch_isync
, 0x60000000},
124 {&rwlsePatch_isync
, 0x60000000},
125 {&rwlesPatch_isync
, 0x60000000},
126 {&rwtlePatch_isync
, 0x60000000},
127 {&rwtlsPatch_isync
, 0x60000000},
128 {&rwldPatch_isync
, 0x60000000},
129 {&hwulckPatch_eieio
, 0x60000000},
130 {&hwulckPatch_eieio
, 0x60000000},
131 {&mulckPatch_eieio
, 0x60000000},
132 {&mulckePatch_eieio
, 0x60000000},
133 {&sulckPatch_eieio
, 0x60000000},
134 {&rwlesPatch_eieio
, 0x60000000},
135 {&rwldPatch_eieio
, 0x60000000},
137 {&entfsectPatch_isync
, 0x60000000},
138 {&retfsectPatch_isync
, 0x60000000},
139 {&retfsectPatch_eieio
, 0x60000000},
extern boolean_t pmap_initialized;
/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}
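
/*
 * VM_WIMG_IO requests cache-inhibited, guarded WIMG attributes, which is
 * what memory-mapped device space needs on PowerPC.
 */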
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	/* No bounce pool on this platform: report an empty range */
	*phys_addr = 0;
	*size      = 0;
}
/*
 *	Routine:        ml_static_malloc
 *	Function:	static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr + size);
		return(vaddr);
	}
}
/*
 *	Routine:        ml_static_ptovirt
 *	Function:	physical to virtual for statically mapped memory
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ((vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr))
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}
/*
 *	Routine:        ml_static_mfree
 *	Function:	release static memory back to the VM system
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
		}
	}
}
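
/*
 * The >>12 conversions above turn byte addresses into 4KB page frame
 * numbers for vm_page_create(), which hands each released frame back to
 * the free list; vm_page_wire_count is dropped because the page had been
 * accounted as wired while it was static kernel memory.
 */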
/*
 *	Routine:        ml_vtophys
 *	Function:	virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}
/*
 *	Routine:        ml_install_interrupt_handler
 *	Function:	Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info	*proc_info;
	boolean_t		current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub     = nub;
	proc_info->interrupt_source  = source;
	proc_info->interrupt_target  = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon  = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}
/*
 *	Routine:        ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:        ml_get_interrupts_enabled
 *	Function:	Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}
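
/*
 * MSR[EE] is the PowerPC external-interrupt-enable bit, so reading the MSR
 * and masking that bit out is all that is needed here.
 */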
/*
 *	Routine:        ml_at_interrupt_context
 *	Function:	Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t	ret;
	boolean_t	current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}
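
/*
 * istackptr is cleared while the interrupt stack is in use, so a zero value
 * means the caller is running at interrupt context. Interrupts are disabled
 * around the test so the per_proc cannot change underneath us.
 */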
/*
 *	Routine:        ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}
/*
 *	Routine:        ml_thread_policy
 *	Function:	Apply machine-layer scheduling policy hints to a thread
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
/*
 *	Routine:        machine_signal_idle
 *	Function:	Wake an idle processor
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}
/*
 *	Routine:        ml_processor_register
 *	Function:	Register a processor's per_proc structures with the kernel
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t *in_processor_info,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler)
{
	struct per_proc_info	*proc_info;
	int			donap;
	boolean_t		current_state;
	boolean_t		boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if (in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

	if ((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if (forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if ((proc_info->pf.Available & pfCanNap)
	    && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if (proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}
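
/*
 * On the error path the boot processor's per_proc must not be freed, since
 * it comes from PerProcTable rather than cpu_per_proc_alloc(); hence the
 * !boot_processor guard before cpu_per_proc_free().
 */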
/*
 *	Routine:        ml_enable_nap
 *	Function:	Enable or disable processor nap, returning the previous state
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info	*proc_info;
	boolean_t		prev_value;
	boolean_t		current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if (forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if (proc_info->pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Yes, set the nap request */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if (proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}
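
/*
 * Typical usage (an illustrative sketch, not taken from this file): callers
 * save the returned value so the previous nap setting can be restored:
 *
 *	boolean_t was_napping = ml_enable_nap(master_cpu, FALSE);
 *	...critical section that must not nap...
 *	(void) ml_enable_nap(master_cpu, was_napping);
 */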
/*
 *	Routine:        ml_init_max_cpus
 *	Function:	Record the maximum CPU count and apply UP lock patches
 */
void
ml_init_max_cpus(unsigned int mcpus)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}
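
/*
 * bcopy_phys() and sync_cache64() are used because the kernel text is
 * mapped V=R here: the nop is copied over each barrier instruction by
 * physical address, then the instruction cache is flushed so the patched
 * text is what actually executes.
 */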
/*
 *	Routine:        ml_get_max_cpus
 *	Function:	Return max_cpus, blocking until it has been set
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}
/*
 *	This is called from the machine-independent routine cpu_up()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 *	This is called from the machine-independent routine cpu_down()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}
/*
 *	Routine:        ml_cpu_get_info
 *	Function:	Report the master processor's cache and vector features
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info	*proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}
/*
 *	Routine:        ml_enable_cache_level
 *	Function:	Enable or disable the L2/L3 cache, returning the previous mode
 */
#define l2em 0x80000000		/* L2 enable bit in L2CR */
#define l3em 0x80000000		/* L3 enable bit in L3CR */
int
ml_enable_cache_level(int cache_level, int enable)
{
	boolean_t old_mode;
	unsigned long available, ccr;
	struct per_proc_info	*proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}
/*
 *	Routine:        ml_set_processor_speed
 *	Function:	Set the processor speed via the platform's power mode
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info	*proc_info;
	unsigned int		cpu;
	kern_return_t		result;
	boolean_t		current_state;
	unsigned int		i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
		case pmDualPLL:

			ml_set_processor_speed_dpll(speed);
			break;

		case pmDFS:

			for (cpu = 0; cpu < real_ncpus; cpu++) {
				/*
				 * cpu_signal() returns after .5ms if it fails to signal a running cpu
				 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
				 */
				for (i = 200; i > 0; i--) {
					current_state = ml_set_interrupts_enabled(FALSE);
					if (cpu != cpu_number()) {
						if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
							/*
							 * Target cpu is off-line, skip
							 */
							result = KERN_SUCCESS;
						else {
							simple_lock(&spsLock);
							result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
							if (result == KERN_SUCCESS)
								thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
							simple_unlock(&spsLock);
						}
					} else {
						ml_set_processor_speed_dfs(speed);
						result = KERN_SUCCESS;
					}
					(void) ml_set_interrupts_enabled(current_state);
					if (result == KERN_SUCCESS)
						break;
				}
				if (result != KERN_SUCCESS)
					panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
			}
			break;

		case pmPowerTune:

			ml_set_processor_speed_powertune(speed);
			break;

		default:
			break;

	}
	return;
}
/*
 *	Routine:        ml_set_processor_speed_slave
 *	Function:	Apply a DFS speed change on a slave processor
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}
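
/*
 * The DFS handshake: the master signals each slave with SIGPcpureq/CPRQsps
 * and sleeps on spsLock; the slave applies the change here, and the
 * thread_wakeup() releases the waiting master.
 */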
/*
 *	Routine:        ml_init_lock_timeout
 *	Function:	Compute lock timeout and mutex spin durations in abstime
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}
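
/*
 * Worked out: NSEC_PER_SEC>>2 is 250ms, so LockTimeOut defaults to a
 * quarter second of timebase ticks. The mtxspin boot-arg is given in
 * microseconds and capped at USEC_PER_SEC>>4 = 62500us; without the
 * boot-arg the mutex spin duration defaults to 10us.
 */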
/*
 *	Routine:        init_ast_check
 *	Function:	Nothing to do on this platform
 */
void
init_ast_check(
	__unused processor_t processor)
{}
/*
 *	Routine:        cause_ast_check
 *	Function:	Signal another processor to check for ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}
/*
 *	Routine:        machine_processor_shutdown
 *	Function:	Switch to the shutdown context, returning the old thread
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(getPerProc()->old_thread));
}
/*
 *	Routine:        set_be_bit
 *	Function:	Set the BE (branch trace) flag for this processor
 */
void
set_be_bit(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:        clr_be_bit
 *	Function:	Clear the BE (branch trace) flag for this processor
 */
void
clr_be_bit(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
}
/*
 *	Routine:        be_tracing
 *	Function:	Report whether the BE (branch trace) flag is set
 */
int
be_tracing(void)
{
	return(getPerProc()->cpu_flags & traceBE);
}
void ml_mem_backoff(void) {

	if (warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}
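
/*
 * sync forces all prior memory accesses to complete and isync discards any
 * prefetched instructions; together they appear intended as a memory-bus
 * backoff (throttling) point, skipped when warDisMBpoff is set in warFlags.
 */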