/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>
#include <i386/machine_check.h>
struct mtrr_var_range {
        uint64_t  base;         /* in IA32_MTRR_PHYSBASE format */
        uint64_t  mask;         /* in IA32_MTRR_PHYSMASK format */
        uint32_t  refcnt;       /* var ranges reference count */
};

struct mtrr_fix_range {
        uint64_t  types;        /* fixed-range type octet */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;
static struct {
        uint64_t           MTRRcap;
        uint64_t           MTRRdefType;
        mtrr_var_range_t * var_range;
        unsigned int       var_count;
        mtrr_fix_range_t   fix_range[11];
} mtrr_state;
static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()     simple_lock(&mtrr_lock);
#define MTRR_UNLOCK()   simple_unlock(&mtrr_lock);
//#define MTRR_DEBUG 1
#if MTRR_DEBUG
#define DBG(x...)       kprintf(x)
#else
#define DBG(x...)
#endif
/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);
#define CACHE_CONTROL_MTRR      (NULL)
#define CACHE_CONTROL_PAT       ((void *)1)
/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT   0x000000ff
#define IA32_MTRR_DEF_TYPE_FE   0x00000400
#define IA32_MTRR_DEF_TYPE_E    0x00000800

#define IA32_MTRRCAP_VCNT       0x000000ff
#define IA32_MTRRCAP_FIX        0x00000100
#define IA32_MTRRCAP_WC         0x00000400
#define PHYS_BITS_TO_MASK(bits) \
        ((((1ULL << (bits-1)) - 1) << 1) | 1)

/*
 * Default mask for 36 physical address bits, this can
 * change depending on the cpu model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);
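
/*
 * For example, PHYS_BITS_TO_MASK(36) expands to
 * ((((1ULL << 35) - 1) << 1) | 1) == 2^36 - 1 == 0x0000000FFFFFFFFFULL.
 * The two-step shift avoids the undefined behavior of shifting a
 * 64-bit value by 64 when bits == 64.
 */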
#define IA32_MTRR_PHYMASK_VALID  0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK  (mtrr_phys_mask & ~0x0000000000000FFFULL)
#define IA32_MTRR_PHYSBASE_TYPE  0x00000000000000FFULL
/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
        ((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len) \
        (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)          ((x) & (~((x) - 1)))
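
/*
 * Worked example with the default 36-bit mtrr_phys_mask: a 128MB
 * (0x8000000 byte) range gives LEN_TO_MASK(0x8000000) == 0xFF8000000,
 * and MASK_TO_LEN(0xFF8000000) recovers 0x8000000. LSB() isolates the
 * lowest set bit, e.g. LSB(0x5000) == 0x1000, which is how base
 * address alignment is compared against length in mtrr_range_add().
 */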
/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
                range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

                /* bump ref count for firmware configured ranges */
                if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
                        range[i].refcnt = 1;
                else
                        range[i].refcnt = 0;
        }
}
/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
                wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
        }
}
/*
 * Fetch all fixed-range MTRR's. Note MSR offsets are not consecutive.
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
        int i;

        /* assume 11 fix range registers */
        range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
        range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
        range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
        for (i = 0; i < 8; i++)
                range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}
/*
 * Update all fixed-range MTRR's.
 */
static void
mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
{
        int i;

        /* assume 11 fix range registers */
        wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
        wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
        wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
        for (i = 0; i < 8; i++)
                wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}
static boolean_t
mtrr_check_fix_ranges(const struct mtrr_fix_range * range)
{
        int       i;
        boolean_t match = TRUE;

        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

        /* assume 11 fix range registers */
        match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) &&
                range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) &&
                range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
        for (i = 0; match && i < 8; i++) {
                match = range[3 + i].types ==
                        rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
        }

        return match;
}
static boolean_t
mtrr_check_var_ranges(mtrr_var_range_t * range, int count)
{
        int       i;
        boolean_t match = TRUE;

        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

        for (i = 0; match && i < count; i++) {
                match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) &&
                        range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
        }

        return match;
}
#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
        int i;
        int count = (int)(rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT);

        DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
        for (i = 0; i < count; i++) {
                DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
                    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
                    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
                    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
        }

        DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
        DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
        DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
        DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
        DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
        DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
        DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
        DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
        DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
        DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
        DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

        DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
            rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */
/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved, any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
        /* no reason to init more than once */
        if (mtrr_initialized == TRUE)
                return;

        /* check for presence of MTRR feature on the processor */
        if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
                return;  /* no MTRR feature */

        /* use a lock to serialize MTRR changes */
        bzero((void *)&mtrr_state, sizeof(mtrr_state));
        simple_lock_init(&mtrr_lock, 0);

        mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
        mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
        mtrr_state.var_count   = (unsigned int)(mtrr_state.MTRRcap &
                                                IA32_MTRRCAP_VCNT);

        /* allocate storage for variable ranges (can block?) */
        if (mtrr_state.var_count) {
                mtrr_state.var_range = (mtrr_var_range_t *)
                                       kalloc(sizeof(mtrr_var_range_t) *
                                              mtrr_state.var_count);
                if (mtrr_state.var_range == NULL)
                        mtrr_state.var_count = 0;
        }

        /* fetch the initial firmware configured variable ranges */
        if (mtrr_state.var_count)
                mtrr_get_var_ranges(mtrr_state.var_range,
                                    mtrr_state.var_count);

        /* fetch the initial firmware configured fixed ranges */
        if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
                mtrr_get_fix_ranges(mtrr_state.fix_range);

        mtrr_initialized = TRUE;

#if MTRR_DEBUG
        mtrr_msr_dump();  /* dump firmware settings */
#endif
}
/*
 * Performs the Intel recommended procedure for changing the MTRRs
 * in an MP system. Leverage rendezvous mechanism for the required
 * barrier synchronization among all processors. This function is
 * called from the rendezvous IPI handler, and mtrr_update_cpu().
 */
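/*
 * The sequence below follows the MP MTRR-update procedure recommended
 * by the Intel SDM: disable caching (CR0.CD=1) and flush the caches,
 * clear CR4.PGE to flush the TLBs, reprogram the MTRRs (or the PAT),
 * flush caches and TLBs once more, then restore the original CR0/CR4.
 */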
static void
mtrr_update_action(void * cache_control_type)
{
        uintptr_t cr0, cr4;

        cr0 = get_cr0();
        cr4 = get_cr4();

        /* enter no-fill cache mode (CD=1, NW=0) */
        set_cr0((cr0 | CR0_CD) & ~CR0_NW);

        /* flush all caches */
        wbinvd();

        /* clear the PGE flag in CR4; the write also flushes the TLBs */
        set_cr4(cr4 & ~CR4_PGE);

        if (CACHE_CONTROL_PAT == cache_control_type) {
                /* Change PA6 attribute field to WC */
                uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
                DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
                pat &= ~(0x0FULL << 48);
                pat |=  (0x01ULL << 48);
                wrmsr64(MSR_IA32_CR_PAT, pat);
                DBG("CPU%d PAT: is  0x%016llx\n",
                    get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
        }
        else {
                /* disable all MTRR ranges */
                wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                        mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

                /* apply MTRR settings */
                if (mtrr_state.var_count)
                        mtrr_set_var_ranges(mtrr_state.var_range,
                                            mtrr_state.var_count);

                if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
                        mtrr_set_fix_ranges(mtrr_state.fix_range);

                /* enable all MTRR range registers (what if E was not set?) */
                wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                        mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
        }

        /* flush all caches and TLBs a second time */
        wbinvd();

        /* restore normal cache mode */
        set_cr0(cr0);

        /* restore PGE flag; the CR4 write flushes the TLBs once more */
        set_cr4(cr4);

        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
static void
mtrr_update_setup(__unused void * param_not_used)
{
        /* disable interrupts before the first barrier */
        current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
static void
mtrr_update_teardown(__unused void * param_not_used)
{
        /* restore interrupt flag following MTRR changes */
        ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
        if (mtrr_initialized == FALSE)
                return KERN_NOT_SUPPORTED;

        MTRR_LOCK();
        mp_rendezvous(mtrr_update_setup,
                      mtrr_update_action,
                      mtrr_update_teardown, NULL);
        MTRR_UNLOCK();

        return KERN_SUCCESS;
}
/*
 * Verify that a processor has been set with the BSP's MTRR settings. Called
 * during slave processor initialization to check and set MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
        boolean_t match = TRUE;

        if (mtrr_initialized == FALSE)
                return KERN_NOT_SUPPORTED;

        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

        MTRR_LOCK();

        /* Check MSR_IA32_MTRR_DEF_TYPE MSR */
        match = mtrr_state.MTRRdefType == rdmsr64(MSR_IA32_MTRR_DEF_TYPE);

        /* Check MSR_IA32_MTRRCAP MSR */
        if (match) {
                match = mtrr_state.MTRRcap == rdmsr64(MSR_IA32_MTRRCAP);
        }

        /* Check variable ranges */
        if (match && mtrr_state.var_count) {
                match = mtrr_check_var_ranges(mtrr_state.var_range,
                                              mtrr_state.var_count);
        }

        /* Check fixed ranges */
        if (match && (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)) {
                match = mtrr_check_fix_ranges(mtrr_state.fix_range);
        }

        if (match == FALSE) {
                DBG("mtrr_update_cpu() setting MTRR for cpu %d\n",
                    get_cpu_number());
                mtrr_update_action(NULL);
        }

        MTRR_UNLOCK();

        return KERN_SUCCESS;
}
/*
 * Add an MTRR range to associate the physical memory range specified
 * with a given memory caching type.
 */
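/*
 * Typical use is mapping a device aperture for write-combining. A
 * hypothetical example (addresses are illustrative only): map a 128MB
 * framebuffer at 0xC0000000 as WC, and tear it down later with the
 * exact same arguments:
 *
 *      kern_return_t kr;
 *      kr = mtrr_range_add(0xC0000000ULL, 0x08000000ULL,
 *                          MTRR_TYPE_WRITECOMBINE);
 *      ...
 *      kr = mtrr_range_remove(0xC0000000ULL, 0x08000000ULL,
 *                             MTRR_TYPE_WRITECOMBINE);
 */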
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
        mtrr_var_range_t * vr;
        mtrr_var_range_t * free_range;
        kern_return_t      ret = KERN_NO_SPACE;
        int                overlap;
        unsigned int       i;

        DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
            address, length, type);

        if (mtrr_initialized == FALSE) {
                return KERN_NOT_SUPPORTED;
        }

        /* check memory type (GPF exception for undefined types) */
        if ((type != MTRR_TYPE_UNCACHEABLE)  &&
            (type != MTRR_TYPE_WRITECOMBINE) &&
            (type != MTRR_TYPE_WRITETHROUGH) &&
            (type != MTRR_TYPE_WRITEPROTECT) &&
            (type != MTRR_TYPE_WRITEBACK)) {
                return KERN_INVALID_ARGUMENT;
        }

        /* check WC support if requested */
        if ((type == MTRR_TYPE_WRITECOMBINE) &&
            (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
                return KERN_NOT_SUPPORTED;
        }

        /* leave the fix range area below 1MB alone */
        if (address < 0x100000 || mtrr_state.var_count == 0) {
                return KERN_NOT_SUPPORTED;
        }

        /*
         * Length must be a power of 2 given by 2^n, where n >= 12.
         * Base address alignment must be larger than or equal to length.
         */
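        /*
         * For example, length 0x8000 (32KB) at base 0x10000 is accepted,
         * while the same length at base 0x14000 is rejected because
         * LSB(0x14000) == 0x4000 is smaller than the length.
         */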
        if ((length < 0x1000)       ||
            (LSB(length) != length) ||
            (address && (length > LSB(address)))) {
                return KERN_INVALID_ARGUMENT;
        }

        MTRR_LOCK();

        /*
         * Check for overlap and locate a free range.
         */
        for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++) {
                vr = &mtrr_state.var_range[i];

                if (vr->refcnt == 0) {
                        /* free range candidate if no overlaps are found */
                        free_range = vr;
                        continue;
                }

                overlap = var_range_overlap(vr, address, length, type);
                if (overlap > 0) {
                        /*
                         * identical overlap permitted, increment ref count.
                         * no hardware update required.
                         */
                        free_range = vr;
                        break;
                }
                if (overlap < 0) {
                        /* unsupported overlapping of memory types */
                        free_range = NULL;
                        break;
                }
        }

        if (free_range) {
                if (free_range->refcnt++ == 0) {
                        var_range_encode(free_range, address, length, type, 1);
                        mp_rendezvous(mtrr_update_setup,
                                      mtrr_update_action,
                                      mtrr_update_teardown, NULL);
                }
                ret = KERN_SUCCESS;
        }

#if MTRR_DEBUG
        mtrr_msr_dump();
#endif

        MTRR_UNLOCK();

        return ret;
}
/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
        mtrr_var_range_t * vr;
        kern_return_t      result = KERN_FAILURE;
        boolean_t          cpu_update = FALSE;
        unsigned int       i;

        DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
            address, length, type);

        if (mtrr_initialized == FALSE) {
                return KERN_NOT_SUPPORTED;
        }

        MTRR_LOCK();

        for (i = 0; i < mtrr_state.var_count; i++) {
                vr = &mtrr_state.var_range[i];

                if (vr->refcnt &&
                    var_range_overlap(vr, address, length, type) > 0) {
                        /* found specified variable range */
                        if (--mtrr_state.var_range[i].refcnt == 0) {
                                var_range_encode(vr, address, length, type, 0);
                                cpu_update = TRUE;
                        }
                        result = KERN_SUCCESS;
                        break;
                }
        }

        if (cpu_update) {
                mp_rendezvous(mtrr_update_setup,
                              mtrr_update_action,
                              mtrr_update_teardown, NULL);
                result = KERN_SUCCESS;
        }

#if MTRR_DEBUG
        mtrr_msr_dump();
#endif

        MTRR_UNLOCK();

        return result;
}
/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
        range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
                      (type    & (uint32_t)IA32_MTRR_PHYSBASE_TYPE);

        range->mask = LEN_TO_MASK(length) |
                      (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
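
/*
 * Continuing the hypothetical example above: encoding a 128MB WC range
 * at 0xC0000000 yields base == 0xC0000001 (MTRR_TYPE_WRITECOMBINE is 1)
 * and mask == 0xFF8000800, i.e. LEN_TO_MASK(0x8000000) with the valid
 * bit set, assuming the default 36-bit physical address mask.
 */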
static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
        uint64_t v_address, v_length;
        uint32_t v_type;
        int      result = 0;  /* no overlap, or overlap ok */

        v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
        v_type    = (uint32_t)(range->base & IA32_MTRR_PHYSBASE_TYPE);
        v_length  = MASK_TO_LEN(range->mask);

        /* detect range overlap */
        if ((v_address >= address && v_address < (address + length)) ||
            (address >= v_address && address < (v_address + v_length))) {
                if (v_address == address && v_length == length &&
                    v_type == type) {
                        result = 1;  /* identical overlap ok */
                }
                else if (v_type == MTRR_TYPE_UNCACHEABLE &&
                         type   == MTRR_TYPE_UNCACHEABLE) {
                        /* UC ranges can overlap */
                }
                else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
                          type   == MTRR_TYPE_WRITEBACK)  ||
                         (v_type == MTRR_TYPE_WRITEBACK &&
                          type   == MTRR_TYPE_UNCACHEABLE)) {
                        /* UC/WB can overlap - effective type becomes UC */
                }
                else {
                        /* anything else may cause undefined behavior */
                        result = -1;
                }
        }

        return result;
}
/*
 * Initialize PAT (Page Attribute Table).
 */
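/*
 * The PAT MSR holds eight 8-bit page attribute fields, PA0 through PA7,
 * with PAn occupying bits 8n through 8n+7; PA6 therefore sits at bits
 * 48-55, which is why the code below inspects the 0x0F nibble at bit 48
 * to decide whether PA6 already selects write-combining (type 1).
 */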
void
pat_init(void)
{
        boolean_t istate;
        uint64_t  pat;

        if (!(cpuid_features() & CPUID_FEATURE_PAT))
                return;

        istate = ml_set_interrupts_enabled(FALSE);

        pat = rdmsr64(MSR_IA32_CR_PAT);
        DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);

        /* Change PA6 attribute field to WC if required */
        if ((pat & (0x0FULL << 48)) != (0x01ULL << 48)) {
                mtrr_update_action(CACHE_CONTROL_PAT);
        }
        ml_set_interrupts_enabled(istate);
}
extern void mtrr_lapic_cached(void);

void
mtrr_lapic_cached(void)
{
        boolean_t    istate;
        uint32_t     lo;
        uint32_t     hi;
        uint64_t     lapic_pbase;
        uint64_t     base;
        uint64_t     length;
        uint32_t     type;
        unsigned int i;

        /* Find the local APIC physical base address */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);

        DBG("mtrr_lapic_cached() on cpu %d, lapic_pbase: 0x%016llx\n",
            get_cpu_number(), lapic_pbase);

        istate = ml_set_interrupts_enabled(FALSE);

        /*
         * Search for the variable range MTRR mapping the lapic.
         * Flip its type to WC and return.
         */
        for (i = 0; i < mtrr_state.var_count; i++) {
                if (!(mtrr_state.var_range[i].mask & IA32_MTRR_PHYMASK_VALID))
                        continue;
                base   = mtrr_state.var_range[i].base &
                         IA32_MTRR_PHYSBASE_MASK;
                type   = (uint32_t)(mtrr_state.var_range[i].base &
                                    IA32_MTRR_PHYSBASE_TYPE);
                length = MASK_TO_LEN(mtrr_state.var_range[i].mask);
                DBG("%d: base: 0x%016llx size: 0x%016llx type: %d\n",
                    i, base, length, type);
                if (base <= lapic_pbase &&
                    lapic_pbase <= base + length - PAGE_SIZE) {
                        DBG("mtrr_lapic_cached() matched var: %d\n", i);
                        mtrr_state.var_range[i].base &=
                                ~IA32_MTRR_PHYSBASE_TYPE;
                        mtrr_state.var_range[i].base |=
                                MTRR_TYPE_WRITECOMBINE;
                        /* apply the change before re-enabling interrupts */
                        mtrr_update_action(CACHE_CONTROL_MTRR);
                        ml_set_interrupts_enabled(istate);
                        return;
                }
        }

        /*
         * In case we didn't find a covering variable range,
         * we slam WC into the default memory type.
         */
        mtrr_state.MTRRdefType = MTRR_TYPE_WRITECOMBINE;
        mtrr_update_action(CACHE_CONTROL_MTRR);

        ml_set_interrupts_enabled(istate);
}