/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>
struct mtrr_var_range {
    uint64_t  base;     /* in IA32_MTRR_PHYSBASE format */
    uint64_t  mask;     /* in IA32_MTRR_PHYSMASK format */
    uint32_t  refcnt;   /* var ranges reference count */
};

struct mtrr_fix_range {
    uint64_t  types;    /* fixed-range type octet */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;
static struct {
    uint64_t            MTRRcap;
    uint64_t            MTRRdefType;
    mtrr_var_range_t *  var_range;
    unsigned int        var_count;
    mtrr_fix_range_t    fix_range[11];
} mtrr_state;
static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()     simple_lock(&mtrr_lock);
#define MTRR_UNLOCK()   simple_unlock(&mtrr_lock);

#if MTRR_DEBUG
#define DBG(x...)       kprintf(x)
#else
#define DBG(x...)
#endif
/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

#define CACHE_CONTROL_MTRR      (NULL)
#define CACHE_CONTROL_PAT       ((void *)1)
/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT   0x000000ff
#define IA32_MTRR_DEF_TYPE_FE   0x00000400
#define IA32_MTRR_DEF_TYPE_E    0x00000800

#define IA32_MTRRCAP_VCNT       0x000000ff
#define IA32_MTRRCAP_FIX        0x00000100
#define IA32_MTRRCAP_WC         0x00000400
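/*
 * Illustrative decode (hypothetical value, not read from any particular
 * processor): an IA32_MTRRCAP of 0x508 would report VCNT = 8 variable
 * ranges (0x508 & IA32_MTRRCAP_VCNT), fixed-range support (bit 8 set)
 * and write-combining support (bit 10 set).
 */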
#define PHYS_BITS_TO_MASK(bits) \
    ((((1ULL << (bits - 1)) - 1) << 1) | 1)

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the cpu model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);
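/*
 * For example, PHYS_BITS_TO_MASK(36) evaluates to 0x0000000FFFFFFFFF,
 * i.e. the low 36 bits set; the expression is split so that bits == 64
 * never shifts a 64-bit value by 64.
 */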
#define IA32_MTRR_PHYMASK_VALID     0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK     (mtrr_phys_mask & ~0xFFF)
#define IA32_MTRR_PHYSBASE_TYPE     0x00000000000000FFULL
/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
    ((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len) \
    (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)      ((x) & (~((x) - 1)))
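/*
 * Worked example with the default 36-bit mtrr_phys_mask:
 *   LEN_TO_MASK(0x4000000)   = 0xFFC000000   (a 64MB range)
 *   MASK_TO_LEN(0xFFC000000) = 0x4000000     (back to 64MB)
 *   LSB(0x6000)              = 0x2000        (lowest set bit)
 * LSB() is used below to test power-of-2 lengths and base alignment.
 */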
/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
        range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

        /* bump ref count for firmware configured ranges */
        if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
            range[i].refcnt = 1;
        else
            range[i].refcnt = 0;
    }
}
/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
        wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
    }
}
/*
 * Fetch all fixed-range MTRRs. Note MSR offsets are not consecutive.
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
    int i;

    /* assume 11 fix range registers */
    range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
    range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
    range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
    for (i = 0; i < 8; i++)
        range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}
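/*
 * The 11 fixed-range registers cover the first megabyte: FIX64K_00000
 * maps 0x00000-0x7FFFF in eight 64KB slots, FIX16K_80000 and FIX16K_A0000
 * map 0x80000-0xBFFFF in 16KB slots, and the eight FIX4K_C0000..F8000
 * registers map 0xC0000-0xFFFFF in 4KB slots, one type octet per slot.
 */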
/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
{
    int i;

    /* assume 11 fix range registers */
    wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
    wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
    wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
    for (i = 0; i < 8; i++)
        wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}
#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
    int i;
    int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

    DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
    for (i = 0; i < count; i++) {
        DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
            rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
            rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
            MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
    }

    DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
    DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
    DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
    DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
    DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
    DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
    DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
    DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
    DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
    DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
    DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

    DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
        rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */
/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved; any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
    i386_cpu_info_t * infop = cpuid_info();

    /* no reason to init more than once */
    if (mtrr_initialized == TRUE)
        return;

    /* check for presence of MTRR feature on the processor */
    if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
        return;  /* no MTRR feature */

    /* cpu vendor/model specific handling */
    if (!strncmp(infop->cpuid_vendor, CPUID_VID_AMD, sizeof(CPUID_VID_AMD)))
    {
        /* Check for AMD Athlon 64 and Opteron */
        if (cpuid_family() == 0xF)
        {
            uint32_t cpuid_result[4];

            /* check if cpu supports the Address Sizes function */
            do_cpuid(0x80000000, cpuid_result);
            if (cpuid_result[0] >= 0x80000008)
            {
                int bits;

                do_cpuid(0x80000008, cpuid_result);
                DBG("MTRR: AMD 8000_0008 EAX = %08x\n",
                    cpuid_result[0]);

                /*
                 * Function 8000_0008 (Address Sizes) EAX
                 * Bits  7-0 : phys address size
                 * Bits 15-8 : virt address size
                 */
                bits = cpuid_result[0] & 0xFF;
                if ((bits < 36) || (bits > 64))
                {
                    printf("MTRR: bad address size\n");
                    return; /* bogus size */
                }

                mtrr_phys_mask = PHYS_BITS_TO_MASK(bits);
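                /*
                 * Illustration (typical values, not read here): an Athlon 64
                 * commonly reports EAX = 0x00003028 for function 8000_0008,
                 * i.e. 40 physical / 48 virtual address bits, which would
                 * widen mtrr_phys_mask to PHYS_BITS_TO_MASK(40).
                 */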
            }
        }
    }

    /* use a lock to serialize MTRR changes */
    bzero((void *)&mtrr_state, sizeof(mtrr_state));
    simple_lock_init(&mtrr_lock, 0);

    mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
    mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
    mtrr_state.var_count   = mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT;

    /* allocate storage for variable ranges (can block?) */
    if (mtrr_state.var_count) {
        mtrr_state.var_range = (mtrr_var_range_t *)
                               kalloc(sizeof(mtrr_var_range_t) *
                                      mtrr_state.var_count);
        if (mtrr_state.var_range == NULL)
            mtrr_state.var_count = 0;
    }

    /* fetch the initial firmware configured variable ranges */
    if (mtrr_state.var_count)
        mtrr_get_var_ranges(mtrr_state.var_range,
                            mtrr_state.var_count);

    /* fetch the initial firmware configured fixed ranges */
    if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
        mtrr_get_fix_ranges(mtrr_state.fix_range);

    mtrr_initialized = TRUE;

#if MTRR_DEBUG
    mtrr_msr_dump();    /* dump firmware settings */
#endif
}
/*
 * Performs the Intel recommended procedure for changing the MTRRs
 * in a MP system. Leverages the rendezvous mechanism for the required
 * barrier synchronization among all processors. This function is
 * called from the rendezvous IPI handler, and mtrr_update_cpu().
 */
static void
mtrr_update_action(void * cache_control_type)
{
    uint32_t cr0, cr4;
    uint32_t tmp;

    cr0 = get_cr0();
    cr4 = get_cr4();

    /* enter no-fill cache mode */
    tmp = cr0 | CR0_CD;
    tmp &= ~CR0_NW;
    set_cr0(tmp);

    /* flush caches */
    wbinvd();

    /* clear the PGE flag in CR4 */
    if (cr4 & CR4_PGE)
        set_cr4(cr4 & ~CR4_PGE);

    /* flush TLBs */
    flush_tlb();

    if (CACHE_CONTROL_PAT == cache_control_type) {
        /* Change PA6 attribute field to WC */
        uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
        DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
        pat &= ~(0x0FULL << 48);
        pat |=  (0x01ULL << 48);
        wrmsr64(MSR_IA32_CR_PAT, pat);
        DBG("CPU%d PAT: is  0x%016llx\n",
            get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
    }
    else {
        /* disable all MTRR ranges */
        wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

        /* apply MTRR settings */
        if (mtrr_state.var_count)
            mtrr_set_var_ranges(mtrr_state.var_range,
                                mtrr_state.var_count);

        if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
            mtrr_set_fix_ranges(mtrr_state.fix_range);

        /* enable all MTRR range registers (what if E was not set?) */
        wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
    }

    /* flush all caches and TLBs a second time */
    wbinvd();
    flush_tlb();

    /* restore normal cache mode */
    set_cr0(cr0);

    /* restore PGE flag */
    if (cr4 & CR4_PGE)
        set_cr4(cr4);

    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
static void
mtrr_update_setup(__unused void * param_not_used)
{
    /* disable interrupts before the first barrier */
    current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
static void
mtrr_update_teardown(__unused void * param_not_used)
{
    /* restore interrupt flag following MTRR changes */
    ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
    if (mtrr_initialized == FALSE)
        return KERN_NOT_SUPPORTED;

    MTRR_LOCK();
    mp_rendezvous(mtrr_update_setup,
                  mtrr_update_action,
                  mtrr_update_teardown, NULL);
    MTRR_UNLOCK();

    return KERN_SUCCESS;
}
/*
 * Update a single CPU with the current MTRR settings. Can be called
 * during slave processor initialization to mirror the MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
    if (mtrr_initialized == FALSE)
        return KERN_NOT_SUPPORTED;

    MTRR_LOCK();
    mtrr_update_setup(NULL);
    mtrr_update_action(NULL);
    mtrr_update_teardown(NULL);
    MTRR_UNLOCK();

    return KERN_SUCCESS;
}
/*
 * Add a MTRR range to associate the physical memory range specified
 * with a given memory caching type.
 */
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
    mtrr_var_range_t * vr;
    mtrr_var_range_t * free_range;
    kern_return_t      ret = KERN_NO_SPACE;
    int                overlap;
    unsigned int       i;

    DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
        address, length, type);

    if (mtrr_initialized == FALSE) {
        return KERN_NOT_SUPPORTED;
    }

    /* check memory type (GPF exception for undefined types) */
    if ((type != MTRR_TYPE_UNCACHEABLE)  &&
        (type != MTRR_TYPE_WRITECOMBINE) &&
        (type != MTRR_TYPE_WRITETHROUGH) &&
        (type != MTRR_TYPE_WRITEPROTECT) &&
        (type != MTRR_TYPE_WRITEBACK)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* check WC support if requested */
    if ((type == MTRR_TYPE_WRITECOMBINE) &&
        (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
        return KERN_NOT_SUPPORTED;
    }

    /* leave the fix range area below 1MB alone */
    if (address < 0x100000 || mtrr_state.var_count == 0) {
        return KERN_NOT_SUPPORTED;
    }
    /*
     * Length must be a power of 2 given by 2^n, where n >= 12.
     * Base address alignment must be greater than or equal to length.
     */
    if ((length < 0x1000)       ||
        (LSB(length) != length) ||
        (address && (length > LSB(address)))) {
        return KERN_INVALID_ARGUMENT;
    }
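    /*
     * For instance (illustrative values): address = 0x10000000 with
     * length = 0x4000000 passes, since LSB(0x4000000) == 0x4000000 and
     * LSB(0x10000000) == 0x10000000 >= length; length = 0x5000000 fails
     * the power-of-2 test because LSB(0x5000000) == 0x1000000.
     */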
    MTRR_LOCK();

    /*
     * Check for overlap and locate a free range.
     */
    for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
    {
        vr = &mtrr_state.var_range[i];

        if (vr->refcnt == 0) {
            /* free range candidate if no overlaps are found */
            free_range = vr;
            continue;
        }

        overlap = var_range_overlap(vr, address, length, type);
        if (overlap > 0) {
            /*
             * identical overlap permitted, increment ref count.
             * no hardware update required.
             */
            free_range = vr;
            break;
        }
        if (overlap < 0) {
            /* unsupported overlapping of memory types */
            free_range = NULL;
            break;
        }
    }

    if (free_range) {
        if (free_range->refcnt++ == 0) {
            var_range_encode(free_range, address, length, type, 1);
            mp_rendezvous(mtrr_update_setup,
                          mtrr_update_action,
                          mtrr_update_teardown, NULL);
        }
        ret = KERN_SUCCESS;
    }

    MTRR_UNLOCK();

    return ret;
}
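/*
 * Illustrative call (hypothetical addresses, not part of this file): a
 * video driver could map a 64MB frame buffer at physical 0xD0000000 as
 * write-combining with
 *     mtrr_range_add(0xD0000000ULL, 0x04000000ULL, MTRR_TYPE_WRITECOMBINE);
 * and undo it later with a matching mtrr_range_remove() call.
 */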
/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
    mtrr_var_range_t * vr;
    int                result = KERN_FAILURE;
    int                cpu_update = FALSE;
    unsigned int       i;

    DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
        address, length, type);

    if (mtrr_initialized == FALSE) {
        return KERN_NOT_SUPPORTED;
    }

    MTRR_LOCK();

    for (i = 0; i < mtrr_state.var_count; i++) {
        vr = &mtrr_state.var_range[i];

        if (vr->refcnt &&
            var_range_overlap(vr, address, length, type) > 0) {
            /* found specified variable range */
            if (--mtrr_state.var_range[i].refcnt == 0) {
                var_range_encode(vr, address, length, type, 0);
                cpu_update = TRUE;
            }
            result = KERN_SUCCESS;
            break;
        }
    }

    if (cpu_update) {
        mp_rendezvous(mtrr_update_setup,
                      mtrr_update_action,
                      mtrr_update_teardown, NULL);
        result = KERN_SUCCESS;
    }

    MTRR_UNLOCK();

    return result;
}
/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
    range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
                  (type    & IA32_MTRR_PHYSBASE_TYPE);

    range->mask = LEN_TO_MASK(length) |
                  (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
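/*
 * Worked example with the default 36-bit physical mask: encoding
 * address 0xD0000000, length 0x4000000, type MTRR_TYPE_WRITECOMBINE
 * (type value 1) with valid = 1 yields base = 0xD0000001 and
 * mask = 0xFFC000800.
 */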
static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
    uint64_t  v_address, v_length;
    uint32_t  v_type;
    int       result = 0;  /* no overlap, or overlap ok */

    v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
    v_type    = range->base & IA32_MTRR_PHYSBASE_TYPE;
    v_length  = MASK_TO_LEN(range->mask);

    /* detect range overlap */
    if ((v_address >= address && v_address < (address + length)) ||
        (address >= v_address && address < (v_address + v_length))) {

        if (v_address == address && v_length == length && v_type == type)
            result = 1; /* identical overlap ok */
        else if (v_type == MTRR_TYPE_UNCACHEABLE &&
                 type   == MTRR_TYPE_UNCACHEABLE) {
            /* UC ranges can overlap */
        }
        else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
                  type   == MTRR_TYPE_WRITEBACK)  ||
                 (v_type == MTRR_TYPE_WRITEBACK &&
                  type   == MTRR_TYPE_UNCACHEABLE)) {
            /* UC/WB can overlap - effective type becomes UC */
        }
        else {
            /* anything else may cause undefined behavior */
            result = -1;
        }
    }

    return result;
}
/*
 * Initialize PAT (Page Attribute Table)
 */
void
pat_init(void)
{
    if (cpuid_features() & CPUID_FEATURE_PAT)
    {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
        mtrr_update_action(CACHE_CONTROL_PAT);
        ml_set_interrupts_enabled(istate);
    }
}
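/*
 * Effect on the PAT MSR, assuming the architectural power-on default of
 * 0x0007040600070406: mtrr_update_action(CACHE_CONTROL_PAT) rewrites the
 * PA6 byte (bits 55:48) from 0x07 (UC-) to 0x01 (WC), leaving
 * 0x0001040600070406, so page tables can select write-combining via PAT
 * index 6.
 */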