/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>

struct mtrr_var_range {
	uint64_t  base;		/* in IA32_MTRR_PHYSBASE format */
	uint64_t  mask;		/* in IA32_MTRR_PHYSMASK format */
	uint32_t  refcnt;	/* var ranges reference count */
};

struct mtrr_fix_range {
	uint64_t  types;	/* fixed-range type octet */
};
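
/*
 * Note: the 11 fixed-range MTRRs together describe the first 1MB of
 * physical memory, each holding eight one-byte type fields:
 * FIX64K_00000 covers 0x00000-0x7FFFF in 64KB steps, FIX16K_80000 and
 * FIX16K_A0000 cover 0x80000-0xBFFFF in 16KB steps, and the eight
 * FIX4K_C0000..FIX4K_F8000 registers cover 0xC0000-0xFFFFF in 4KB steps.
 */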

typedef struct mtrr_var_range	mtrr_var_range_t;
typedef struct mtrr_fix_range	mtrr_fix_range_t;

static struct {
	uint64_t		MTRRcap;
	uint64_t		MTRRdefType;
	mtrr_var_range_t *	var_range;
	unsigned int		var_count;
	mtrr_fix_range_t	fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()	simple_lock(&mtrr_lock);
#define MTRR_UNLOCK()	simple_unlock(&mtrr_lock);

#if MTRR_DEBUG
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

#define CACHE_CONTROL_MTRR	(NULL)
#define CACHE_CONTROL_PAT	((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT		0x000000ff
#define IA32_MTRR_DEF_TYPE_FE		0x00000400
#define IA32_MTRR_DEF_TYPE_E		0x00000800

#define IA32_MTRRCAP_VCNT		0x000000ff
#define IA32_MTRRCAP_FIX		0x00000100
#define IA32_MTRRCAP_WC			0x00000400

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
	((((1ULL << (bits-1)) - 1) << 1) | 1)

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the CPU model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);
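
/*
 * For example, with the default 36 physical address bits:
 *   PHYS_BITS_TO_MASK(36) == 0x0000000FFFFFFFFFULL
 * i.e. a mask covering physical addresses 0 through 2^36 - 1.
 */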

#define IA32_MTRR_PHYMASK_VALID		0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK		(mtrr_phys_mask & ~0x0000000000000FFFULL)
#define IA32_MTRR_PHYSBASE_TYPE		0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
	((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len)  \
	(~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)		((x) & (~((x) - 1)))
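
/*
 * Worked example (assuming the default 36-bit mtrr_phys_mask): a 128MB
 * range has length 0x8000000, so
 *   LEN_TO_MASK(0x8000000) == 0x0000000FF8000000ULL
 * and, going the other way,
 *   MASK_TO_LEN(0x0000000FF8000000ULL) == 0x8000000.
 * LSB(x) isolates the lowest set bit, e.g. LSB(0x6000) == 0x2000.
 */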

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
		range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

		/* bump ref count for firmware configured ranges */
		if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
			range[i].refcnt = 1;
		else
			range[i].refcnt = 0;
	}
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
		wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
	}
}

/*
 * Fetch all fixed-range MTRRs. Note MSR offsets are not consecutive.
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
	int i;

	/* assume 11 fix range registers */
	range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
	range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
	range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; i < 8; i++)
		range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
{
	int i;

	/* assume 11 fix range registers */
	wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
	for (i = 0; i < 8; i++)
		wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

static boolean_t
mtrr_check_fix_ranges(const struct mtrr_fix_range * range)
{
	int		i;
	boolean_t	match = TRUE;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	/* assume 11 fix range registers */
	match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) &&
		range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) &&
		range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; match && i < 8; i++) {
		match = range[3 + i].types ==
			rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
	}

	return match;
}

static boolean_t
mtrr_check_var_ranges(mtrr_var_range_t * range, int count)
{
	int		i;
	boolean_t	match = TRUE;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	for (i = 0; match && i < count; i++) {
		match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) &&
			range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
	}

	return match;
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
	int i;
	int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

	DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
	for (i = 0; i < count; i++) {
		DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
		    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
		    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
		    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
	}
	DBG("\n");

	DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
	DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
	DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
	DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
	DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
	DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
	DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
	DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
	DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
	DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
	DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

	DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
	    rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved, and any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
	/* no reason to init more than once */
	if (mtrr_initialized == TRUE)
		return;

	/* check for presence of MTRR feature on the processor */
	if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
		return;		/* no MTRR feature */

	/* use a lock to serialize MTRR changes */
	bzero((void *)&mtrr_state, sizeof(mtrr_state));
	simple_lock_init(&mtrr_lock, 0);

	mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
	mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
	mtrr_state.var_count   = (unsigned int)(mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT);

	/* allocate storage for variable ranges (can block?) */
	if (mtrr_state.var_count) {
		mtrr_state.var_range = (mtrr_var_range_t *)
				       kalloc(sizeof(mtrr_var_range_t) *
					      mtrr_state.var_count);
		if (mtrr_state.var_range == NULL)
			mtrr_state.var_count = 0;
	}

	/* fetch the initial firmware configured variable ranges */
	if (mtrr_state.var_count)
		mtrr_get_var_ranges(mtrr_state.var_range,
				    mtrr_state.var_count);

	/* fetch the initial firmware configured fixed ranges */
	if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
		mtrr_get_fix_ranges(mtrr_state.fix_range);

	mtrr_initialized = TRUE;

#if MTRR_DEBUG
	mtrr_msr_dump();	/* dump firmware settings */
#endif
}

/*
 * Performs the Intel recommended procedure for changing the MTRRs
 * in an MP system. Leverages the rendezvous mechanism for the required
 * barrier synchronization among all processors. This function is
 * called from the rendezvous IPI handler, and from mtrr_update_cpu().
 */
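/*
 * Outline of the sequence below (cf. Intel SDM, "MTRR Considerations in
 * MP Systems"): with interrupts disabled by the caller (mtrr_update_setup()
 * in the rendezvous path), enter no-fill cache mode (CR0.CD=1, CR0.NW=0),
 * flush caches with wbinvd, clear CR4.PGE and flush TLBs, write the MTRR
 * (or PAT) MSRs, flush caches and TLBs again, then restore CR0 and CR4.
 */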
static void
mtrr_update_action(void * cache_control_type)
{
	uintptr_t cr0, cr4;
	uintptr_t tmp;

	cr0 = get_cr0();
	cr4 = get_cr4();

	/* enter no-fill cache mode */
	tmp = cr0 | CR0_CD;
	tmp &= ~CR0_NW;
	set_cr0(tmp);

	/* flush caches */
	wbinvd();

	/* clear the PGE flag in CR4 */
	if (cr4 & CR4_PGE)
		set_cr4(cr4 & ~CR4_PGE);

	/* flush TLBs */
	flush_tlb_raw();

	if (CACHE_CONTROL_PAT == cache_control_type) {
		/* Change PA6 attribute field to WC */
		uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
		DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
		pat &= ~(0x0FULL << 48);
		pat |=  (0x01ULL << 48);
		wrmsr64(MSR_IA32_CR_PAT, pat);
		DBG("CPU%d PAT: is 0x%016llx\n",
		    get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
	}
	else {
		/* disable all MTRR ranges */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
			mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

		/* apply MTRR settings */
		if (mtrr_state.var_count)
			mtrr_set_var_ranges(mtrr_state.var_range,
					    mtrr_state.var_count);

		if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
			mtrr_set_fix_ranges(mtrr_state.fix_range);

		/* enable all MTRR range registers (what if E was not set?) */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
			mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
	}

	/* flush all caches and TLBs a second time */
	wbinvd();
	flush_tlb_raw();

	/* restore normal cache mode */
	set_cr0(cr0);

	/* restore PGE flag */
	if (cr4 & CR4_PGE)
		set_cr4(cr4);

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	MTRR_LOCK();
	mp_rendezvous(mtrr_update_setup,
		      mtrr_update_action,
		      mtrr_update_teardown, NULL);
	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Verify that a processor has been set with the BSP's MTRR settings. Called
 * during slave processor initialization to check and set MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
	boolean_t match = TRUE;

	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	MTRR_LOCK();

	/* Check MSR_IA32_MTRR_DEF_TYPE MSR */
	match = mtrr_state.MTRRdefType == rdmsr64(MSR_IA32_MTRR_DEF_TYPE);

	/* Check MSR_IA32_MTRRCAP MSR */
	if (match) {
		match = mtrr_state.MTRRcap == rdmsr64(MSR_IA32_MTRRCAP);
	}

	/* Check variable ranges */
	if (match && mtrr_state.var_count) {
		match = mtrr_check_var_ranges(mtrr_state.var_range,
					      mtrr_state.var_count);
	}

	/* Check fixed ranges */
	if (match && (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)) {
		match = mtrr_check_fix_ranges(mtrr_state.fix_range);
	}

#if MTRR_DEBUG
	if (!match)
		mtrr_msr_dump();
#endif
	if (!match) {
		DBG("mtrr_update_cpu() setting MTRR for cpu %d\n",
		    get_cpu_number());
		mtrr_update_action(NULL);
	}
#if MTRR_DEBUG
	if (!match)
		mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Add an MTRR range to associate the specified physical memory range
 * with a given memory caching type.
 */
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t * vr;
	mtrr_var_range_t * free_range;
	kern_return_t      ret = KERN_NO_SPACE;
	int                overlap;
	unsigned int       i;

	DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	/* check memory type (GPF exception for undefined types) */
	if ((type != MTRR_TYPE_UNCACHEABLE) &&
	    (type != MTRR_TYPE_WRITECOMBINE) &&
	    (type != MTRR_TYPE_WRITETHROUGH) &&
	    (type != MTRR_TYPE_WRITEPROTECT) &&
	    (type != MTRR_TYPE_WRITEBACK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* check WC support if requested */
	if ((type == MTRR_TYPE_WRITECOMBINE) &&
	    (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/* leave the fix range area below 1MB alone */
	if (address < 0x100000 || mtrr_state.var_count == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/*
	 * Length must be a power of 2 given by 2^n, where n >= 12.
	 * Base address alignment must be larger than or equal to length.
	 */
	if ((length < 0x1000) ||
	    (LSB(length) != length) ||
	    (address && (length > LSB(address)))) {
		return KERN_INVALID_ARGUMENT;
	}

	MTRR_LOCK();

	/*
	 * Check for overlap and locate a free range.
	 */
	for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
	{
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt == 0) {
			/* free range candidate if no overlaps are found */
			free_range = vr;
			continue;
		}

		overlap = var_range_overlap(vr, address, length, type);
		if (overlap > 0) {
			/*
			 * identical overlap permitted, increment ref count.
			 * no hardware update required.
			 */
			free_range = vr;
			break;
		}
		if (overlap < 0) {
			/* unsupported overlapping of memory types */
			free_range = NULL;
			break;
		}
	}

	if (free_range) {
		if (free_range->refcnt++ == 0) {
			var_range_encode(free_range, address, length, type, 1);
			mp_rendezvous(mtrr_update_setup,
				      mtrr_update_action,
				      mtrr_update_teardown, NULL);
		}
		ret = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return ret;
}
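
/*
 * Illustrative (hypothetical) usage from a driver: mark a 128MB,
 * 128MB-aligned frame buffer as write-combined, and later undo it with
 * the same arguments. The address below is made up for the example.
 *
 *	kern_return_t kr;
 *	kr = mtrr_range_add(0xC0000000ULL, 0x08000000ULL,
 *			    MTRR_TYPE_WRITECOMBINE);
 *	...
 *	if (kr == KERN_SUCCESS)
 *		(void) mtrr_range_remove(0xC0000000ULL, 0x08000000ULL,
 *					 MTRR_TYPE_WRITECOMBINE);
 */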

/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t * vr;
	int                result = KERN_FAILURE;
	int                cpu_update = 0;
	unsigned int       i;

	DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	MTRR_LOCK();

	for (i = 0; i < mtrr_state.var_count; i++) {
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt &&
		    var_range_overlap(vr, address, length, type) > 0) {
			/* found specified variable range */
			if (--mtrr_state.var_range[i].refcnt == 0) {
				var_range_encode(vr, address, length, type, 0);
				cpu_update = 1;
			}
			result = KERN_SUCCESS;
			break;
		}
	}

	if (cpu_update) {
		mp_rendezvous(mtrr_update_setup,
			      mtrr_update_action,
			      mtrr_update_teardown, NULL);
		result = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return result;
}

/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
		 uint64_t length, uint32_t type, int valid)
{
	range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
		      (type & (uint32_t)IA32_MTRR_PHYSBASE_TYPE);

	range->mask = LEN_TO_MASK(length) |
		      (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
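
/*
 * For example (with the default 36-bit physical mask), encoding a 128MB
 * write-combined range at 0xC0000000 yields
 *   base == 0x00000000C0000001  (address | MTRR_TYPE_WRITECOMBINE)
 *   mask == 0x0000000FF8000800  (LEN_TO_MASK(0x8000000) | valid bit)
 */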

static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
		  uint64_t length, uint32_t type)
{
	uint64_t v_address, v_length;
	uint32_t v_type;
	int result = 0;		/* no overlap, or overlap ok */

	v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
	v_type    = (uint32_t)(range->base & IA32_MTRR_PHYSBASE_TYPE);
	v_length  = MASK_TO_LEN(range->mask);

	/* detect range overlap */
	if ((v_address >= address && v_address < (address + length)) ||
	    (address >= v_address && address < (v_address + v_length))) {

		if (v_address == address && v_length == length && v_type == type)
			result = 1;	/* identical overlap ok */
		else if (v_type == MTRR_TYPE_UNCACHEABLE &&
			 type   == MTRR_TYPE_UNCACHEABLE) {
			/* UC ranges can overlap */
		}
		else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
			  type   == MTRR_TYPE_WRITEBACK) ||
			 (v_type == MTRR_TYPE_WRITEBACK &&
			  type   == MTRR_TYPE_UNCACHEABLE)) {
			/* UC/WB can overlap - effective type becomes UC */
		}
		else {
			/* anything else may cause undefined behavior */
			result = -1;
		}
	}

	return result;
}

/*
 * Initialize PAT (Page Attribute Table)
 */
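/*
 * The IA32_PAT MSR holds eight page attribute fields, one byte each, PA0
 * in bits 7:0 through PA7 in bits 63:56.  mtrr_update_action
 * (CACHE_CONTROL_PAT) rewrites the PA6 field (bits 55:48) with the WC
 * encoding (0x01), so page tables can select write-combining via PA6.
 */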
void
pat_init(void)
{
	boolean_t istate;
	uint64_t  pat;

	if (!(cpuid_features() & CPUID_FEATURE_PAT))
		return;

	istate = ml_set_interrupts_enabled(FALSE);

	pat = rdmsr64(MSR_IA32_CR_PAT);
	DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);

	/* Change PA6 attribute field to WC if it is not already WC */
	if ((pat & (0x0FULL << 48)) != (0x01ULL << 48)) {
		mtrr_update_action(CACHE_CONTROL_PAT);
	}
	ml_set_interrupts_enabled(istate);
}