/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>

struct mtrr_var_range {
	uint64_t	base;	/* in IA32_MTRR_PHYSBASE format */
	uint64_t	mask;	/* in IA32_MTRR_PHYSMASK format */
	uint32_t	refcnt;	/* var ranges reference count */
};

struct mtrr_fix_range {
	uint64_t	types;	/* fixed-range type octet */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;

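/*
 * Master copy of the machine's MTRR state, captured from the boot
 * processor by mtrr_init() and re-applied to every other processor
 * by mtrr_update_action().
 */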
static struct {
	uint64_t		MTRRcap;
	uint64_t		MTRRdefType;
	mtrr_var_range_t *	var_range;
	unsigned int		var_count;
	mtrr_fix_range_t	fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()	simple_lock(&mtrr_lock);
#define MTRR_UNLOCK()	simple_unlock(&mtrr_lock);

#if MTRR_DEBUG
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

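/*
 * Parameter passed to mtrr_update_action() through mp_rendezvous():
 * NULL selects MTRR reprogramming, non-NULL selects PAT reprogramming.
 */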
#define CACHE_CONTROL_MTRR	(NULL)
#define CACHE_CONTROL_PAT	((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT	0x000000ff
#define IA32_MTRR_DEF_TYPE_FE	0x00000400
#define IA32_MTRR_DEF_TYPE_E	0x00000800

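/*
 * IA32_MTRRCAP fields: VCNT is the number of variable-range register
 * pairs, FIX indicates fixed-range MTRR support, and WC indicates
 * write-combining memory type support.
 */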
#define IA32_MTRRCAP_VCNT	0x000000ff
#define IA32_MTRRCAP_FIX	0x00000100
#define IA32_MTRRCAP_WC		0x00000400

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
	((((1ULL << (bits-1)) - 1) << 1) | 1)
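/* e.g. PHYS_BITS_TO_MASK(36) == 0x0000000FFFFFFFFFULL */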

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the cpu model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);

#define IA32_MTRR_PHYMASK_VALID		0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK		(mtrr_phys_mask & ~0x0000000000000FFFULL)
#define IA32_MTRR_PHYSBASE_TYPE		0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
	((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len) \
	(~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)		((x) & (~((x) - 1)))
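
/*
 * Both conversions assume a power-of-2 length and a contiguous mask.
 * For example, with 36 physical address bits:
 *	LEN_TO_MASK(0x08000000)  == 0xFF8000000
 *	MASK_TO_LEN(0xFF8000000) == 0x08000000
 * LSB(x) isolates the lowest set bit of x; mtrr_range_add() uses it
 * to validate length and base address alignment.
 */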

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
		range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

		/* bump ref count for firmware configured ranges */
		if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
			range[i].refcnt = 1;
		else
			range[i].refcnt = 0;
	}
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
		wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
	}
}

/*
 * Fetch all fixed-range MTRRs. Note that the MSR offsets are not consecutive.
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
	int i;

	/* assume 11 fix range registers */
	range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
	range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
	range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; i < 8; i++)
		range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
{
	int i;

	/* assume 11 fix range registers */
	wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
	for (i = 0; i < 8; i++)
		wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

static boolean_t
mtrr_check_fix_ranges(const struct mtrr_fix_range * range)
{
	int		i;
	boolean_t	match = TRUE;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	/* assume 11 fix range registers */
	match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) &&
		range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) &&
		range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; match && i < 8; i++) {
		match = range[3 + i].types ==
			rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
	}

	return match;
}

static boolean_t
mtrr_check_var_ranges(mtrr_var_range_t * range, int count)
{
	int		i;
	boolean_t	match = TRUE;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	for (i = 0; match && i < count; i++) {
		match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) &&
			range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
	}

	return match;
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
	int i;
	int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

	DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
	for (i = 0; i < count; i++) {
		DBG(" %02x 0x%016llx 0x%016llx 0x%llx\n", i,
		    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
		    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
		    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
	}
	DBG("\n");

	DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
	DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
	DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
	DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
	DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
	DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
	DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
	DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
	DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
	DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
	DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

	DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
	    rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved; any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
	/* no reason to init more than once */
	if (mtrr_initialized == TRUE)
		return;

	/* check for presence of MTRR feature on the processor */
	if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
		return;  /* no MTRR feature */

	/* use a lock to serialize MTRR changes */
	bzero((void *)&mtrr_state, sizeof(mtrr_state));
	simple_lock_init(&mtrr_lock, 0);

	mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
	mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
	mtrr_state.var_count   = (unsigned int)(mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT);

	/* allocate storage for variable ranges (can block?) */
	if (mtrr_state.var_count) {
		mtrr_state.var_range = (mtrr_var_range_t *)
		                       kalloc(sizeof(mtrr_var_range_t) *
		                              mtrr_state.var_count);
		if (mtrr_state.var_range == NULL)
			mtrr_state.var_count = 0;
	}

	/* fetch the initial firmware configured variable ranges */
	if (mtrr_state.var_count)
		mtrr_get_var_ranges(mtrr_state.var_range,
				    mtrr_state.var_count);

	/* fetch the initial firmware configured fixed ranges */
	if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
		mtrr_get_fix_ranges(mtrr_state.fix_range);

	mtrr_initialized = TRUE;

#if MTRR_DEBUG
	mtrr_msr_dump();	/* dump firmware settings */
#endif
}

/*
 * Performs the Intel-recommended procedure for changing the MTRRs
 * in an MP system, leveraging the rendezvous mechanism for the required
 * barrier synchronization among all processors. This function is
 * called from the rendezvous IPI handler and from mtrr_update_cpu().
 */
static void
mtrr_update_action(void * cache_control_type)
{
	uintptr_t cr0, cr4;
	uintptr_t tmp;

	cr0 = get_cr0();
	cr4 = get_cr4();

	/* enter no-fill cache mode */
	tmp = cr0 | CR0_CD;
	tmp &= ~CR0_NW;
	set_cr0(tmp);

	/* flush caches */
	wbinvd();

	/* clear the PGE flag in CR4 */
	if (cr4 & CR4_PGE)
		set_cr4(cr4 & ~CR4_PGE);

	/* flush TLBs */
	flush_tlb();

	if (CACHE_CONTROL_PAT == cache_control_type) {
		/* Change PA6 attribute field to WC */
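		/* PA6 occupies bits 55:48 of IA32_PAT; encoding 0x01 is WC */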
		uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
		DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
		pat &= ~(0x0FULL << 48);
		pat |=  (0x01ULL << 48);
		wrmsr64(MSR_IA32_CR_PAT, pat);
		DBG("CPU%d PAT: is 0x%016llx\n",
		    get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
	}
	else {
		/* disable all MTRR ranges */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
			mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

		/* apply MTRR settings */
		if (mtrr_state.var_count)
			mtrr_set_var_ranges(mtrr_state.var_range,
					    mtrr_state.var_count);

		if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
			mtrr_set_fix_ranges(mtrr_state.fix_range);

		/* enable all MTRR range registers (what if E was not set?) */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
			mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
	}

	/* flush all caches and TLBs a second time */
	wbinvd();
	flush_tlb();

	/* restore normal cache mode */
	set_cr0(cr0);

	/* restore PGE flag */
	if (cr4 & CR4_PGE)
		set_cr4(cr4);

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	MTRR_LOCK();
	mp_rendezvous(mtrr_update_setup,
		      mtrr_update_action,
		      mtrr_update_teardown, NULL);
	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Verify that a processor has been set with the boot processor's MTRR
 * settings. Called during slave processor initialization to check, and
 * if necessary apply, the MTRR settings captured on the boot processor
 * by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
	boolean_t match = TRUE;

	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);

	MTRR_LOCK();

	/* Check MSR_IA32_MTRR_DEF_TYPE MSR */
	match = mtrr_state.MTRRdefType == rdmsr64(MSR_IA32_MTRR_DEF_TYPE);

	/* Check MSR_IA32_MTRRCAP MSR */
	if (match) {
		match = mtrr_state.MTRRcap == rdmsr64(MSR_IA32_MTRRCAP);
	}

	/* Check variable ranges */
	if (match && mtrr_state.var_count) {
		match = mtrr_check_var_ranges(mtrr_state.var_range,
					      mtrr_state.var_count);
	}

	/* Check fixed ranges */
	if (match && (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)) {
		match = mtrr_check_fix_ranges(mtrr_state.fix_range);
	}

#if MTRR_DEBUG
	if (!match)
		mtrr_msr_dump();
#endif
	if (!match) {
		DBG("mtrr_update_cpu() setting MTRR for cpu %d\n",
		    get_cpu_number());
		mtrr_update_action(NULL);
	}
#if MTRR_DEBUG
	if (!match)
		mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Add an MTRR range to associate the specified physical memory range
 * with a given memory caching type.
 */
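/*
 * Illustrative usage only (the address is hypothetical): to map a 128MB
 * framebuffer at 0xC0000000 as write-combining, a caller might do
 *
 *	mtrr_range_add(0xC0000000ULL, 0x08000000ULL, MTRR_TYPE_WRITECOMBINE);
 */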
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t *	vr;
	mtrr_var_range_t *	free_range;
	kern_return_t		ret = KERN_NO_SPACE;
	int			overlap;
	unsigned int		i;

	DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	/* check memory type (GPF exception for undefined types) */
	if ((type != MTRR_TYPE_UNCACHEABLE)  &&
	    (type != MTRR_TYPE_WRITECOMBINE) &&
	    (type != MTRR_TYPE_WRITETHROUGH) &&
	    (type != MTRR_TYPE_WRITEPROTECT) &&
	    (type != MTRR_TYPE_WRITEBACK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* check WC support if requested */
	if ((type == MTRR_TYPE_WRITECOMBINE) &&
	    (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/* leave the fix range area below 1MB alone */
	if (address < 0x100000 || mtrr_state.var_count == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/*
	 * Length must be a power of 2, 2^n with n >= 12 (i.e. at least 4KB).
	 * Base address alignment must be greater than or equal to the length.
	 */
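	/*
	 * e.g. length 0x08000000 (128MB) at base 0xC0000000 passes, since
	 * LSB(0xC0000000) == 0x40000000 >= length; length 0x3000 fails
	 * because it is not a power of 2.
	 */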
	if ((length < 0x1000)       ||
	    (LSB(length) != length) ||
	    (address && (length > LSB(address)))) {
		return KERN_INVALID_ARGUMENT;
	}

	MTRR_LOCK();

	/*
	 * Check for overlap and locate a free range.
	 */
	for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
	{
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt == 0) {
			/* free range candidate if no overlaps are found */
			free_range = vr;
			continue;
		}

		overlap = var_range_overlap(vr, address, length, type);
		if (overlap > 0) {
			/*
			 * identical overlap permitted, increment ref count.
			 * no hardware update required.
			 */
			free_range = vr;
			break;
		}
		if (overlap < 0) {
			/* unsupported overlapping of memory types */
			free_range = NULL;
			break;
		}
	}

	if (free_range) {
		if (free_range->refcnt++ == 0) {
			var_range_encode(free_range, address, length, type, 1);
			mp_rendezvous(mtrr_update_setup,
				      mtrr_update_action,
				      mtrr_update_teardown, NULL);
		}
		ret = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return ret;
}

/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t *	vr;
	int			result = KERN_FAILURE;
	int			cpu_update = 0;
	unsigned int		i;

	DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	MTRR_LOCK();

	for (i = 0; i < mtrr_state.var_count; i++) {
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt &&
		    var_range_overlap(vr, address, length, type) > 0) {
			/* found specified variable range */
			if (--mtrr_state.var_range[i].refcnt == 0) {
				var_range_encode(vr, address, length, type, 0);
				cpu_update = 1;
			}
			result = KERN_SUCCESS;
			break;
		}
	}

	if (cpu_update) {
		mp_rendezvous(mtrr_update_setup,
			      mtrr_update_action,
			      mtrr_update_teardown, NULL);
		result = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return result;
}

/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
	range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
	              (type    & (uint32_t)IA32_MTRR_PHYSBASE_TYPE);

	range->mask = LEN_TO_MASK(length) |
	              (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
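
/*
 * Example, assuming 36 physical address bits: encoding a valid 128MB
 * WC range at 0xC0000000 yields base == 0xC0000001 (type WC == 0x01)
 * and mask == LEN_TO_MASK(0x08000000) | valid == 0xFF8000800.
 */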
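
/*
 * var_range_overlap() returns 1 for an identical range match (the caller
 * may share the existing entry), 0 for no overlap or a permitted overlap
 * (UC/UC, UC/WB), and -1 for an unsupported conflicting overlap.
 */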
static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
	uint64_t	v_address, v_length;
	uint32_t	v_type;
	int		result = 0;	/* no overlap, or overlap ok */

	v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
	v_type    = (uint32_t)(range->base & IA32_MTRR_PHYSBASE_TYPE);
	v_length  = MASK_TO_LEN(range->mask);

	/* detect range overlap */
	if ((v_address >= address && v_address < (address + length)) ||
	    (address >= v_address && address < (v_address + v_length))) {

		if (v_address == address && v_length == length && v_type == type)
			result = 1;	/* identical overlap ok */
		else if (v_type == MTRR_TYPE_UNCACHEABLE &&
		         type   == MTRR_TYPE_UNCACHEABLE) {
			/* UC ranges can overlap */
		}
		else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
		          type   == MTRR_TYPE_WRITEBACK)  ||
		         (v_type == MTRR_TYPE_WRITEBACK   &&
		          type   == MTRR_TYPE_UNCACHEABLE)) {
			/* UC/WB can overlap - effective type becomes UC */
		}
		else {
			/* anything else may cause undefined behavior */
			result = -1;
		}
	}

	return result;
}

/*
 * Initialize PAT (Page Attribute Table)
 */
void
pat_init(void)
{
	boolean_t	istate;
	uint64_t	pat;

	if (!(cpuid_features() & CPUID_FEATURE_PAT))
		return;

	istate = ml_set_interrupts_enabled(FALSE);

	pat = rdmsr64(MSR_IA32_CR_PAT);
	DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);

	/* Change PA6 attribute field to WC if required */
	if ((pat & (0x0FULL << 48)) != (0x01ULL << 48)) {
		mtrr_update_action(CACHE_CONTROL_PAT);
	}
	ml_set_interrupts_enabled(istate);
}