]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/mtrr.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / i386 / mtrr.c
CommitLineData
91447636 1/*
316670eb 2 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
91447636 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28
29#include <mach/kern_return.h>
f427ee49 30#include <kern/zalloc.h>
91447636
A
31#include <kern/cpu_number.h>
32#include <kern/cpu_data.h>
91447636 33#include <i386/cpuid.h>
b0d623f7 34#include <i386/mp.h>
91447636
A
35#include <i386/proc_reg.h>
36#include <i386/mtrr.h>
316670eb 37#include <i386/machine_check.h>
91447636
A
38
39struct mtrr_var_range {
0a7de745
A
40 uint64_t base; /* in IA32_MTRR_PHYSBASE format */
41 uint64_t mask; /* in IA32_MTRR_PHYSMASK format */
42 uint32_t refcnt; /* var ranges reference count */
91447636
A
43};
44
45struct mtrr_fix_range {
0a7de745 46 uint64_t types; /* fixed-range type octet */
91447636
A
47};
48
49typedef struct mtrr_var_range mtrr_var_range_t;
50typedef struct mtrr_fix_range mtrr_fix_range_t;
51
52static struct {
53 uint64_t MTRRcap;
54 uint64_t MTRRdefType;
55 mtrr_var_range_t * var_range;
56 unsigned int var_count;
57 mtrr_fix_range_t fix_range[11];
58} mtrr_state;
59
60static boolean_t mtrr_initialized = FALSE;
61
62decl_simple_lock_data(static, mtrr_lock);
0a7de745
A
63#define MTRR_LOCK() simple_lock(&mtrr_lock, LCK_GRP_NULL);
64#define MTRR_UNLOCK() simple_unlock(&mtrr_lock);
91447636 65
316670eb 66//#define MTRR_DEBUG 1
0a7de745
A
67#if MTRR_DEBUG
68#define DBG(x...) kprintf(x)
91447636
A
69#else
70#define DBG(x...)
71#endif
72
73/* Private functions */
74static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
75static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
76static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
77static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
78static void mtrr_update_setup(void * param);
79static void mtrr_update_teardown(void * param);
80static void mtrr_update_action(void * param);
81static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
0a7de745 82 uint64_t length, uint32_t type, int valid);
91447636 83static int var_range_overlap(mtrr_var_range_t * range, addr64_t address,
0a7de745 84 uint64_t length, uint32_t type);
91447636 85
0a7de745
A
86#define CACHE_CONTROL_MTRR (NULL)
87#define CACHE_CONTROL_PAT ((void *)1)
91447636
A
88
89/*
90 * MTRR MSR bit fields.
91 */
0a7de745
A
92#define IA32_MTRR_DEF_TYPE_MT 0x000000ff
93#define IA32_MTRR_DEF_TYPE_FE 0x00000400
94#define IA32_MTRR_DEF_TYPE_E 0x00000800
91447636 95
0a7de745
A
96#define IA32_MTRRCAP_VCNT 0x000000ff
97#define IA32_MTRRCAP_FIX 0x00000100
98#define IA32_MTRRCAP_WC 0x00000400
91447636
A
99
100/* 0 < bits <= 64 */
101#define PHYS_BITS_TO_MASK(bits) \
102 ((((1ULL << (bits-1)) - 1) << 1) | 1)
103
104/*
105 * Default mask for 36 physical address bits, this can
106 * change depending on the cpu model.
107 */
108static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);
109
0a7de745
A
110#define IA32_MTRR_PHYMASK_VALID 0x0000000000000800ULL
111#define IA32_MTRR_PHYSBASE_MASK (mtrr_phys_mask & ~0x0000000000000FFFULL)
112#define IA32_MTRR_PHYSBASE_TYPE 0x00000000000000FFULL
91447636
A
113
114/*
115 * Variable-range mask to/from length conversions.
116 */
117#define MASK_TO_LEN(mask) \
118 ((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)
119
120#define LEN_TO_MASK(len) \
121 (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)
122
0a7de745 123#define LSB(x) ((x) & (~((x) - 1)))
91447636
A
124
125/*
126 * Fetch variable-range MTRR register pairs.
127 */
128static void
129mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
130{
131 int i;
132
133 for (i = 0; i < count; i++) {
134 range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
135 range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
136
137 /* bump ref count for firmware configured ranges */
0a7de745 138 if (range[i].mask & IA32_MTRR_PHYMASK_VALID) {
91447636 139 range[i].refcnt = 1;
0a7de745 140 } else {
91447636 141 range[i].refcnt = 0;
0a7de745 142 }
91447636
A
143 }
144}
145
146/*
147 * Update variable-range MTRR register pairs.
148 */
149static void
150mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
151{
152 int i;
153
154 for (i = 0; i < count; i++) {
155 wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
156 wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
157 }
158}
159
160/*
161 * Fetch all fixed-range MTRR's. Note MSR offsets are not consecutive.
162 */
163static void
164mtrr_get_fix_ranges(mtrr_fix_range_t * range)
165{
166 int i;
167
168 /* assume 11 fix range registers */
169 range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
170 range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
171 range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
0a7de745 172 for (i = 0; i < 8; i++) {
91447636 173 range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
0a7de745 174 }
91447636
A
175}
176
177/*
178 * Update all fixed-range MTRR's.
179 */
180static void
181mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
182{
183 int i;
184
185 /* assume 11 fix range registers */
186 wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
187 wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
188 wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
0a7de745 189 for (i = 0; i < 8; i++) {
91447636 190 wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
0a7de745 191 }
91447636
A
192}
193
b0d623f7
A
194static boolean_t
195mtrr_check_fix_ranges(const struct mtrr_fix_range * range)
196{
0a7de745
A
197 int i;
198 boolean_t match = TRUE;
b0d623f7
A
199
200 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
201
202 /* assume 11 fix range registers */
203 match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) &&
0a7de745
A
204 range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) &&
205 range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
b0d623f7
A
206 for (i = 0; match && i < 8; i++) {
207 match = range[3 + i].types ==
0a7de745 208 rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
b0d623f7
A
209 }
210
211 return match;
212}
213
214static boolean_t
215mtrr_check_var_ranges(mtrr_var_range_t * range, int count)
216{
0a7de745
A
217 int i;
218 boolean_t match = TRUE;
219
b0d623f7
A
220 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
221
222 for (i = 0; match && i < count; i++) {
223 match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) &&
0a7de745 224 range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
b0d623f7
A
225 }
226
227 return match;
228}
229
91447636
A
#if MTRR_DEBUG
/*
 * Dump the live MTRR MSR contents of the calling CPU (debug builds only).
 */
static void
mtrr_msr_dump(void)
{
	int idx;
	int vcnt = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

	DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
	for (idx = 0; idx < vcnt; idx++) {
		DBG(" %02x 0x%016llx 0x%016llx 0x%llx\n", idx,
		    rdmsr64(MSR_IA32_MTRR_PHYSBASE(idx)),
		    rdmsr64(MSR_IA32_MTRR_PHYSMASK(idx)),
		    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(idx))));
	}
	DBG("\n");

	DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
	DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
	DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
	DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
	DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
	DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
	DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
	DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
	DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
	DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
	DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

	DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
	    rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */
262
263/*
264 * Called by the boot processor (BP) early during boot to initialize MTRR
265 * support. The MTRR state on the BP is saved, any additional processors
266 * will have the same settings applied to ensure MTRR consistency.
267 */
268void
269mtrr_init(void)
270{
91447636 271 /* no reason to init more than once */
0a7de745 272 if (mtrr_initialized == TRUE) {
91447636 273 return;
0a7de745 274 }
91447636
A
275
276 /* check for presence of MTRR feature on the processor */
0a7de745
A
277 if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0) {
278 return; /* no MTRR feature */
279 }
91447636
A
280 /* use a lock to serialize MTRR changes */
281 bzero((void *)&mtrr_state, sizeof(mtrr_state));
282 simple_lock_init(&mtrr_lock, 0);
283
284 mtrr_state.MTRRcap = rdmsr64(MSR_IA32_MTRRCAP);
285 mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
b0d623f7 286 mtrr_state.var_count = (unsigned int)(mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT);
91447636
A
287
288 /* allocate storage for variable ranges (can block?) */
289 if (mtrr_state.var_count) {
290 mtrr_state.var_range = (mtrr_var_range_t *)
f427ee49
A
291 zalloc_permanent(sizeof(mtrr_var_range_t) *
292 mtrr_state.var_count, ZALIGN(mtrr_var_range_t));
0a7de745 293 if (mtrr_state.var_range == NULL) {
91447636 294 mtrr_state.var_count = 0;
0a7de745 295 }
91447636
A
296 }
297
298 /* fetch the initial firmware configured variable ranges */
0a7de745 299 if (mtrr_state.var_count) {
91447636 300 mtrr_get_var_ranges(mtrr_state.var_range,
0a7de745
A
301 mtrr_state.var_count);
302 }
91447636
A
303
304 /* fetch the initial firmware configured fixed ranges */
0a7de745 305 if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) {
91447636 306 mtrr_get_fix_ranges(mtrr_state.fix_range);
0a7de745 307 }
91447636
A
308
309 mtrr_initialized = TRUE;
310
311#if MTRR_DEBUG
0a7de745 312 mtrr_msr_dump(); /* dump firmware settings */
91447636
A
313#endif
314}
315
316/*
317 * Performs the Intel recommended procedure for changing the MTRR
318 * in a MP system. Leverage rendezvous mechanism for the required
319 * barrier synchronization among all processors. This function is
320 * called from the rendezvous IPI handler, and mtrr_update_cpu().
321 */
322static void
323mtrr_update_action(void * cache_control_type)
324{
b0d623f7
A
325 uintptr_t cr0, cr4;
326 uintptr_t tmp;
91447636
A
327
328 cr0 = get_cr0();
329 cr4 = get_cr4();
330
331 /* enter no-fill cache mode */
332 tmp = cr0 | CR0_CD;
333 tmp &= ~CR0_NW;
334 set_cr0(tmp);
335
336 /* flush caches */
337 wbinvd();
338
339 /* clear the PGE flag in CR4 */
0a7de745 340 if (cr4 & CR4_PGE) {
91447636 341 set_cr4(cr4 & ~CR4_PGE);
0a7de745
A
342 } else {
343 set_cr3_raw(get_cr3_raw());
344 }
91447636
A
345
346 if (CACHE_CONTROL_PAT == cache_control_type) {
347 /* Change PA6 attribute field to WC */
348 uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
349 DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
0a7de745
A
350 /*
351 * Intel doc states:
352 * "The IA32_PAT MSR contains eight page attribute fields: PA0 through PA7.
353 * The three low-order bits of each field are used to specify a memory type.
354 * The five high-order bits of each field are reserved, and must be set to all 0s."
355 * So, we zero-out the high 5 bits of the PA6 entry here:
356 */
357 pat &= ~(0xFFULL << 48);
91447636
A
358 pat |= (0x01ULL << 48);
359 wrmsr64(MSR_IA32_CR_PAT, pat);
360 DBG("CPU%d PAT: is 0x%016llx\n",
361 get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
0a7de745 362 } else {
91447636
A
363 /* disable all MTRR ranges */
364 wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
0a7de745 365 mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);
91447636
A
366
367 /* apply MTRR settings */
0a7de745 368 if (mtrr_state.var_count) {
91447636 369 mtrr_set_var_ranges(mtrr_state.var_range,
0a7de745
A
370 mtrr_state.var_count);
371 }
91447636 372
0a7de745 373 if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) {
91447636 374 mtrr_set_fix_ranges(mtrr_state.fix_range);
0a7de745 375 }
91447636
A
376
377 /* enable all MTRR range registers (what if E was not set?) */
378 wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
0a7de745 379 mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
91447636
A
380 }
381
382 /* flush all caches and TLBs a second time */
383 wbinvd();
0a7de745 384 set_cr3_raw(get_cr3_raw());
91447636
A
385 /* restore normal cache mode */
386 set_cr0(cr0);
387
388 /* restore PGE flag */
0a7de745 389 if (cr4 & CR4_PGE) {
91447636 390 set_cr4(cr4);
0a7de745 391 }
91447636
A
392
393 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
394}
395
396static void
397mtrr_update_setup(__unused void * param_not_used)
398{
399 /* disable interrupts before the first barrier */
400 current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
401 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
402}
403
404static void
405mtrr_update_teardown(__unused void * param_not_used)
406{
407 /* restore interrupt flag following MTRR changes */
408 ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
409 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
410}
411
412/*
413 * Update MTRR settings on all processors.
414 */
415kern_return_t
416mtrr_update_all_cpus(void)
417{
0a7de745 418 if (mtrr_initialized == FALSE) {
91447636 419 return KERN_NOT_SUPPORTED;
0a7de745 420 }
91447636
A
421
422 MTRR_LOCK();
423 mp_rendezvous(mtrr_update_setup,
0a7de745
A
424 mtrr_update_action,
425 mtrr_update_teardown, NULL);
91447636
A
426 MTRR_UNLOCK();
427
428 return KERN_SUCCESS;
429}
430
431/*
b0d623f7
A
432 * Verify that a processor has been set with the BSP's MTRR settings. Called
433 * during slave processor initialization to check and set MTRR settings
91447636
A
434 * discovered on the boot processor by mtrr_init().
435 */
436kern_return_t
437mtrr_update_cpu(void)
438{
0a7de745 439 boolean_t match = TRUE;
b0d623f7 440
0a7de745 441 if (mtrr_initialized == FALSE) {
91447636 442 return KERN_NOT_SUPPORTED;
0a7de745 443 }
91447636 444
b0d623f7
A
445 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
446
91447636 447 MTRR_LOCK();
b0d623f7
A
448
449 /* Check MSR_IA32_MTRR_DEF_TYPE MSR */
450 match = mtrr_state.MTRRdefType == rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
451
452 /* Check MSR_IA32_MTRRCAP MSR */
453 if (match) {
454 match = mtrr_state.MTRRcap == rdmsr64(MSR_IA32_MTRRCAP);
455 }
456
457 /* Check variable ranges */
458 if (match && mtrr_state.var_count) {
459 match = mtrr_check_var_ranges(mtrr_state.var_range,
0a7de745 460 mtrr_state.var_count);
b0d623f7
A
461 }
462
463 /* Check fixed ranges */
464 if (match && (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)) {
465 match = mtrr_check_fix_ranges(mtrr_state.fix_range);
466 }
467
468#if MTRR_DEBUG
0a7de745 469 if (!match) {
b0d623f7 470 mtrr_msr_dump();
0a7de745 471 }
b0d623f7
A
472#endif
473 if (!match) {
474 DBG("mtrr_update_cpu() setting MTRR for cpu %d\n",
0a7de745 475 get_cpu_number());
b0d623f7
A
476 mtrr_update_action(NULL);
477 }
478#if MTRR_DEBUG
0a7de745 479 if (!match) {
b0d623f7 480 mtrr_msr_dump();
0a7de745 481 }
b0d623f7
A
482#endif
483
91447636
A
484 MTRR_UNLOCK();
485
486 return KERN_SUCCESS;
487}
488
489/*
490 * Add a MTRR range to associate the physical memory range specified
491 * with a given memory caching type.
492 */
493kern_return_t
494mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
495{
496 mtrr_var_range_t * vr;
497 mtrr_var_range_t * free_range;
498 kern_return_t ret = KERN_NO_SPACE;
499 int overlap;
500 unsigned int i;
501
502 DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
0a7de745 503 address, length, type);
91447636
A
504
505 if (mtrr_initialized == FALSE) {
506 return KERN_NOT_SUPPORTED;
507 }
508
509 /* check memory type (GPF exception for undefined types) */
0a7de745 510 if ((type != MTRR_TYPE_UNCACHEABLE) &&
91447636
A
511 (type != MTRR_TYPE_WRITECOMBINE) &&
512 (type != MTRR_TYPE_WRITETHROUGH) &&
513 (type != MTRR_TYPE_WRITEPROTECT) &&
514 (type != MTRR_TYPE_WRITEBACK)) {
515 return KERN_INVALID_ARGUMENT;
516 }
517
518 /* check WC support if requested */
519 if ((type == MTRR_TYPE_WRITECOMBINE) &&
520 (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
521 return KERN_NOT_SUPPORTED;
522 }
523
524 /* leave the fix range area below 1MB alone */
525 if (address < 0x100000 || mtrr_state.var_count == 0) {
526 return KERN_NOT_SUPPORTED;
527 }
528
529 /*
530 * Length must be a power of 2 given by 2^n, where n >= 12.
531 * Base address alignment must be larger than or equal to length.
532 */
0a7de745 533 if ((length < 0x1000) ||
91447636 534 (LSB(length) != length) ||
0a7de745 535 (address && (length > LSB(address)))) {
91447636
A
536 return KERN_INVALID_ARGUMENT;
537 }
538
539 MTRR_LOCK();
540
541 /*
542 * Check for overlap and locate a free range.
543 */
0a7de745 544 for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++) {
91447636
A
545 vr = &mtrr_state.var_range[i];
546
547 if (vr->refcnt == 0) {
548 /* free range candidate if no overlaps are found */
549 free_range = vr;
550 continue;
551 }
552
553 overlap = var_range_overlap(vr, address, length, type);
554 if (overlap > 0) {
555 /*
556 * identical overlap permitted, increment ref count.
557 * no hardware update required.
558 */
559 free_range = vr;
560 break;
561 }
562 if (overlap < 0) {
563 /* unsupported overlapping of memory types */
564 free_range = NULL;
565 break;
566 }
567 }
568
569 if (free_range) {
570 if (free_range->refcnt++ == 0) {
571 var_range_encode(free_range, address, length, type, 1);
572 mp_rendezvous(mtrr_update_setup,
0a7de745
A
573 mtrr_update_action,
574 mtrr_update_teardown, NULL);
91447636
A
575 }
576 ret = KERN_SUCCESS;
577 }
578
579#if MTRR_DEBUG
580 mtrr_msr_dump();
581#endif
582
583 MTRR_UNLOCK();
584
585 return ret;
586}
587
588/*
589 * Remove a previously added MTRR range. The same arguments used for adding
590 * the memory range must be supplied again.
591 */
592kern_return_t
593mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
594{
595 mtrr_var_range_t * vr;
596 int result = KERN_FAILURE;
597 int cpu_update = 0;
598 unsigned int i;
599
600 DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
0a7de745 601 address, length, type);
91447636
A
602
603 if (mtrr_initialized == FALSE) {
604 return KERN_NOT_SUPPORTED;
605 }
606
607 MTRR_LOCK();
608
609 for (i = 0; i < mtrr_state.var_count; i++) {
610 vr = &mtrr_state.var_range[i];
611
612 if (vr->refcnt &&
613 var_range_overlap(vr, address, length, type) > 0) {
614 /* found specified variable range */
615 if (--mtrr_state.var_range[i].refcnt == 0) {
616 var_range_encode(vr, address, length, type, 0);
617 cpu_update = 1;
618 }
619 result = KERN_SUCCESS;
620 break;
621 }
622 }
623
624 if (cpu_update) {
625 mp_rendezvous(mtrr_update_setup,
0a7de745
A
626 mtrr_update_action,
627 mtrr_update_teardown, NULL);
91447636
A
628 result = KERN_SUCCESS;
629 }
630
631#if MTRR_DEBUG
632 mtrr_msr_dump();
633#endif
634
635 MTRR_UNLOCK();
636
637 return result;
638}
639
640/*
641 * Variable range helper routines
642 */
643static void
644var_range_encode(mtrr_var_range_t * range, addr64_t address,
0a7de745 645 uint64_t length, uint32_t type, int valid)
91447636
A
646{
647 range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
0a7de745 648 (type & (uint32_t)IA32_MTRR_PHYSBASE_TYPE);
91447636
A
649
650 range->mask = LEN_TO_MASK(length) |
0a7de745 651 (valid ? IA32_MTRR_PHYMASK_VALID : 0);
91447636
A
652}
653
654static int
655var_range_overlap(mtrr_var_range_t * range, addr64_t address,
0a7de745 656 uint64_t length, uint32_t type)
91447636
A
657{
658 uint64_t v_address, v_length;
659 uint32_t v_type;
660 int result = 0; /* no overlap, or overlap ok */
661
662 v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
b0d623f7 663 v_type = (uint32_t)(range->base & IA32_MTRR_PHYSBASE_TYPE);
91447636
A
664 v_length = MASK_TO_LEN(range->mask);
665
666 /* detect range overlap */
667 if ((v_address >= address && v_address < (address + length)) ||
668 (address >= v_address && address < (v_address + v_length))) {
0a7de745 669 if (v_address == address && v_length == length && v_type == type) {
91447636 670 result = 1; /* identical overlap ok */
0a7de745
A
671 } else if (v_type == MTRR_TYPE_UNCACHEABLE &&
672 type == MTRR_TYPE_UNCACHEABLE) {
91447636 673 /* UC ranges can overlap */
0a7de745
A
674 } else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
675 type == MTRR_TYPE_WRITEBACK) ||
676 (v_type == MTRR_TYPE_WRITEBACK &&
677 type == MTRR_TYPE_UNCACHEABLE)) {
91447636 678 /* UC/WB can overlap - effective type becomes UC */
0a7de745 679 } else {
91447636
A
680 /* anything else may cause undefined behavior */
681 result = -1;
682 }
683 }
684
685 return result;
686}
687
688/*
689 * Initialize PAT (Page Attribute Table)
690 */
691void
692pat_init(void)
693{
0a7de745
A
694 boolean_t istate;
695 uint64_t pat;
b0d623f7 696
0a7de745 697 if (!(cpuid_features() & CPUID_FEATURE_PAT)) {
b0d623f7 698 return;
0a7de745 699 }
b0d623f7
A
700
701 istate = ml_set_interrupts_enabled(FALSE);
702
703 pat = rdmsr64(MSR_IA32_CR_PAT);
704 DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
705
706 /* Change PA6 attribute field to WC if required */
0a7de745 707 if ((pat & (0x07ULL << 48)) != (0x01ULL << 48)) {
91447636 708 mtrr_update_action(CACHE_CONTROL_PAT);
91447636 709 }
b0d623f7 710 ml_set_interrupts_enabled(istate);
91447636 711}