/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>

struct mtrr_var_range {
	uint64_t  base;		/* in IA32_MTRR_PHYSBASE format */
	uint64_t  mask;		/* in IA32_MTRR_PHYSMASK format */
	uint32_t  refcnt;	/* var ranges reference count */
};
struct mtrr_fix_range {
	uint64_t  types;	/* eight 8-bit memory type fields, packed */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;

static struct {
	uint64_t            MTRRcap;
	uint64_t            MTRRdefType;
	mtrr_var_range_t *  var_range;
	unsigned int        var_count;
	mtrr_fix_range_t    fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()	simple_lock(&mtrr_lock)
#define MTRR_UNLOCK()	simple_unlock(&mtrr_lock)

#if MTRR_DEBUG
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

#define CACHE_CONTROL_MTRR	(NULL)
#define CACHE_CONTROL_PAT	((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT	0x000000ff
#define IA32_MTRR_DEF_TYPE_FE	0x00000400
#define IA32_MTRR_DEF_TYPE_E	0x00000800

#define IA32_MTRRCAP_VCNT	0x000000ff
#define IA32_MTRRCAP_FIX	0x00000100
#define IA32_MTRRCAP_WC		0x00000400

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
	((((1ULL << (bits-1)) - 1) << 1) | 1)
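
/*
 * For example, PHYS_BITS_TO_MASK(36) == 0x0000000FFFFFFFFFULL, i.e.
 * 36 one-bits. The shift is split into (bits - 1) plus one more
 * shift-by-one so that PHYS_BITS_TO_MASK(64) stays well defined; a
 * direct (1ULL << 64) would be undefined behavior in C.
 */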

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the cpu model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);

#define IA32_MTRR_PHYMASK_VALID		0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK		(mtrr_phys_mask & ~0xFFF)
#define IA32_MTRR_PHYSBASE_TYPE		0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
	((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len)  \
	(~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)		((x) & (~((x) - 1)))
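
/*
 * Worked example, assuming the default 36-bit mtrr_phys_mask: for a
 * 2MB range, LEN_TO_MASK(0x200000) == 0xFFFE00000ULL (bits 21..35
 * set), and MASK_TO_LEN(0xFFFE00000ULL) recovers 0x200000. LSB()
 * isolates the lowest set bit, e.g. LSB(0x28000) == 0x8000; this is
 * how the alignment checks below test for power-of-2 sizes.
 */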

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
		range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

		/* bump ref count for firmware configured ranges */
		if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
			range[i].refcnt = 1;
		else
			range[i].refcnt = 0;
	}
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
		wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
	}
}

/*
 * Fetch all fixed-range MTRRs. Note MSR offsets are not consecutive.
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
	int i;

	/* assume 11 fix range registers */
	range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
	range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
	range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
	for (i = 0; i < 8; i++)
		range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}
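
/*
 * The 11 fixed-range registers together describe the first 1MB of
 * physical memory: FIX64K_00000 covers 0x00000-0x7FFFF in eight 64K
 * chunks, FIX16K_80000 and FIX16K_A0000 cover 0x80000-0xBFFFF in
 * sixteen 16K chunks, and the eight FIX4K registers cover
 * 0xC0000-0xFFFFF in sixty-four 4K chunks; each register packs eight
 * one-byte type fields, one per chunk.
 */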

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const mtrr_fix_range_t * range)
{
	int i;

	/* assume 11 fix range registers */
	wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
	wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
	for (i = 0; i < 8; i++)
		wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
	int i;
	int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

	DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
	for (i = 0; i < count; i++) {
		DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
		    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
		    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
		    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
	}
	DBG("\n");

	DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
	DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
	DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
	DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
	DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
	DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
	DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
	DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
	DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
	DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
	DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

	DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
	    rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved, and the same settings are
 * later applied to any additional processors to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
	i386_cpu_info_t * infop = cpuid_info();

	/* no reason to init more than once */
	if (mtrr_initialized == TRUE)
		return;

	/* check for presence of MTRR feature on the processor */
	if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
		return; /* no MTRR feature */

	/* cpu vendor/model specific handling */
	if (!strncmp(infop->cpuid_vendor, CPUID_VID_AMD, sizeof(CPUID_VID_AMD)))
	{
		/* Check for AMD Athlon 64 and Opteron */
		if (cpuid_family() == 0xF)
		{
			uint32_t cpuid_result[4];

			/* check whether the cpu supports the Address Sizes function */
			do_cpuid(0x80000000, cpuid_result);
			if (cpuid_result[0] >= 0x80000008)
			{
				int bits;

				do_cpuid(0x80000008, cpuid_result);
				DBG("MTRR: AMD 8000_0008 EAX = %08x\n",
				    cpuid_result[0]);

				/*
				 * Function 8000_0008 (Address Sizes) EAX
				 * Bits 7-0  : phys address size
				 * Bits 15-8 : virt address size
				 */
				bits = cpuid_result[0] & 0xFF;
				if ((bits < 36) || (bits > 64))
				{
					printf("MTRR: bad address size\n");
					return; /* bogus size */
				}

				mtrr_phys_mask = PHYS_BITS_TO_MASK(bits);
			}
		}
	}

	/* use a lock to serialize MTRR changes */
	bzero((void *)&mtrr_state, sizeof(mtrr_state));
	simple_lock_init(&mtrr_lock, 0);

	mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
	mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
	mtrr_state.var_count   = mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT;

	/* allocate storage for variable ranges (can block?) */
	if (mtrr_state.var_count) {
		mtrr_state.var_range = (mtrr_var_range_t *)
		    kalloc(sizeof(mtrr_var_range_t) *
		           mtrr_state.var_count);
		if (mtrr_state.var_range == NULL)
			mtrr_state.var_count = 0;
	}

	/* fetch the initial firmware configured variable ranges */
	if (mtrr_state.var_count)
		mtrr_get_var_ranges(mtrr_state.var_range,
		                    mtrr_state.var_count);

	/* fetch the initial firmware configured fixed ranges */
	if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
		mtrr_get_fix_ranges(mtrr_state.fix_range);

	mtrr_initialized = TRUE;

#if MTRR_DEBUG
	mtrr_msr_dump(); /* dump firmware settings */
#endif
}

/*
 * Performs the Intel-recommended procedure for changing MTRRs on an MP
 * system, leveraging the rendezvous mechanism for the required barrier
 * synchronization among all processors. This function is called from the
 * rendezvous IPI handler and from mtrr_update_cpu().
 */
static void
mtrr_update_action(void * cache_control_type)
{
	uint32_t cr0, cr4;
	uint32_t tmp;

	cr0 = get_cr0();
	cr4 = get_cr4();

	/* enter no-fill cache mode */
	tmp = cr0 | CR0_CD;
	tmp &= ~CR0_NW;
	set_cr0(tmp);

	/* flush caches */
	wbinvd();

	/* clear the PGE flag in CR4 */
	if (cr4 & CR4_PGE)
		set_cr4(cr4 & ~CR4_PGE);

	/* flush TLBs */
	flush_tlb();

	if (CACHE_CONTROL_PAT == cache_control_type) {
		/* Change PA6 attribute field to WC */
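		/*
		 * PA6 is the seventh PAT entry and occupies bits 55:48
		 * of the IA32_PAT MSR; the 0x01 encoding written below
		 * is the architectural value for write-combining (WC).
		 */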
		uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
		DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
		pat &= ~(0x0FULL << 48);
		pat |=  (0x01ULL << 48);
		wrmsr64(MSR_IA32_CR_PAT, pat);
		DBG("CPU%d PAT: is  0x%016llx\n",
		    get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
	}
	else {
		/* disable all MTRR ranges */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
		        mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

		/* apply MTRR settings */
		if (mtrr_state.var_count)
			mtrr_set_var_ranges(mtrr_state.var_range,
			                    mtrr_state.var_count);

		if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
			mtrr_set_fix_ranges(mtrr_state.fix_range);

		/* enable all MTRR range registers (what if E was not set?) */
		wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
		        mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
	}

	/* flush all caches and TLBs a second time */
	wbinvd();
	flush_tlb();

	/* restore normal cache mode */
	set_cr0(cr0);

	/* restore PGE flag */
	if (cr4 & CR4_PGE)
		set_cr4(cr4);

	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	MTRR_LOCK();
	mp_rendezvous(mtrr_update_setup,
	              mtrr_update_action,
	              mtrr_update_teardown, NULL);
	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Update a single CPU with the current MTRR settings. Can be called
 * during slave processor initialization to mirror the MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
	if (mtrr_initialized == FALSE)
		return KERN_NOT_SUPPORTED;

	MTRR_LOCK();
	mtrr_update_setup(NULL);
	mtrr_update_action(NULL);
	mtrr_update_teardown(NULL);
	MTRR_UNLOCK();

	return KERN_SUCCESS;
}

/*
 * Add an MTRR range to associate the specified physical memory range
 * with a given memory caching type.
 */
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t * vr;
	mtrr_var_range_t * free_range;
	kern_return_t      ret = KERN_NO_SPACE;
	int                overlap;
	unsigned int       i;

	DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	/* check memory type (GPF exception for undefined types) */
	if ((type != MTRR_TYPE_UNCACHEABLE)  &&
	    (type != MTRR_TYPE_WRITECOMBINE) &&
	    (type != MTRR_TYPE_WRITETHROUGH) &&
	    (type != MTRR_TYPE_WRITEPROTECT) &&
	    (type != MTRR_TYPE_WRITEBACK)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* check WC support if requested */
	if ((type == MTRR_TYPE_WRITECOMBINE) &&
	    (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/* leave the fix range area below 1MB alone */
	if (address < 0x100000 || mtrr_state.var_count == 0) {
		return KERN_NOT_SUPPORTED;
	}

	/*
	 * Length must be a power of 2, i.e. 2^n with n >= 12 (at least
	 * 4KB), and the base address must be aligned on a boundary no
	 * smaller than the length.
	 */
	if ((length < 0x1000) ||
	    (LSB(length) != length) ||
	    (address && (length > LSB(address)))) {
		return KERN_INVALID_ARGUMENT;
	}

	MTRR_LOCK();

	/*
	 * Check for overlap and locate a free range.
	 */
	for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
	{
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt == 0) {
			/* free range candidate if no overlaps are found */
			free_range = vr;
			continue;
		}

		overlap = var_range_overlap(vr, address, length, type);
		if (overlap > 0) {
			/*
			 * identical overlap permitted, increment ref count.
			 * no hardware update required.
			 */
			free_range = vr;
			break;
		}
		if (overlap < 0) {
			/* unsupported overlapping of memory types */
			free_range = NULL;
			break;
		}
	}

	if (free_range) {
		if (free_range->refcnt++ == 0) {
			var_range_encode(free_range, address, length, type, 1);
			mp_rendezvous(mtrr_update_setup,
			              mtrr_update_action,
			              mtrr_update_teardown, NULL);
		}
		ret = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return ret;
}

/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
	mtrr_var_range_t * vr;
	kern_return_t      result = KERN_FAILURE;
	int                cpu_update = 0;
	unsigned int       i;

	DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
	    address, length, type);

	if (mtrr_initialized == FALSE) {
		return KERN_NOT_SUPPORTED;
	}

	MTRR_LOCK();

	for (i = 0; i < mtrr_state.var_count; i++) {
		vr = &mtrr_state.var_range[i];

		if (vr->refcnt &&
		    var_range_overlap(vr, address, length, type) > 0) {
			/* found specified variable range */
			if (--mtrr_state.var_range[i].refcnt == 0) {
				var_range_encode(vr, address, length, type, 0);
				cpu_update = 1;
			}
			result = KERN_SUCCESS;
			break;
		}
	}

	if (cpu_update) {
		mp_rendezvous(mtrr_update_setup,
		              mtrr_update_action,
		              mtrr_update_teardown, NULL);
		result = KERN_SUCCESS;
	}

#if MTRR_DEBUG
	mtrr_msr_dump();
#endif

	MTRR_UNLOCK();

	return result;
}
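
/*
 * Usage sketch for the pair above. A hypothetical caller (say, a video
 * driver mapping a 16MB frame buffer at 0x90000000 as write-combined;
 * the address, size, and caller are illustrative, not from this file)
 * would do:
 *
 *	if (mtrr_range_add(0x90000000ULL, 0x1000000ULL,
 *	                   MTRR_TYPE_WRITECOMBINE) == KERN_SUCCESS) {
 *		... use the range ...
 *		mtrr_range_remove(0x90000000ULL, 0x1000000ULL,
 *		                  MTRR_TYPE_WRITECOMBINE);
 *	}
 */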

/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
	range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
	              (type    & IA32_MTRR_PHYSBASE_TYPE);

	range->mask = LEN_TO_MASK(length) |
	              (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
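
/*
 * Worked encode example (assuming the default 36-bit mtrr_phys_mask
 * and the architectural WC type encoding of 1): a valid 16MB
 * write-combined range at 0x90000000 encodes as
 *	base == 0x0000000090000001ULL	(base | type)
 *	mask == 0x0000000FFF000800ULL	(LEN_TO_MASK(0x1000000) | valid)
 */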

static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
	uint64_t v_address, v_length;
	uint32_t v_type;
	int      result = 0;	/* no overlap, or overlap ok */

	v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
	v_type    = range->base & IA32_MTRR_PHYSBASE_TYPE;
	v_length  = MASK_TO_LEN(range->mask);

	/* detect range overlap */
	if ((v_address >= address && v_address < (address + length)) ||
	    (address >= v_address && address < (v_address + v_length))) {

		if (v_address == address && v_length == length &&
		    v_type == type)
			result = 1;	/* identical overlap ok */
		else if (v_type == MTRR_TYPE_UNCACHEABLE &&
		         type   == MTRR_TYPE_UNCACHEABLE) {
			/* UC ranges can overlap */
		}
		else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
		          type   == MTRR_TYPE_WRITEBACK)  ||
		         (v_type == MTRR_TYPE_WRITEBACK   &&
		          type   == MTRR_TYPE_UNCACHEABLE)) {
			/* UC/WB can overlap - effective type becomes UC */
		}
		else {
			/* anything else may cause undefined behavior */
			result = -1;
		}
	}

	return result;
}
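
/*
 * Return value examples (hypothetical ranges): an existing WC range
 * [0x90000000, +16MB) versus a second identical WC request returns 1
 * (the caller just bumps the ref count); an existing WB range versus
 * a UC request over the same addresses returns 0 (allowed, effective
 * type UC); an existing WC range versus an overlapping WB request
 * returns -1 (rejected).
 */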

/*
 * Initialize PAT (Page Attribute Table).
 */
void
pat_init(void)
{
	if (cpuid_features() & CPUID_FEATURE_PAT)
	{
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		mtrr_update_action(CACHE_CONTROL_PAT);
		ml_set_interrupts_enabled(istate);
	}
}