/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>

struct mtrr_var_range {
    uint64_t  base;     /* in IA32_MTRR_PHYSBASE format */
    uint64_t  mask;     /* in IA32_MTRR_PHYSMASK format */
    uint32_t  refcnt;   /* var ranges reference count */
};
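/*
 * Layout of the variable-range MSR pair (Intel SDM, vol. 3):
 *   IA32_MTRR_PHYSBASE: bits 7:0 hold the memory type, bits
 *     MAXPHYADDR-1:12 hold the range base; other bits are reserved.
 *   IA32_MTRR_PHYSMASK: bit 11 is the valid bit, bits
 *     MAXPHYADDR-1:12 hold the mask that selects the range size.
 * A physical address falls inside the range when
 *   (address & mask) == (base & mask).
 */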

struct mtrr_fix_range {
    uint64_t  types;    /* fixed-range type octet */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;

static struct {
    uint64_t           MTRRcap;
    uint64_t           MTRRdefType;
    mtrr_var_range_t * var_range;
    unsigned int       var_count;
    mtrr_fix_range_t   fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
/* no trailing semicolons in the macro bodies; call sites supply them */
#define MTRR_LOCK()    simple_lock(&mtrr_lock)
#define MTRR_UNLOCK()  simple_unlock(&mtrr_lock)

#if MTRR_DEBUG
#define DBG(x...)  kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

#define CACHE_CONTROL_MTRR  (NULL)
#define CACHE_CONTROL_PAT   ((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT   0x000000ff
#define IA32_MTRR_DEF_TYPE_FE   0x00000400
#define IA32_MTRR_DEF_TYPE_E    0x00000800

#define IA32_MTRRCAP_VCNT       0x000000ff
#define IA32_MTRRCAP_FIX        0x00000100
#define IA32_MTRRCAP_WC         0x00000400

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
        ((((1ULL << (bits-1)) - 1) << 1) | 1)

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the cpu model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);
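/*
 * For example, with the default 36 address bits:
 *   PHYS_BITS_TO_MASK(36) == 0x0000000FFFFFFFFFULL
 * The shift is split into (1ULL << (bits-1)) - 1, << 1, | 1 so that
 * PHYS_BITS_TO_MASK(64) stays well defined; a direct (1ULL << 64) - 1
 * would be undefined behavior in C.
 */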

#define IA32_MTRR_PHYMASK_VALID     0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK     (mtrr_phys_mask & ~0xFFF)
#define IA32_MTRR_PHYSBASE_TYPE     0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
        ((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len)  \
        (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)  ((x) & (~((x) - 1)))
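/*
 * Worked example, assuming the 36-bit default mask:
 *   LEN_TO_MASK(0x20000)     == 0xFFFFE0000   (a 128KB range)
 *   MASK_TO_LEN(0xFFFFE0000) == 0x20000       (round trip)
 * LSB(x) isolates the lowest set bit, e.g. LSB(0x6000) == 0x2000;
 * it is used below to test power-of-two sizes and base alignment.
 */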

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
        range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

        /* bump ref count for firmware configured ranges */
        if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
            range[i].refcnt = 1;
        else
            range[i].refcnt = 0;
    }
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
        wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
    }
}

/*
 * Fetch all fixed-range MTRRs. Note the MSR offsets are not consecutive.
 */
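/*
 * The eleven fixed-range MTRRs cover the first 1MB of physical memory
 * (Intel SDM, vol. 3): one 64K register (MSR 0x250) for 0x00000-0x7FFFF,
 * two 16K registers (0x258, 0x259) for 0x80000-0xBFFFF, and eight 4K
 * registers (0x268-0x26F) for 0xC0000-0xFFFFF. Each register packs
 * eight one-byte type fields, one per sub-range.
 */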
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
    int i;

    /* assume 11 fix range registers */
    range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
    range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
    range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
    for (i = 0; i < 8; i++)
        range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const mtrr_fix_range_t * range)
{
    int i;

    /* assume 11 fix range registers */
    wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
    wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
    wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
    for (i = 0; i < 8; i++)
        wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
    int i;
    int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

    DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
    for (i = 0; i < count; i++) {
        DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
            rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
            rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
            MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
    }
    DBG("\n");

    DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
    DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
    DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
    DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
    DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
    DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
    DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
    DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
    DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
    DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
    DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

    DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
        rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved; any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
    i386_cpu_info_t * infop = cpuid_info();

    /* no reason to init more than once */
    if (mtrr_initialized == TRUE)
        return;

    /* check for presence of MTRR feature on the processor */
    if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
        return;  /* no MTRR feature */

    /* cpu vendor/model specific handling */
    if (!strncmp(infop->cpuid_vendor, CPUID_VID_AMD, sizeof(CPUID_VID_AMD)))
    {
        /* Check for AMD Athlon 64 and Opteron */
        if (cpuid_family() == 0xF)
        {
            uint32_t cpuid_result[4];

            /* check if the cpu supports the Address Sizes function */
            do_cpuid(0x80000000, cpuid_result);
            if (cpuid_result[0] >= 0x80000008)
            {
                int bits;

                do_cpuid(0x80000008, cpuid_result);
                DBG("MTRR: AMD 8000_0008 EAX = %08x\n",
                    cpuid_result[0]);

                /*
                 * Function 8000_0008 (Address Sizes) EAX
                 * Bits 7-0  : phys address size
                 * Bits 15-8 : virt address size
                 */
                bits = cpuid_result[0] & 0xFF;
                if ((bits < 36) || (bits > 64))
                {
                    printf("MTRR: bad address size\n");
                    return;  /* bogus size */
                }

                mtrr_phys_mask = PHYS_BITS_TO_MASK(bits);
            }
        }
    }

    /* use a lock to serialize MTRR changes */
    bzero((void *)&mtrr_state, sizeof(mtrr_state));
    simple_lock_init(&mtrr_lock, 0);

    mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
    mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
    mtrr_state.var_count   = mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT;

    /* allocate storage for variable ranges (can block?) */
    if (mtrr_state.var_count) {
        mtrr_state.var_range = (mtrr_var_range_t *)
                               kalloc(sizeof(mtrr_var_range_t) *
                                      mtrr_state.var_count);
        if (mtrr_state.var_range == NULL)
            mtrr_state.var_count = 0;
    }

    /* fetch the initial firmware configured variable ranges */
    if (mtrr_state.var_count)
        mtrr_get_var_ranges(mtrr_state.var_range,
                            mtrr_state.var_count);

    /* fetch the initial firmware configured fixed ranges */
    if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
        mtrr_get_fix_ranges(mtrr_state.fix_range);

    mtrr_initialized = TRUE;

#if MTRR_DEBUG
    mtrr_msr_dump();  /* dump firmware settings */
#endif
}

/*
 * Performs the Intel recommended procedure for changing MTRRs
 * in an MP system. Leverages the rendezvous mechanism for the required
 * barrier synchronization among all processors. This function is
 * called from the rendezvous IPI handler and from mtrr_update_cpu().
 */
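/*
 * The sequence below follows the MP MTRR update procedure in the
 * Intel SDM: enter no-fill cache mode (CR0.CD=1, CR0.NW=0), flush
 * caches and TLBs, disable MTRRs (or update the PAT), write the new
 * settings, re-enable, flush again, then restore CR0 and CR4. Callers
 * are responsible for disabling interrupts around this function.
 */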
static void
mtrr_update_action(void * cache_control_type)
{
    uint32_t cr0, cr4;
    uint32_t tmp;

    cr0 = get_cr0();
    cr4 = get_cr4();

    /* enter no-fill cache mode */
    tmp = cr0 | CR0_CD;
    tmp &= ~CR0_NW;
    set_cr0(tmp);

    /* flush caches */
    wbinvd();

    /* clear the PGE flag in CR4 */
    if (cr4 & CR4_PGE)
        set_cr4(cr4 & ~CR4_PGE);

    /* flush TLBs */
    flush_tlb();

    if (CACHE_CONTROL_PAT == cache_control_type) {
        /* Change PA6 attribute field to WC */
        uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
        DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
        pat &= ~(0x0FULL << 48);
        pat |= (0x01ULL << 48);
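        /*
         * PAT entry 6 occupies bits 55:48; encoding 0x01 selects
         * write-combining. Mappings then reach PA6 via the page-table
         * flag combination PAT=1, PCD=1, PWT=0 (index 6).
         */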
        wrmsr64(MSR_IA32_CR_PAT, pat);
        DBG("CPU%d PAT: is  0x%016llx\n",
            get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
    }
    else {
        /* disable all MTRR ranges */
        wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

        /* apply MTRR settings */
        if (mtrr_state.var_count)
            mtrr_set_var_ranges(mtrr_state.var_range,
                                mtrr_state.var_count);

        if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
            mtrr_set_fix_ranges(mtrr_state.fix_range);

        /* enable all MTRR range registers (what if E was not set?) */
        wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
    }

    /* flush all caches and TLBs a second time */
    wbinvd();
    flush_tlb();

    /* restore normal cache mode */
    set_cr0(cr0);

    /* restore PGE flag */
    if (cr4 & CR4_PGE)
        set_cr4(cr4);

    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
    /* disable interrupts before the first barrier */
    current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
    /* restore interrupt flag following MTRR changes */
    ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
    if (mtrr_initialized == FALSE)
        return KERN_NOT_SUPPORTED;

    MTRR_LOCK();
    mp_rendezvous(mtrr_update_setup,
                  mtrr_update_action,
                  mtrr_update_teardown, NULL);
    MTRR_UNLOCK();

    return KERN_SUCCESS;
}

/*
 * Update a single CPU with the current MTRR settings. Can be called
 * during slave processor initialization to mirror the MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
    if (mtrr_initialized == FALSE)
        return KERN_NOT_SUPPORTED;

    MTRR_LOCK();
    mtrr_update_setup(NULL);
    mtrr_update_action(NULL);
    mtrr_update_teardown(NULL);
    MTRR_UNLOCK();

    return KERN_SUCCESS;
}

/*
 * Add an MTRR range to associate the specified physical memory range
 * with a given memory caching type.
 */
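/*
 * Typical use is mapping a device aperture write-combined; for example
 * (hypothetical addresses), a driver with a 128MB framebuffer at
 * 0xD0000000 might call:
 *
 *   kern_return_t kr;
 *   kr = mtrr_range_add(0xD0000000ULL, 0x08000000ULL,
 *                       MTRR_TYPE_WRITECOMBINE);
 *
 * and later undo it with mtrr_range_remove() using the same arguments.
 */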
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
    mtrr_var_range_t * vr;
    mtrr_var_range_t * free_range;
    kern_return_t      ret = KERN_NO_SPACE;
    int                overlap;
    unsigned int       i;

    DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
        address, length, type);

    if (mtrr_initialized == FALSE) {
        return KERN_NOT_SUPPORTED;
    }

    /* check memory type (GPF exception for undefined types) */
    if ((type != MTRR_TYPE_UNCACHEABLE)  &&
        (type != MTRR_TYPE_WRITECOMBINE) &&
        (type != MTRR_TYPE_WRITETHROUGH) &&
        (type != MTRR_TYPE_WRITEPROTECT) &&
        (type != MTRR_TYPE_WRITEBACK)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* check WC support if requested */
    if ((type == MTRR_TYPE_WRITECOMBINE) &&
        (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
        return KERN_NOT_SUPPORTED;
    }

    /* leave the fix range area below 1MB alone */
    if (address < 0x100000 || mtrr_state.var_count == 0) {
        return KERN_NOT_SUPPORTED;
    }

    /*
     * Length must be a power of 2, i.e. 2^n with n >= 12.
     * The base address must be aligned on a boundary at least as
     * large as the length.
     */
    if ((length < 0x1000)       ||
        (LSB(length) != length) ||
        (address && (length > LSB(address)))) {
        return KERN_INVALID_ARGUMENT;
    }
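    /*
     * E.g. length 0x2000000 (32MB) at address 0x1000000 is rejected:
     * LSB(0x1000000) == 0x1000000 < 0x2000000, i.e. the base is only
     * 16MB-aligned, so no single base/mask pair can describe the range.
     */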

    MTRR_LOCK();

    /*
     * Check for overlap and locate a free range.
     */
    for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
    {
        vr = &mtrr_state.var_range[i];

        if (vr->refcnt == 0) {
            /* free range candidate if no overlaps are found */
            free_range = vr;
            continue;
        }

        overlap = var_range_overlap(vr, address, length, type);
        if (overlap > 0) {
            /*
             * Identical overlap permitted, increment ref count.
             * No hardware update required.
             */
            free_range = vr;
            break;
        }
        if (overlap < 0) {
            /* unsupported overlapping of memory types */
            free_range = NULL;
            break;
        }
    }

    if (free_range) {
        if (free_range->refcnt++ == 0) {
            var_range_encode(free_range, address, length, type, 1);
            mp_rendezvous(mtrr_update_setup,
                          mtrr_update_action,
                          mtrr_update_teardown, NULL);
        }
        ret = KERN_SUCCESS;
    }

#if MTRR_DEBUG
    mtrr_msr_dump();
#endif

    MTRR_UNLOCK();

    return ret;
}

/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
    mtrr_var_range_t * vr;
    kern_return_t      result = KERN_FAILURE;
    int                cpu_update = 0;
    unsigned int       i;

    DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
        address, length, type);

    if (mtrr_initialized == FALSE) {
        return KERN_NOT_SUPPORTED;
    }

    MTRR_LOCK();

    for (i = 0; i < mtrr_state.var_count; i++) {
        vr = &mtrr_state.var_range[i];

        if (vr->refcnt &&
            var_range_overlap(vr, address, length, type) > 0) {
            /* found specified variable range */
            if (--mtrr_state.var_range[i].refcnt == 0) {
                var_range_encode(vr, address, length, type, 0);
                cpu_update = 1;
            }
            result = KERN_SUCCESS;
            break;
        }
    }

    if (cpu_update) {
        mp_rendezvous(mtrr_update_setup,
                      mtrr_update_action,
                      mtrr_update_teardown, NULL);
        result = KERN_SUCCESS;
    }

#if MTRR_DEBUG
    mtrr_msr_dump();
#endif

    MTRR_UNLOCK();

    return result;
}

/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
    range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
                  (type    & IA32_MTRR_PHYSBASE_TYPE);

    range->mask = LEN_TO_MASK(length) |
                  (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
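/*
 * E.g. encoding a 128MB write-combined range at 0xD0000000 with the
 * 36-bit default mask (MTRR_TYPE_WRITECOMBINE, encoded as 1) yields:
 *   base = 0xD0000000  | 1     == 0x00000000D0000001
 *   mask = 0xFF8000000 | 0x800 == 0x0000000FF8000800
 */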

static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
    uint64_t v_address, v_length;
    uint32_t v_type;
    int      result = 0;  /* no overlap, or overlap ok */

    v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
    v_type    = range->base & IA32_MTRR_PHYSBASE_TYPE;
    v_length  = MASK_TO_LEN(range->mask);

    /* detect range overlap */
    if ((v_address >= address && v_address < (address + length)) ||
        (address >= v_address && address < (v_address + v_length))) {

        if (v_address == address && v_length == length && v_type == type)
            result = 1;  /* identical overlap ok */
        else if (v_type == MTRR_TYPE_UNCACHEABLE &&
                 type   == MTRR_TYPE_UNCACHEABLE) {
            /* UC ranges can overlap */
        }
        else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
                  type   == MTRR_TYPE_WRITEBACK)  ||
                 (v_type == MTRR_TYPE_WRITEBACK &&
                  type   == MTRR_TYPE_UNCACHEABLE)) {
            /* UC/WB can overlap - effective type becomes UC */
        }
        else {
            /* anything else may cause undefined behavior */
            result = -1;
        }
    }

    return result;
}

/*
 * Initialize PAT (Page Attribute Table)
 */
void
pat_init(void)
{
    if (cpuid_features() & CPUID_FEATURE_PAT)
    {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
        mtrr_update_action(CACHE_CONTROL_PAT);
        ml_set_interrupts_enabled(istate);
    }
}