/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>

struct mtrr_var_range {
        uint64_t base;          /* in IA32_MTRR_PHYSBASE format */
        uint64_t mask;          /* in IA32_MTRR_PHYSMASK format */
        uint32_t refcnt;        /* var ranges reference count */
};

struct mtrr_fix_range {
        uint64_t types;         /* fixed-range type octet */
};
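
/*
 * Each 64-bit fixed-range MSR packs eight one-byte type fields, one per
 * sub-range; e.g. MSR_IA32_MTRR_FIX64K_00000 covers physical addresses
 * 0x00000-0x7FFFF as eight 64KB sub-ranges, one type byte each.
 */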

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;

static struct {
        uint64_t            MTRRcap;
        uint64_t            MTRRdefType;
        mtrr_var_range_t *  var_range;
        unsigned int        var_count;
        mtrr_fix_range_t    fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()     simple_lock(&mtrr_lock);
#define MTRR_UNLOCK()   simple_unlock(&mtrr_lock);

#if MTRR_DEBUG
#define DBG(x...)       kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

#define CACHE_CONTROL_MTRR      (NULL)
#define CACHE_CONTROL_PAT       ((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT   0x000000ff
#define IA32_MTRR_DEF_TYPE_FE   0x00000400
#define IA32_MTRR_DEF_TYPE_E    0x00000800

#define IA32_MTRRCAP_VCNT       0x000000ff
#define IA32_MTRRCAP_FIX        0x00000100
#define IA32_MTRRCAP_WC         0x00000400

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
        ((((1ULL << (bits-1)) - 1) << 1) | 1)
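
/*
 * For example (illustrative): PHYS_BITS_TO_MASK(36) evaluates to
 * 0x0000000FFFFFFFFFULL, a mask covering a 36-bit physical address
 * space. The shift is done in two steps so that bits == 64 does not
 * overflow the 64-bit shift count.
 */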

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the CPU model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);

#define IA32_MTRR_PHYMASK_VALID   0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK   (mtrr_phys_mask & ~0xFFF)
#define IA32_MTRR_PHYSBASE_TYPE   0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
        ((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len) \
        (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

#define LSB(x)          ((x) & (~((x) - 1)))
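
/*
 * Worked example (illustrative, with the default 36-bit mtrr_phys_mask,
 * where IA32_MTRR_PHYSBASE_MASK is 0xFFFFFF000):
 * LEN_TO_MASK(0x100000) = ~(0xFFFFF) & 0xFFFFFF000 = 0xFFFF00000, and
 * MASK_TO_LEN(0xFFFF00000) = (~0xFFFF00000 & 0xFFFFFFFFF) + 1 = 0x100000,
 * so the two macros are inverses for power-of-2 lengths.
 * LSB(0x500000) = 0x100000 isolates the lowest set bit.
 */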

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
                range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

                /* bump ref count for firmware configured ranges */
                if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
                        range[i].refcnt = 1;
                else
                        range[i].refcnt = 0;
        }
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
                wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
        }
}

/*
 * Fetch all fixed-range MTRRs. Note that MSR offsets are not consecutive.
 */
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
        int i;

        /* assume 11 fix range registers */
        range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
        range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
        range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
        for (i = 0; i < 8; i++)
                range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const mtrr_fix_range_t * range)
{
        int i;

        /* assume 11 fix range registers */
        wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
        wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
        wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
        for (i = 0; i < 8; i++)
                wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
        int i;
        int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

        DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
        for (i = 0; i < count; i++) {
                DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
                    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
                    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
                    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
        }
        DBG("\n");

        DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
        DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
        DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
        DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
        DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
        DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
        DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
        DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
        DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
        DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
        DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

        DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
            rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved; any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
        i386_cpu_info_t * infop = cpuid_info();

        /* no reason to init more than once */
        if (mtrr_initialized == TRUE)
                return;

        /* check for presence of MTRR feature on the processor */
        if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
                return;  /* no MTRR feature */

        /* cpu vendor/model specific handling */
        if (!strncmp(infop->cpuid_vendor, CPUID_VID_AMD, sizeof(CPUID_VID_AMD)))
        {
                /* Check for AMD Athlon 64 and Opteron */
                if (cpuid_family() == 0xF)
                {
                        uint32_t cpuid_result[4];

                        /* check whether the CPU supports the Address Sizes function */
                        do_cpuid(0x80000000, cpuid_result);
                        if (cpuid_result[0] >= 0x80000008)
                        {
                                int bits;

                                do_cpuid(0x80000008, cpuid_result);
                                DBG("MTRR: AMD 8000_0008 EAX = %08x\n",
                                    cpuid_result[0]);

                                /*
                                 * Function 8000_0008 (Address Sizes) EAX
                                 * Bits 7-0  : phys address size
                                 * Bits 15-8 : virt address size
                                 */
                                bits = cpuid_result[0] & 0xFF;
                                if ((bits < 36) || (bits > 64))
                                {
                                        printf("MTRR: bad address size\n");
                                        return; /* bogus size */
                                }

                                mtrr_phys_mask = PHYS_BITS_TO_MASK(bits);
                        }
                }
        }

        /* use a lock to serialize MTRR changes */
        bzero((void *)&mtrr_state, sizeof(mtrr_state));
        simple_lock_init(&mtrr_lock, 0);

        mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
        mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
        mtrr_state.var_count   = mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT;

        /* allocate storage for variable ranges (can block?) */
        if (mtrr_state.var_count) {
                mtrr_state.var_range = (mtrr_var_range_t *)
                                       kalloc(sizeof(mtrr_var_range_t) *
                                              mtrr_state.var_count);
                if (mtrr_state.var_range == NULL)
                        mtrr_state.var_count = 0;
        }

        /* fetch the initial firmware configured variable ranges */
        if (mtrr_state.var_count)
                mtrr_get_var_ranges(mtrr_state.var_range,
                                    mtrr_state.var_count);

        /* fetch the initial firmware configured fixed ranges */
        if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
                mtrr_get_fix_ranges(mtrr_state.fix_range);

        mtrr_initialized = TRUE;

#if MTRR_DEBUG
        mtrr_msr_dump(); /* dump firmware settings */
#endif
}

/*
 * Performs the Intel-recommended procedure for changing MTRRs
 * in an MP system. Leverages the rendezvous mechanism for the
 * required barrier synchronization among all processors. This
 * function is called from the rendezvous IPI handler and from
 * mtrr_update_cpu().
 */
static void
mtrr_update_action(void * cache_control_type)
{
        uint32_t cr0, cr4;
        uint32_t tmp;

        cr0 = get_cr0();
        cr4 = get_cr4();

        /* enter no-fill cache mode */
        tmp = cr0 | CR0_CD;
        tmp &= ~CR0_NW;
        set_cr0(tmp);

        /* flush caches */
        wbinvd();

        /* clear the PGE flag in CR4 */
        if (cr4 & CR4_PGE)
                set_cr4(cr4 & ~CR4_PGE);

        /* flush TLBs */
        flush_tlb();

        if (CACHE_CONTROL_PAT == cache_control_type) {
                /* Change PA6 attribute field to WC */
                uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
                DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
                pat &= ~(0x0FULL << 48);
                pat |=  (0x01ULL << 48);
                wrmsr64(MSR_IA32_CR_PAT, pat);
                DBG("CPU%d PAT: is  0x%016llx\n",
                    get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
        }
        else {
                /* disable all MTRR ranges */
                wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                        mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

                /* apply MTRR settings */
                if (mtrr_state.var_count)
                        mtrr_set_var_ranges(mtrr_state.var_range,
                                            mtrr_state.var_count);

                if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
                        mtrr_set_fix_ranges(mtrr_state.fix_range);

                /* enable all MTRR range registers (what if E was not set?) */
                wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                        mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
        }

        /* flush all caches and TLBs a second time */
        wbinvd();
        flush_tlb();

        /* restore normal cache mode */
        set_cr0(cr0);

        /* restore PGE flag */
        if (cr4 & CR4_PGE)
                set_cr4(cr4);

        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
        /* disable interrupts before the first barrier */
        current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
        /* restore interrupt flag following MTRR changes */
        ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
        DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
        if (mtrr_initialized == FALSE)
                return KERN_NOT_SUPPORTED;

        MTRR_LOCK();
        mp_rendezvous(mtrr_update_setup,
                      mtrr_update_action,
                      mtrr_update_teardown, NULL);
        MTRR_UNLOCK();

        return KERN_SUCCESS;
}

/*
 * Update a single CPU with the current MTRR settings. Can be called
 * during slave processor initialization to mirror the MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
        if (mtrr_initialized == FALSE)
                return KERN_NOT_SUPPORTED;

        MTRR_LOCK();
        mtrr_update_setup(NULL);
        mtrr_update_action(NULL);
        mtrr_update_teardown(NULL);
        MTRR_UNLOCK();

        return KERN_SUCCESS;
}

/*
 * Add an MTRR range to associate the specified physical memory range
 * with a given memory caching type.
 */
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
        mtrr_var_range_t * vr;
        mtrr_var_range_t * free_range;
        kern_return_t      ret = KERN_NO_SPACE;
        int                overlap;
        unsigned int       i;

        DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
            address, length, type);

        if (mtrr_initialized == FALSE) {
                return KERN_NOT_SUPPORTED;
        }

        /* check memory type (GPF exception for undefined types) */
        if ((type != MTRR_TYPE_UNCACHEABLE)  &&
            (type != MTRR_TYPE_WRITECOMBINE) &&
            (type != MTRR_TYPE_WRITETHROUGH) &&
            (type != MTRR_TYPE_WRITEPROTECT) &&
            (type != MTRR_TYPE_WRITEBACK)) {
                return KERN_INVALID_ARGUMENT;
        }

        /* check WC support if requested */
        if ((type == MTRR_TYPE_WRITECOMBINE) &&
            (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
                return KERN_NOT_SUPPORTED;
        }

        /* leave the fix range area below 1MB alone */
        if (address < 0x100000 || mtrr_state.var_count == 0) {
                return KERN_NOT_SUPPORTED;
        }

        /*
         * Length must be a power of 2, 2^n with n >= 12 (at least 4KB).
         * The base address must be aligned to a multiple of the length,
         * i.e. LSB(address) >= length.
         */
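        /*
         * For example (illustrative values): base 0x80000000 with length
         * 0x100000 (1MB) is accepted since LSB(0x80000000) >= 0x100000,
         * while length 0x180000 is rejected because it is not a power of 2.
         */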
        if ((length < 0x1000) ||
            (LSB(length) != length) ||
            (address && (length > LSB(address)))) {
                return KERN_INVALID_ARGUMENT;
        }

        MTRR_LOCK();

        /*
         * Check for overlap and locate a free range.
         */
        for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
        {
                vr = &mtrr_state.var_range[i];

                if (vr->refcnt == 0) {
                        /* free range candidate if no overlaps are found */
                        free_range = vr;
                        continue;
                }

                overlap = var_range_overlap(vr, address, length, type);
                if (overlap > 0) {
                        /*
                         * Identical overlap permitted; increment the ref
                         * count. No hardware update required.
                         */
                        free_range = vr;
                        break;
                }
                if (overlap < 0) {
                        /* unsupported overlapping of memory types */
                        free_range = NULL;
                        break;
                }
        }

        if (free_range) {
                if (free_range->refcnt++ == 0) {
                        var_range_encode(free_range, address, length, type, 1);
                        mp_rendezvous(mtrr_update_setup,
                                      mtrr_update_action,
                                      mtrr_update_teardown, NULL);
                }
                ret = KERN_SUCCESS;
        }

#if MTRR_DEBUG
        mtrr_msr_dump();
#endif

        MTRR_UNLOCK();

        return ret;
}
528
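/*
 * Example usage (a sketch; the base address and length below are
 * hypothetical, e.g. a graphics aperture mapped write-combining):
 *
 *      if (mtrr_range_add(0x80000000ULL, 0x00800000ULL,
 *                         MTRR_TYPE_WRITECOMBINE) == KERN_SUCCESS) {
 *              ... use the write-combined range ...
 *              mtrr_range_remove(0x80000000ULL, 0x00800000ULL,
 *                                MTRR_TYPE_WRITECOMBINE);
 *      }
 */
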
/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
        mtrr_var_range_t * vr;
        kern_return_t      result = KERN_FAILURE;
        int                cpu_update = 0;
        unsigned int       i;

        DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
            address, length, type);

        if (mtrr_initialized == FALSE) {
                return KERN_NOT_SUPPORTED;
        }

        MTRR_LOCK();

        for (i = 0; i < mtrr_state.var_count; i++) {
                vr = &mtrr_state.var_range[i];

                if (vr->refcnt &&
                    var_range_overlap(vr, address, length, type) > 0) {
                        /* found specified variable range */
                        if (--mtrr_state.var_range[i].refcnt == 0) {
                                var_range_encode(vr, address, length, type, 0);
                                cpu_update = 1;
                        }
                        result = KERN_SUCCESS;
                        break;
                }
        }

        if (cpu_update) {
                mp_rendezvous(mtrr_update_setup,
                              mtrr_update_action,
                              mtrr_update_teardown, NULL);
                result = KERN_SUCCESS;
        }

#if MTRR_DEBUG
        mtrr_msr_dump();
#endif

        MTRR_UNLOCK();

        return result;
}

/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
        range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
                      (type    & IA32_MTRR_PHYSBASE_TYPE);

        range->mask = LEN_TO_MASK(length) |
                      (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
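
/*
 * For example (illustrative, with 36 physical address bits): encoding
 * address 0x80000000, length 0x800000 (8MB), type WC (1) with valid set
 * yields base = 0x0000000080000001 and mask = 0x0000000FFF800800.
 */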

static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
        uint64_t v_address, v_length;
        uint32_t v_type;
        int      result = 0;    /* no overlap, or overlap ok */

        v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
        v_type    = range->base & IA32_MTRR_PHYSBASE_TYPE;
        v_length  = MASK_TO_LEN(range->mask);

        /* detect range overlap */
        if ((v_address >= address && v_address < (address + length)) ||
            (address >= v_address && address < (v_address + v_length))) {

                if (v_address == address && v_length == length && v_type == type)
                        result = 1;     /* identical overlap ok */
                else if (v_type == MTRR_TYPE_UNCACHEABLE &&
                         type   == MTRR_TYPE_UNCACHEABLE) {
                        /* UC ranges can overlap */
                }
                else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
                          type   == MTRR_TYPE_WRITEBACK)  ||
                         (v_type == MTRR_TYPE_WRITEBACK   &&
                          type   == MTRR_TYPE_UNCACHEABLE)) {
                        /* UC/WB can overlap - effective type becomes UC */
                }
                else {
                        /* anything else may cause undefined behavior */
                        result = -1;
                }
        }

        return result;
}
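
/*
 * For example, if an existing range covers 8MB at 0x80000000 as WC, an
 * identical WC request returns 1 (shared via refcnt), a UC request over
 * the same span returns -1 (unsupported type overlap), and a request
 * that does not intersect the range returns 0.
 */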

/*
 * Initialize PAT (Page Attribute Table)
 */
void
pat_init(void)
{
        if (cpuid_features() & CPUID_FEATURE_PAT)
        {
                boolean_t istate = ml_set_interrupts_enabled(FALSE);
                mtrr_update_action(CACHE_CONTROL_PAT);
                ml_set_interrupts_enabled(istate);
        }
}