/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mtrr.h>

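/*
 * Each variable-range MTRR is a PHYSBASE/PHYSMASK MSR pair. PHYSBASE
 * holds the range base (4KB-aligned) plus the memory type in bits 7:0;
 * PHYSMASK holds the range mask plus a valid bit (bit 11). An address
 * matches a range when (address & mask) == (base & mask).
 */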
struct mtrr_var_range {
    uint64_t  base;     /* in IA32_MTRR_PHYSBASE format */
    uint64_t  mask;     /* in IA32_MTRR_PHYSMASK format */
    uint32_t  refcnt;   /* var ranges reference count */
};

struct mtrr_fix_range {
    uint64_t  types;    /* eight 8-bit memory type fields, one per sub-range */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;

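/*
 * The MTRR state captured on the boot processor by mtrr_init() and
 * replayed onto every other processor to keep memory typing consistent.
 */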
static struct {
    uint64_t            MTRRcap;
    uint64_t            MTRRdefType;
    mtrr_var_range_t *  var_range;
    unsigned int        var_count;
    mtrr_fix_range_t    fix_range[11];
} mtrr_state;

static boolean_t mtrr_initialized = FALSE;

decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()     simple_lock(&mtrr_lock);
#define MTRR_UNLOCK()   simple_unlock(&mtrr_lock);

#if MTRR_DEBUG
#define DBG(x...)   kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
                             uint64_t length, uint32_t type, int valid);
static int  var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                              uint64_t length, uint32_t type);

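/*
 * Tokens passed to mtrr_update_action() to select whether the MTRRs
 * or the PAT MSR is reprogrammed during the cache-disabled window.
 */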
#define CACHE_CONTROL_MTRR      (NULL)
#define CACHE_CONTROL_PAT       ((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT   0x000000ff
#define IA32_MTRR_DEF_TYPE_FE   0x00000400
#define IA32_MTRR_DEF_TYPE_E    0x00000800

#define IA32_MTRRCAP_VCNT       0x000000ff
#define IA32_MTRRCAP_FIX        0x00000100
#define IA32_MTRRCAP_WC         0x00000400

/* 0 < bits <= 64 */
#define PHYS_BITS_TO_MASK(bits) \
    ((((1ULL << (bits-1)) - 1) << 1) | 1)
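
/*
 * The mask is built in two steps to avoid an undefined 64-bit shift
 * when bits == 64. For example:
 *   PHYS_BITS_TO_MASK(36) == ((1ULL << 35) - 1) << 1 | 1
 *                         == 2^36 - 1 == 0x0000000FFFFFFFFF
 */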

/*
 * Default mask for 36 physical address bits; this can
 * change depending on the CPU model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);

#define IA32_MTRR_PHYMASK_VALID     0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK     (mtrr_phys_mask & ~0xFFF)
#define IA32_MTRR_PHYSBASE_TYPE     0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
    ((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len)  \
    (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)
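
/*
 * Example round trip, assuming the default 36-bit physical mask: a
 * 64MB range gives LEN_TO_MASK(0x4000000) == 0xFFC000000, and
 * MASK_TO_LEN(0xFFC000000) == 0x4000000 recovers the length.
 */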

#define LSB(x)    ((x) & (~((x) - 1)))
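
/*
 * LSB(x) isolates the lowest set bit, e.g. LSB(0x6000) == 0x2000.
 * LSB(len) == len exactly when len is a power of two, and LSB(address)
 * is the natural alignment of address; both facts are used to validate
 * arguments in mtrr_range_add() below.
 */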

/*
 * Fetch variable-range MTRR register pairs.
 */
static void
mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
        range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));

        /* bump ref count for firmware configured ranges */
        if (range[i].mask & IA32_MTRR_PHYMASK_VALID)
            range[i].refcnt = 1;
        else
            range[i].refcnt = 0;
    }
}

/*
 * Update variable-range MTRR register pairs.
 */
static void
mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
        wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
    }
}

/*
 * Fetch all fixed-range MTRRs. Note MSR offsets are not consecutive.
 */
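/*
 * The eleven fixed-range registers together cover the first 1MB:
 * FIX64K_00000 maps 0x00000-0x7FFFF in eight 64KB sub-ranges, the two
 * FIX16K registers map 0x80000-0xBFFFF in 16KB sub-ranges, and the
 * eight FIX4K registers map 0xC0000-0xFFFFF in 4KB sub-ranges.
 */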
static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)
{
    int i;

    /* assume 11 fix range registers */
    range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
    range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
    range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
    for (i = 0; i < 8; i++)
        range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
}

/*
 * Update all fixed-range MTRRs.
 */
static void
mtrr_set_fix_ranges(const mtrr_fix_range_t * range)
{
    int i;

    /* assume 11 fix range registers */
    wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
    wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
    wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
    for (i = 0; i < 8; i++)
        wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
}

#if MTRR_DEBUG
static void
mtrr_msr_dump(void)
{
    int i;
    int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

    DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
    for (i = 0; i < count; i++) {
        DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
            rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
            rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
            MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
    }
    DBG("\n");

    DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
    DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
    DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
    DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
    DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
    DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
    DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
    DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
    DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
    DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
    DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

    DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
        rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */

/*
 * Called by the boot processor (BP) early during boot to initialize MTRR
 * support. The MTRR state on the BP is saved; any additional processors
 * will have the same settings applied to ensure MTRR consistency.
 */
void
mtrr_init(void)
{
    i386_cpu_info_t * infop = cpuid_info();

    /* no reason to init more than once */
    if (mtrr_initialized == TRUE)
        return;

    /* check for presence of MTRR feature on the processor */
    if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0)
        return;    /* no MTRR feature */

    /* cpu vendor/model specific handling */
    if (!strncmp(infop->cpuid_vendor, CPUID_VID_AMD, sizeof(CPUID_VID_AMD)))
    {
        /* Check for AMD Athlon 64 and Opteron */
        if (cpuid_family() == 0xF)
        {
            uint32_t cpuid_result[4];

            /* check if the cpu supports the Address Sizes function */
            do_cpuid(0x80000000, cpuid_result);
            if (cpuid_result[0] >= 0x80000008)
            {
                int bits;

                do_cpuid(0x80000008, cpuid_result);
                DBG("MTRR: AMD 8000_0008 EAX = %08x\n",
                    cpuid_result[0]);

                /*
                 * Function 8000_0008 (Address Sizes) EAX
                 * Bits 7-0  : phys address size
                 * Bits 15-8 : virt address size
                 */
                bits = cpuid_result[0] & 0xFF;
                if ((bits < 36) || (bits > 64))
                {
                    printf("MTRR: bad address size\n");
                    return; /* bogus size */
                }

                mtrr_phys_mask = PHYS_BITS_TO_MASK(bits);
            }
        }
    }

    /* use a lock to serialize MTRR changes */
    bzero((void *)&mtrr_state, sizeof(mtrr_state));
    simple_lock_init(&mtrr_lock, 0);

    mtrr_state.MTRRcap     = rdmsr64(MSR_IA32_MTRRCAP);
    mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
    mtrr_state.var_count   = mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT;

    /* allocate storage for variable ranges (can block?) */
    if (mtrr_state.var_count) {
        mtrr_state.var_range = (mtrr_var_range_t *)
                               kalloc(sizeof(mtrr_var_range_t) *
                                      mtrr_state.var_count);
        if (mtrr_state.var_range == NULL)
            mtrr_state.var_count = 0;
    }

    /* fetch the initial firmware configured variable ranges */
    if (mtrr_state.var_count)
        mtrr_get_var_ranges(mtrr_state.var_range,
                            mtrr_state.var_count);

    /* fetch the initial firmware configured fixed ranges */
    if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
        mtrr_get_fix_ranges(mtrr_state.fix_range);

    mtrr_initialized = TRUE;

#if MTRR_DEBUG
    mtrr_msr_dump();    /* dump firmware settings */
#endif
}

/*
 * Performs the Intel-recommended procedure for changing MTRRs in an
 * MP system. Leverages the rendezvous mechanism for the required
 * barrier synchronization among all processors. This function is
 * called from the rendezvous IPI handler and from mtrr_update_cpu().
 */
static void
mtrr_update_action(void * cache_control_type)
{
    uint32_t cr0, cr4;
    uint32_t tmp;

    cr0 = get_cr0();
    cr4 = get_cr4();

    /* enter no-fill cache mode */
    tmp = cr0 | CR0_CD;
    tmp &= ~CR0_NW;
    set_cr0(tmp);

    /* flush caches */
    wbinvd();

    /* clear the PGE flag in CR4 */
    if (cr4 & CR4_PGE)
        set_cr4(cr4 & ~CR4_PGE);

    /* flush TLBs */
    flush_tlb();

    if (CACHE_CONTROL_PAT == cache_control_type) {
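        /*
         * The PAT MSR holds eight 8-bit entries; entry PA6 occupies
         * bits 55:48, and encoding 0x01 selects write-combining (WC).
         */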
        /* Change PA6 attribute field to WC */
        uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
        DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
        pat &= ~(0x0FULL << 48);
        pat |=  (0x01ULL << 48);
        wrmsr64(MSR_IA32_CR_PAT, pat);
        DBG("CPU%d PAT: is 0x%016llx\n",
            get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
    }
    else {
        /* disable all MTRR ranges */
        wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);

        /* apply MTRR settings */
        if (mtrr_state.var_count)
            mtrr_set_var_ranges(mtrr_state.var_range,
                                mtrr_state.var_count);

        if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)
            mtrr_set_fix_ranges(mtrr_state.fix_range);

        /* enable all MTRR range registers (what if E was not set?) */
        wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
                mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
    }

    /* flush all caches and TLBs a second time */
    wbinvd();
    flush_tlb();

    /* restore normal cache mode */
    set_cr0(cr0);

    /* restore PGE flag */
    if (cr4 & CR4_PGE)
        set_cr4(cr4);

    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_setup(__unused void * param_not_used)
{
    /* disable interrupts before the first barrier */
    current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
mtrr_update_teardown(__unused void * param_not_used)
{
    /* restore interrupt flag following MTRR changes */
    ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
    DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * Update MTRR settings on all processors.
 */
kern_return_t
mtrr_update_all_cpus(void)
{
    if (mtrr_initialized == FALSE)
        return KERN_NOT_SUPPORTED;

    MTRR_LOCK();
    mp_rendezvous(mtrr_update_setup,
                  mtrr_update_action,
                  mtrr_update_teardown, NULL);
    MTRR_UNLOCK();

    return KERN_SUCCESS;
}

/*
 * Update a single CPU with the current MTRR settings. Can be called
 * during slave processor initialization to mirror the MTRR settings
 * discovered on the boot processor by mtrr_init().
 */
kern_return_t
mtrr_update_cpu(void)
{
    if (mtrr_initialized == FALSE)
        return KERN_NOT_SUPPORTED;

    MTRR_LOCK();
    mtrr_update_setup(NULL);
    mtrr_update_action(NULL);
    mtrr_update_teardown(NULL);
    MTRR_UNLOCK();

    return KERN_SUCCESS;
}

/*
 * Add an MTRR range associating the specified physical memory range
 * with a given memory caching type.
 */
kern_return_t
mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
{
    mtrr_var_range_t * vr;
    mtrr_var_range_t * free_range;
    kern_return_t      ret = KERN_NO_SPACE;
    int                overlap;
    unsigned int       i;

    DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
        address, length, type);

    if (mtrr_initialized == FALSE) {
        return KERN_NOT_SUPPORTED;
    }

    /* check memory type (GPF exception for undefined types) */
    if ((type != MTRR_TYPE_UNCACHEABLE)  &&
        (type != MTRR_TYPE_WRITECOMBINE) &&
        (type != MTRR_TYPE_WRITETHROUGH) &&
        (type != MTRR_TYPE_WRITEPROTECT) &&
        (type != MTRR_TYPE_WRITEBACK)) {
        return KERN_INVALID_ARGUMENT;
    }

    /* check WC support if requested */
    if ((type == MTRR_TYPE_WRITECOMBINE) &&
        (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
        return KERN_NOT_SUPPORTED;
    }

    /* leave the fix range area below 1MB alone */
    if (address < 0x100000 || mtrr_state.var_count == 0) {
        return KERN_NOT_SUPPORTED;
    }

    /*
     * Length must be a power of 2 given by 2^n, where n >= 12.
     * Base address alignment must be larger than or equal to length.
     */
    if ((length < 0x1000) ||
        (LSB(length) != length) ||
        (address && (length > LSB(address)))) {
        return KERN_INVALID_ARGUMENT;
    }

    MTRR_LOCK();

    /*
     * Check for overlap and locate a free range.
     */
    for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++)
    {
        vr = &mtrr_state.var_range[i];

        if (vr->refcnt == 0) {
            /* free range candidate if no overlaps are found */
            free_range = vr;
            continue;
        }

        overlap = var_range_overlap(vr, address, length, type);
        if (overlap > 0) {
            /*
             * identical overlap permitted, increment ref count.
             * no hardware update required.
             */
            free_range = vr;
            break;
        }
        if (overlap < 0) {
            /* unsupported overlapping of memory types */
            free_range = NULL;
            break;
        }
    }

    if (free_range) {
        if (free_range->refcnt++ == 0) {
            var_range_encode(free_range, address, length, type, 1);
            mp_rendezvous(mtrr_update_setup,
                          mtrr_update_action,
                          mtrr_update_teardown, NULL);
        }
        ret = KERN_SUCCESS;
    }

#if MTRR_DEBUG
    mtrr_msr_dump();
#endif

    MTRR_UNLOCK();

    return ret;
}
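
/*
 * Usage sketch (hypothetical caller, illustrative values): map a
 * framebuffer as write-combining, then back it out with the same
 * arguments via mtrr_range_remove() below.
 *
 *    if (mtrr_range_add(0xD0000000ULL, 0x10000000ULL,
 *                       MTRR_TYPE_WRITECOMBINE) == KERN_SUCCESS) {
 *        ...
 *        mtrr_range_remove(0xD0000000ULL, 0x10000000ULL,
 *                          MTRR_TYPE_WRITECOMBINE);
 *    }
 */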

/*
 * Remove a previously added MTRR range. The same arguments used for adding
 * the memory range must be supplied again.
 */
kern_return_t
mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
{
    mtrr_var_range_t * vr;
    kern_return_t      result = KERN_FAILURE;
    int                cpu_update = 0;
    unsigned int       i;

    DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
        address, length, type);

    if (mtrr_initialized == FALSE) {
        return KERN_NOT_SUPPORTED;
    }

    MTRR_LOCK();

    for (i = 0; i < mtrr_state.var_count; i++) {
        vr = &mtrr_state.var_range[i];

        if (vr->refcnt &&
            var_range_overlap(vr, address, length, type) > 0) {
            /* found specified variable range */
            if (--mtrr_state.var_range[i].refcnt == 0) {
                var_range_encode(vr, address, length, type, 0);
                cpu_update = 1;
            }
            result = KERN_SUCCESS;
            break;
        }
    }

    if (cpu_update) {
        mp_rendezvous(mtrr_update_setup,
                      mtrr_update_action,
                      mtrr_update_teardown, NULL);
        result = KERN_SUCCESS;
    }

#if MTRR_DEBUG
    mtrr_msr_dump();
#endif

    MTRR_UNLOCK();

    return result;
}

/*
 * Variable range helper routines
 */
static void
var_range_encode(mtrr_var_range_t * range, addr64_t address,
                 uint64_t length, uint32_t type, int valid)
{
    range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
                  (type    & IA32_MTRR_PHYSBASE_TYPE);

    range->mask = LEN_TO_MASK(length) |
                  (valid ? IA32_MTRR_PHYMASK_VALID : 0);
}
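
/*
 * For example, encoding a valid 256MB write-combining (type 1) range
 * at 0xD0000000 with the default 36-bit physical mask yields
 * base == 0x00000000D0000001 and mask == 0x0000000FF0000800.
 */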
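/*
 * Returns 1 for an identical overlap (same base, length and type),
 * 0 for no overlap or a benign one (UC/UC or UC/WB combinations),
 * and -1 for an overlap that would produce undefined behavior.
 */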
static int
var_range_overlap(mtrr_var_range_t * range, addr64_t address,
                  uint64_t length, uint32_t type)
{
    uint64_t v_address, v_length;
    uint32_t v_type;
    int      result = 0;    /* no overlap, or overlap ok */

    v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
    v_type    = range->base & IA32_MTRR_PHYSBASE_TYPE;
    v_length  = MASK_TO_LEN(range->mask);

    /* detect range overlap */
    if ((v_address >= address && v_address < (address + length)) ||
        (address >= v_address && address < (v_address + v_length))) {

        if (v_address == address && v_length == length && v_type == type)
            result = 1;    /* identical overlap ok */
        else if (v_type == MTRR_TYPE_UNCACHEABLE &&
                 type   == MTRR_TYPE_UNCACHEABLE) {
            /* UC ranges can overlap */
        }
        else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
                  type   == MTRR_TYPE_WRITEBACK) ||
                 (v_type == MTRR_TYPE_WRITEBACK &&
                  type   == MTRR_TYPE_UNCACHEABLE)) {
            /* UC/WB can overlap - effective type becomes UC */
        }
        else {
            /* anything else may cause undefined behavior */
            result = -1;
        }
    }

    return result;
}

/*
 * Initialize PAT (Page Attribute Table)
 */
void
pat_init(void)
{
    if (cpuid_features() & CPUID_FEATURE_PAT)
    {
        boolean_t istate = ml_set_interrupts_enabled(FALSE);
        mtrr_update_action(CACHE_CONTROL_PAT);
        ml_set_interrupts_enabled(istate);
    }
}