/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern pt_entry_t *pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);
extern void switch_to_serial_console(void);
extern kern_return_t copyp2p(vm_offset_t source,
                             vm_offset_t dest,
                             unsigned int size,
                             unsigned int flush_action);
extern void fillPage(ppnum_t pa, unsigned int fill);
extern void ovbcopy(const char *from,
                    char *to,
                    vm_size_t nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);


#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000LL)
#define low32(x)            ((unsigned int)((x) & 0x00000000FFFFFFFFLL))

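/*
 * bzero_phys - zero 'bytes' bytes of physical memory starting at src64.
 * Only the low 32 bits of the address are used; the page is mapped
 * through the CM2/CA2 mapping window (with preemption disabled), so the
 * range should not cross a page boundary.
 */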
void
bzero_phys(
        addr64_t src64,
        vm_size_t bytes)
{
        vm_offset_t src = low32(src64);
        pt_entry_t save2;
        mp_disable_preemption();
        if (*(pt_entry_t *) CM2)
                panic("bzero_phys: CMAP busy");

        *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
                INTEL_PTE_REF | INTEL_PTE_MOD;
        save2 = *(pt_entry_t *) CM2;
        invlpg((u_int)CA2);

        bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
        if (save2 != *(pt_entry_t *) CM2) panic("bzero_phys CMAP changed");
        *(pt_entry_t *) CM2 = 0;
        mp_enable_preemption();
}

/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */
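/*
 * For example (hypothetical physical addresses, assuming both ranges have
 * already been validated by the caller), copying one page and flushing
 * both the source and the destination would look like:
 *
 *      (void) copyp2p(src_phys, dst_phys, PAGE_SIZE, 3);
 */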

kern_return_t
copyp2p(vm_offset_t source,
        vm_offset_t dest,
        unsigned int size,
        unsigned int flush_action)
{

        switch (flush_action) {
        case 1:
                flush_dcache(source, size, 1);
                break;
        case 2:
                flush_dcache(dest, size, 1);
                break;
        case 3:
                flush_dcache(source, size, 1);
                flush_dcache(dest, size, 1);
                break;

        }
        bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);  /* Do a physical copy */

        switch (flush_action) {
        case 1:
                flush_dcache(source, size, 1);
                break;
        case 2:
                flush_dcache(dest, size, 1);
                break;
        case 3:
                flush_dcache(source, size, 1);
                flush_dcache(dest, size, 1);
                break;

        }
        return KERN_SUCCESS;
}

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
        addr64_t src64,
        addr64_t dst64,
        vm_size_t bytes)
{
        vm_offset_t src = low32(src64);
        vm_offset_t dst = low32(dst64);
        pt_entry_t save1, save2;
        /* ensure we stay within a page */
        if ( (((src & (NBPG-1)) + bytes) > NBPG) ||
             (((dst & (NBPG-1)) + bytes) > NBPG) ) panic("bcopy_phys");
        mp_disable_preemption();
        if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
                panic("bcopy_phys: CMAP busy");

        *(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
        *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
                INTEL_PTE_REF | INTEL_PTE_MOD;
        save1 = *(pt_entry_t *)CM1;
        save2 = *(pt_entry_t *)CM2;
        invlpg((u_int)CA1);
        invlpg((u_int)CA2);

        bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
              (void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
        if ( (save1 != *(pt_entry_t *)CM1) || (save2 != *(pt_entry_t *)CM2)) panic("bcopy_phys CMAP changed");
        *(pt_entry_t *) CM1 = 0;
        *(pt_entry_t *) CM2 = 0;
        mp_enable_preemption();

}

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
        const char *from,
        char *to,
        vm_size_t bytes)                /* num bytes to copy */
{
        /* Assume that bcopy copies left-to-right (low addr first). */
        if (from + bytes <= to || to + bytes <= from || to == from)
                bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op */
        else if (from > to)
                bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
        else {
                /* to > from: overlapping, and must copy right-to-left. */
                from += bytes - 1;
                to += bytes - 1;
                while (bytes-- > 0)
                        *to-- = *from--;
        }
}


/*
 *  Read data from a physical address. Memory should not be cache inhibited.
 */


static unsigned int
ml_phys_read_data( vm_offset_t paddr, int size )
{
        unsigned int result;
        pt_entry_t save;
        mp_disable_preemption();
        if (*(pt_entry_t *) CM3)
                panic("ml_phys_read_data: CMAP busy");

        *(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
        save = *(pt_entry_t *)CM3;
        invlpg((u_int)CA3);


        switch (size) {
                unsigned char s1;
                unsigned short s2;
        case 1:
                s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
                result = s1;
                break;
        case 2:
                s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
                result = s2;
                break;
        case 4:
        default:
                result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
                break;
        }

        if (save != *(pt_entry_t *)CM3) panic("ml_phys_read_data CMAP changed");
        *(pt_entry_t *) CM3 = 0;
        mp_enable_preemption();
        return result;
}

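/*
 * ml_phys_read_long_long - 8-byte variant of the above.  Note that only a
 * single page is mapped at CA3, so the 64-bit datum must not straddle a
 * page boundary.
 */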
static unsigned long long
ml_phys_read_long_long( vm_offset_t paddr )
{
        unsigned long long result;
        pt_entry_t save;
        mp_disable_preemption();
        if (*(pt_entry_t *) CM3)
                panic("ml_phys_read_long_long: CMAP busy");

        *(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
        save = *(pt_entry_t *)CM3;
        invlpg((u_int)CA3);

        result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

        if (save != *(pt_entry_t *)CM3) panic("ml_phys_read_long_long CMAP changed");
        *(pt_entry_t *) CM3 = 0;
        mp_enable_preemption();
        return result;
}

unsigned int ml_phys_read( vm_offset_t paddr)
{
        return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr) {
        return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
        return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
        return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
        return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
        return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
        return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
        return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
        return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr)
{
        return ml_phys_read_long_long(low32(paddr));
}


/*
 *  Write data to a physical address. Memory should not be cache inhibited.
 */

static void
ml_phys_write_data( vm_offset_t paddr, unsigned long data, int size )
{
        pt_entry_t save;
        mp_disable_preemption();
        if (*(pt_entry_t *) CM3)
                panic("ml_phys_write_data: CMAP busy");

        *(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
                INTEL_PTE_REF | INTEL_PTE_MOD;
        save = *(pt_entry_t *)CM3;
        invlpg((u_int)CA3);

        switch (size) {
        case 1:
                *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
                break;
        case 2:
                *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
                break;
        case 4:
        default:
                *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
                break;
        }

        if (save != *(pt_entry_t *)CM3) panic("ml_phys_write_data CMAP changed");
        *(pt_entry_t *) CM3 = 0;
        mp_enable_preemption();
}

static void
ml_phys_write_long_long( vm_offset_t paddr, unsigned long long data )
{
        pt_entry_t save;
        mp_disable_preemption();
        if (*(pt_entry_t *) CM3)
                panic("ml_phys_write_long_long: CMAP busy");

        *(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
                INTEL_PTE_REF | INTEL_PTE_MOD;
        save = *(pt_entry_t *)CM3;
        invlpg((u_int)CA3);

        *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

        if (save != *(pt_entry_t *)CM3) panic("ml_phys_write_long_long CMAP changed");
        *(pt_entry_t *) CM3 = 0;
        mp_enable_preemption();
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
{
        ml_phys_write_data(low32(paddr), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
{
        ml_phys_write_data(low32(paddr), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr, unsigned int data)
{
        ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
{
        ml_phys_write_data(low32(paddr), data, 4);
}


void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
        ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
{
        ml_phys_write_long_long(low32(paddr), data);
}


/* PCI config cycle probing
 *
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
        *val = ml_phys_read(paddr);
        return TRUE;
}

/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
        *val = ml_phys_read_64(paddr);
        return TRUE;
}

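/*
 * bcmp - compare two byte strings; returns zero when the first 'len'
 * bytes are identical and non-zero otherwise.
 */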
int bcmp(
        const void *pa,
        const void *pb,
        size_t len)
{
        const char *a = (const char *)pa;
        const char *b = (const char *)pb;

        if (len == 0)
                return 0;

        do
                if (*a++ != *b++)
                        break;
        while (--len);

        return len;
}

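/*
 * memcmp - compare two byte strings; returns a negative, zero or positive
 * value according to whether the first differing byte of s1 is less than,
 * equal to or greater than the corresponding byte of s2.
 */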
int
memcmp(const void *s1, const void *s2, size_t n)
{
        if (n != 0) {
                const unsigned char *p1 = s1, *p2 = s2;

                do {
                        if (*p1++ != *p2++)
                                return (*--p1 - *--p2);
                } while (--n != 0);
        }
        return (0);
}

/*
 * Abstract:
 *      strlen returns the number of characters in "string" preceding
 *      the terminating null character.
 */

size_t
strlen(
        register const char *string)
{
        register const char *ret = string;

        while (*string++ != '\0')
                continue;
        return string - 1 - ret;
}

#include <libkern/OSAtomic.h>

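/*
 * hw_atomic_add/sub/or/and and hw_compare_and_store are thin wrappers
 * around OSCompareAndSwap: each retries its read-modify-write loop until
 * the compare-and-swap succeeds, then returns the new value (or, for
 * hw_compare_and_store, whether the swap took place).  For example,
 * hw_atomic_add(&counter, 1) atomically increments 'counter' and returns
 * the incremented value.
 */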
uint32_t
hw_atomic_add(
        uint32_t *dest,
        uint32_t delt)
{
        uint32_t oldValue;
        uint32_t newValue;

        do {
                oldValue = *dest;
                newValue = (oldValue + delt);
        } while (!OSCompareAndSwap((UInt32)oldValue,
                                   (UInt32)newValue, (UInt32 *)dest));

        return newValue;
}

uint32_t
hw_atomic_sub(
        uint32_t *dest,
        uint32_t delt)
{
        uint32_t oldValue;
        uint32_t newValue;

        do {
                oldValue = *dest;
                newValue = (oldValue - delt);
        } while (!OSCompareAndSwap((UInt32)oldValue,
                                   (UInt32)newValue, (UInt32 *)dest));

        return newValue;
}

uint32_t
hw_atomic_or(
        uint32_t *dest,
        uint32_t mask)
{
        uint32_t oldValue;
        uint32_t newValue;

        do {
                oldValue = *dest;
                newValue = (oldValue | mask);
        } while (!OSCompareAndSwap((UInt32)oldValue,
                                   (UInt32)newValue, (UInt32 *)dest));

        return newValue;
}

uint32_t
hw_atomic_and(
        uint32_t *dest,
        uint32_t mask)
{
        uint32_t oldValue;
        uint32_t newValue;

        do {
                oldValue = *dest;
                newValue = (oldValue & mask);
        } while (!OSCompareAndSwap((UInt32)oldValue,
                                   (UInt32)newValue, (UInt32 *)dest));

        return newValue;
}

uint32_t
hw_compare_and_store(
        uint32_t oldval,
        uint32_t newval,
        uint32_t *dest)
{
        return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
        __unused natural_t *buf,
        __unused vm_size_t callstack_max)
{
}

#endif  /* MACH_ASSERT */


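/*
 * fillPage - fill the physical page 'pa' with the 32-bit pattern 'fill',
 * word by word, through the CM2/CA2 mapping window.
 */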
void fillPage(ppnum_t pa, unsigned int fill)
{
        pmap_paddr_t src;
        int i;
        int cnt = PAGE_SIZE/sizeof(unsigned int);
        unsigned int *addr;
        mp_disable_preemption();
        if (*(pt_entry_t *) CM2)
                panic("fillPage: CMAP busy");
        src = (pmap_paddr_t)i386_ptob(pa);
        *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
                INTEL_PTE_REF | INTEL_PTE_MOD;
        invlpg((u_int)CA2);

        for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
                *addr++ = fill;

        *(pt_entry_t *) CM2 = 0;
        mp_enable_preemption();
}

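/*
 * Inline wrappers for the SFENCE, MFENCE, WBINVD and CLFLUSH instructions.
 * CLFLUSH is emitted as raw opcode bytes (0F AE /7, with the target address
 * in %eax), presumably to support assemblers that lack the mnemonic.
 */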
static inline void __sfence(void)
{
        __asm__ volatile("sfence");
}
static inline void __mfence(void)
{
        __asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
        __asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
        __asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}

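/*
 * dcache_incoherent_io_store64 - write back the data cache for 'count'
 * bytes of physical memory starting at 'pa'.  When CLFLUSH is available
 * the range is flushed line by line through the CM2/CA2 window; otherwise
 * the entire cache is written back with WBINVD.
 */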
void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
        if (cpuid_features() & CPUID_FEATURE_CLFSH)
        {
                uint32_t  linesize = cpuid_info()->cache_linesize;
                addr64_t  addr;
                uint32_t  offset, chunk;
                boolean_t istate;

                istate = ml_set_interrupts_enabled(FALSE);

                if (*(pt_entry_t *) CM2)
                        panic("cache_flush_page_phys: CMAP busy");

                offset = pa & (linesize - 1);
                count += offset;
                addr   = pa - offset;
                offset = addr & ((addr64_t) (page_size - 1));
                chunk  = page_size - offset;

                do
                {
                        if (chunk > count)
                                chunk = count;

                        *(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;
                        invlpg((u_int)CA2);

                        for (; offset < chunk; offset += linesize)
                                __clflush((void *)(((u_int)CA2) + offset));

                        count -= chunk;
                        addr  += chunk;
                        chunk  = page_size;
                        offset = 0;
                }
                while (count);

                *(pt_entry_t *) CM2 = 0;

                (void) ml_set_interrupts_enabled(istate);
        }
        else
                __wbinvd();
        __sfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
        dcache_incoherent_io_store64(pa, count);
}

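/*
 * flush_dcache64 and invalidate_icache64 are no-ops on i386.
 */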
void
flush_dcache64(__unused addr64_t addr,
               __unused unsigned count,
               __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
                    __unused unsigned count,
                    __unused int phys)
{
}

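/*
 * copypv - copy 'size' bytes between a physical address and a (possibly
 * virtual) address.  The 'which' flags select the mode: cppvPsrc/cppvPsnk
 * mark the source/sink as physical (at least one side must be physical),
 * cppvKmap selects the kernel map for the virtual side, cppvFsrc/cppvFsnk
 * request cache flushes around the copy, and cppvNoRefSrc/cppvNoModSnk
 * suppress the reference/modify bookkeeping afterwards.
 */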
kern_return_t copypv(addr64_t src64,
                     addr64_t snk64,
                     unsigned int size,
                     int which)
{
        vm_map_t map;
        kern_return_t ret;
        vm_offset_t source, sink;
        vm_offset_t vaddr;
        vm_offset_t paddr;
        spl_t s;
        unsigned int lop, csize;
        int needtran, bothphys;
        vm_prot_t prot;
        pt_entry_t *ptep;

        map = (which & cppvKmap) ? kernel_map : current_map_fast();

        source = low32(src64);
        sink = low32(snk64);

        if((which & (cppvPsrc | cppvPsnk)) == 0 ) {             /* Neither side is physical, i.e. both are virtual */
                panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
        }

        bothphys = 1;                                   /* Assume both are physical */

        if(!(which & cppvPsnk)) {                       /* Is there a virtual page here? */
                vaddr = sink;                           /* Sink side is virtual */
                bothphys = 0;                           /* Show both aren't physical */
                prot = VM_PROT_READ | VM_PROT_WRITE;    /* Sink always must be read/write */
        } else /* if(!(which & cppvPsrc)) */ {          /* Source side is virtual */
                vaddr = source;                         /* Source side is virtual */
                bothphys = 0;                           /* Show both aren't physical */
                prot = VM_PROT_READ;                    /* Virtual source is always read only */
        }

        needtran = 1;                                   /* Show we need to map the virtual the first time */
        s = splhigh();                                  /* Don't bother me */

        while(size) {

                if(!bothphys && (needtran || !(vaddr & 4095LL))) {      /* If first time or we stepped onto a new page, we need to translate */
                        needtran = 0;
                        while(1) {
                                ptep = pmap_mapgetpte(map, vaddr);
                                if((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
                                        splx(s);        /* Restore the interrupt level */
                                        ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);      /* Didn't find it, try to fault it in... */

                                        if(ret != KERN_SUCCESS) return KERN_FAILURE;    /* Didn't find any, return no good... */

                                        s = splhigh();  /* Don't bother me */
                                        continue;       /* Go try for the map again... */

                                }

                                /* Note that we have to have the destination writable.  So, if we already have it, or we are mapping the source,
                                   we can just leave.
                                */
                                if((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE)) break;      /* We got it mapped R/W or the source is not virtual, leave... */
                                splx(s);                /* Restore the interrupt level */

                                ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);      /* check for a COW area */
                                if (ret != KERN_SUCCESS) return KERN_FAILURE;   /* We couldn't get it R/W, leave in disgrace... */
                                s = splhigh();          /* Don't bother me */
                        }

                        paddr = pte_to_pa(*ptep) | (vaddr & 4095);

                        if(which & cppvPsrc) sink = paddr;      /* If source is physical, then the sink is virtual */
                        else source = paddr;                    /* Otherwise the source is */
                }

                lop = (unsigned int)(4096LL - (sink & 4095LL));         /* Assume sink smallest */
                if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));  /* No, source is smaller */

                csize = size;                           /* Assume we can copy it all */
                if(lop < size) csize = lop;             /* Nope, we can't do it all */

                if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);        /* If requested, flush source before move */
                if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);          /* If requested, flush sink before move */

                bcopy_phys((addr64_t)source, (addr64_t)sink, csize);    /* Do a physical copy, virtually */

                if(which & cppvFsrc) flush_dcache64((addr64_t)source, csize, 1);        /* If requested, flush source after move */
                if(which & cppvFsnk) flush_dcache64((addr64_t)sink, csize, 1);          /* If requested, flush sink after move */


                /*
                 * Note that for certain ram disk flavors, we may be copying outside of known memory.
                 * Therefore, before we try to mark it modified, we check if it exists.
                 */

                if( !(which & cppvNoModSnk)) {
                        if (phys_page_exists((ppnum_t)sink >> 12))
                                mapping_set_mod((ppnum_t)(sink >> 12));         /* Make sure we know that it is modified */
                }
                if( !(which & cppvNoRefSrc)) {
                        if (phys_page_exists((ppnum_t)source >> 12))
                                mapping_set_ref((ppnum_t)(source >> 12));       /* Make sure we know that it is referenced */
                }


                size = size - csize;                    /* Calculate what is left */
                vaddr = vaddr + csize;                  /* Move to next sink address */
                source = source + csize;                /* Bump source to next physical address */
                sink = sink + csize;                    /* Bump sink to next physical address */
        }

        splx(s);                                        /* Open up for interrupts */

        return KERN_SUCCESS;
}

void switch_to_serial_console(void)
{
}

addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
        pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
        pmap_set_reference(pn);
}

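/*
 * cache_flush_page_phys - flush the data cache for a single physical page,
 * one cache line at a time with CLFLUSH, falling back to WBINVD when the
 * CLFLUSH feature is not available.
 */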
void
cache_flush_page_phys(ppnum_t pa)
{
        boolean_t istate;
        int i;
        unsigned char *cacheline_addr;
        int cacheline_size = cpuid_info()->cache_linesize;
        int cachelines_in_page = PAGE_SIZE/cacheline_size;

        /*
         * If there's no clflush instruction, we're sadly forced to use wbinvd.
         */
        if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
                asm volatile("wbinvd" : : : "memory");
                return;
        }

        istate = ml_set_interrupts_enabled(FALSE);

        if (*(pt_entry_t *) CM2)
                panic("cache_flush_page_phys: CMAP busy");

        *(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;
        invlpg((u_int)CA2);

        /* Step one cache line (in bytes) per iteration and flush the line addressed. */
        for (i = 0, cacheline_addr = (unsigned char *)CA2;
             i < cachelines_in_page;
             i++, cacheline_addr += cacheline_size) {
                asm volatile("clflush %0" : : "m" (*cacheline_addr));
        }

        *(pt_entry_t *) CM2 = 0;

        (void) ml_set_interrupts_enabled(istate);

}
