apple/xnu: osfmk/i386/loose_ends.c (blob c65c7280efbbecbf0ab7c4d13ce3e953d982809d)
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern pt_entry_t *pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);
extern void switch_to_serial_console(void);
extern kern_return_t copyp2p(vm_offset_t source,
                             vm_offset_t dest,
                             unsigned int size,
                             unsigned int flush_action);
extern void fillPage(ppnum_t pa, unsigned int fill);
extern void ovbcopy(const char *from,
                    char *to,
                    vm_size_t nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);


#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000LL)
#define low32(x)            ((unsigned int)((x) & 0x00000000FFFFFFFFLL))

void
bzero_phys(
    addr64_t src64,
    vm_size_t bytes)
{
    vm_offset_t src = low32(src64);
    pt_entry_t save2;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM2)
        panic("bzero_phys: CMAP busy");

    *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
        INTEL_PTE_REF | INTEL_PTE_MOD;
    save2 = *(pt_entry_t *) CM2;
    invlpg((u_int)CA2);

    bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
    if (save2 != *(pt_entry_t *) CM2)
        panic("bzero_phys CMAP changed");
    *(pt_entry_t *) CM2 = 0;
    mp_enable_preemption();
}

/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */

kern_return_t
copyp2p(vm_offset_t source,
        vm_offset_t dest,
        unsigned int size,
        unsigned int flush_action)
{

    switch (flush_action) {
    case 1:
        flush_dcache(source, size, 1);
        break;
    case 2:
        flush_dcache(dest, size, 1);
        break;
    case 3:
        flush_dcache(source, size, 1);
        flush_dcache(dest, size, 1);
        break;
    }
    bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);  /* Do a physical copy */

    switch (flush_action) {
    case 1:
        flush_dcache(source, size, 1);
        break;
    case 2:
        flush_dcache(dest, size, 1);
        break;
    case 3:
        flush_dcache(source, size, 1);
        flush_dcache(dest, size, 1);
        break;
    }
    return KERN_SUCCESS;
}
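
/*
 * Illustrative usage sketch (hypothetical caller): copy one page between two
 * physical addresses, flushing both source and destination caches around the
 * move (flush_action == 3).  src_phys and dst_phys are assumed to be valid,
 * page-aligned physical addresses that the caller has already validated.
 *
 *     kern_return_t kr = copyp2p(src_phys, dst_phys, PAGE_SIZE, 3);
 */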

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
    addr64_t src64,
    addr64_t dst64,
    vm_size_t bytes)
{
    vm_offset_t src = low32(src64);
    vm_offset_t dst = low32(dst64);
    pt_entry_t save1, save2;

    /* ensure we stay within a page */
    if ((((src & (NBPG - 1)) + bytes) > NBPG) ||
        (((dst & (NBPG - 1)) + bytes) > NBPG))
        panic("bcopy_phys");
    mp_disable_preemption();
    if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
        panic("bcopy_phys: CMAP busy");

    *(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
    *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
        INTEL_PTE_REF | INTEL_PTE_MOD;
    save1 = *(pt_entry_t *) CM1;
    save2 = *(pt_entry_t *) CM2;
    invlpg((u_int)CA1);
    invlpg((u_int)CA2);

    bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
          (void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
    if ((save1 != *(pt_entry_t *) CM1) || (save2 != *(pt_entry_t *) CM2))
        panic("bcopy_phys CMAP changed");
    *(pt_entry_t *) CM1 = 0;
    *(pt_entry_t *) CM2 = 0;
    mp_enable_preemption();
}
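
/*
 * Minimal usage sketch (assumed caller): both bzero_phys() and bcopy_phys()
 * borrow the temporary CM1/CM2 kernel mapping windows with preemption
 * disabled, and bcopy_phys() panics if either range crosses a page boundary,
 * so a single transfer is limited to one page.  Copying a whole page between
 * two hypothetical page numbers src_pn and dst_pn might look like:
 *
 *     bcopy_phys((addr64_t)i386_ptob(src_pn),
 *                (addr64_t)i386_ptob(dst_pn), PAGE_SIZE);
 */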

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 * them correctly.
 */

void
ovbcopy(
    const char *from,
    char *to,
    vm_size_t bytes)    /* num bytes to copy */
{
    /* Assume that bcopy copies left-to-right (low addr first). */
    if (from + bytes <= to || to + bytes <= from || to == from)
        bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op */
    else if (from > to)
        bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
    else {
        /* to > from: overlapping, and must copy right-to-left. */
        from += bytes - 1;
        to += bytes - 1;
        while (bytes-- > 0)
            *to-- = *from--;
    }
}
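
/*
 * Hypothetical example: slide a buffer forward by one byte, where the
 * destination overlaps the source and a plain left-to-right copy would
 * clobber bytes before they are read.
 *
 *     char buf[9] = "abcdefgh";
 *
 *     ovbcopy(buf, buf + 1, 7);    // buf now holds "aabcdefg"
 */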

/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

static unsigned int
ml_phys_read_data(vm_offset_t paddr, int size)
{
    unsigned int result;
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
        panic("ml_phys_read_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
    save = *(pt_entry_t *) CM3;
    invlpg((u_int)CA3);

    switch (size) {
        unsigned char s1;
        unsigned short s2;
    case 1:
        s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
        result = s1;
        break;
    case 2:
        s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
        result = s2;
        break;
    case 4:
    default:
        result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
        break;
    }

    if (save != *(pt_entry_t *) CM3)
        panic("ml_phys_read_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();
    return result;
}

static unsigned long long
ml_phys_read_long_long(vm_offset_t paddr)
{
    unsigned long long result;
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
        panic("ml_phys_read_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
    save = *(pt_entry_t *) CM3;
    invlpg((u_int)CA3);

    result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

    if (save != *(pt_entry_t *) CM3)
        panic("ml_phys_read_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();
    return result;
}

unsigned int ml_phys_read(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
    return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
    return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
    return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr)
{
    return ml_phys_read_long_long(low32(paddr));
}

/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */

static void
ml_phys_write_data(vm_offset_t paddr, unsigned long data, int size)
{
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
        panic("ml_phys_write_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
        INTEL_PTE_REF | INTEL_PTE_MOD;
    save = *(pt_entry_t *) CM3;
    invlpg((u_int)CA3);

    switch (size) {
    case 1:
        *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
        break;
    case 2:
        *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
        break;
    case 4:
    default:
        *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
        break;
    }

    if (save != *(pt_entry_t *) CM3)
        panic("ml_phys_write_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();
}

static void
ml_phys_write_long_long(vm_offset_t paddr, unsigned long long data)
{
    pt_entry_t save;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM3)
        panic("ml_phys_write_data: CMAP busy");

    *(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
        INTEL_PTE_REF | INTEL_PTE_MOD;
    save = *(pt_entry_t *) CM3;
    invlpg((u_int)CA3);

    *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

    if (save != *(pt_entry_t *) CM3)
        panic("ml_phys_write_data CMAP changed");
    *(pt_entry_t *) CM3 = 0;
    mp_enable_preemption();
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
{
    ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
    ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
{
    ml_phys_write_long_long(low32(paddr), data);
}
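
/*
 * Minimal usage sketch (probe_paddr is a hypothetical physical address):
 * read a 32-bit word from physical memory, set its low bit, and write it
 * back.  Each helper takes and releases the CM3/CA3 mapping window with
 * preemption disabled, and the target memory must not be cache inhibited.
 *
 *     unsigned int v = ml_phys_read(probe_paddr);
 *
 *     ml_phys_write(probe_paddr, v | 0x1);
 */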

/* PCI config cycle probing
 *
 *
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
    *val = ml_phys_read(paddr);
    return TRUE;
}

/*
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
    *val = ml_phys_read_64(paddr);
    return TRUE;
}

int bcmp(
    const void *pa,
    const void *pb,
    size_t len)
{
    const char *a = (const char *)pa;
    const char *b = (const char *)pb;

    if (len == 0)
        return 0;

    do
        if (*a++ != *b++)
            break;
    while (--len);

    return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
    if (n != 0) {
        const unsigned char *p1 = s1, *p2 = s2;

        do {
            if (*p1++ != *p2++)
                return (*--p1 - *--p2);
        } while (--n != 0);
    }
    return (0);
}

/*
 * Abstract:
 *     strlen returns the number of characters in "string" preceding
 *     the terminating null character.
 */

size_t
strlen(
    register const char *string)
{
    register const char *ret = string;

    while (*string++ != '\0')
        continue;
    return string - 1 - ret;
}

#include <libkern/OSAtomic.h>

uint32_t
hw_atomic_add(
    uint32_t *dest,
    uint32_t delt)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue + delt);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_atomic_sub(
    uint32_t *dest,
    uint32_t delt)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue - delt);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_atomic_or(
    uint32_t *dest,
    uint32_t mask)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue | mask);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_atomic_and(
    uint32_t *dest,
    uint32_t mask)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue & mask);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_compare_and_store(
    uint32_t oldval,
    uint32_t newval,
    uint32_t *dest)
{
    return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
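
/*
 * Hedged usage sketch: the primitives above loop until OSCompareAndSwap()
 * succeeds, so concurrent updates from other CPUs simply cause a retry.
 * my_counter, my_flag and do_one_time_init() are hypothetical.
 *
 *     static uint32_t my_counter;
 *     static uint32_t my_flag;
 *
 *     uint32_t n = hw_atomic_add(&my_counter, 1);   // returns the new value
 *
 *     // exactly one CPU wins the 0 -> 1 transition and runs the init
 *     if (hw_compare_and_store(0, 1, &my_flag))
 *         do_one_time_init();
 */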

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
    __unused natural_t *buf,
    __unused vm_size_t callstack_max)
{
}

#endif  /* MACH_ASSERT */


void fillPage(ppnum_t pa, unsigned int fill)
{
    pmap_paddr_t src;
    int i;
    int cnt = PAGE_SIZE / sizeof(unsigned int);
    unsigned int *addr;

    mp_disable_preemption();
    if (*(pt_entry_t *) CM2)
        panic("fillPage: CMAP busy");
    src = (pmap_paddr_t)i386_ptob(pa);
    *(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
        INTEL_PTE_REF | INTEL_PTE_MOD;
    invlpg((u_int)CA2);

    for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
        *addr++ = fill;

    *(pt_entry_t *) CM2 = 0;
    mp_enable_preemption();
}

static inline void __sfence(void)
{
    __asm__ volatile("sfence");
}
static inline void __mfence(void)
{
    __asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
    __asm__ volatile("wbinvd");
}
/*
 * The byte sequence 0F AE with modrm 0x38 encodes "clflush (%eax)";
 * the "a" constraint places ptr in %eax.
 */
static inline void __clflush(void *ptr)
{
    __asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
    if (cpuid_features() & CPUID_FEATURE_CLFSH)
    {
        uint32_t linesize = cpuid_info()->cache_linesize;
        addr64_t addr;
        uint32_t offset, chunk;
        boolean_t istate;

        istate = ml_set_interrupts_enabled(FALSE);

        if (*(pt_entry_t *) CM2)
            panic("cache_flush_page_phys: CMAP busy");

        offset = pa & (linesize - 1);
        count += offset;
        addr = pa - offset;
        offset = addr & ((addr64_t) (page_size - 1));
        chunk = page_size - offset;

        do
        {
            if (chunk > count)
                chunk = count;

            *(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;
            invlpg((u_int)CA2);

            for (; offset < chunk; offset += linesize)
                __clflush((void *)(((u_int)CA2) + offset));

            count -= chunk;
            addr += chunk;
            chunk = page_size;
            offset = 0;
        }
        while (count);

        *(pt_entry_t *) CM2 = 0;

        (void) ml_set_interrupts_enabled(istate);
    }
    else
        __wbinvd();
    __sfence();
}
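
/*
 * Illustrative call (buf_paddr and buf_len are hypothetical): before handing
 * a buffer to a bus-mastering device, push its dirty cache lines out to
 * memory.  The routine walks the range a page at a time through the CM2/CA2
 * window and clflushes each line when CLFLUSH is available, falling back to
 * a full wbinvd otherwise.
 *
 *     dcache_incoherent_io_store64(buf_paddr, buf_len);
 */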

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
    return (dcache_incoherent_io_store64(pa, count));
}

void
flush_dcache64(__unused addr64_t addr,
               __unused unsigned count,
               __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
                    __unused unsigned count,
                    __unused int phys)
{
}
kern_return_t copypv(addr64_t src64,
                     addr64_t snk64,
                     unsigned int size,
                     int which)
{
    vm_map_t map;
    kern_return_t ret;
    vm_offset_t source, sink;
    vm_offset_t vaddr;
    vm_offset_t paddr;
    spl_t s;
    unsigned int lop, csize;
    int needtran, bothphys;
    vm_prot_t prot;
    pt_entry_t *ptep;

    map = (which & cppvKmap) ? kernel_map : current_map_fast();

    source = low32(src64);
    sink = low32(snk64);

    if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only one is virtual */
        panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
    }

    bothphys = 1;       /* Assume both are physical */

    if (!(which & cppvPsnk)) {      /* Is there a virtual page here? */
        vaddr = sink;       /* Sink side is virtual */
        bothphys = 0;       /* Show both aren't physical */
        prot = VM_PROT_READ | VM_PROT_WRITE;        /* Sink always must be read/write */
    } else /* if (!(which & cppvPsrc)) */ {     /* Source side is virtual */
        vaddr = source;     /* Source side is virtual */
        bothphys = 0;       /* Show both aren't physical */
        prot = VM_PROT_READ;        /* Virtual source is always read only */
    }

    needtran = 1;       /* Show we need to map the virtual the first time */
    s = splhigh();      /* Don't bother me */

    while (size) {

        if (!bothphys && (needtran || !(vaddr & 4095LL))) {     /* If first time or we stepped onto a new page, we need to translate */
            needtran = 0;
            while (1) {
                ptep = pmap_mapgetpte(map, vaddr);
                if ((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
                    splx(s);        /* Restore the interrupt level */
                    ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);      /* Didn't find it, try to fault it in... */

                    if (ret != KERN_SUCCESS)
                        return KERN_FAILURE;        /* Didn't find any, return no good... */

                    s = splhigh();      /* Don't bother me */
                    continue;       /* Go try for the map again... */
                }

                /*
                 * Note that we have to have the destination writable.  So, if we
                 * already have it, or we are mapping the source, we can just leave.
                 */
                if ((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE))
                    break;      /* We got it mapped R/W or the source is not virtual, leave... */
                splx(s);        /* Restore the interrupt level */

                ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);      /* check for a COW area */
                if (ret != KERN_SUCCESS)
                    return KERN_FAILURE;        /* We couldn't get it R/W, leave in disgrace... */
                s = splhigh();      /* Don't bother me */
            }

            paddr = pte_to_pa(*ptep) | (vaddr & 4095);

            if (which & cppvPsrc)
                sink = paddr;       /* If source is physical, then the sink is virtual */
            else
                source = paddr;     /* Otherwise the source is the virtual side */
        }

        lop = (unsigned int)(4096LL - (sink & 4095LL));     /* Assume sink smallest */
        if (lop > (unsigned int)(4096LL - (source & 4095LL)))
            lop = (unsigned int)(4096LL - (source & 4095LL));       /* No, source is smaller */

        csize = size;       /* Assume we can copy it all */
        if (lop < size)
            csize = lop;        /* Nope, we can't do it all */

        if (which & cppvFsrc)
            flush_dcache64((addr64_t)source, csize, 1);     /* If requested, flush source before move */
        if (which & cppvFsnk)
            flush_dcache64((addr64_t)sink, csize, 1);       /* If requested, flush sink before move */

        bcopy_phys((addr64_t)source, (addr64_t)sink, csize);        /* Do a physical copy, virtually */

        if (which & cppvFsrc)
            flush_dcache64((addr64_t)source, csize, 1);     /* If requested, flush source after move */
        if (which & cppvFsnk)
            flush_dcache64((addr64_t)sink, csize, 1);       /* If requested, flush sink after move */

        /*
         * Note that for certain ram disk flavors, we may be copying outside of known memory.
         * Therefore, before we try to mark it modified, we check if it exists.
         */

        if (!(which & cppvNoModSnk)) {
            if (phys_page_exists((ppnum_t)sink >> 12))
                mapping_set_mod((ppnum_t)(sink >> 12));     /* Make sure we know that it is modified */
        }
        if (!(which & cppvNoRefSrc)) {
            if (phys_page_exists((ppnum_t)source >> 12))
                mapping_set_ref((ppnum_t)(source >> 12));       /* Make sure we know that it is referenced */
        }

        size = size - csize;        /* Calculate what is left */
        vaddr = vaddr + csize;      /* Move to next sink address */
        source = source + csize;        /* Bump source to next physical address */
        sink = sink + csize;        /* Bump sink to next physical address */
    }

    splx(s);        /* Open up for interrupts */

    return KERN_SUCCESS;
}
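
/*
 * Illustrative call (src_paddr, user_vaddr and len are hypothetical): copy
 * len bytes from a physical source into a user virtual destination in the
 * current map.  cppvPsrc marks the source as physical; leaving cppvPsnk
 * clear makes the sink virtual, so copypv() faults the destination pages in
 * as needed and marks them modified unless cppvNoModSnk is also passed.
 *
 *     kern_return_t kr = copypv(src_paddr, user_vaddr, len, cppvPsrc);
 *     if (kr != KERN_SUCCESS)
 *         return kr;
 */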

void switch_to_serial_console(void)
{
}

addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
    pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
    pmap_set_reference(pn);
}

void
cache_flush_page_phys(ppnum_t pa)
{
    boolean_t istate;
    int i;
    unsigned char *cacheline_addr;
    int cacheline_size = cpuid_info()->cache_linesize;
    int cachelines_in_page = PAGE_SIZE / cacheline_size;

    /*
     * If there's no clflush instruction, we're sadly forced to use wbinvd.
     */
    if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
        asm volatile("wbinvd" : : : "memory");
        return;
    }

    istate = ml_set_interrupts_enabled(FALSE);

    if (*(pt_entry_t *) CM2)
        panic("cache_flush_page_phys: CMAP busy");

    *(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;
    invlpg((u_int)CA2);

    for (i = 0, cacheline_addr = (unsigned char *)CA2;
         i < cachelines_in_page;
         i++, cacheline_addr += cacheline_size) {
        asm volatile("clflush %0" : : "m" (*cacheline_addr));
    }

    *(pt_entry_t *) CM2 = 0;

    (void) ml_set_interrupts_enabled(istate);
}