/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern pt_entry_t *pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);
extern void switch_to_serial_console(void);
extern kern_return_t copyp2p(vm_offset_t source,
			     vm_offset_t dest,
			     unsigned int size,
			     unsigned int flush_action);
extern void fillPage(ppnum_t pa, unsigned int fill);
extern void ovbcopy(const char *from,
		    char *to,
		    vm_size_t nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);

#define value_64bit(value)	((value) & 0xFFFFFFFF00000000LL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFLL))

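/*
 * Several routines below use the CMx/CAx "mapping window" scheme: CMx is
 * a reserved kernel page-table entry and CAx is the virtual address it
 * maps. A physical page is made accessible by writing its frame into the
 * PTE and issuing invlpg on the window address; preemption is disabled so
 * the window cannot be reused underneath us, and the saved PTE value is
 * rechecked afterwards to catch anything else stomping on the window.
 */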
void
bzero_phys(
	   addr64_t src64,
	   vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	pt_entry_t save2;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("bzero_phys: CMAP busy");

	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save2 = *(pt_entry_t *) CM2;
	invlpg((u_int)CA2);

	bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
	if (save2 != *(pt_entry_t *) CM2)
		panic("bzero_phys CMAP changed");
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}

/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */

kern_return_t
copyp2p(vm_offset_t source,
	vm_offset_t dest,
	unsigned int size,
	unsigned int flush_action)
{

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}
	bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);	/* Do a physical copy */

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}
	return KERN_SUCCESS;
}
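
/*
 * Example (hypothetical caller): copy one page between two already
 * validated physical addresses, flushing both the source and the
 * destination around the move:
 *
 *	copyp2p(src_phys, dst_phys, PAGE_SIZE, 3);
 */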

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	vm_offset_t dst = low32(dst64);
	pt_entry_t save1, save2;

	/* ensure we stay within a page */
	if ((((src & (NBPG - 1)) + bytes) > NBPG) ||
	    (((dst & (NBPG - 1)) + bytes) > NBPG))
		panic("bcopy_phys");

	mp_disable_preemption();
	if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
		panic("bcopy_phys: CMAP busy");

	*(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save1 = *(pt_entry_t *) CM1;
	save2 = *(pt_entry_t *) CM2;
	invlpg((u_int)CA1);
	invlpg((u_int)CA2);

	bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
	if ((save1 != *(pt_entry_t *) CM1) || (save2 != *(pt_entry_t *) CM2))
		panic("bcopy_phys CMAP changed");
	*(pt_entry_t *) CM1 = 0;
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 * them correctly.
 */

void
ovbcopy(
	const char *from,
	char *to,
	vm_size_t bytes)	/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}

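/*
 * For example, with from = buf, to = buf + 4 and bytes = 8, the ranges
 * overlap and the destination lies above the source, so the bytes are
 * copied right-to-left; a left-to-right bcopy would clobber the tail of
 * the source before it had been read.
 */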

/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

static unsigned int
ml_phys_read_data(vm_offset_t paddr, int size)
{
	unsigned int result;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	switch (size) {
		unsigned char s1;
		unsigned short s2;
	case 1:
		s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s1;
		break;
	case 2:
		s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s2;
		break;
	case 4:
	default:
		result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		break;
	}

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_read_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
	return result;
}

static unsigned long long
ml_phys_read_long_long(vm_offset_t paddr)
{
	unsigned long long result;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_long_long: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_read_long_long CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
	return result;
}
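
/*
 * Note that CM3 maps a single page: a 64-bit read whose physical address
 * falls in the last 7 bytes of a page would run past the mapping window,
 * so such an access must not cross a page boundary.
 *
 * The wrappers below read 1 (_byte), 2 (_half) or 4 (_word and the
 * unsuffixed form) bytes, and 8 for _double; the _64 variants accept an
 * addr64_t but use only its low 32 bits (via low32()).
 */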

unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(low32(paddr64));
}

/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */

static void
ml_phys_write_data(vm_offset_t paddr, unsigned long data, int size)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	switch (size) {
	case 1:
		*(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
		break;
	case 4:
	default:
		*(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
		break;
	}

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_write_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}

static void
ml_phys_write_long_long(vm_offset_t paddr, unsigned long long data)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_long_long: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	*(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_write_long_long CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr, unsigned int data)
{
	ml_phys_write_data(low32(paddr), data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(low32(paddr), data);
}

/* PCI config cycle probing
 *
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	*val = ml_phys_read(paddr);
	return TRUE;
}

/*
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
	*val = ml_phys_read_64(paddr);
	return TRUE;
}
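
/*
 * Despite the machine-check caveat above, the i386 probe routines simply
 * perform the read and unconditionally return TRUE; no fault recovery is
 * implemented here.
 */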

int
bcmp(
     const void *pa,
     const void *pb,
     size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return len;
}
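
/*
 * Note that unlike memcmp below, bcmp only distinguishes equal from
 * unequal: it returns 0 when the buffers match and the (nonzero) number
 * of bytes remaining at the first mismatch otherwise, not a sign.
 */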

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}

/*
 * Abstract:
 *	strlen returns the number of characters in "string" preceding
 *	the terminating null character.
 */

size_t
strlen(
       register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}

#include <libkern/OSAtomic.h>

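/*
 * The hw_atomic_* routines below all use the same lock-free pattern:
 * read the current value, compute the new value, and retry with
 * OSCompareAndSwap until no other CPU has changed the word in between.
 */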
uint32_t
hw_atomic_add(
	      uint32_t *dest,
	      uint32_t delt)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue + delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_sub(
	      uint32_t *dest,
	      uint32_t delt)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue - delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_or(
	     uint32_t *dest,
	     uint32_t mask)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue | mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_and(
	      uint32_t *dest,
	      uint32_t mask)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue & mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_compare_and_store(
		     uint32_t oldval,
		     uint32_t newval,
		     uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
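
/*
 * Example (hypothetical): atomically claim a flag word, succeeding only
 * for the caller that observes the 0 -> 1 transition:
 *
 *	if (hw_compare_and_store(0, 1, &flag))
 *		... we own the flag ...
 *
 * The return value is nonzero iff the swap was performed.
 */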

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
		  __unused natural_t *buf,
		  __unused vm_size_t callstack_max)
{
}

#endif	/* MACH_ASSERT */

void
fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int *addr;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("fillPage: CMAP busy");
	src = (pmap_paddr_t)i386_ptob(pa);
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	invlpg((u_int)CA2);

	for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
		*addr++ = fill;

	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
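
/*
 * Example (hypothetical): tag a physical page with a recognizable
 * debugging pattern before handing it out:
 *
 *	fillPage(pn, 0xdeadbeef);
 */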

static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}
static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
	__asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}
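
/*
 * The byte sequence above hand-encodes "clflush (%eax)" (opcode 0F AE /7
 * with mod/rm byte 0x38) for assemblers that predate the instruction;
 * the "a" constraint pins the target address in %eax.
 */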

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	if (cpuid_features() & CPUID_FEATURE_CLFSH) {
		uint32_t linesize = cpuid_info()->cache_linesize;
		addr64_t addr;
		uint32_t offset, chunk;
		boolean_t istate;

		istate = ml_set_interrupts_enabled(FALSE);

		if (*(pt_entry_t *) CM2)
			panic("dcache_incoherent_io_store64: CMAP busy");

		offset = pa & (linesize - 1);
		count += offset;
		addr = pa - offset;
		offset = addr & ((addr64_t) (page_size - 1));
		chunk = page_size - offset;

		do {
			if (chunk > count)
				chunk = count;

			*(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;
			invlpg((u_int)CA2);

			for (; offset < chunk; offset += linesize)
				__clflush((void *)(((u_int)CA2) + offset));

			count -= chunk;
			addr += chunk;
			chunk = page_size;
			offset = 0;
		} while (count);

		*(pt_entry_t *) CM2 = 0;

		(void) ml_set_interrupts_enabled(istate);
	} else
		__wbinvd();
	__sfence();
}

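/*
 * The routine above rounds the range down to a cache-line boundary and
 * then walks it one page at a time: each page is temporarily mapped at
 * CA2 and flushed line by line with clflush. Without CLFLUSH support the
 * whole cache is written back with wbinvd instead, and sfence orders the
 * flushes against later stores either way.
 */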
void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return (dcache_incoherent_io_store64(pa, count));
}

void
flush_dcache64(__unused addr64_t addr,
	       __unused unsigned count,
	       __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}

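/*
 * copypv copies between any mix of physical and virtual addresses, one
 * page at a time. The 'which' flags (defined elsewhere) select the mode:
 * cppvPsrc / cppvPsnk mark the source / sink as physical, cppvKmap uses
 * the kernel map instead of the current task's, cppvFsrc / cppvFsnk
 * request cache flushes around the move, and cppvNoModSnk / cppvNoRefSrc
 * suppress the modified / referenced bookkeeping afterwards.
 */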
kern_return_t copypv(addr64_t src64,
		     addr64_t snk64,
		     unsigned int size,
		     int which)
{
	vm_map_t map;
	kern_return_t ret;
	vm_offset_t source, sink;
	vm_offset_t vaddr;
	vm_offset_t paddr;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	vm_prot_t prot;
	pt_entry_t *ptep;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	source = low32(src64);
	sink = low32(snk64);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) {	/* At most one side may be virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;	/* Assume both are physical */

	if (!(which & cppvPsnk)) {	/* Is there a virtual page here? */
		vaddr = sink;		/* Sink side is virtual */
		bothphys = 0;		/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;	/* Sink always must be read/write */
	} else /* if (!(which & cppvPsrc)) */ {	/* Source side is virtual */
		vaddr = source;		/* Source side is virtual */
		bothphys = 0;		/* Show both aren't physical */
		prot = VM_PROT_READ;	/* Virtual source is always read only */
	}

	needtran = 1;	/* Show we need to map the virtual the first time */
	s = splhigh();	/* Don't bother me */

	while (size) {

		if (!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */
			needtran = 0;
			while (1) {
				ptep = pmap_mapgetpte(map, vaddr);
				if ((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
					splx(s);	/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if (ret != KERN_SUCCESS)
						return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();	/* Don't bother me */
					continue;	/* Go try for the map again... */
				}

				/*
				 * Note that we have to have the destination writable. So, if we
				 * already have it, or we are mapping the source, we can just leave.
				 */
				if ((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE))
					break;	/* We got it mapped R/W or the source is not virtual, leave... */
				splx(s);	/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS)
					return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();	/* Don't bother me */
			}

			paddr = pte_to_pa(*ptep) | (vaddr & 4095);

			if (which & cppvPsrc)
				sink = paddr;	/* If source is physical, then the sink is virtual */
			else
				source = paddr;	/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));	/* Assume sink smallest */
		if (lop > (unsigned int)(4096LL - (source & 4095LL)))
			lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */

		csize = size;	/* Assume we can copy it all */
		if (lop < size)
			csize = lop;	/* Nope, we can't do it all */

		if (which & cppvFsrc)
			flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64((addr64_t)sink, csize, 1);	/* If requested, flush sink before move */

		bcopy_phys((addr64_t)source, (addr64_t)sink, csize);	/* Do a physical copy, virtually */

		if (which & cppvFsrc)
			flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64((addr64_t)sink, csize, 1);	/* If requested, flush sink after move */

		/*
		 * Note that for certain ram disk flavors, we may be copying outside of
		 * known memory. Therefore, before we try to mark it modified, we check
		 * if it exists.
		 */
		if (!(which & cppvNoModSnk)) {
			if (phys_page_exists((ppnum_t)(sink >> 12)))
				mapping_set_mod((ppnum_t)(sink >> 12));	/* Make sure we know that it is modified */
		}
		if (!(which & cppvNoRefSrc)) {
			if (phys_page_exists((ppnum_t)(source >> 12)))
				mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it is referenced */
		}

		size = size - csize;		/* Calculate what is left */
		vaddr = vaddr + csize;		/* Move to next virtual address */
		source = source + csize;	/* Bump source to next physical address */
		sink = sink + csize;		/* Bump sink to next physical address */
	}

	splx(s);	/* Open up for interrupts */

	return KERN_SUCCESS;
}

void switch_to_serial_console(void)
{
}

addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}

void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t istate;
	int i;
	unsigned char *cacheline_addr;
	int cacheline_size = cpuid_info()->cache_linesize;
	int cachelines_in_page = PAGE_SIZE / cacheline_size;

	/*
	 * If there's no clflush instruction, we're sadly forced to use wbinvd.
	 */
	if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
		asm volatile("wbinvd" : : : "memory");
		return;
	}

	istate = ml_set_interrupts_enabled(FALSE);

	if (*(pt_entry_t *) CM2)
		panic("cache_flush_page_phys: CMAP busy");

	*(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;
	invlpg((u_int)CA2);

	/*
	 * Flush each line in the page. The memory operand must be the
	 * pointed-to line (*cacheline_addr), not the pointer variable
	 * itself, and the byte pointer advances by one line per step.
	 */
	for (i = 0, cacheline_addr = (unsigned char *)CA2;
	     i < cachelines_in_page;
	     i++, cacheline_addr += cacheline_size) {
		asm volatile("clflush %0" : : "m" (*cacheline_addr));
	}

	*(pt_entry_t *) CM2 = 0;

	(void) ml_set_interrupts_enabled(istate);
}