2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
58 #include <mach_assert.h>
61 #include <mach/boolean.h>
62 #include <mach/i386/vm_types.h>
63 #include <mach/i386/vm_param.h>
64 #include <kern/kern_types.h>
65 #include <kern/misc_protos.h>
66 #include <sys/errno.h>
67 #include <i386/param.h>
68 #include <i386/misc_protos.h>
69 #include <i386/cpu_data.h>
70 #include <i386/machine_routines.h>
71 #include <i386/cpuid.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_fault.h>
78 #include <libkern/OSAtomic.h>
79 #include <sys/kdebug.h>
84 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
89 /* XXX - should be gone from here */
90 extern void invalidate_icache64(addr64_t addr
, unsigned cnt
, int phys
);
91 extern void flush_dcache64(addr64_t addr
, unsigned count
, int phys
);
92 extern boolean_t
phys_page_exists(ppnum_t
);
93 extern void bcopy_no_overwrite(const char *from
, char *to
,vm_size_t bytes
);
94 extern void pmap_set_reference(ppnum_t pn
);
95 extern void mapping_set_mod(ppnum_t pa
);
96 extern void mapping_set_ref(ppnum_t pn
);
98 extern void ovbcopy(const char *from
,
101 void machine_callstack(natural_t
*buf
, vm_size_t callstack_max
);
104 #define value_64bit(value) ((value) & 0xFFFFFFFF00000000LL)
105 #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFLL))
114 bzero_phys(src64
,bytes
);
124 mp_disable_preemption();
126 map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| INTEL_PTE_RW
| ((pmap_paddr_t
)src64
& PG_FRAME
) | INTEL_PTE_REF
| INTEL_PTE_MOD
));
128 bzero((void *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)src64
& INTEL_OFFMASK
)), bytes
);
130 pmap_put_mapwindow(map
);
132 mp_enable_preemption();
137 * bcopy_phys - like bcopy but copies from/to physical addresses.
146 mapwindow_t
*src_map
, *dst_map
;
148 /* ensure we stay within a page */
149 if ( ((((uint32_t)src64
& (NBPG
-1)) + bytes
) > NBPG
) || ((((uint32_t)dst64
& (NBPG
-1)) + bytes
) > NBPG
) ) {
150 panic("bcopy_phys alignment");
152 mp_disable_preemption();
154 src_map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| ((pmap_paddr_t
)src64
& PG_FRAME
) | INTEL_PTE_REF
));
155 dst_map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| INTEL_PTE_RW
| ((pmap_paddr_t
)dst64
& PG_FRAME
) |
156 INTEL_PTE_REF
| INTEL_PTE_MOD
));
158 bcopy((void *) ((uintptr_t)src_map
->prv_CADDR
| ((uint32_t)src64
& INTEL_OFFMASK
)),
159 (void *) ((uintptr_t)dst_map
->prv_CADDR
| ((uint32_t)dst64
& INTEL_OFFMASK
)), bytes
);
161 pmap_put_mapwindow(src_map
);
162 pmap_put_mapwindow(dst_map
);
164 mp_enable_preemption();
168 * allow a function to get a quick virtual mapping of a physical page
175 int (*func
)(void * buffer
, vm_size_t bytes
, void * arg
),
178 mapwindow_t
*dst_map
;
181 /* ensure we stay within a page */
182 if ( ((((uint32_t)dst64
& (NBPG
-1)) + bytes
) > NBPG
) ) {
183 panic("apply_func_phys alignment");
185 mp_disable_preemption();
187 dst_map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| INTEL_PTE_RW
| ((pmap_paddr_t
)dst64
& PG_FRAME
) |
188 INTEL_PTE_REF
| INTEL_PTE_MOD
));
190 rc
= func((void *)((uintptr_t)dst_map
->prv_CADDR
| ((uint32_t)dst64
& INTEL_OFFMASK
)), bytes
, arg
);
192 pmap_put_mapwindow(dst_map
);
194 mp_enable_preemption();
200 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
208 vm_size_t bytes
) /* num bytes to copy */
210 /* Assume that bcopy copies left-to-right (low addr first). */
211 if (from
+ bytes
<= to
|| to
+ bytes
<= from
|| to
== from
)
212 bcopy_no_overwrite(from
, to
, bytes
); /* non-overlapping or no-op*/
214 bcopy_no_overwrite(from
, to
, bytes
); /* overlapping but OK */
216 /* to > from: overlapping, and must copy right-to-left. */
226 * Read data from a physical address.
231 ml_phys_read_data(pmap_paddr_t paddr
, int size
)
236 mp_disable_preemption();
238 map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| (paddr
& PG_FRAME
) | INTEL_PTE_REF
));
244 s1
= *(unsigned char *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
));
248 s2
= *(unsigned short *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
));
253 result
= *(unsigned int *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
));
256 pmap_put_mapwindow(map
);
258 mp_enable_preemption();
263 static unsigned long long
264 ml_phys_read_long_long(pmap_paddr_t paddr
)
267 unsigned long long result
;
269 mp_disable_preemption();
271 map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| (paddr
& PG_FRAME
) | INTEL_PTE_REF
));
273 result
= *(unsigned long long *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
));
275 pmap_put_mapwindow(map
);
277 mp_enable_preemption();
282 unsigned int ml_phys_read( vm_offset_t paddr
)
284 return ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
287 unsigned int ml_phys_read_word(vm_offset_t paddr
) {
289 return ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
292 unsigned int ml_phys_read_64(addr64_t paddr64
)
294 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
297 unsigned int ml_phys_read_word_64(addr64_t paddr64
)
299 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
302 unsigned int ml_phys_read_half(vm_offset_t paddr
)
304 return ml_phys_read_data((pmap_paddr_t
)paddr
, 2);
307 unsigned int ml_phys_read_half_64(addr64_t paddr64
)
309 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 2);
312 unsigned int ml_phys_read_byte(vm_offset_t paddr
)
314 return ml_phys_read_data((pmap_paddr_t
)paddr
, 1);
317 unsigned int ml_phys_read_byte_64(addr64_t paddr64
)
319 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 1);
322 unsigned long long ml_phys_read_double(vm_offset_t paddr
)
324 return ml_phys_read_long_long((pmap_paddr_t
)paddr
);
327 unsigned long long ml_phys_read_double_64(addr64_t paddr64
)
329 return ml_phys_read_long_long((pmap_paddr_t
)paddr64
);
335 * Write data to a physical address.
339 ml_phys_write_data(pmap_paddr_t paddr
, unsigned long data
, int size
)
343 mp_disable_preemption();
345 map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| INTEL_PTE_RW
| (paddr
& PG_FRAME
) |
346 INTEL_PTE_REF
| INTEL_PTE_MOD
));
350 *(unsigned char *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
)) = (unsigned char)data
;
353 *(unsigned short *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
)) = (unsigned short)data
;
357 *(unsigned int *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
)) = (uint32_t)data
;
360 pmap_put_mapwindow(map
);
362 mp_enable_preemption();
366 ml_phys_write_long_long(pmap_paddr_t paddr
, unsigned long long data
)
370 mp_disable_preemption();
372 map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| INTEL_PTE_RW
| (paddr
& PG_FRAME
) |
373 INTEL_PTE_REF
| INTEL_PTE_MOD
));
375 *(unsigned long long *)((uintptr_t)map
->prv_CADDR
| ((uint32_t)paddr
& INTEL_OFFMASK
)) = data
;
377 pmap_put_mapwindow(map
);
379 mp_enable_preemption();
384 void ml_phys_write_byte(vm_offset_t paddr
, unsigned int data
)
386 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 1);
389 void ml_phys_write_byte_64(addr64_t paddr64
, unsigned int data
)
391 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 1);
394 void ml_phys_write_half(vm_offset_t paddr
, unsigned int data
)
396 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 2);
399 void ml_phys_write_half_64(addr64_t paddr64
, unsigned int data
)
401 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 2);
404 void ml_phys_write(vm_offset_t paddr
, unsigned int data
)
406 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
409 void ml_phys_write_64(addr64_t paddr64
, unsigned int data
)
411 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
414 void ml_phys_write_word(vm_offset_t paddr
, unsigned int data
)
416 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
419 void ml_phys_write_word_64(addr64_t paddr64
, unsigned int data
)
421 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
424 void ml_phys_write_double(vm_offset_t paddr
, unsigned long long data
)
426 ml_phys_write_long_long((pmap_paddr_t
)paddr
, data
);
429 void ml_phys_write_double_64(addr64_t paddr64
, unsigned long long data
)
431 ml_phys_write_long_long((pmap_paddr_t
)paddr64
, data
);
435 /* PCI config cycle probing
438 * Read the memory location at physical address paddr.
439 * This is a part of a device probe, so there is a good chance we will
440 * have a machine check here. So we have to be able to handle that.
441 * We assume that machine checks are enabled both in MSR and HIDs
445 ml_probe_read(vm_offset_t paddr
, unsigned int *val
)
447 if ((PAGE_SIZE
- (paddr
& PAGE_MASK
)) < 4)
450 *val
= ml_phys_read(paddr
);
456 * Read the memory location at physical address paddr.
457 * This is a part of a device probe, so there is a good chance we will
458 * have a machine check here. So we have to be able to handle that.
459 * We assume that machine checks are enabled both in MSR and HIDs
462 ml_probe_read_64(addr64_t paddr64
, unsigned int *val
)
464 if ((PAGE_SIZE
- (paddr64
& PAGE_MASK
)) < 4)
467 *val
= ml_phys_read_64((pmap_paddr_t
)paddr64
);
/*
 * bcmp - compare len bytes; returns 0 if equal, nonzero otherwise.
 * NOTE(review): only the local declarations of this function survived in
 * this chunk; the comparison loop is reconstructed (classic BSD-style
 * bcmp: returns the count of bytes remaining at first mismatch).
 */
int bcmp(
	 const void	*pa,
	 const void	*pb,
	 size_t		len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return (int)len;
}
/*
 * memcmp - standard C semantics: returns <0, 0, or >0 according to the
 * first differing byte (compared as unsigned char), 0 if all n bytes match
 * or n is 0.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return 0;
}
/*
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */
size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}
523 hw_compare_and_store(uint32_t oldval
, uint32_t newval
, volatile uint32_t *dest
)
525 return OSCompareAndSwap((UInt32
)oldval
,
527 (volatile UInt32
*)dest
);
533 * Machine-dependent routine to fill in an array with up to callstack_max
534 * levels of return pc information.
536 void machine_callstack(
537 __unused natural_t
*buf
,
538 __unused vm_size_t callstack_max
)
542 #endif /* MACH_ASSERT */
544 void fillPage(ppnum_t pa
, unsigned int fill
)
549 int cnt
= PAGE_SIZE
/sizeof(unsigned int);
552 mp_disable_preemption();
555 map
= pmap_get_mapwindow((pt_entry_t
)(INTEL_PTE_VALID
| INTEL_PTE_RW
| (src
& PG_FRAME
) |
556 INTEL_PTE_REF
| INTEL_PTE_MOD
));
558 for (i
= 0, addr
= (unsigned int *)map
->prv_CADDR
; i
< cnt
; i
++ )
561 pmap_put_mapwindow(map
);
563 mp_enable_preemption();
/* Instruction wrappers for the x86 serialization / cache-control ops. */

static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}

static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}

static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}

/* Flush the cache line containing *ptr from all cache levels. */
static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}
583 void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
)
586 uint32_t linesize
= cpuid_info()->cache_linesize
;
588 uint32_t offset
, chunk
;
593 istate
= ml_set_interrupts_enabled(FALSE
);
595 offset
= (uint32_t)(pa
& (linesize
- 1));
598 map
= pmap_get_mapwindow((pt_entry_t
)(i386_ptob(atop_64(addr
)) | INTEL_PTE_VALID
));
601 offset
= (uint32_t)(addr
& ((addr64_t
) (page_size
- 1)));
602 chunk
= (uint32_t)page_size
- offset
;
609 for (; offset
< chunk
; offset
+= linesize
)
610 __clflush((void *)(((uintptr_t)map
->prv_CADDR
) + offset
));
614 chunk
= (uint32_t) page_size
;
618 pmap_store_pte(map
->prv_CMAP
, (pt_entry_t
)(i386_ptob(atop_64(addr
)) | INTEL_PTE_VALID
));
619 invlpg((uintptr_t)map
->prv_CADDR
);
624 pmap_put_mapwindow(map
);
626 (void) ml_set_interrupts_enabled(istate
);
631 void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
)
633 return(dcache_incoherent_io_store64(pa
,count
));
638 flush_dcache64(addr64_t addr
, unsigned count
, int phys
)
641 dcache_incoherent_io_flush64(addr
, count
);
644 uint64_t linesize
= cpuid_info()->cache_linesize
;
645 addr64_t bound
= (addr
+ count
+ linesize
- 1) & ~(linesize
- 1);
647 while (addr
< bound
) {
648 __clflush((void *) (uintptr_t) addr
);
656 invalidate_icache64(__unused addr64_t addr
,
657 __unused
unsigned count
,
663 addr64_t vm_last_addr
;
666 mapping_set_mod(ppnum_t pn
)
672 mapping_set_ref(ppnum_t pn
)
674 pmap_set_reference(pn
);
678 cache_flush_page_phys(ppnum_t pa
)
683 unsigned char *cacheline_addr
;
684 int cacheline_size
= cpuid_info()->cache_linesize
;
685 int cachelines_in_page
= PAGE_SIZE
/cacheline_size
;
689 istate
= ml_set_interrupts_enabled(FALSE
);
691 map
= pmap_get_mapwindow((pt_entry_t
)(i386_ptob(pa
) | INTEL_PTE_VALID
));
693 for (i
= 0, cacheline_addr
= (unsigned char *)map
->prv_CADDR
;
694 i
< cachelines_in_page
;
695 i
++, cacheline_addr
+= cacheline_size
) {
696 __clflush((void *) cacheline_addr
);
698 pmap_put_mapwindow(map
);
700 (void) ml_set_interrupts_enabled(istate
);
/* Stub: no KDP callout registration on this platform/configuration. */
void
kdp_register_callout(void)
{
}
714 * Return a uniformly distributed 64-bit random number.
716 * This interface should have minimal dependencies on kernel
717 * services, and thus be available very early in the life
718 * of the kernel. But as a result, it may not be very random
724 return (ml_early_random());
728 int host_vmxon(boolean_t exclusive __unused
)
730 return VMX_UNSUPPORTED
;
733 void host_vmxoff(void)
741 #define INT_SIZE (BYTE_SIZE * sizeof (int))
744 * Set indicated bit in bit string.
747 setbit(int bitno
, int *s
)
749 s
[bitno
/ INT_SIZE
] |= 1 << (bitno
% INT_SIZE
);
753 * Clear indicated bit in bit string.
756 clrbit(int bitno
, int *s
)
758 s
[bitno
/ INT_SIZE
] &= ~(1 << (bitno
% INT_SIZE
));
762 * Test if indicated bit is set in bit string.
765 testbit(int bitno
, int *s
)
767 return s
[bitno
/ INT_SIZE
] & (1 << (bitno
% INT_SIZE
));
771 * Find first bit set in bit string.
778 for (offset
= 0; !*s
; offset
+= (int)INT_SIZE
, ++s
);
779 return offset
+ __builtin_ctz(*s
);
/*
 * ffs - find first (least significant) bit set; returns 1-based bit
 * position, or 0 if mask is 0.
 */
int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}