/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */

#if 0
#undef KERNEL_DEBUG
extern void ovbcopy(const char *from,
char *to,
vm_size_t nbytes);
-void machine_callstack(natural_t *buf, vm_size_t callstack_max);
+void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);
#define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL)
/*
 * Read a 1-, 2- or 4-byte value from physical address "paddr" through
 * the kernel physmap window.
 *
 * Panics if paddr lies outside the physmap or if "size" is not 1, 2 or 4,
 * rather than silently reading a possibly-unmapped or wrongly-sized
 * location.  Accesses are made through volatile pointers so the compiler
 * cannot elide or coalesce them (these may target device memory).
 */
static inline unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int result = 0;

	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
		unsigned char s1;
		unsigned short s2;
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}
	return result;
}
/*
 * Read a 64-bit value from physical address "paddr" through the kernel
 * physmap window.  Panics if paddr lies outside the physmap.  The load
 * is made through a volatile pointer so the compiler cannot elide it.
 */
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
	return *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
}
unsigned int ml_phys_read( vm_offset_t paddr)
/*
 * Write the low 1, 2 or 4 bytes of "data" to physical address "paddr"
 * through the kernel physmap window.
 *
 * Panics if paddr lies outside the physmap or if "size" is not 1, 2 or 4.
 * Stores are made through volatile pointers so the compiler cannot elide
 * or reorder them (these may target device memory).
 */
static inline void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
	case 1:
		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}
}
/*
 * Write a 64-bit value to physical address "paddr" through the kernel
 * physmap window.  Panics if paddr lies outside the physmap.  The store
 * is made through a volatile pointer so the compiler cannot elide it.
 */
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	*(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data;
}
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
*
*
* Read the memory location at physical address paddr.
 * *Does not* recover from machine checks, unlike the PowerPC implementation.
 * Should probably be deprecated.
*/
boolean_t
* levels of return pc information.
*/
/*
 * Capture up to "callstack_max" levels of return-pc information into
 * "buf".  Not implemented on this architecture: the stub ignores both
 * arguments.  The buffer element type is uintptr_t (not natural_t) so
 * full-width return addresses fit on 64-bit kernels.
 */
void machine_callstack(
	__unused uintptr_t *buf,
	__unused vm_size_t callstack_max)
{
}
*addr++ = fill;
}
-static inline void __sfence(void)
-{
- __asm__ volatile("sfence");
-}
-static inline void __mfence(void)
-{
- __asm__ volatile("mfence");
-}
-static inline void __wbinvd(void)
-{
- __asm__ volatile("wbinvd");
-}
static inline void __clflush(void *ptr)
{
__asm__ volatile("clflush (%0)" : : "r" (ptr));
addr64_t linesize = cpuid_info()->cache_linesize;
addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);
- __mfence();
+ mfence();
while (pa < bound) {
__clflush(PHYSMAP_PTOV(pa));
pa += linesize;
}
- __mfence();
+ mfence();
}
void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
dcache_incoherent_io_flush64(addr, count);
}
else {
- uint32_t linesize = cpuid_info()->cache_linesize;
+ uint64_t linesize = cpuid_info()->cache_linesize;
addr64_t bound = (addr + count + linesize -1) & ~(linesize - 1);
- __mfence();
+ mfence();
while (addr < bound) {
__clflush((void *) (uintptr_t) addr);
addr += linesize;
}
- __mfence();
+ mfence();
}
}
pmap_set_reference(pn);
}
+extern i386_cpu_info_t cpuid_cpu_info;
void
cache_flush_page_phys(ppnum_t pa)
{
boolean_t istate;
unsigned char *cacheline_addr;
- int cacheline_size = cpuid_info()->cache_linesize;
- int cachelines_to_flush = PAGE_SIZE/cacheline_size;
+ i386_cpu_info_t *cpuid_infop = cpuid_info();
+ int cacheline_size;
+ int cachelines_to_flush;
+
+ cacheline_size = cpuid_infop->cache_linesize;
+ if (cacheline_size == 0)
+ panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
+ cachelines_to_flush = PAGE_SIZE/cacheline_size;
- __mfence();
+ mfence();
istate = ml_set_interrupts_enabled(FALSE);
(void) ml_set_interrupts_enabled(istate);
- __mfence();
+ mfence();
}
#if !MACH_KDP
void
-kdp_register_callout(void)
+kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
+#pragma unused(fn,arg)
}
#endif
+/*
+ * Return a uniformly distributed 64-bit random number.
+ *
+ * This interface should have minimal dependencies on kernel
+ * services, and thus be available very early in the life
+ * of the kernel. But as a result, it may not be very random
+ * on all platforms.
+ */
+uint64_t
+early_random(void)
+{
+ return (ml_early_random());
+}
+
#if !CONFIG_VMX
int host_vmxon(boolean_t exclusive __unused)
{