#include <arm/misc_protos.h>
#include <sys/errno.h>
+#include <libkern/section_keywords.h>
#define INT_SIZE (BYTE_SIZE * sizeof (int))
-void
-bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
+#define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
+#define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
+#define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
+#define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
+
+static kern_return_t
+bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
unsigned int src_index;
unsigned int dst_index;
vm_offset_t dst_offset;
unsigned int wimg_bits_src, wimg_bits_dst;
unsigned int cpu_num = 0;
- ppnum_t pn_src = (ppnum_t)(src >> PAGE_SHIFT);
- ppnum_t pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
-
-#ifdef __ARM_COHERENT_IO__
- if (pmap_valid_address(src) &&
- pmap_valid_address(dst) &&
- (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)))) {
- bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
- return;
+ ppnum_t pn_src;
+ ppnum_t pn_dst;
+ addr64_t end __assert_only;
+ kern_return_t res = KERN_SUCCESS;
+
+ if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
+ assert(!__improbable(os_add_overflow(src, bytes, &end)));
+ }
+ if (!BCOPY_PHYS_DST_IS_USER(flags)) {
+ assert(!__improbable(os_add_overflow(dst, bytes, &end)));
}
+
+ while ((bytes > 0) && (res == KERN_SUCCESS)) {
+ src_offset = src & PAGE_MASK;
+ dst_offset = dst & PAGE_MASK;
+ boolean_t use_copy_window_src = FALSE;
+ boolean_t use_copy_window_dst = FALSE;
+ vm_size_t count = bytes;
+ vm_size_t count2 = bytes;
+ if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
+ use_copy_window_src = !pmap_valid_address(src);
+ pn_src = (ppnum_t)(src >> PAGE_SHIFT);
+#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
+ count = PAGE_SIZE - src_offset;
+ wimg_bits_src = pmap_cache_attributes(pn_src);
+ if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
+ use_copy_window_src = TRUE;
+ }
+#else
+ if (use_copy_window_src) {
+ wimg_bits_src = pmap_cache_attributes(pn_src);
+ count = PAGE_SIZE - src_offset;
+ }
#endif
+ }
+ if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
+ // write preflighting needed for things like dtrace which may write static read-only mappings
+ use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
+ pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
+#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
+ count2 = PAGE_SIZE - dst_offset;
+ wimg_bits_dst = pmap_cache_attributes(pn_dst);
+ if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
+ use_copy_window_dst = TRUE;
+ }
+#else
+ if (use_copy_window_dst) {
+ wimg_bits_dst = pmap_cache_attributes(pn_dst);
+ count2 = PAGE_SIZE - dst_offset;
+ }
+#endif
+ }
- wimg_bits_src = pmap_cache_attributes(pn_src);
- wimg_bits_dst = pmap_cache_attributes(pn_dst);
+ char *tmp_src;
+ char *tmp_dst;
-#ifndef __ARM_COHERENT_IO__
- if (((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
- ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
- (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)))) {
- /* Fast path - dst is writable and both source and destination have default attributes */
- bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
- return;
- }
-#endif
+ if (use_copy_window_src || use_copy_window_dst) {
+ mp_disable_preemption();
+ cpu_num = cpu_number();
+ }
- src_offset = src & PAGE_MASK;
- dst_offset = dst & PAGE_MASK;
+ if (use_copy_window_src) {
+ src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
+ tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
+ } else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
+ tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
+ } else {
+ tmp_src = (char*)src;
+ }
+ if (use_copy_window_dst) {
+ dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
+ tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
+ } else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
+ tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
+ } else {
+ tmp_dst = (char*)dst;
+ }
- if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE)
- panic("bcopy extends beyond copy windows");
+ if (count > count2) {
+ count = count2;
+ }
+ if (count > bytes) {
+ count = bytes;
+ }
- mp_disable_preemption();
- cpu_num = cpu_number();
- src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
- dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ|VM_PROT_WRITE, wimg_bits_dst);
+ if (BCOPY_PHYS_SRC_IS_USER(flags)) {
+ res = copyin((user_addr_t)src, tmp_dst, count);
+ } else if (BCOPY_PHYS_DST_IS_USER(flags)) {
+ res = copyout(tmp_src, (user_addr_t)dst, count);
+ } else {
+ bcopy(tmp_src, tmp_dst, count);
+ }
- bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
- (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
- bytes);
+ if (use_copy_window_src) {
+ pmap_unmap_cpu_windows_copy(src_index);
+ }
+ if (use_copy_window_dst) {
+ pmap_unmap_cpu_windows_copy(dst_index);
+ }
+ if (use_copy_window_src || use_copy_window_dst) {
+ mp_enable_preemption();
+ }
- pmap_unmap_cpu_windows_copy(src_index);
- pmap_unmap_cpu_windows_copy(dst_index);
- mp_enable_preemption();
+ src += count;
+ dst += count;
+ bytes -= count;
+ }
+ return res;
+}
+
+void
+bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
+{
+ bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
void
bzero_phys(src64, bytes);
}
+extern void *secure_memset(void *, int, size_t);
+
/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
unsigned int wimg_bits;
unsigned int cpu_num = cpu_number();
- ppnum_t pn = (ppnum_t)(src >> PAGE_SHIFT);
-
-#ifdef __ARM_COHERENT_IO__
- if (pmap_valid_address(src)) {
- bzero((char *)phystokv((pmap_paddr_t) src), bytes);
- return;
- }
-#endif
+ ppnum_t pn;
+ addr64_t end __assert_only;
- wimg_bits = pmap_cache_attributes(pn);
-
-#ifndef __ARM_COHERENT_IO__
- if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
- /* Fast path - default attributes */
- bzero((char *)phystokv((pmap_paddr_t) src), bytes);
- return;
- }
-#endif
-
- mp_disable_preemption();
- cpu_num = cpu_number();
+ assert(!__improbable(os_add_overflow(src, bytes, &end)));
+ vm_offset_t offset = src & PAGE_MASK;
while (bytes > 0) {
- vm_offset_t offset = src & PAGE_MASK;
- uint64_t count = PAGE_SIZE - offset;
-
- if (count > bytes)
- count = bytes;
+ vm_size_t count = bytes;
+ boolean_t use_copy_window = !pmap_valid_address(src);
pn = (ppnum_t)(src >> PAGE_SHIFT);
+ wimg_bits = pmap_cache_attributes(pn);
+#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
+ count = PAGE_SIZE - offset;
+ if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
+ use_copy_window = TRUE;
+ }
+#else
+ if (use_copy_window) {
+ count = PAGE_SIZE - offset;
+ }
+#endif
+ char *buf;
+ unsigned int index;
+ if (use_copy_window) {
+ mp_disable_preemption();
+ cpu_num = cpu_number();
+ index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
+ buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
+ } else {
+ buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
+ }
- unsigned int index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
+ if (count > bytes) {
+ count = bytes;
+ }
- bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);
+ switch (wimg_bits & VM_WIMG_MASK) {
+ case VM_WIMG_DEFAULT:
+ case VM_WIMG_WCOMB:
+ case VM_WIMG_INNERWBACK:
+ case VM_WIMG_WTHRU:
+ bzero(buf, count);
+ break;
+ default:
+ /* 'dc zva' performed by bzero is not safe for device memory */
+ secure_memset((void*)buf, 0, count);
+ }
- pmap_unmap_cpu_windows_copy(index);
+ if (use_copy_window) {
+ pmap_unmap_cpu_windows_copy(index);
+ mp_enable_preemption();
+ }
src += count;
bytes -= count;
+ offset = 0;
}
-
- mp_enable_preemption();
}
/*
unsigned int index;
unsigned int wimg_bits;
ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
+ ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
unsigned long long result = 0;
vm_offset_t copywindow_vaddr = 0;
unsigned char s1;
unsigned short s2;
unsigned int s4;
-#ifdef __ARM_COHERENT_IO__
+ if (__improbable(pn_end != pn)) {
+ panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
+ }
+
+#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
if (pmap_valid_address(paddr)) {
switch (size) {
case 1:
break;
default:
panic("Invalid size %d for ml_phys_read_data\n", size);
- break;
+ break;
}
return result;
}
copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
switch (size) {
- case 1:
- s1 = *(volatile unsigned char *)copywindow_vaddr;
- result = s1;
- break;
- case 2:
- s2 = *(volatile unsigned short *)copywindow_vaddr;
- result = s2;
- break;
- case 4:
- s4 = *(volatile unsigned int *)copywindow_vaddr;
- result = s4;
- break;
- case 8:
- result = *(volatile unsigned long long*)copywindow_vaddr;
- break;
- default:
- panic("Invalid size %d for ml_phys_read_data\n", size);
- break;
-
+ case 1:
+ s1 = *(volatile unsigned char *)copywindow_vaddr;
+ result = s1;
+ break;
+ case 2:
+ s2 = *(volatile unsigned short *)copywindow_vaddr;
+ result = s2;
+ break;
+ case 4:
+ s4 = *(volatile unsigned int *)copywindow_vaddr;
+ result = s4;
+ break;
+ case 8:
+ result = *(volatile unsigned long long*)copywindow_vaddr;
+ break;
+ default:
+ panic("Invalid size %d for ml_phys_read_data\n", size);
+ break;
}
pmap_unmap_cpu_windows_copy(index);
return result;
}
-unsigned int ml_phys_read( vm_offset_t paddr)
+unsigned int
+ml_phys_read( vm_offset_t paddr)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
-unsigned int ml_phys_read_word(vm_offset_t paddr) {
-
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
+unsigned int
+ml_phys_read_word(vm_offset_t paddr)
+{
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
-unsigned int ml_phys_read_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_64(addr64_t paddr64)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
-unsigned int ml_phys_read_word_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_word_64(addr64_t paddr64)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
-unsigned int ml_phys_read_half(vm_offset_t paddr)
+unsigned int
+ml_phys_read_half(vm_offset_t paddr)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}
-unsigned int ml_phys_read_half_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_half_64(addr64_t paddr64)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}
-unsigned int ml_phys_read_byte(vm_offset_t paddr)
+unsigned int
+ml_phys_read_byte(vm_offset_t paddr)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}
-unsigned int ml_phys_read_byte_64(addr64_t paddr64)
+unsigned int
+ml_phys_read_byte_64(addr64_t paddr64)
{
- return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}
-unsigned long long ml_phys_read_double(vm_offset_t paddr)
+unsigned long long
+ml_phys_read_double(vm_offset_t paddr)
{
- return ml_phys_read_data((pmap_paddr_t)paddr, 8);
+ return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}
-unsigned long long ml_phys_read_double_64(addr64_t paddr64)
+unsigned long long
+ml_phys_read_double_64(addr64_t paddr64)
{
- return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
+ return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}
unsigned int index;
unsigned int wimg_bits;
ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
+ ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
vm_offset_t copywindow_vaddr = 0;
-#ifdef __ARM_COHERENT_IO__
+ if (__improbable(pn_end != pn)) {
+ panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
+ }
+
+#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
if (pmap_valid_address(paddr)) {
switch (size) {
case 1:
mp_disable_preemption();
wimg_bits = pmap_cache_attributes(pn);
- index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits);
+ index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
switch (size) {
- case 1:
- *(volatile unsigned char *)(copywindow_vaddr) =
- (unsigned char)data;
- break;
- case 2:
- *(volatile unsigned short *)(copywindow_vaddr) =
- (unsigned short)data;
- break;
- case 4:
- *(volatile unsigned int *)(copywindow_vaddr) =
- (uint32_t)data;
- break;
- case 8:
- *(volatile unsigned long long *)(copywindow_vaddr) =
- (unsigned long long)data;
- break;
- default:
- panic("Invalid size %d for ml_phys_write_data\n", size);
- break;
+ case 1:
+ *(volatile unsigned char *)(copywindow_vaddr) =
+ (unsigned char)data;
+ break;
+ case 2:
+ *(volatile unsigned short *)(copywindow_vaddr) =
+ (unsigned short)data;
+ break;
+ case 4:
+ *(volatile unsigned int *)(copywindow_vaddr) =
+ (uint32_t)data;
+ break;
+ case 8:
+ *(volatile unsigned long long *)(copywindow_vaddr) =
+ (unsigned long long)data;
+ break;
+ default:
+ panic("Invalid size %d for ml_phys_write_data\n", size);
+ break;
}
pmap_unmap_cpu_windows_copy(index);
mp_enable_preemption();
}
-void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
+ ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}
-void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
+void
+ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
+ ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}
-void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
+ ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}
-void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
+void
+ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
+ ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}
-void ml_phys_write(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write(vm_offset_t paddr, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
+ ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
-void ml_phys_write_64(addr64_t paddr64, unsigned int data)
+void
+ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
+ ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
-void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
+void
+ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
+ ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
-void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
+void
+ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
+ ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
-void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
+void
+ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
- ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
+ ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}
-void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
+void
+ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
- ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
+ ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
/* Set bit `bitno` in the int-array bitmap `s` (bits numbered LSB-first). */
void
setbit(int bitno, int *s)
{
	/* 1U avoids UB from left-shifting into the sign bit. */
	s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}
/*
/* Clear bit `bitno` in the int-array bitmap `s` (bits numbered LSB-first). */
void
clrbit(int bitno, int *s)
{
	/* 1U avoids UB from left-shifting into the sign bit. */
	s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}
/*
int
testbit(int bitno, int *s)
{
- return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
+ return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
}
/*
{
int offset;
- for (offset = 0; !*s; offset += INT_SIZE, ++s);
+ for (offset = 0; !*s; offset += INT_SIZE, ++s) {
+ ;
+ }
return offset + __builtin_ctz(*s);
}
int
ffs(unsigned int mask)
{
- if (mask == 0)
+ if (mask == 0) {
return 0;
+ }
/*
* NOTE: cannot use __builtin_ffs because it generates a call to
int
ffsll(unsigned long long mask)
{
- if (mask == 0)
+ if (mask == 0) {
return 0;
+ }
/*
* NOTE: cannot use __builtin_ffsll because it generates a call to
/*
 * Find last (most significant) set bit, 1-indexed; returns 0 when mask == 0.
 * e.g. fls(1) == 1, fls(0x80000000) == 32.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clz(mask);
}
/*
 * 64-bit find-last-set, 1-indexed; returns 0 when mask == 0.
 * e.g. flsll(1) == 1, flsll(1ULL << 63) == 64.
 */
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
#undef bcmp
/*
 * Compare `len` bytes; returns 0 iff the regions are equal. The non-zero
 * return is the number of bytes remaining at the first mismatch (clamped to
 * fit an int when len exceeds 32 bits), so callers must treat it only as
 * zero/non-zero.
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	do{
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	} else {
		return (int)len;
	}
}
#undef memcmp
+MARK_AS_HIBERNATE_TEXT
int
memcmp(const void *s1, const void *s2, size_t n)
{
const unsigned char *p1 = s1, *p2 = s2;
do {
- if (*p1++ != *p2++)
- return (*--p1 - *--p2);
+ if (*p1++ != *p2++) {
+ return *--p1 - *--p2;
+ }
} while (--n != 0);
}
- return (0);
+ return 0;
}
/*
 * General copy between physical/virtual endpoints, selected by `which`
 * (cppvPsrc/cppvPsnk = endpoint is physical, cppvKmap = kernel virtual,
 * neither = user virtual). At least one endpoint must be physical.
 * cppvFsrc/cppvFsnk request an explicit dcache flush of the corresponding
 * range on non-coherent configurations.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only one is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
#if MACH_ASSERT
/*
 * Machine-dependent routine that captures the USER call stack of the current
 * thread into `buf`, up to `callstack_max` entries, zero-filling any unused
 * tail. (The original comment body was lost in this garbled span.)
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		/* No user state (e.g. kernel-only thread): report an empty stack. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		if (is_saved_state64(state)) {
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			/* Walk the user frame chain: frame[0] = caller fp, frame[1] = return pc. */
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		/* Zero-fill the remainder so callers see a terminated stack. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}
#endif /* MACH_ASSERT */
int
clr_be_bit(void)
boolean_t
ml_probe_read(
- __unused vm_offset_t paddr,
- __unused unsigned int *val)
+ __unused vm_offset_t paddr,
+ __unused unsigned int *val)
{
panic("ml_probe_read() unimplemented");
return 1;
boolean_t
ml_probe_read_64(
- __unused addr64_t paddr,
- __unused unsigned int *val)
+ __unused addr64_t paddr,
+ __unused unsigned int *val)
{
panic("ml_probe_read_64() unimplemented");
return 1;
void
ml_thread_policy(
- __unused thread_t thread,
- __unused unsigned policy_id,
- __unused unsigned policy_info)
+ __unused thread_t thread,
+ __unused unsigned policy_id,
+ __unused unsigned policy_info)
{
- // <rdar://problem/7141284>: Reduce print noise
- // kprintf("ml_thread_policy() unimplemented\n");
+ // <rdar://problem/7141284>: Reduce print noise
+ // kprintf("ml_thread_policy() unimplemented\n");
}
+__dead2
void
-panic_unimplemented()
+panic_unimplemented(void)
{
panic("Not yet implemented.");
}
/* ARM64_TODO <rdar://problem/9198953> */
-void abort(void);
+void abort(void) __dead2;
void
-abort()
+abort(void)
{
panic("Abort.");
}
}
#endif
+/*
+ * Get a quick virtual mapping of a physical page and run a callback on that
+ * page's virtual address.
+ *
+ * @param dst64 Physical address to access (doesn't need to be page-aligned).
+ * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
+ * @param func Callback function to call with the page's virtual address.
+ * @param arg Argument passed directly to `func`.
+ *
+ * @return The return value from `func`.
+ */
+int
+apply_func_phys(
+ addr64_t dst64,
+ vm_size_t bytes,
+ int (*func)(void * buffer, vm_size_t bytes, void * arg),
+ void * arg)
+{
+ /* The physical aperture is only guaranteed to work with kernel-managed addresses. */
+ if (!pmap_valid_address(dst64)) {
+ panic("%s address error: passed in address (%#llx) not a kernel managed address",
+ __FUNCTION__, dst64);
+ }
+
+ /* Ensure we stay within a single page */
+ if (((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES)) {
+ panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
+ __FUNCTION__, dst64, bytes);
+ }
+
+ return func((void*)phystokv(dst64), bytes, arg);
+}