diff --git a/osfmk/x86_64/loose_ends.c b/osfmk/x86_64/loose_ends.c
index e8a1605a75957a0fdb12d28d7f0c595d7f2894e6..b912c6d9b98277be64a22de20103c7d7559468b6 100644
--- a/osfmk/x86_64/loose_ends.c
+++ b/osfmk/x86_64/loose_ends.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -193,6 +193,25 @@ bcopy_phys(
        bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
 }
 
+/*
+ * allow a function to get a quick virtual mapping of a physical page
+ */
+
+int
+apply_func_phys(
+               addr64_t dst64,
+               vm_size_t bytes,
+               int (*func)(void * buffer, vm_size_t bytes, void * arg),
+               void * arg)
+{
+       /* Not necessary for K64 - but ensure we stay within a page */
+       if ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) {
+               panic("apply_func_phys alignment");
+       }
+
+       return func(PHYSMAP_PTOV(dst64), bytes, arg);
+}
+
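For context, a caller of the new apply_func_phys() gets a transient kernel-virtual view of one physical page without managing a mapping itself. A minimal usage sketch; the checksum callback is hypothetical, but its signature matches the function-pointer type above:

    /* Hypothetical callback: byte-sum the window it is handed. */
    static int
    sum_bytes(void *buffer, vm_size_t bytes, void *arg)
    {
            uint32_t   *sum = (uint32_t *)arg;
            vm_size_t   i;

            for (i = 0; i < bytes; i++)
                    *sum += ((uint8_t *)buffer)[i];
            return 0;
    }

    /* Sum 'len' bytes at physical address 'pa'; the range must not
     * cross a page boundary, or apply_func_phys() panics. */
    uint32_t sum = 0;
    (void) apply_func_phys(pa, len, sum_bytes, &sum);
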
 /* 
  * ovbcopy - like bcopy, but recognizes overlapping ranges and handles 
  *           them correctly.
@@ -224,39 +243,43 @@ ovbcopy(
  */
 
 
-static unsigned int
+static inline unsigned int
 ml_phys_read_data(pmap_paddr_t paddr, int size)
 {
        unsigned int result;
 
+       if (!physmap_enclosed(paddr))
+               panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
+
         switch (size) {
-            unsigned char s1;
-            unsigned short s2;
+               unsigned char s1;
+               unsigned short s2;
         case 1:
-            s1 = *(unsigned char *)PHYSMAP_PTOV(paddr);
-            result = s1;
-            break;
+               s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
+               result = s1;
+               break;
         case 2:
-            s2 = *(unsigned short *)PHYSMAP_PTOV(paddr);
-            result = s2;
-            break;
+               s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
+               result = s2;
+               break;
         case 4:
-        default:
-            result = *(unsigned int *)PHYSMAP_PTOV(paddr);
-            break;
+               result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
+               break;
+       default:
+               panic("Invalid size %d for ml_phys_read_data\n", size);
+               break;
         }
-
         return result;
 }
 
 static unsigned long long
 ml_phys_read_long_long(pmap_paddr_t paddr )
 {
-       return *(unsigned long long *)PHYSMAP_PTOV(paddr);
+       if (!physmap_enclosed(paddr))
+               panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
+       return *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
 }
 
-
-
 unsigned int ml_phys_read( vm_offset_t paddr)
 {
         return ml_phys_read_data((pmap_paddr_t)paddr, 4);
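With the checks above in place, every read width funnels through ml_phys_read_data() or ml_phys_read_long_long(), so an address outside the physmap, or an unsupported size, now panics instead of dereferencing a wild pointer. A sketch of the accessor family, assuming the usual width-specific wrappers defined nearby in this file:

    unsigned int       b = ml_phys_read_byte(paddr);        /* size 1 */
    unsigned int       h = ml_phys_read_half(paddr);        /* size 2 */
    unsigned int       w = ml_phys_read(paddr);             /* size 4 */
    unsigned long long d = ml_phys_read_double_64(paddr64); /* size 8 */
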
@@ -313,30 +336,36 @@ unsigned long long ml_phys_read_double_64(addr64_t paddr64)
  *  Write data to a physical address. Memory should not be cache inhibited.
  */
 
-static void
+static inline void
 ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
 {
+       if (!physmap_enclosed(paddr))
+               panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
+
         switch (size) {
         case 1:
-           *(unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
+           *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
             break;
         case 2:
-           *(unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
+           *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
             break;
         case 4:
-        default:
-           *(unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
+           *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
             break;
+       default:
+               panic("Invalid size %d for ml_phys_write_data\n", size);
+               break;
         }
 }
 
 static void
 ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
 {
-       *(unsigned long long *)PHYSMAP_PTOV(paddr) = data;
-}
-
+       if (!physmap_enclosed(paddr))
+               panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);
 
+       *(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data;
+}
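The write side is symmetric: all widths route through the two checked helpers above. A sketch, again assuming the wrappers defined nearby:

    ml_phys_write_byte(paddr, 0xAA);            /* size 1 */
    ml_phys_write_half(paddr, 0xAABB);          /* size 2 */
    ml_phys_write(paddr, 0xAABBCCDD);           /* size 4 */
    ml_phys_write_double_64(paddr64, data64);   /* size 8 */
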
 
 void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
 {
@@ -393,9 +422,8 @@ void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
  *
  *
  *      Read the memory location at physical address paddr.
- *  This is a part of a device probe, so there is a good chance we will
- *  have a machine check here. So we have to be able to handle that.
- *  We assume that machine checks are enabled both in MSR and HIDs
+ * *Does not* recover from machine checks, unlike the PowerPC implementation.
+ * Should probably be deprecated.
  */
 
 boolean_t
@@ -529,18 +557,15 @@ static inline void __clflush(void *ptr)
 
 void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
 {
-        uint32_t  linesize = cpuid_info()->cache_linesize;
-        addr64_t  addr;
-        boolean_t istate;
+       addr64_t  linesize = cpuid_info()->cache_linesize;
+       addr64_t  bound = (pa + count + linesize - 1) & ~(linesize - 1);
 
        __mfence();
 
-        istate = ml_set_interrupts_enabled(FALSE);
-
-       for (addr = pa; addr < pa + count; addr += linesize)
-               __clflush(PHYSMAP_PTOV(addr));
-
-        (void) ml_set_interrupts_enabled(istate);
+       while (pa < bound) {
+               __clflush(PHYSMAP_PTOV(pa));
+               pa += linesize;
+       }
 
        __mfence();
 }
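The rewritten loop rounds the end of the range up to a cache-line boundary before flushing, where the old `addr < pa + count` test could stop one line short when the range ended mid-line. A worked instance of the arithmetic (64-byte lines assumed for illustration):

    uint64_t linesize = 64;                /* e.g. cpuid_info()->cache_linesize */
    addr64_t pa = 0x1030, count = 0x20;    /* bytes [0x1030, 0x1050) */
    addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1);
    /* bound == 0x1080; stepping pa by linesize flushes the lines at
     * 0x1000 and 0x1040, where the old loop exited after the first. */
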
@@ -551,10 +576,21 @@ void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
 }
 
 void
-flush_dcache64(__unused addr64_t addr,
-              __unused unsigned count,
-              __unused int phys)
+flush_dcache64(addr64_t addr, unsigned count, int phys)
 {
+       if (phys) {
+               dcache_incoherent_io_flush64(addr, count);
+       }
+       else {
+               uint64_t  linesize = cpuid_info()->cache_linesize;
+               addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);
+               __mfence();
+               while (addr < bound) {
+                       __clflush((void *) (uintptr_t) addr);
+                       addr += linesize;
+               }
+               __mfence();
+       }
 }
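flush_dcache64() was previously a no-op on x86_64 (see the #if 0 comments in copypv() below); it now flushes for real, with phys selecting between the physmap route and a plain kernel-virtual range. Illustrative calls, assuming buf is an ordinary kernel-virtual buffer:

    flush_dcache64((addr64_t)(uintptr_t)buf, size, 0);  /* virtual range */
    flush_dcache64(pa, size, 1);                        /* physical range */
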
 
 void
@@ -603,316 +639,6 @@ cache_flush_page_phys(ppnum_t pa)
 }
 
 
-static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
-static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);
-
-/*
- * The copy engine has the following characteristics
- *   - copyio() handles copies to/from user or kernel space
- *   - copypv() deals with physical or virtual addresses
- *
- * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
- * point describing the full glory of the copy window implementation. In K64,
- * however, there is no need for windowing. Thanks to the vast shared address
- * space, the kernel has direct access to userspace and to physical memory.
- *
- * User virtual addresses are accessible provided the user's cr3 is loaded.
- * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
- * translation.
- *
- * Copyin/out variants all boil down to just these 2 routines in locore.s, which
- * provide fault-recoverable copying:
- */
-extern int _bcopy(const void *, void *, vm_size_t);
-extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
-
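The removed comment and the two locore.s entry points make the design concrete: with the K64 direct map there is no copy window, so a physical endpoint needs only a pointer translation before the fault-recoverable bcopy. A schematic of the COPYOUTPHYS shape, using names from the removed code below:

    /* Physical source -> kernel virtual via the direct map, then a
     * fault-recoverable copy out to the (already mapped) user address. */
    void *kva = PHYSMAP_PTOV(kernel_addr);
    error = _bcopy((const void *)kva, (void *)user_addr, nbytes);
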
-
-/*
- * Types of copies:
- */
-#define COPYIN         0       /* from user virtual to kernel virtual */
-#define COPYOUT                1       /* from kernel virtual to user virtual */
-#define COPYINSTR      2       /* string variant of copyin */
-#define COPYINPHYS     3       /* from user virtual to kernel physical */
-#define COPYOUTPHYS    4       /* from kernel physical to user virtual */
-
-
-static int
-copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
-       vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
-{
-        thread_t       thread;
-       pmap_t          pmap;
-       vm_size_t       bytes_copied;
-       int             error = 0;
-       boolean_t       istate = FALSE;
-       boolean_t       recursive_CopyIOActive;
-#if KDEBUG
-       int             debug_type = 0xeff70010;
-       debug_type += (copy_type << 2);
-#endif
-
-       thread = current_thread();
-
-       KERNEL_DEBUG(debug_type | DBG_FUNC_START,
-                    (unsigned)(user_addr >> 32), (unsigned)user_addr,
-                    nbytes, thread->machine.copyio_state, 0);
-
-       if (nbytes == 0)
-               goto out;
-
-        pmap = thread->map->pmap;
-
-
-       assert((vm_offset_t)kernel_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
-              copy_type == COPYINPHYS || copy_type == COPYOUTPHYS);
-
-       /* Sanity and security check for addresses to/from a user */
-
-       if (((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
-           ((nbytes && (user_addr+nbytes <= user_addr)) || ((user_addr + nbytes) > vm_map_max(thread->map)))) {
-               error = EFAULT;
-               goto out;
-       }
-
-       /*
-        * If the no_shared_cr3 boot-arg is set (true), the kernel runs on 
-        * its own pmap and cr3 rather than the user's -- so that wild accesses
-        * from kernel or kexts can be trapped. So, during copyin and copyout,
-        * we need to switch back to the user's map/cr3. The thread is flagged
-        * "CopyIOActive" at this time so that if the thread is pre-empted,
-        * we will later restore the correct cr3.
-        */
-       recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;
-       thread->machine.specFlags |= CopyIOActive;
-       if (no_shared_cr3) {
-               istate = ml_set_interrupts_enabled(FALSE);
-               if (get_cr3() != pmap->pm_cr3)
-                       set_cr3(pmap->pm_cr3);
-       }
-
-       /*
-        * Ensure that we're running on the target thread's cr3.
-        */
-       if ((pmap != kernel_pmap) && !use_kernel_map &&
-           (get_cr3() != pmap->pm_cr3)) {
-               panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
-                       copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map,
-                       (void *) get_cr3(), (void *) pmap->pm_cr3);
-       }
-       if (no_shared_cr3)
-               (void) ml_set_interrupts_enabled(istate);
-
-       KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_addr,
-                    (unsigned)kernel_addr, nbytes, 0, 0);
-
-        switch (copy_type) {
-
-       case COPYIN:
-               error = _bcopy((const void *) user_addr,
-                               kernel_addr,
-                               nbytes);
-               break;
-                       
-       case COPYOUT:
-               error = _bcopy(kernel_addr,
-                               (void *) user_addr,
-                               nbytes);
-               break;
-
-       case COPYINPHYS:
-               error = _bcopy((const void *) user_addr,
-                               PHYSMAP_PTOV(kernel_addr),
-                               nbytes);
-               break;
-
-       case COPYOUTPHYS:
-               error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
-                               (void *) user_addr,
-                               nbytes);
-               break;
-
-       case COPYINSTR:
-               error = _bcopystr((const void *) user_addr,
-                               kernel_addr,
-                               (int) nbytes,
-                               &bytes_copied);
-
-               /*
-                * lencopied should be updated on success
-                * or ENAMETOOLONG...  but not EFAULT
-                */
-               if (error != EFAULT)
-                       *lencopied = bytes_copied;
-
-               if (error) {
-#if KDEBUG
-                       nbytes = *lencopied;
-#endif
-                       break;
-               }
-               if (*(kernel_addr + bytes_copied - 1) == 0) {
-                       /*
-                        * we found a NULL terminator... we're done
-                        */
-#if KDEBUG
-                       nbytes = *lencopied;
-#endif
-                       break;
-               } else {
-                       /*
-                        * no more room in the buffer and we haven't
-                        * yet come across a NULL terminator
-                        */
-#if KDEBUG
-                       nbytes = *lencopied;
-#endif
-                       error = ENAMETOOLONG;
-                       break;
-               }
-               break;
-       }
-
-       if (!recursive_CopyIOActive)
-               thread->machine.specFlags &= ~CopyIOActive;
-       if (no_shared_cr3) {
-               istate = ml_set_interrupts_enabled(FALSE);
-               if  (get_cr3() != kernel_pmap->pm_cr3)
-                       set_cr3(kernel_pmap->pm_cr3);
-               (void) ml_set_interrupts_enabled(istate);
-       }
-
-out:
-       KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
-                    (unsigned)kernel_addr, (unsigned)nbytes, error, 0);
-
-       return (error);
-}
-
-
-static int
-copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
-{
-        char       *paddr;
-       user_addr_t vaddr;
-       int         ctype;
-
-       if (which & cppvPsnk) {
-               paddr  = (char *)sink;
-               vaddr  = (user_addr_t)source;
-               ctype  = COPYINPHYS;
-       } else {
-               paddr  = (char *)source;
-               vaddr  = (user_addr_t)sink;
-               ctype  = COPYOUTPHYS;
-       }
-       return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
-}
-
-int
-copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
-{
-    return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
-}    
-
-int
-copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
-{
-    return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
-}
-
-int
-copyinstr(const user_addr_t user_addr,  char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
-{
-    *lencopied = 0;
-
-    return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
-}
-
-int
-copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
-{
-    return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
-}
-
-int
-copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
-{
-    return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
-}
-
-
-kern_return_t
-copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
-{
-       unsigned int lop, csize;
-       int bothphys = 0;
-       
-       KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
-                    (unsigned)snk64, size, which, 0);
-
-       if ((which & (cppvPsrc | cppvPsnk)) == 0 )                              /* Make sure that only one is virtual */
-               panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
-
-       if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
-               bothphys = 1;                                                   /* both are physical */
-
-       while (size) {
-         
-               if (bothphys) {
-                       lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));            /* Assume sink smallest */
-
-                       if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
-                               lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));    /* No, source is smaller */
-               } else {
-                       /*
-                        * only need to compute the resid for the physical page
-                        * address... we don't care about where we start/finish in
-                        * the virtual since we just call the normal copyin/copyout
-                        */
-                       if (which & cppvPsrc)
-                               lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
-                       else
-                               lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
-               }
-               csize = size;                                           /* Assume we can copy it all */
-               if (lop < size)
-                       csize = lop;                                    /* Nope, we can't do it all */
-#if 0          
-               /*
-                * flush_dcache64 is currently a nop on the i386... 
-                * it's used when copying to non-system memory such
-                * as video capture cards... on PPC there was a need
-                * to flush due to how we mapped this memory... not
-                * sure if it's needed on i386.
-                */
-               if (which & cppvFsrc)
-                       flush_dcache64(src64, csize, 1);                /* If requested, flush source before move */
-               if (which & cppvFsnk)
-                       flush_dcache64(snk64, csize, 1);                /* If requested, flush sink before move */
-#endif
-               if (bothphys)
-                       bcopy_phys(src64, snk64, csize);                /* Do a physical copy, virtually */
-               else {
-                       if (copyio_phys(src64, snk64, csize, which))
-                               return (KERN_FAILURE);
-               }
-#if 0
-               if (which & cppvFsrc)
-                       flush_dcache64(src64, csize, 1);        /* If requested, flush source after move */
-               if (which & cppvFsnk)
-                       flush_dcache64(snk64, csize, 1);        /* If requested, flush sink after move */
-#endif
-               size   -= csize;                                        /* Calculate what is left */
-               snk64 += csize;                                 /* Bump sink to next physical address */
-               src64 += csize;                                 /* Bump source to next physical address */
-       }
-       KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
-                    (unsigned)snk64, size, which, 0);
-
-       return KERN_SUCCESS;
-}
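copypv()'s chunking keeps each pass within a single page on any physical side, recomputing the per-page residue each time around the loop. A worked instance of the remainder math (values illustrative):

    /* PAGE_SIZE == 4096: a sink starting 0x70 bytes before a page end
     * limits the first pass to 0x70 bytes. */
    addr64_t     snk64 = 0x1F90;
    unsigned int lop   = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
    /* lop == 0x70; csize = MIN(size, lop), both addresses advance by
     * csize, and the loop recomputes lop for the next page. */
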
-
 #if !MACH_KDP
 void
 kdp_register_callout(void)
@@ -920,6 +646,20 @@ kdp_register_callout(void)
 }
 #endif
 
+/*
+ * Return a uniformly distributed 64-bit random number.
+ *
+ * This interface should have minimal dependencies on kernel
+ * services, and thus be available very early in the life
+ * of the kernel.  But as a result, it may not be very random
+ * on all platforms.
+ */
+uint64_t
+early_random(void)
+{
+       return (ml_early_random());
+}
+
 #if !CONFIG_VMX
 int host_vmxon(boolean_t exclusive __unused)
 {