uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
        uint16_t low    = DTRACE_LLQUANTIZE_LOW(arg);
        uint16_t high   = DTRACE_LLQUANTIZE_HIGH(arg);
-       uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEPS(arg);
+       uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
 
        llquanta[dtrace_aggregate_llquantize_bucket(factor, low, high, nsteps, nval)] += incr;
 }
                uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
                uint16_t low    = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
                uint16_t high   = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
-               uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEPS(desc->dtad_arg);
+               uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
                int64_t v;
 
                agg->dtag_initial = desc->dtad_arg;
 
             ErrorLog("ubc_upl_commit_range error %d\n", (int)kr);
     } else {
         VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY);
-               kr = ubc_upl_commit_range(upl, pl_offset, uplSize, flags | UPL_COMMIT_CLEAR_DIRTY);
+               kr = ubc_upl_commit_range(upl, pl_offset, uplSize, flags | UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_WRITTEN_BY_KERNEL);
         if (kr != KERN_SUCCESS)
             ErrorLog("ubc_upl_commit_range error %d\n", (int)kr);
     }
 
 
 #include <pexpert/pexpert.h>
 
+#include <mach/shared_region.h>
+
 unsigned long cs_procs_killed = 0;
 unsigned long cs_procs_invalidated = 0;
 
 SYSCTL_INT(_vm, OID_AUTO, cs_enforcement_panic, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_enforcement_panic, 0, "");
 #endif
 
+int panic_on_cs_killed = 0;
 void
 cs_init(void)
 {
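+       /*
+        * Development (MACH_ASSERT) kernels default to panicking when a
+        * process is killed for a code-signing violation; the boot-arg
+        * below overrides the default in either direction.
+        */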
+#if MACH_ASSERT
+       panic_on_cs_killed = 1;
+#endif
+       PE_parse_boot_argn("panic_on_cs_killed", &panic_on_cs_killed,
+                          sizeof (panic_on_cs_killed));
 #if !SECURE_KERNEL
        int disable_cs_enforcement = 0;
        PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement, 
 
        /* CS_KILL triggers a kill signal, and no you can't have the page. Nothing else. */
        if (p->p_csflags & CS_KILL) {
+               if (panic_on_cs_killed &&
+                   vaddr >= SHARED_REGION_BASE &&
+                   vaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE) {
+                       panic("<rdar://14393620> cs_invalid_page(va=0x%llx): killing p=%p\n", (uint64_t) vaddr, p);
+               }
                p->p_csflags |= CS_KILLED;
                cs_procs_killed++;
                send_kill = 1;
                retval = 1;
        }
        
+#if __x86_64__
+       if (panic_on_cs_killed &&
+           vaddr >= SHARED_REGION_BASE &&
+           vaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE) {
+               panic("<rdar://14393620> cs_invalid_page(va=0x%llx): cs error p=%p\n", (uint64_t) vaddr, p);
+       }
+#endif /* __x86_64__ */
+
        /* CS_HARD means fail the mapping operation so the process stays valid. */
        if (p->p_csflags & CS_HARD) {
                retval = 1;
                        NULL
                );
                printf("CODE SIGNING: cs_invalid_page(0x%llx): "
-                      "p=%d[%s] final status 0x%x, %sing page%s\n",
+                      "p=%d[%s] final status 0x%x, %s page%s\n",
                       vaddr, p->p_pid, p->p_comm, p->p_csflags,
-                      retval ? "deny" : "allow (remove VALID)",
+                      retval ? "denying" : "allowing (remove VALID)",
                       send_kill ? " sending SIGKILL" : "");
        }
 
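+       /*
+        * Deliver SIGKILL to the faulting thread itself, tagged with an
+        * EXC_BAD_ACCESS code, rather than signalling the whole process.
+        */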
        if (send_kill)
-               psignal(p, SIGKILL);
+               threadsignal(current_thread(), SIGKILL, EXC_BAD_ACCESS);
 
 
        return retval;
 
 
        if (set_file_size && (set_file_size != (off_t) va.va_data_alloc))
        {
-           off_t     bytesallocated = 0;
            u_int32_t alloc_flags = PREALLOCATE | ALLOCATEFROMPEOF | ALLOCATEALL;
 
            vnode_lock_spin(ref->vp);
            CLR(ref->vp->v_flag, VSWAP);
            vnode_unlock(ref->vp);
 
-           error = VNOP_ALLOCATE(ref->vp, set_file_size, alloc_flags,
-                                 &bytesallocated, 0 /*fst_offset*/,
-                                 ref->ctx);
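+            /*
+             * Shrink the file with a va_data_size setattr; grow it by
+             * preallocating only the space beyond the current allocation.
+             */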
+            if (set_file_size < (off_t) va.va_data_alloc)
+            {
+                struct vnode_attr setva;
+                VATTR_INIT(&setva);
+                VATTR_SET(&setva, va_data_size, set_file_size);
+                error = vnode_setattr(ref->vp, &setva, ref->ctx);
+            }
+            else
+            {
+                off_t bytesallocated = set_file_size - va.va_data_alloc;
+                error = VNOP_ALLOCATE(ref->vp, bytesallocated, alloc_flags,
+                                      &bytesallocated, 0 /*fst_offset*/,
+                                      ref->ctx);
+                HIBLOG("VNOP_ALLOCATE(%d) %qd\n", error, bytesallocated);
+            }
            // F_SETSIZE:
-           if (!error) error = vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL, ref->ctx);
-           kprintf("vnode_setsize(%d) %qd\n", error, set_file_size);
-           ref->filelength = bytesallocated;
+           (void) vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL, ref->ctx);
+           ref->filelength = set_file_size;
 
            vnode_lock_spin(ref->vp);
            SET(ref->vp->v_flag, VSWAP);
 
 0x531024C      CPUPM_PST_QOS_RATEUNLIMIT
 0x5310250      CPUPM_PST_QOS_SWITCH
 0x5310254      CPUPM_FORCED_IDLE
+0x5310258      CPUPM_PST_RAW_PERF
+0x531025C      CPUPM_CPU_HALT_DEEP
+0x5310260      CPUPM_CPU_HALT
+0x5310264      CPUPM_CPU_OFFLINE
+0x5310268      CPUPM_CPU_EXIT_HALT
+0x531026C      CPUPM_PST_QOS_CHARGE
 0x5330000      HIBERNATE
 0x5330004      HIBERNATE_WRITE_IMAGE
 0x5330008      HIBERNATE_MACHINE_INIT
 
        const CS_CodeDirectory *cd;
        off_t                   blob_start_offset, blob_end_offset;
        SHA1_CTX                sha1ctxt;
+       boolean_t               record_mtime;
+
+       record_mtime = FALSE;
 
        blob_handle = IPC_PORT_NULL;
 
                goto out;
        }
 
+       if (uip->cs_blobs == NULL) {
+               /* loading 1st blob: record the file's current "modify time" */
+               record_mtime = TRUE;
+       }
+
        /*
         * Add this blob to the list of blobs for this vnode.
         * We always add at the front of the list and we never remove a
 
        vnode_unlock(vp);
 
+       if (record_mtime) {
+               vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
+       }
+
        error = 0;      /* success ! */
 
 out:
        return blobs;
 }
 
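+/*
+ * Return the modification time recorded when the vnode's first code-signing
+ * blob was loaded, or a zero timespec if the vnode has no ubc_info.
+ */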
+void
+ubc_get_cs_mtime(
+       struct vnode    *vp,
+       struct timespec *cs_mtime)
+{
+       struct ubc_info *uip;
+
+       if (! UBCINFOEXISTS(vp)) {
+               cs_mtime->tv_sec = 0;
+               cs_mtime->tv_nsec = 0;
+               return;
+       }
+
+       uip = vp->v_ubcinfo;
+       cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
+       cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
+}
+
 unsigned long cs_validate_page_no_hash = 0;
 unsigned long cs_validate_page_bad_hash = 0;
 boolean_t
 
 #endif
        default:
                error = ENETDOWN;
-               goto end;
+               break;
        }
 
-end:
        GIF_UNLOCK(sc);
+end:
        if (error) {
                /* the mbuf was freed either by in_gif_output or in here */
                ifnet_stat_increment_out(ifp, 0, 0, 1);
 
        if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
                ap->sticky++;
                /* ARP entry is permanent? */
-               if (!(rt->rt_flags & RTF_STATIC)) {
+               if (rt->rt_expire == 0) {
                        RT_UNLOCK(rt);
                        return;
                }
 
         * interface has been set as a multicast option, use the
         * address of that interface as our source address.
         */
-       if (error == 0 && IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
+       if (IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) &&
            inp->inp_moptions != NULL) {
                struct ip_moptions *imo;
                struct ifnet *ifp;
                        lck_rw_done(in_ifaddr_rwlock);
                        if (ia == NULL)
                                error = EADDRNOTAVAIL;
+                       else
+                               error = 0;
                }
                IMO_UNLOCK(imo);
        }
 
                 * socket lock for better performance. If there are
                 * any pcbs in time-wait, the timer will get rescheduled.
                 * Hence some error in this check can be tolerated.
+                *
+                * Sometimes a socket on the time-wait queue can be closed
+                * if the 2MSL timer has expired but the application still
+                * holds a usecount on it.
                 */
-               if (TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
+               if (tw_tp->t_state == TCPS_CLOSED ||  
+                   TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
                        if (tcp_garbage_collect(tw_tp->t_inpcb, 1))
                                atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1);
-               } else {
-                       break;
                }
        }
 
 
 
 #include <net/net_osdep.h>
 
+#define MAX_REALIGN_LEN 2000
 #define AES_BLOCKLEN 16
-#define MAX_SBUF_LEN 2000
 
 extern lck_mtx_t *sadb_mutex;
 
        int sn, dn;     /* offset from the head of the mbuf, to meat */
        size_t ivoff, bodyoff;
        u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
-       u_int8_t sbuf[MAX_SBUF_LEN] __attribute__((aligned(4))), *sp, *sp_unaligned;
+       u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
        struct mbuf *scut;
        int scutoff;
        int     i, len;
                        sp_unaligned = NULL;
                } else {
                        sp_unaligned = sp;
-                       sp = sbuf;
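+                       /*
+                        * Source run is not 4-byte aligned: bounce it through
+                        * a heap buffer, allocated at most once per call and
+                        * freed after the loop.
+                        */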
+                       if (len > MAX_REALIGN_LEN) {
+                               return ENOBUFS;
+                       }
+                       if (sp_aligned == NULL) {
+                               sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
+                               if (sp_aligned == NULL)
+                                       return ENOMEM;
+                       }
+                       sp = sp_aligned;
                        memcpy(sp, sp_unaligned, len);
                }
                // no need to check output pointer alignment
        scut->m_len = scutoff;
        scut->m_next = d0;
 
+       // free memory
+       if (sp_aligned != NULL) {
+               FREE(sp_aligned, M_SECA);
+               sp_aligned = NULL;
+       }
+       
        /* just in case */
        bzero(iv, sizeof(iv));
        bzero(sbuf, sizeof(sbuf));
        int sn, dn;     /* offset from the head of the mbuf, to meat */
        size_t ivoff, bodyoff;
        u_int8_t *ivp, *dptr, *ivp_unaligned;
-       u_int8_t sbuf[MAX_SBUF_LEN] __attribute__((aligned(4))), *sp, *sp_unaligned;
+       u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
        u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
        struct mbuf *scut;
        int scutoff;
                        sp_unaligned = NULL;
                } else {
                        sp_unaligned = sp;
-                       sp = sbuf;
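+                       /*
+                        * Realign the source through the same lazily
+                        * allocated heap buffer scheme as in the preceding
+                        * function.
+                        */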
+                       if (len > MAX_REALIGN_LEN) {
+                               return ENOBUFS;
+                       }
+                       if (sp_aligned == NULL) {
+                               sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
+                               if (sp_aligned == NULL)
+                                       return ENOMEM;
+                       }
+                       sp = sp_aligned;
                        memcpy(sp, sp_unaligned, len);
                }
                // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
                } else {
                        ivp_unaligned = ivp;
                        ivp = ivp_aligned_buf;
-                       memcpy(ivp, ivp_unaligned, len);
+                       memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
                }
                // no need to check output pointer alignment
                aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn, 
                        soff += s->m_len;
                        s = s->m_next;
                }
-
        }
 
        /* free un-needed source mbufs and add dest mbufs to chain */
        m_freem(scut->m_next);
        scut->m_len = scutoff;
        scut->m_next = d0;
+       
+       // free memory
+       if (sp_aligned != NULL) {
+               FREE(sp_aligned, M_SECA);
+               sp_aligned = NULL;
+       }
 
        /* just in case */
        bzero(sbuf, sizeof(sbuf));
 
        return (error);
 }
 
+/*
+ * See if our mount is in trouble. Note this check is inherently racy.
+ */
+static int
+nfs_notresponding(struct nfsmount *nmp)
+{
+       int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
+       if (NMFLAG(nmp, MUTEJUKEBOX)) /* jukebox timeouts don't count as unresponsive if muted */
+               timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
+
+       return ((nmp->nm_state & timeoutmask) || !(nmp->nm_sockflags & NMSOCK_READY));
+}
+
 /*
  * NFS access vnode op.
  * For NFS version 2, just return ok. File accesses may fail later.
         * in the cache.
         */
 
-       /*
-        * In addition if the kernel is checking for access, KAUTH_VNODE_ACCESS
-        * not set, just return. At this moment do not know what the state of
-        * the server is and what ever we get back be it either yea or nay is
-        * going to be stale.  Finder (Desktop services/FileURL) might hang when
-        * going over the wire when just asking getattrlist for the roots FSID
-        * since we are going to be called to see if we're authorized for
-        * search. Since we are returning without checking the cache and/or
-        * going over the wire, it makes no sense to update the cache.
-        *
-        * N.B. This is also the strategy that SMB is using.
-        */
-       if (!(ap->a_action & KAUTH_VNODE_ACCESS))
-               return (0);
-       
        /*
         * Convert KAUTH primitives to NFS access rights.
         */
                dorpc = 0;
                waccess = 0;
        } else if (NACCESSVALID(np, slot)) {
-               microuptime(&now);
-               if ((now.tv_sec < (np->n_accessstamp[slot] + nfs_access_cache_timeout)) &&
-                   ((np->n_access[slot] & access) == access)) {
+               /*
+                * In addition, if the kernel is checking for access, i.e.,
+                * KAUTH_VNODE_ACCESS is not set, and the server does not seem
+                * to be responding, just return if we have something in the
+                * cache, even if it's stale for the user. If we're granted
+                * access by the cache and this is a kernel access, then call
+                * it good enough. We want to avoid having this particular
+                * request go over the wire and cause a hang, because at this
+                * moment we do not know what the state of the server is, and
+                * whatever we get back, be it yea or nay, is going to be
+                * stale. Finder (Desktop services/FileURL) might hang when
+                * going over the wire when just asking getattrlist for the
+                * root's FSID, since we are going to be called to see if
+                * we're authorized for search.
+                *
+                * N.B. This is also the strategy that SMB is using.
+                */
+               int granted = ((np->n_access[slot] & access) == access);
+
+               if (!(ap->a_action & KAUTH_VNODE_ACCESS)) {
+                       if (granted || nfs_notresponding(nmp)) {
+                               dorpc = 0;
+                               waccess = np->n_access[slot];
+                       }
+               } else {
+                       int stale;
+                       microuptime(&now);
+                       stale = (now.tv_sec >= (np->n_accessstamp[slot] + nfs_access_cache_timeout));
+                       if (granted && !stale) {
                        /* OSAddAtomic(1, &nfsstats.accesscache_hits); */
-                       dorpc = 0;
-                       waccess = np->n_access[slot];
+                               dorpc = 0;
+                               waccess = np->n_access[slot];
+                       }
                }
        }
        nfs_node_unlock(np);
 
  * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=55965
  */
 
-#if __STDC_VERSION__ >= 199901L && (!defined(__GNUC__) || defined(__clang__))
+#if defined(__cplusplus) || \
+    (__STDC_VERSION__ >= 199901L && \
+     !defined(__GNUC_GNU_INLINE__) && \
+     (!defined(__GNUC__) || defined(__clang__)))
 # define __header_inline           inline
 #elif defined(__GNUC__) && defined(__GNUC_STDC_INLINE__)
 # define __header_inline           extern __inline __attribute__((__gnu_inline__))
 
         (uint16_t)(((x) & DTRACE_LLQUANTIZE_HIGHMASK) >> \
         DTRACE_LLQUANTIZE_HIGHSHIFT)
 
-#define  DTRACE_LLQUANTIZE_NSTEPS(x)    \
+#define  DTRACE_LLQUANTIZE_NSTEP(x)    \
         (uint16_t)(((x) & DTRACE_LLQUANTIZE_NSTEPMASK) >> \
         DTRACE_LLQUANTIZE_NSTEPSHIFT)
 
 
                    sigmask(SIGIOT)|sigmask(SIGEMT)|\
                    sigmask(SIGFPE)|sigmask(SIGBUS)|\
                    sigmask(SIGSEGV)|sigmask(SIGSYS)|\
-                   sigmask(SIGPIPE))
+                   sigmask(SIGPIPE)|sigmask(SIGKILL))
 
 #define workq_threadmask (threadmask | sigcantmask)
 
 
         struct cl_readahead   *cl_rahead;      /* cluster read ahead context */
         struct cl_writebehind *cl_wbehind;     /* cluster write behind context */
 
+       struct timespec         cs_mtime;       /* modify time of file when
+                                                  first cs_blob was loaded */
        struct  cs_blob         *cs_blobs;      /* for CODE SIGNING */
 #if CHECK_CS_VALIDATION_BITMAP
        void                    *cs_valid_bitmap;     /* right now: used only for signed files on the read-only root volume */
 int    ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t, off_t, vm_size_t);
 int    ubc_cs_sigpup_add(vnode_t, vm_address_t, vm_size_t);
 struct cs_blob *ubc_get_cs_blobs(vnode_t);
+void   ubc_get_cs_mtime(vnode_t, struct timespec *);
 int    ubc_cs_getcdhash(vnode_t, off_t, unsigned char *);
 kern_return_t ubc_cs_blob_allocate(vm_offset_t *, vm_size_t *);
 void ubc_cs_blob_deallocate(vm_offset_t, vm_size_t);
 
 errno_t        vnode_suspend(vnode_t);
 
 
+errno_t        vnode_mtime(vnode_t, struct timespec *, vfs_context_t);
+
 errno_t        vnode_size(vnode_t, off_t *, vfs_context_t);
 errno_t        vnode_setsize(vnode_t, off_t, int ioflag, vfs_context_t);
 int    vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx);
 
        return (error);
 }
 
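+/*
+ * Fetch a vnode's modification time via a va_modify_time getattr request.
+ */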
+errno_t
+vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
+{
+       struct vnode_attr       va;
+       int                     error;
+
+       VATTR_INIT(&va);
+       VATTR_WANTED(&va, va_modify_time);
+       error = vnode_getattr(vp, &va, ctx);
+       if (!error)
+               *mtime = va.va_modify_time;
+       return error;
+}
+
 /*
  * Returns:    0                       Success
  *     vnode_getattr:???
 
                size_t bufsize;
                void * bufptr;
                uio_t auio;
-               struct direntry entry64;
+               struct direntry *entry64;
                struct dirent *dep;
                int bytesread;
                int error;
                dep = (struct dirent *)bufptr;
                bytesread = bufsize - uio_resid(auio);
 
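+               /*
+                * A single heap-allocated direntry is reused for every
+                * converted entry, keeping the large structure off the
+                * kernel stack.
+                */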
+               MALLOC(entry64, struct direntry *, sizeof(struct direntry),
+                      M_TEMP, M_WAITOK);
                /*
                 * Convert all the entries and copy them out to user's buffer.
                 */
                while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) {
+                       size_t  enbufsize = DIRENT64_LEN(dep->d_namlen);
+
+                       bzero(entry64, enbufsize);
                        /* Convert a dirent to a dirent64. */
-                       entry64.d_ino = dep->d_ino;
-                       entry64.d_seekoff = 0;
-                       entry64.d_reclen = DIRENT64_LEN(dep->d_namlen);
-                       entry64.d_namlen = dep->d_namlen;
-                       entry64.d_type = dep->d_type;
-                       bcopy(dep->d_name, entry64.d_name, dep->d_namlen + 1);
+                       entry64->d_ino = dep->d_ino;
+                       entry64->d_seekoff = 0;
+                       entry64->d_reclen = enbufsize;
+                       entry64->d_namlen = dep->d_namlen;
+                       entry64->d_type = dep->d_type;
+                       bcopy(dep->d_name, entry64->d_name, dep->d_namlen + 1);
 
                        /* Move to next entry. */
                        dep = (struct dirent *)((char *)dep + dep->d_reclen);
 
                        /* Copy entry64 to user's buffer. */
-                       error = uiomove((caddr_t)&entry64, entry64.d_reclen, uio);
+                       error = uiomove((caddr_t)entry64, entry64->d_reclen, uio);
                }
 
                /* Update the real offset using the offset we got from VNOP_READDIR. */
                }
                uio_free(auio);
                FREE(bufptr, M_TEMP);
+               FREE(entry64, M_TEMP);
                return (error);
        }
 }
 
        upl_size_t      upl_size = 0;
 
        upl_create_flags = UPL_SET_INTERNAL | UPL_SET_LITE;
-       upl_control_flags = UPL_IOSYNC;
+       upl_control_flags = UPL_IOSYNC | UPL_PAGING_ENCRYPTED;
 
        if ((flags & SWAP_READ) == FALSE) {
                upl_create_flags |= UPL_COPYOUT_FROM;
 
 #include <sys/kern_memorystatus.h>
 
 
-int _shared_region_map( struct proc*, int, unsigned int, struct shared_file_mapping_np*, memory_object_control_t*, struct shared_file_mapping_np*); 
+int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
 int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *);
 
 SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
  * requiring any further setup.
  */
 int
-_shared_region_map(
+_shared_region_map_and_slide(
        struct proc                             *p,
        int                                     fd,
        uint32_t                                mappings_count,
        struct shared_file_mapping_np           *mappings,
-       memory_object_control_t                 *sr_file_control,
-       struct shared_file_mapping_np           *mapping_to_slide)
+       uint32_t                                slide,
+       user_addr_t                             slide_start,
+       user_addr_t                             slide_size)
 {
        int                             error;
        kern_return_t                   kr;
                goto done;
        }
 
-       if (sr_file_control != NULL) {
-               *sr_file_control = file_control;
-       }
-                        
-
 
        /* get the process's shared region (setup in vm_map_exec()) */
        shared_region = vm_shared_region_get(current_task());
                                       file_control,
                                       file_size,
                                       (void *) p->p_fd->fd_rdir,
-                                      mapping_to_slide);
+                                      slide,
+                                      slide_start,
+                                      slide_size);
        if (kr != KERN_SUCCESS) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(%p:'%s'): "
        struct shared_region_map_and_slide_np_args      *uap,
        __unused int                                    *retvalp)
 {
-       struct shared_file_mapping_np   mapping_to_slide;
        struct shared_file_mapping_np   *mappings;
-       unsigned int mappings_count = uap->count;
-
-       memory_object_control_t         sr_file_control;
+       unsigned int                    mappings_count = uap->count;
        kern_return_t                   kr = KERN_SUCCESS;
        uint32_t                        slide = uap->slide;
        
        }
 
 
-       kr = _shared_region_map(p, uap->fd, mappings_count, mappings, &sr_file_control, &mapping_to_slide);
+       kr = _shared_region_map_and_slide(p, uap->fd, mappings_count, mappings,
+                                         slide,
+                                         uap->slide_start, uap->slide_size);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
 
-       if (slide) {
-               kr = vm_shared_region_slide(slide, 
-                               mapping_to_slide.sfm_file_offset, 
-                               mapping_to_slide.sfm_size, 
-                               uap->slide_start, 
-                               uap->slide_size, 
-                               sr_file_control);
-               if (kr  != KERN_SUCCESS) {
-                       vm_shared_region_undo_mappings(NULL, 0, mappings, mappings_count);
-                       return kr;
-               }
-       }
 done:
        return kr;
 }
 
        return (vm_object_offset_t) ubc_getsize(vp);
 }
 
+extern int safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, int *truncated_path);
+
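+/*
+ * Return the vnode's path name if requested, falling back to the file name
+ * when no path was asked for or the path had to be truncated.
+ */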
 kern_return_t
-vnode_pager_get_pathname(
+vnode_pager_get_name(
        struct vnode    *vp,
        char            *pathname,
-       vm_size_t       *length_p)
+       vm_size_t       pathname_len,
+       char            *filename,
+       vm_size_t       filename_len,
+       boolean_t       *truncated_path_p)
 {
-       int     error, len;
-
-       len = (int) *length_p;
-       error = vn_getpath(vp, pathname, &len);
-       if (error != 0) {
-               return KERN_FAILURE;
+       *truncated_path_p = FALSE;
+       if (pathname != NULL) {
+               /* get the path name */
+               safe_getpath(vp, NULL,
+                            pathname, (int) pathname_len,
+                            truncated_path_p);
+       }
+       if ((pathname == NULL || *truncated_path_p) &&
+           filename != NULL) {
+               /* get the file name */
+               const char *name;
+
+               name = vnode_getname_printable(vp);
+               strlcpy(filename, name, (size_t) filename_len);
+               vnode_putname_printable(name);
        }
-       *length_p = (vm_size_t) len;
        return KERN_SUCCESS;
 }
 
 kern_return_t
-vnode_pager_get_filename(
+vnode_pager_get_mtime(
        struct vnode    *vp,
-       const char      **filename)
+       struct timespec *current_mtime,
+       struct timespec *cs_mtime)
 {
-       *filename = vp->v_name;
+       vnode_mtime(vp, current_mtime, vfs_context_current());
+       if (cs_mtime != NULL) {
+               ubc_get_cs_mtime(vp, cs_mtime);
+       }
        return KERN_SUCCESS;
 }
 
 
-13.0.0
+13.1.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
 
 __ZTV24IOCPUInterruptController
 _assert_wait_timeout_with_leeway
 _assert_wait_deadline_with_leeway
+_audio_active
 _b_to_q
 _bdevsw
 _boot
 
 _cpuid_features
 _cpuid_info
 _lapic_end_of_interrupt
+_lapic_get_cmci_vector
 _lapic_unmask_perfcnt_interrupt
 _mp_broadcast
 _mp_cpus_call
 
     kIODirectionInOut = kIODirectionIn  | kIODirectionOut,
 
     // these flags are valid for the prepare() method only
-    kIODirectionPrepareToPhys32   = 0x00000004,
-    kIODirectionPrepareNoFault    = 0x00000008,
-    kIODirectionPrepareReserved1  = 0x00000010,
+    kIODirectionPrepareToPhys32    = 0x00000004,
+    kIODirectionPrepareNoFault     = 0x00000008,
+    kIODirectionPrepareReserved1   = 0x00000010,
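+    // Preprocessor guard: lets clients #ifdef for the non-coherent flag,
+    // since enumerators are invisible to the preprocessor.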
+#define IODIRECTIONPREPARENONCOHERENTDEFINED   1
+    kIODirectionPrepareNonCoherent = 0x00000020,
 };
 #ifdef __LP64__
 typedef IOOptionBits IODirection;
 
 #define kIOSelectedBootDeviceKey       "boot-device"
 #endif
 
-
-enum { kIOHibernateMinPollersNeeded = 2 };
-
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 // copy from phys addr to MD
 
        IORegistryEntry * next;
        IORegistryEntry * child;
-       OSData * data;
+       IOService       * service;
+       OSData          * data;
 
         vars->pollers = OSArray::withCapacity(4);
        if (!vars->pollers)
            }
             else if ((poller = OSDynamicCast(IOPolledInterface, obj)))
                 vars->pollers->setObject(poller);
+
+           if ((service = OSDynamicCast(IOService, next)) 
+               && service->getDeviceMemory()
+               && !vars->pollers->getCount())  break;
+
            if ((num = OSDynamicCast(OSNumber, next->getProperty(kIOMediaPreferredBlockSizeKey))))
                vars->blockSize = num->unsigned32BitValue();
             child = next;
        if (vars->blockSize < 4096) vars->blockSize = 4096;
 
        HIBLOG("hibernate image major %d, minor %d, blocksize %ld, pollers %d\n",
-                   major(hibernate_image_dev), minor(hibernate_image_dev), (long)vars->blockSize, vars->pollers->getCount());
+                   major(hibernate_image_dev), minor(hibernate_image_dev), (long)vars->blockSize, 
+                   vars->pollers->getCount());
 
-       if (vars->pollers->getCount() < kIOHibernateMinPollersNeeded)
+       if (!vars->pollers->getCount())
        {
             err = kIOReturnUnsupported;
            continue;
 
        if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
        if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
     }
-    if (kIODirectionPrepareNoFault    & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
-    if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
+    if (kIODirectionPrepareNoFault     & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
+    if (kIODirectionPrepareNoZeroFill  & forDirection) uplFlags |= UPL_NOZEROFILLIO;
+    if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
 
     mapBase = 0;
     sharedMem = (ipc_port_t) _memEntry;
 
     // Current factors based on environment and assertions
     if (sleepTimerMaintenance)
         currentFactors |= kIOPMSleepFactorSleepTimerWake;
-    if (standbyEnabled && sleepToStandby)
+    if (standbyEnabled && sleepToStandby && !gSleepPolicyHandler)
         currentFactors |= kIOPMSleepFactorSleepTimerWake;
     if (!clamshellClosed)
         currentFactors |= kIOPMSleepFactorLidOpen;
 
     OSArray *          stack;
     IORegistryEntry *  root;
     const IORegistryEntry * entry;
-    IORegistryEntry *  parent;
+    const IORegistryEntry * parent;
     const OSSymbol *   alias;
     int                        index;
     int                        len, maxLength, compLen, aliasLen;
        return( ok );
     }
 
-    entry = this;
-    parent = entry->getParentEntry( plane );
-    if( !parent)
-       // Error if not attached in plane
-       return( false);
-
     stack = OSArray::withCapacity( getDepth( plane ));
-    if( !stack)
-       return( false);
+    if (!stack) return( false);
 
     RLOCK;
 
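+    // Walk up toward the plane's root, recording each entry so the path can
+    // be emitted top-down; if the walk ends before reaching the root, the
+    // entry is not attached in this plane.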
+    parent = entry = this;
     root = gRegistryRoot->getChildEntry( plane );
-    while( parent && (entry != root)) {
+    while (parent && (parent != root))
+    {
        // stop below root
-       stack->setObject( (OSObject *) entry );
        entry = parent;
        parent = entry->getParentEntry( plane );
+       stack->setObject( (OSObject *) entry );
     }
 
-    index = stack->getCount();
-    ok = true;
-
-    if( 0 == index) {
-
-        *nextComp++ = '/';
-        *nextComp = 0;
-        len++;
-
-    } else while( ok && ((--index) >= 0)) {
-
-        entry = (IORegistryEntry *) stack->getObject((unsigned int) index );
-        assert( entry );
-
-        if( (alias = entry->hasAlias( plane ))) {
-            len = plane->nameKey->getLength() + 1;
-            nextComp = path + len;
-
-            compLen = alias->getLength();
-            ok = (maxLength > (len + compLen));
-            if( ok)
-                strlcpy( nextComp, alias->getCStringNoCopy(), compLen + 1);
-        } else {
-            compLen = maxLength - len;
-            ok = entry->getPathComponent( nextComp + 1, &compLen, plane );
-
-            if( ok && compLen) {
-                compLen++;
-                *nextComp = '/';
+    ok = (0 != parent);
+    if (ok)
+    {
+        index = stack->getCount();
+        if( 0 == index) {
+
+            *nextComp++ = '/';
+            *nextComp = 0;
+            len++;
+
+        } else while( ok && ((--index) >= 0)) {
+
+            entry = (IORegistryEntry *) stack->getObject((unsigned int) index );
+            assert( entry );
+
+            if( (alias = entry->hasAlias( plane ))) {
+                len = plane->nameKey->getLength() + 1;
+                nextComp = path + len;
+
+                compLen = alias->getLength();
+                ok = (maxLength > (len + compLen));
+                if( ok)
+                    strlcpy( nextComp, alias->getCStringNoCopy(), compLen + 1);
+            } else {
+                compLen = maxLength - len;
+                ok = entry->getPathComponent( nextComp + 1, &compLen, plane );
+
+                if( ok && compLen) {
+                    compLen++;
+                    *nextComp = '/';
+                }
             }
-        }
 
-        if( ok) {
-            len += compLen;
-            nextComp += compLen;
+            if( ok) {
+                len += compLen;
+                nextComp += compLen;
+            }
         }
+        *length = len;
     }
-    *length = len;
-
     UNLOCK;
-
     stack->release();
 
     return( ok );
 
 #  Standard Apple MacOS X Configurations:
 #  -------- ---- -------- ---------------
 #
-#  RELEASE = [ medium intel pc iokit mach_pe mach mach_kdp config_serial_kdp event vol hd pst gdb fixpri simple_clock mkernserv uxpr kernstack ipc_compat ipc_debug fb mk30 mk30_i386 hibernation config_sleep crypto config_dtrace config_mca config_vmx config_mtrr config_lapic config_counters zleaks config_gzalloc config_sched_traditional config_sched_proto config_sched_grrr config_sched_fixedpriority mach_pagemap vm_pressure_events kperf kpc memorystatus config_kext_basement config_telemetry importance_inheritance dynamic_codesigning config_nomonitors ]
+#  RELEASE = [ medium intel pc iokit mach_pe mach mach_kdp config_serial_kdp event vol hd pst gdb fixpri simple_clock mkernserv uxpr kernstack ipc_compat ipc_debug fb mk30 mk30_i386 hibernation config_sleep crypto config_dtrace config_mca config_vmx config_mtrr config_lapic config_counters zleaks config_gzalloc config_sched_traditional config_sched_proto config_sched_grrr config_sched_fixedpriority mach_pagemap vm_pressure_events kperf kpc memorystatus config_kext_basement config_telemetry importance_inheritance dynamic_codesigning ]
 #  DEBUG = [ RELEASE osf_debug debug mach_assert task_zone_info ]
 #
 #  EMBEDDED_BASE = [ bsmall intel pc iokit mach_pe mach mach_kdp config_serial_kdp event vol hd pst gdb fixpri simple_clock mkernserv uxpr kernstack ipc_compat ipc_debug fb mk30 mk30_i386 hibernation config_sleep crypto ]
 
                        cpufamily = CPUFAMILY_INTEL_SANDYBRIDGE;
                        break;
                case CPUID_MODEL_IVYBRIDGE:
+               case CPUID_MODEL_IVYBRIDGE_EP:
                        cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
                        break;
                case CPUID_MODEL_HASWELL:
 
 #define CPUID_MODEL_JAKETOWN           0x2D
 #define CPUID_MODEL_IVYBRIDGE          0x3A
 #ifdef PRIVATE
+#define CPUID_MODEL_IVYBRIDGE_EP       0x3E
 #define CPUID_MODEL_CRYSTALWELL                0x46
 #endif
 #define CPUID_MODEL_HASWELL            0x3C
 
 extern void            lapic_interrupt_counts(uint64_t intrs[256]);
 extern void            lapic_disable_timer(void);
 
+extern uint8_t         lapic_get_cmci_vector(void);
+
 #define        MAX_LAPICIDS    (LAPIC_ID_MAX+1)
 #ifdef MP_DEBUG
 #define LAPIC_CPU_MAP_DUMP()   lapic_cpu_map_dump()
 
        }
 }
 
+/* SPI returning the CMCI vector */
+uint8_t
+lapic_get_cmci_vector(void)
+{
+       uint8_t cmci_vector = 0;
+#if CONFIG_MCA
+       /* CMCI, if available */
+       if (mca_is_cmci_present())
+               cmci_vector = LAPIC_VECTOR(CMCI);
+#endif
+       return cmci_vector;
+}
+
+#if DEBUG
+extern void lapic_trigger_MC(void);
+void
+lapic_trigger_MC(void)
+{
+       /* A 64-bit access to any register will do it. */
+       volatile uint64_t dummy = *(uint64_t *) (void *) LAPIC_MMIO(ID);
+       dummy++;
+}
+#endif
 
 #include <i386/cpuid.h>
 #include <i386/cpu_topology.h>
 #include <i386/cpu_threads.h>
+#include <i386/lapic.h>
 #include <i386/machine_cpu.h>
 #include <i386/machine_check.h>
 #include <i386/proc_reg.h>
 static uint32_t                mca_family = 0;
 static unsigned int    mca_error_bank_count = 0;
 static boolean_t       mca_control_MSR_present = FALSE;
-static boolean_t       mca_threshold_status_present = FALSE;
-static boolean_t       mca_sw_error_recovery_present = FALSE;
-static boolean_t       mca_extended_MSRs_present = FALSE;
-static unsigned int    mca_extended_MSRs_count = 0;
 static boolean_t       mca_cmci_present = FALSE;
 static ia32_mcg_cap_t  ia32_mcg_cap;
 decl_simple_lock_data(static, mca_lock);
                ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
                mca_error_bank_count = ia32_mcg_cap.bits.count;
                mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
-               mca_threshold_status_present = ia32_mcg_cap.bits.mcg_tes_p;
-               mca_sw_error_recovery_present = ia32_mcg_cap.bits.mcg_ser_p;
                mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
-               if (family == 0x0F) {
-                       mca_extended_MSRs_present = ia32_mcg_cap.bits.mcg_ext_p;
-                       mca_extended_MSRs_count = ia32_mcg_cap.bits.mcg_ext_cnt;
-               }
        }
 }
 
         * and don't care about races
         */
        if (x86_package()->mca_state == NULL)
-                       x86_package()->mca_state = mca_state;
+               x86_package()->mca_state = mca_state;
 
        mca_state->mca_is_saved = TRUE;
 }
                mca_save_state(current_cpu_datap()->cpu_mca_state);
 }
 
-static void mca_dump_64bit_state(void)
-{
-       kdb_printf("Extended Machine Check State:\n");
-       kdb_printf("  IA32_MCG_RAX:    0x%016qx\n", rdmsr64(IA32_MCG_RAX));
-       kdb_printf("  IA32_MCG_RBX:    0x%016qx\n", rdmsr64(IA32_MCG_RBX));
-       kdb_printf("  IA32_MCG_RCX:    0x%016qx\n", rdmsr64(IA32_MCG_RCX));
-       kdb_printf("  IA32_MCG_RDX:    0x%016qx\n", rdmsr64(IA32_MCG_RDX));
-       kdb_printf("  IA32_MCG_RSI:    0x%016qx\n", rdmsr64(IA32_MCG_RSI));
-       kdb_printf("  IA32_MCG_RDI:    0x%016qx\n", rdmsr64(IA32_MCG_RDI));
-       kdb_printf("  IA32_MCG_RBP:    0x%016qx\n", rdmsr64(IA32_MCG_RBP));
-       kdb_printf("  IA32_MCG_RSP:    0x%016qx\n", rdmsr64(IA32_MCG_RSP));
-       kdb_printf("  IA32_MCG_RFLAGS: 0x%016qx\n", rdmsr64(IA32_MCG_RFLAGS));
-       kdb_printf("  IA32_MCG_RIP:    0x%016qx\n", rdmsr64(IA32_MCG_RIP));
-       kdb_printf("  IA32_MCG_MISC:   0x%016qx\n", rdmsr64(IA32_MCG_MISC));
-       kdb_printf("  IA32_MCG_R8:     0x%016qx\n", rdmsr64(IA32_MCG_R8));
-       kdb_printf("  IA32_MCG_R9:     0x%016qx\n", rdmsr64(IA32_MCG_R9));
-       kdb_printf("  IA32_MCG_R10:    0x%016qx\n", rdmsr64(IA32_MCG_R10));
-       kdb_printf("  IA32_MCG_R11:    0x%016qx\n", rdmsr64(IA32_MCG_R11));
-       kdb_printf("  IA32_MCG_R12:    0x%016qx\n", rdmsr64(IA32_MCG_R12));
-       kdb_printf("  IA32_MCG_R13:    0x%016qx\n", rdmsr64(IA32_MCG_R13));
-       kdb_printf("  IA32_MCG_R14:    0x%016qx\n", rdmsr64(IA32_MCG_R14));
-       kdb_printf("  IA32_MCG_R15:    0x%016qx\n", rdmsr64(IA32_MCG_R15));
-}
-
 static void
 mca_report_cpu_info(void)
 {
                infop->cpuid_model,
                infop->cpuid_stepping,
                infop->cpuid_microcode_version);
-       kdb_printf(" %s\n", infop->cpuid_brand_string);
-}
-
-static const char *mc8_memory_operation[] = {
-       [MC8_MMM_GENERIC] =             "generic",
-       [MC8_MMM_READ] =                "read",
-       [MC8_MMM_WRITE] =               "write",
-       [MC8_MMM_ADDRESS_COMMAND] =     "address/command",
-       [MC8_MMM_RESERVED] =            "reserved"
-};
-
-static void
-mca_dump_bank_mc8(mca_state_t *state, int i)
-{
-       mca_mci_bank_t                  *bank;
-       ia32_mci_status_t               status;
-       struct ia32_mc8_specific        mc8;
-       int                             mmm;
-
-       bank = &state->mca_error_bank[i];
-       status = bank->mca_mci_status;
-       mc8 = status.bits_mc8;
-       mmm = MIN(mc8.memory_operation, MC8_MMM_RESERVED);
+       kdb_printf(" signature: 0x%x\n",
+               infop->cpuid_signature);
+       kdb_printf(" %s\n",
+               infop->cpuid_brand_string);
 
-       kdb_printf(
-               " IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
-               i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
-       if (!status.bits.val)
-               return;
-
-       kdb_printf(
-               "  Channel number:         %d%s\n"
-               "  Memory Operation:       %s\n"
-               "  Machine-specific error: %s%s%s%s%s%s%s%s%s\n"
-               "  COR_ERR_CNT:            %d\n",
-               mc8.channel_number,
-               IF(mc8.channel_number == 15, " (unknown)"),
-               mc8_memory_operation[mmm],
-               IF(mc8.read_ecc,            "Read ECC "),
-               IF(mc8.ecc_on_a_scrub,      "ECC on scrub "),
-               IF(mc8.write_parity,        "Write parity "),
-               IF(mc8.redundant_memory,    "Redundant memory "),
-               IF(mc8.sparing,             "Sparing/Resilvering "),
-               IF(mc8.access_out_of_range, "Access out of Range "),
-               IF(mc8.rtid_out_of_range,   "RTID out of Range "),
-               IF(mc8.address_parity,      "Address Parity "),
-               IF(mc8.byte_enable_parity,  "Byte Enable Parity "),
-               mc8.cor_err_cnt);
-       kdb_printf(
-               "  Status bits:\n%s%s%s%s%s%s",
-               IF(status.bits.pcc,         "   Processor context corrupt\n"),
-               IF(status.bits.addrv,       "   ADDR register valid\n"),
-               IF(status.bits.miscv,       "   MISC register valid\n"),
-               IF(status.bits.en,          "   Error enabled\n"),
-               IF(status.bits.uc,          "   Uncorrected error\n"),
-               IF(status.bits.over,        "   Error overflow\n"));
-       if (status.bits.addrv)
-               kdb_printf(
-                       " IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
-                       i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
-       if (status.bits.miscv) {
-               ia32_mc8_misc_t mc8_misc;
-
-               mc8_misc.u64 = bank->mca_mci_misc;
-               kdb_printf(
-                       " IA32_MC%d_MISC(0x%x): 0x%016qx\n"
-                       "  RTID:     %d\n"
-                       "  DIMM:     %d\n"
-                       "  Channel:  %d\n"
-                       "  Syndrome: 0x%x\n",
-                       i, IA32_MCi_MISC(i), mc8_misc.u64,
-                       mc8_misc.bits.rtid,
-                       mc8_misc.bits.dimm,
-                       mc8_misc.bits.channel,
-                       (int) mc8_misc.bits.syndrome);
-       }
 }
 
-static const char *mca_threshold_status[] = {
-       [THRESHOLD_STATUS_NO_TRACKING] =        "No tracking",
-       [THRESHOLD_STATUS_GREEN] =              "Green",
-       [THRESHOLD_STATUS_YELLOW] =             "Yellow",
-       [THRESHOLD_STATUS_RESERVED] =           "Reserved"
-};
-
 static void
 mca_dump_bank(mca_state_t *state, int i)
 {
 
        bank = &state->mca_error_bank[i];
        status = bank->mca_mci_status;
-       kdb_printf(
-               " IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
-               i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
        if (!status.bits.val)
                return;
 
-       kdb_printf(
-               "  MCA error code:            0x%04x\n",
-               status.bits.mca_error);
-       kdb_printf(
-               "  Model specific error code: 0x%04x\n",
-               status.bits.model_specific_error);
-       if (!mca_threshold_status_present) {
-               kdb_printf(
-                       "  Other information:         0x%08x\n",
-                       status.bits.other_information);
-       } else {
-               int     threshold = status.bits_tes_p.threshold;
-               kdb_printf(
-                       "  Other information:         0x%08x\n"
-                       "  Threshold-based status:    %s\n",
-                       status.bits_tes_p.other_information,
-                       (status.bits_tes_p.uc == 0) ?
-                           mca_threshold_status[threshold] :
-                           "Undefined");
-       }
-       if (mca_threshold_status_present &&
-           mca_sw_error_recovery_present) {
-               kdb_printf(
-                       "  Software Error Recovery:\n%s%s",
-                       IF(status.bits_tes_p.ar, "   Recovery action reqd\n"),
-                       IF(status.bits_tes_p.s,  "   Signaling UCR error\n"));
-       }
-       kdb_printf(
-               "  Status bits:\n%s%s%s%s%s%s",
-               IF(status.bits.pcc,   "   Processor context corrupt\n"),
-               IF(status.bits.addrv, "   ADDR register valid\n"),
-               IF(status.bits.miscv, "   MISC register valid\n"),
-               IF(status.bits.en,    "   Error enabled\n"),
-               IF(status.bits.uc,    "   Uncorrected error\n"),
-               IF(status.bits.over,  "   Error overflow\n"));
+       kdb_printf(" IA32_MC%d_STATUS(0x%x): 0x%016qx\n",
+               i, IA32_MCi_STATUS(i), status.u64);
+
        if (status.bits.addrv)
-               kdb_printf(
-                       " IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
+               kdb_printf(" IA32_MC%d_ADDR(0x%x):   0x%016qx\n",
                        i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
+
        if (status.bits.miscv)
-               kdb_printf(
-                       " IA32_MC%d_MISC(0x%x): 0x%016qx\n",
+               kdb_printf(" IA32_MC%d_MISC(0x%x):   0x%016qx\n",
                        i, IA32_MCi_MISC(i), bank->mca_mci_misc);
 }
 
        if (!state->mca_is_valid)
                return;
 
-       kdb_printf("MCA error-reporting registers:\n");
        for (i = 0; i < mca_error_bank_count; i++ ) {
-               if (i == 8 && state == x86_package()->mca_state) {
-                       /*
-                        * Fatal Memory Error
-                        */
-
-                       /* Dump MC8 for this package */
-                       kdb_printf(" Package %d logged:\n",
-                                  x86_package()->ppkg_num);
-                       mca_dump_bank_mc8(state, 8);
-                       continue;
-               }
                mca_dump_bank(state, i);
        }
 }
        /*
         * Report machine-check capabilities:
         */
-       kdb_printf(
-               "Machine-check capabilities 0x%016qx:\n", ia32_mcg_cap.u64);
+       kdb_printf("Machine-check capabilities: 0x%016qx\n", ia32_mcg_cap.u64);
 
        mca_report_cpu_info();
 
-       kdb_printf(
-               " %d error-reporting banks\n%s%s%s", mca_error_bank_count,
-               IF(mca_control_MSR_present,
-                  " control MSR present\n"),
-               IF(mca_threshold_status_present,
-                  " threshold-based error status present\n"),
-               IF(mca_cmci_present,
-                  " extended corrected memory error handling present\n"));
-       if (mca_extended_MSRs_present)
-               kdb_printf(
-                       " %d extended MSRs present\n", mca_extended_MSRs_count);
+       kdb_printf(" %d error-reporting banks\n", mca_error_bank_count);
  
        /*
         * Dump all processor state:
                mca_state_t             *mcsp = cpu_datap(i)->cpu_mca_state;
                ia32_mcg_status_t       status;
 
-               kdb_printf("Processor %d: ", i);
                if (mcsp == NULL ||
                    mcsp->mca_is_saved == FALSE ||
-                   mcsp->mca_mcg_status.u64 == 0) {
-                       kdb_printf("no machine-check status reported\n");
-                       continue;
-               }
-               if (!mcsp->mca_is_valid) {
-                       kdb_printf("no valid machine-check state\n");
+                   mcsp->mca_mcg_status.u64 == 0 ||
+                   !mcsp->mca_is_valid) {
                        continue;
                }
                status = mcsp->mca_mcg_status;
-               kdb_printf(
-                       "machine-check status 0x%016qx:\n%s%s%s", status.u64,
-                       IF(status.bits.ripv, " restart IP valid\n"),
-                       IF(status.bits.eipv, " error IP valid\n"),
-                       IF(status.bits.mcip, " machine-check in progress\n"));
-
+               kdb_printf("Processor %d: IA32_MCG_STATUS: 0x%016qx\n",
+                       i, status.u64);
                mca_cpu_dump_error_banks(mcsp);
        }
 
-       /*
-        * Dump any extended machine state:
-        */
-       if (mca_extended_MSRs_present) {
-               mca_dump_64bit_state();
-       }
-
        /* Update state to release any other threads. */
        mca_dump_state = DUMPED;
 }
 
 
 extern void mca_exception_panic(void);
-extern void mtrr_lapic_cached(void);
+extern void lapic_trigger_MC(void);
 void mca_exception_panic(void)
 {
 #if DEBUG
-       mtrr_lapic_cached();
+       lapic_trigger_MC();
 #else
        kprintf("mca_exception_panic() requires DEBUG build\n");
 #endif
 
        uint64_t        over                    :BIT1(62);
        uint64_t        val                     :BIT1(63);
     }          bits_tes_p;
-    struct ia32_mc8_specific {
-       uint64_t        channel_number          :BITS(3,0);
-       uint64_t        memory_operation        :BITS(6,4);
-       uint64_t        unused                  :BITS(15,7);
-       uint64_t        read_ecc                :BIT1(16);
-       uint64_t        ecc_on_a_scrub          :BIT1(17);
-       uint64_t        write_parity            :BIT1(18);
-       uint64_t        redundant_memory        :BIT1(19);
-       uint64_t        sparing                 :BIT1(20);
-       uint64_t        access_out_of_range     :BIT1(21);
-       uint64_t        rtid_out_of_range       :BIT1(22);
-       uint64_t        address_parity          :BIT1(23);
-       uint64_t        byte_enable_parity      :BIT1(24);
-       uint64_t        reserved                :BITS(37,25);
-       uint64_t        cor_err_cnt             :BITS(52,38);
-    }          bits_mc8;
     uint64_t   u64;
 } ia32_mci_status_t;
 
 #define THRESHOLD_STATUS_YELLOW                2
 #define THRESHOLD_STATUS_RESERVED      3
 
-/* MC8 memory operations encoding: */
-#define        MC8_MMM_GENERIC                 0
-#define        MC8_MMM_READ                    1
-#define        MC8_MMM_WRITE                   2
-#define        MC8_MMM_ADDRESS_COMMAND         3
-#define        MC8_MMM_RESERVED                4
-typedef union {
-    struct {
-       uint64_t        rtid                    :BITS(7,0);
-       uint64_t        reserved1               :BITS(15,8);
-       uint64_t        dimm                    :BITS(17,16);
-       uint64_t        channel                 :BITS(19,18);
-       uint64_t        reserved2               :BITS(31,20);
-       uint64_t        syndrome                :BITS(63,32);
-    }          bits;
-    uint64_t   u64;
-} ia32_mc8_misc_t;
-
 typedef uint64_t       ia32_mci_addr_t;
 typedef uint64_t       ia32_mci_misc_t;
 
-#define IA32_MCG_EAX           (0x180)
-#define IA32_MCG_EBX           (0x181)
-#define IA32_MCG_ECX           (0x182)
-#define IA32_MCG_EDX           (0x183)
-#define IA32_MCG_ESI           (0x184)
-#define IA32_MCG_EDI           (0x185)
-#define IA32_MCG_EBP           (0x186)
-#define IA32_MCG_ESP           (0x187)
-#define IA32_MCG_EFLAGS                (0x188)
-#define IA32_MCG_EIP           (0x189)
-#define IA32_MCG_MISC          (0x18A)
-
-#define IA32_MCG_RAX           (0x180)
-#define IA32_MCG_RBX           (0x181)
-#define IA32_MCG_RCX           (0x182)
-#define IA32_MCG_RDX           (0x183)
-#define IA32_MCG_RSI           (0x184)
-#define IA32_MCG_RDI           (0x185)
-#define IA32_MCG_RBP           (0x186)
-#define IA32_MCG_RSP           (0x187)
-#define IA32_MCG_RFLAGS                (0x188)
-#define IA32_MCG_RIP           (0x189)
-#define IA32_MCG_MISC          (0x18A)
-#define IA32_MCG_RESERVED1     (0x18B)
-#define IA32_MCG_RESERVED2     (0x18C)
-#define IA32_MCG_RESERVED3     (0x18D)
-#define IA32_MCG_RESERVED4     (0x18E)
-#define IA32_MCG_RESERVED5     (0x18F)
-#define IA32_MCG_R8            (0x190)
-#define IA32_MCG_R9            (0x191)
-#define IA32_MCG_R10           (0x192)
-#define IA32_MCG_R11           (0x193)
-#define IA32_MCG_R12           (0x194)
-#define IA32_MCG_R13           (0x195)
-#define IA32_MCG_R14           (0x196)
-#define IA32_MCG_R15           (0x197)
-
 extern void            mca_cpu_alloc(cpu_data_t *cdp);
 extern void            mca_cpu_init(void);
 extern void            mca_dump(void);
 
        if (pmsafe_debug && !kdp_snapshot)
                pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
        current_cpu_datap()->cpu_NMI_acknowledged = TRUE;
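+       /* Clear this CPU's pending MP_KDP signal bit so it is not serviced again after the NMI. */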
+       i_bit_clear(MP_KDP, &current_cpu_datap()->cpu_signals);
        mp_kdp_wait(FALSE, pmap_tlb_flush_timeout || spinlock_timed_out || panic_active());
        if (pmsafe_debug && !kdp_snapshot)
                pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
 
        }
        ml_set_interrupts_enabled(istate);
 }
-
-#if DEBUG
-void
-mtrr_lapic_cached(void);
-void
-mtrr_lapic_cached(void)
-{
-       boolean_t       istate;
-       uint32_t        lo;
-       uint32_t        hi;
-       uint64_t        lapic_pbase;
-       uint64_t        base;
-       uint64_t        length;
-       uint32_t        type;
-       unsigned int    i;
-
-       /* Find the local APIC physical base address */
-       rdmsr(MSR_IA32_APIC_BASE, lo, hi);
-       lapic_pbase = (lo &  MSR_IA32_APIC_BASE_BASE);
-
-       DBG("mtrr_lapic_cached() on cpu %d, lapic_pbase: 0x%016llx\n",
-           get_cpu_number(), lapic_pbase);
-
-       istate = ml_set_interrupts_enabled(FALSE);
-
-       /*
-        * Search for the variable range MTRR mapping the lapic.
-        * Flip its type to WC and return.
-        */
-       for (i = 0; i < mtrr_state.var_count; i++) {
-               if (!(mtrr_state.var_range[i].mask & IA32_MTRR_PHYMASK_VALID))
-                       continue;
-               base = mtrr_state.var_range[i].base & IA32_MTRR_PHYSBASE_MASK;
-               type = (uint32_t)(mtrr_state.var_range[i].base & IA32_MTRR_PHYSBASE_TYPE);
-               length = MASK_TO_LEN(mtrr_state.var_range[i].mask);
-               DBG("%d: base: 0x%016llx size: 0x%016llx type: %d\n",
-                    i, base, length, type);
-               if (base <= lapic_pbase &&
-                   lapic_pbase <= base + length - PAGE_SIZE) {
-                       DBG("mtrr_lapic_cached() matched var: %d\n", i);
-                       mtrr_state.var_range[i].base &=~IA32_MTRR_PHYSBASE_TYPE;
-                       mtrr_state.var_range[i].base |= MTRR_TYPE_WRITECOMBINE;
-                       ml_set_interrupts_enabled(istate);
-               }
-       }
-
-       /*
-        * In case we didn't find a covering variable range,
-        * we slam WC into the default memory type.
-        */
-       mtrr_state.MTRRdefType = MTRR_TYPE_WRITECOMBINE;
-
-       mtrr_update_cpu(); 
-
-       ml_set_interrupts_enabled(istate);
-
-       return;
-}
-#endif /* DEBUG */
 
 }
 
 /*
- * Set the maximum delay time allowed for snoop on the bus.
- *
- * Note that this value will be compared to the amount of time that it takes
- * to transition from a non-snooping power state (C4) to a snooping state (C2).
- * If maxBusDelay is less than C4C2SnoopDelay,
- * we will not enter the lowest power state.
+ * Advertise a memory access latency tolerance of "mdelay" ns
  */
 void
 ml_set_maxbusdelay(uint32_t mdelay)
 
                }
                if (superpage)          /* this path can not be used */
                        template |= INTEL_PTE_PS;       /* to change the page size! */
+
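+               /* attributes unchanged: skip the PTE compare-exchange and TLB flush */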
+               if (old_attributes == template)
+                       goto dont_update_pte;
+
                /* Determine delta, PV locked */
                need_tlbflush =
                    ((old_attributes ^ template) != INTEL_PTE_WIRED);
                        opte = *pte;
                        npte = template | (opte & (INTEL_PTE_REF | INTEL_PTE_MOD));
                } while (!pmap_cmpx_pte(pte, opte, npte));
+dont_update_pte:
                if (old_pa_locked) {
                        UNLOCK_PVH(pai);
                        old_pa_locked = FALSE;
 
 #ifndef        _I386_POSTCODE_H_
 #define        _I386_POSTCODE_H_
 
+/*
+ * Postcodes are no longer enabled by default in the DEBUG kernel
+ * because platforms may not have built-in port 0x80 support.
+ * To re-enable postcode output, uncomment the following define:
+ */
+//#define DEBUG_POSTCODE 1
+
 /* Define this to delay about 1 sec after posting each code */
 //#define POSTCODE_DELAY 1
 
 #define        SPINCOUNT       300000000
 #define CPU_PAUSE()    rep; nop
 
-#if DEBUG
+#if DEBUG_POSTCODE
 /*
 * Macro to output byte value to postcode, destroying register al.
  * Additionally, if POSTCODE_DELAY, spin for about a second.
        movl    %ebx, %eax;             \
        POSTCODE_AL
 
-#else  /* DEBUG */
+#else  /* DEBUG_POSTCODE */
 #define POSTCODE_AL
 #define POSTCODE_AX
 #define POSTCODE(X)
 #define POSTCODE2(X)
 #define POSTCODE_SAVE_EAX(X)
 #define POSTCODE32_EBX
-#endif /* DEBUG */
+#endif /* DEBUG_POSTCODE */
 
 /*
  * The following postcodes are defined for stages of early startup:
 {
        asm volatile("outw %0, %1" : : "a" (xxxx), "N" (POSTPORT));
 }
-#if    DEBUG
+#if    DEBUG_POSTCODE
 inline static void
 postcode(uint8_t       xx)
 {
 
 
        rntp->shift = shift;
 
+       /*
+        * On some platforms, the TSC is not reset at warm boot. But the
+        * rebase time must be relative to the current boot so we can't use
+        * mach_absolute_time(). Instead, we convert the TSC delta since boot
+        * to nanoseconds.
+        */
        if (tsc_rebase_abs_time == 0)
-               tsc_rebase_abs_time = mach_absolute_time();
+               tsc_rebase_abs_time = _rtc_tsc_to_nanoseconds(
+                                               rdtsc64() - tsc_at_boot, rntp);
 
        rtc_nanotime_init(0);
 }
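
For illustration only, here is a minimal user-space sketch of the conversion the
comment above describes: turning a raw TSC tick delta into nanoseconds. The
function name tsc_delta_to_ns and the parameter tsc_freq_hz are hypothetical;
the kernel's _rtc_tsc_to_nanoseconds applies rntp's precomputed scale/shift
rather than a runtime divide.

#include <stdint.h>

/* Convert a TSC tick delta to nanoseconds, assuming tsc_freq_hz > 0. */
static uint64_t
tsc_delta_to_ns(uint64_t tsc_delta, uint64_t tsc_freq_hz)
{
	/* 128-bit intermediate keeps the multiply from overflowing */
	return (uint64_t)(((__uint128_t)tsc_delta * 1000000000ULL) / tsc_freq_hz);
}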
 
 uint32_t       flex_ratio_min = 0;
 uint32_t       flex_ratio_max = 0;
 
+uint64_t       tsc_at_boot = 0;
 
 #define bit(n)         (1ULL << (n))
 #define bitmask(h,l)   ((bit(h)|(bit(h)-1)) & ~(bit(l)-1))
 #define CPU_FAMILY_PENTIUM_M   (0x6)
 
 static const char      FSB_Frequency_prop[] = "FSBFrequency";
+static const char      TSC_at_boot_prop[]   = "InitialTSC";
 /*
  * This routine extracts the bus frequency in Hz from the device tree.
+ * Also reads any initial TSC value at boot from the device tree.
  */
 static uint64_t
 EFI_FSB_frequency(void)
        } else {
                kprintf("EFI_FSB_frequency: unexpected size %d\n", size);
        }
+
+       /*
+        * While we're here, see if EFI published an initial TSC value.
+        */
+       if (DTGetProperty(entry,TSC_at_boot_prop,&value,&size) == kSuccess) {
+               if (size == sizeof(uint64_t)) {
+                       tsc_at_boot = *(uint64_t *) value;
+                       kprintf("EFI_FSB_frequency: read %s value: %llu\n",
+                               TSC_at_boot_prop, tsc_at_boot);
+               }
+       }
+
        return frequency;
 }
 
 
 extern uint32_t        flex_ratio;
 extern uint32_t        flex_ratio_min;
 extern uint32_t        flex_ratio_max;
+extern uint64_t        tsc_at_boot;
 
 struct tscInfo
 {
 
                        if(task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
                        if(task->frozen) task_snap->ss_flags |= kFrozen;
 
+                       if (task->effective_policy.darwinbg == 1) {
+                               task_snap->ss_flags |= kTaskDarwinBG;
+                       }
+
                        if (task->effective_policy.t_sup_active == 1)
                                task_snap->ss_flags |= kTaskIsSuppressed;
 
 
 void
 kernel_early_bootstrap(void)
 {
+       /* serverperfmode is needed by timer setup */
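+       /* note: the boot-arg's presence alone enables the mode, whatever its value */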
+        if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
+                serverperfmode = 1;
+        }
 
        lck_mod_init();
 
 
        PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs));
 
-       /* i386_vm_init already checks for this ; do it aagin anyway */
-        if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
-                serverperfmode = 1;
-        }
        scale_setup();
 
        kernel_bootstrap_kprintf("calling vm_mem_bootstrap\n");
 
 lck_grp_t       task_lck_grp;
 lck_grp_attr_t  task_lck_grp_attr;
 
+/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
+int audio_active = 0;
+
 zinfo_usage_store_t tasks_tkm_private;
 zinfo_usage_store_t tasks_tkm_shared;
 
                        "supressed by a boot-arg\n", procname, pid);
                return;
        }
+       if (audio_active) {
+               printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
+                      "suppressed due to audio playback\n", procname, pid);
+               return;
+       }
        printf("process %s[%d] caught causing excessive wakeups. Observed wakeups rate "
                "(per sec): %lld; Maximum permitted wakeups rate (per sec): %lld; Observation "
                "period: %lld seconds; Task lifetime number of wakeups: %lld\n",
 
                tsnap->ss_flags |= kTaskRsrcFlagged;
        }
 
-       proc_get_darwinbgstate(task, &tmp);
-
-       if (tmp & PROC_FLAG_DARWINBG) {
+       if (task->effective_policy.darwinbg == 1) {
                tsnap->ss_flags |= kTaskDarwinBG;
        }
-       if (tmp & PROC_FLAG_EXT_DARWINBG) {
-               tsnap->ss_flags |= kTaskExtDarwinBG;
-       }
+
+       proc_get_darwinbgstate(task, &tmp);
 
        if (task->requested_policy.t_role == TASK_FOREGROUND_APPLICATION) {
                tsnap->ss_flags |= kTaskIsForeground;
 
 #endif /* MACH_BSD */
 
 extern int disable_exc_resource;
+extern int audio_active;
 extern int debug_task;
 int thread_max = CONFIG_THREAD_MAX;    /* Max number of threads */
 int task_threadmax = CONFIG_THREAD_MAX;
                return;
        }
 
+       if (audio_active) {
+               printf("process %s[%d] thread %llu caught burning CPU!; EXC_RESOURCE "
+                      "suppressed due to audio playback\n", procname, pid, tid);
+               return;
+       }
        printf("process %s[%d] thread %llu caught burning CPU! "
               "It used more than %d%% CPU (Actual recent usage: %d%%) over %d seconds. "
               "thread lifetime cpu usage %d.%06d seconds, (%d.%06d user, %d.%06d system) "
 
        DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);
 
        /*
-        * Set the longterm timer threshold.
-        * Defaults to TIMER_LONGTERM_THRESHOLD; overridden longterm boot-arg 
+        * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD,
+        * or TIMER_LONGTERM_NONE (disabled) when serverperfmode is set;
+        * overridden by the "longterm" boot-arg.
         */
-       tlp->threshold.interval = TIMER_LONGTERM_THRESHOLD;
+       tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE
+                                                : TIMER_LONGTERM_THRESHOLD;
        if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) {
                tlp->threshold.interval = (longterm == 0) ?
                                                TIMER_LONGTERM_NONE :
 
 #define UPL_REQUEST_SET_DIRTY  0x10000000
 #define UPL_REQUEST_NO_FAULT   0x20000000 /* fail if pages not all resident */
 #define UPL_NOZEROFILLIO       0x40000000 /* allow non zerofill pages present */
+#define UPL_REQUEST_FORCE_COHERENCY    0x80000000
 
 /* UPL flags known by this kernel */
-#define UPL_VALID_FLAGS                0x7FFFFFFF
+#define UPL_VALID_FLAGS                0xFFFFFFFF
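+/* widened from 0x7FFFFFFF now that UPL_REQUEST_FORCE_COHERENCY occupies the top bit */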
 
 
 /* upl abort error flags */
 #define UPL_COMMIT_CLEAR_PRECIOUS      0x80
 #define UPL_COMMIT_SPECULATE           0x100
 #define UPL_COMMIT_FREE_ABSENT         0x200
+#define UPL_COMMIT_WRITTEN_BY_KERNEL   0x400
 
 #define UPL_COMMIT_KERNEL_ONLY_FLAGS   (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT)
 
 
 }
 
 kern_return_t
-vnode_pager_get_object_pathname(
+vnode_pager_get_object_name(
        memory_object_t         mem_obj,
        char                    *pathname,
-       vm_size_t               *length_p)
+       vm_size_t               pathname_len,
+       char                    *filename,
+       vm_size_t               filename_len,
+       boolean_t               *truncated_path_p)
 {
        vnode_pager_t   vnode_object;
 
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       return vnode_pager_get_pathname(vnode_object->vnode_handle,
-                                       pathname,
-                                       length_p);
+       return vnode_pager_get_name(vnode_object->vnode_handle,
+                                   pathname,
+                                   pathname_len,
+                                   filename,
+                                   filename_len,
+                                   truncated_path_p);
 }
 
 kern_return_t
-vnode_pager_get_object_filename(
-       memory_object_t mem_obj,
-       const char      **filename)
+vnode_pager_get_object_mtime(
+       memory_object_t         mem_obj,
+       struct timespec         *mtime,
+       struct timespec         *cs_mtime)
 {
        vnode_pager_t   vnode_object;
 
 
        vnode_object = vnode_pager_lookup(mem_obj);
 
-       return vnode_pager_get_filename(vnode_object->vnode_handle,
-                                       filename);
+       return vnode_pager_get_mtime(vnode_object->vnode_handle,
+                                    mtime,
+                                    cs_mtime);
 }
 
 kern_return_t
 
                } else {
                        boolean_t empty;
                        upl_commit_range(upl, 0, upl->size, 
-                                        UPL_COMMIT_CS_VALIDATED,
+                                        UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
                                         upl_pl, pl_count, &empty);
                }
 
 
 
 #include <sys/codesign.h>
 
+#include <libsa/sys/timers.h>  /* for struct timespec */
+
 #define VM_FAULT_CLASSIFY      0
 
 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
                                                 */
                                                pmap_sync_page_attributes_phys(
                                                        m->phys_page);
-                                       }
+                                       } else
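+                                               /*
+                                                * kernel populated this page; remember it so a
+                                                * later UPL can flush for non-coherent devices
+                                                */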
+                                               m->written_by_kernel = TRUE;
                                        break;
                                case KERN_MEMORY_FAILURE:
                                        m->unusual = TRUE;
  * careful not to modify the VM object in any way that is not
  * legal under a shared lock...
  */
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
 unsigned long cs_enter_tainted_rejected = 0;
 unsigned long cs_enter_tainted_accepted = 0;
 kern_return_t
                
                if (reject_page) {
                        /* reject the tainted page: abort the page fault */
+                       int                     pid;
+                       const char              *procname;
+                       task_t                  task;
+                       vm_object_t             file_object, shadow;
+                       vm_object_offset_t      file_offset;
+                       char                    *pathname, *filename;
+                       vm_size_t               pathname_len, filename_len;
+                       boolean_t               truncated_path;
+#define __PATH_MAX 1024
+                       struct timespec         mtime, cs_mtime;
+
                        kr = KERN_CODESIGN_ERROR;
                        cs_enter_tainted_rejected++;
+
+                       /* get process name and pid */
+                       procname = "?";
+                       task = current_task();
+                       pid = proc_selfpid();
+                       if (task->bsd_info != NULL)
+                               procname = proc_name_address(task->bsd_info);
+
+                       /* get file's VM object */
+                       file_object = m->object;
+                       file_offset = m->offset;
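+                       /*
+                        * walk the shadow chain with hand-over-hand shared locks
+                        * down to the object backed by the file's pager, adding up
+                        * the page's offset within that file as we go
+                        */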
+                       for (shadow = file_object->shadow;
+                            shadow != VM_OBJECT_NULL;
+                            shadow = file_object->shadow) {
+                               vm_object_lock_shared(shadow);
+                               if (file_object != m->object) {
+                                       vm_object_unlock(file_object);
+                               }
+                               file_offset += file_object->vo_shadow_offset;
+                               file_object = shadow;
+                       }
+
+                       mtime.tv_sec = 0;
+                       mtime.tv_nsec = 0;
+                       cs_mtime.tv_sec = 0;
+                       cs_mtime.tv_nsec = 0;
+
+                       /* get file's pathname and/or filename */
+                       pathname = NULL;
+                       filename = NULL;
+                       pathname_len = 0;
+                       filename_len = 0;
+                       truncated_path = FALSE;
+                       if (file_object->pager == NULL) {
+                               /* no pager -> no file -> no pathname */
+                               pathname = (char *) "<nil>";
+                       } else {
+                               pathname = (char *)kalloc(__PATH_MAX * 2);
+                               if (pathname) {
+                                       pathname_len = __PATH_MAX;
+                                       filename = pathname + pathname_len;
+                                       filename_len = __PATH_MAX;
+                               }
+                               vnode_pager_get_object_name(file_object->pager,
+                                                           pathname,
+                                                           pathname_len,
+                                                           filename,
+                                                           filename_len,
+                                                           &truncated_path);
+                               vnode_pager_get_object_mtime(file_object->pager,
+                                                            &mtime,
+                                                            &cs_mtime);
+                       }
+                       printf("CODE SIGNING: process %d[%s]: "
+                              "rejecting invalid page at address 0x%llx "
+                              "from offset 0x%llx in file \"%s%s%s\" "
+                              "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+                              "(signed:%d validated:%d tainted:%d "
+                              "wpmapped:%d slid:%d)\n",
+                              pid, procname, (addr64_t) vaddr,
+                              file_offset,
+                              pathname,
+                              (truncated_path ? "/.../" : ""),
+                              (truncated_path ? filename : ""),
+                              cs_mtime.tv_sec, cs_mtime.tv_nsec,
+                              ((cs_mtime.tv_sec == mtime.tv_sec &&
+                                cs_mtime.tv_nsec == mtime.tv_nsec)
+                               ? "=="
+                               : "!="),
+                              mtime.tv_sec, mtime.tv_nsec,
+                              m->object->code_signed,
+                              m->cs_validated,
+                              m->cs_tainted,
+                              m->wpmapped,
+                              m->slid);
+                       if (file_object != m->object) {
+                               vm_object_unlock(file_object);
+                       }
+                       if (pathname_len != 0) {
+                               kfree(pathname, __PATH_MAX * 2);
+                               pathname = NULL;
+                               filename = NULL;
+                       }
                } else {
                        /* proceed with the tainted page */
                        kr = KERN_SUCCESS;
                        m->cs_tainted = TRUE;
                        cs_enter_tainted_accepted++;
                }
-               if (cs_debug || kr != KERN_SUCCESS) {
-                       printf("CODESIGNING: vm_fault_enter(0x%llx): "
-                              "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
-                              (long long)vaddr, m, m->object, m->offset);
+               if (kr != KERN_SUCCESS) {
+                       if (cs_debug) {
+                               printf("CODESIGNING: vm_fault_enter(0x%llx): "
+                                      "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
+                                      (long long)vaddr, m, m->object, m->offset);
+                       }
 #if !SECURE_KERNEL
-                       if (kr != KERN_SUCCESS && cs_enforcement_panic) {
+                       if (cs_enforcement_panic) {
                                panic("CODESIGNING: panicking on invalid page\n");
                        }
 #endif
        }
 }
 
+extern int panic_on_cs_killed;
 void
 vm_page_validate_cs(
        vm_page_t       page)
                return;
        }
 
+       if (panic_on_cs_killed &&
+           page->slid) {
+               panic("vm_page_validate_cs(%p): page is slid\n", page);
+       }
+       assert(!page->slid);
+
 #if CHECK_CS_VALIDATION_BITMAP 
        if ( vnode_pager_cs_check_validation_bitmap( page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
                page->cs_validated = TRUE;
 
 #define __VM_VM_OPTIONS_H__
 
 #define UPL_DEBUG DEBUG
+// #define VM_PIP_DEBUG
+
+#define VM_PAGE_BUCKETS_CHECK DEBUG
+#if VM_PAGE_BUCKETS_CHECK
+#define VM_PAGE_FAKE_BUCKETS 1
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 #endif /* __VM_VM_OPTIONS_H__ */
 
 #define _VM_VM_PAGE_H_
 
 #include <debug.h>
+#include <vm/vm_options.h>
 
 #include <mach/boolean.h>
 #include <mach/vm_prot.h>
        /* boolean_t */ busy:1,         /* page is in transit (O) */
                        wanted:1,       /* someone is waiting for page (O) */
                        tabled:1,       /* page is in VP table (O) */
+                       hashed:1,       /* page is in vm_page_buckets[]
+                                          (O) + the bucket lock */
                        fictitious:1,   /* Physical page doesn't exist (O) */
        /*
         * IMPORTANT: the "pmapped" bit can be turned on while holding the
                        slid:1,
                        was_dirty:1,    /* was this page previously dirty? */
                        compressor:1,   /* page owned by compressor pool */
-                       __unused_object_bits:7;  /* 7 bits available here */
+                       written_by_kernel:1,    /* page was written by kernel (i.e. decompressed) */
+                       __unused_object_bits:5; /* 5 bits available here */
 
 #if __LP64__
        unsigned int __unused_padding;  /* Pad structure explicitly
 
 extern vm_page_t vm_object_page_grab(vm_object_t);
 
+#if VM_PAGE_BUCKETS_CHECK
+extern void vm_page_buckets_check(void);
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 #endif /* _VM_VM_PAGE_H_ */
 
 #define VM_PAGEOUT_STAT_AFTER(i) \
        (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
 
+#if VM_PAGE_BUCKETS_CHECK
+int vm_page_buckets_check_interval = 10; /* in seconds */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
 /*
  * Called from compute_averages().
  */
 {
        unsigned int vm_pageout_next;
 
+#if VM_PAGE_BUCKETS_CHECK
+       /* check the consistency of VM page buckets at regular interval */
+       static int counter = 0;
+       if ((++counter % vm_page_buckets_check_interval) == 0) {
+               vm_page_buckets_check();
+       }
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        vm_memory_pressure =
                vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
 
 }
 
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 void
 vm_pageout(void)
        if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
                vm_compressor_pager_init();
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+       printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
+              vm_page_fake_buckets_start, vm_page_fake_buckets_end);
+       pmap_protect(kernel_pmap,
+                    vm_page_fake_buckets_start,
+                    vm_page_fake_buckets_end,
+                    VM_PROT_READ);
+//     *(char *) vm_page_fake_buckets_start = 'x';     /* panic! */
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        vm_pageout_continue();
 
        /*
        return KERN_FAILURE;
 }
 
-
+extern int panic_on_cs_killed;
 kern_return_t
 upl_commit_range(
        upl_t                   upl, 
                        m->cs_validated = page_list[entry].cs_validated;
                        m->cs_tainted = page_list[entry].cs_tainted;
                }
+               if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL)
+                       m->written_by_kernel = TRUE;
+
                if (upl->flags & UPL_IO_WIRE) {
 
                        if (page_list)
                                         * so it will need to be
                                         * re-validated.
                                         */
+                                       if (panic_on_cs_killed &&
+                                           m->slid) {
+                                               panic("upl_commit_range(%p): page %p was slid\n",
+                                                     upl, m);
+                                       }
+                                       assert(!m->slid);
                                        m->cs_validated = FALSE;
 #if DEVELOPMENT || DEBUG
                                        vm_cs_validated_resets++;
                         * so it will need to be
                         * re-validated.
                         */
+                       if (panic_on_cs_killed &&
+                           m->slid) {
+                               panic("upl_commit_range(%p): page %p was slid\n",
+                                     upl, m);
+                       }
+                       assert(!m->slid);
                        m->cs_validated = FALSE;
 #if DEVELOPMENT || DEBUG
                        vm_cs_validated_resets++;
                         */
                        dwp->dw_mask |= DW_clear_busy;
                }
+
                /*
                 * Wakeup any thread waiting for the page to be un-cleaning.
                 */
                if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
                        SET_PAGE_DIRTY(dst_page, TRUE); 
                }
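+               /*
+                * caller demands coherency: flush any page the kernel wrote
+                * before handing it to a device that may not snoop caches
+                */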
+               if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
+                       pmap_sync_page_attributes_phys(dst_page->phys_page);
+                       dst_page->written_by_kernel = FALSE;
+               }
+
 record_phys_addr:
                if (dst_page->busy)
                        upl->flags |= UPL_HAS_BUSY;
 
 extern uint32_t vnode_pager_return_throttle_io_limit(
        struct vnode *,
        uint32_t     *);
-extern kern_return_t vnode_pager_get_pathname(
+extern kern_return_t vnode_pager_get_name(
        struct vnode    *vp,
        char            *pathname,
-       vm_size_t       *length_p);
-extern kern_return_t vnode_pager_get_filename(
+       vm_size_t       pathname_len,
+       char            *filename,
+       vm_size_t       filename_len,
+       boolean_t       *truncated_path_p);
+struct timespec;
+extern kern_return_t vnode_pager_get_mtime(
        struct vnode    *vp,
-       const char      **filename);
+       struct timespec *mtime,
+       struct timespec *cs_mtime);
 extern kern_return_t vnode_pager_get_cs_blobs(
        struct vnode    *vp,
        void            **blobs);
 extern kern_return_t vnode_pager_get_throttle_io_limit(
        memory_object_t,
        uint32_t *);
-extern kern_return_t vnode_pager_get_object_pathname(
+extern kern_return_t vnode_pager_get_object_name(
        memory_object_t mem_obj,
        char            *pathname,
-       vm_size_t       *length_p);
-extern kern_return_t vnode_pager_get_object_filename(
+       vm_size_t       pathname_len,
+       char            *filename,
+       vm_size_t       filename_len,
+       boolean_t       *truncated_path_p);
+extern kern_return_t vnode_pager_get_object_mtime(
        memory_object_t mem_obj,
-       const char      **filename);
+       struct timespec *mtime,
+       struct timespec *cs_mtime);
 extern kern_return_t vnode_pager_get_object_cs_blobs(
        memory_object_t mem_obj,
        void            **blobs);
 
 
 lck_spin_t     *vm_page_bucket_locks;
 
+#if VM_PAGE_BUCKETS_CHECK
+boolean_t vm_page_buckets_check_ready = FALSE;
+#if VM_PAGE_FAKE_BUCKETS
+vm_page_bucket_t *vm_page_fake_buckets;        /* decoy buckets */
+vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
 #if    MACH_PAGE_HASH_STATS
 /* This routine is only for debug.  It is intended to be called by
        m->busy = TRUE;
        m->wanted = FALSE;
        m->tabled = FALSE;
+       m->hashed = FALSE;
        m->fictitious = FALSE;
        m->pmapped = FALSE;
        m->wpmapped = FALSE;
        m->was_dirty = FALSE;
        m->xpmapped = FALSE;
        m->compressor = FALSE;
+       m->written_by_kernel = FALSE;
        m->__unused_object_bits = 0;
 
        /*
        if (vm_page_hash_mask & vm_page_bucket_count)
                printf("vm_page_bootstrap: WARNING -- strange page hash\n");
 
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+       /*
+        * Allocate a decoy set of page buckets, to detect
+        * any stomping there.
+        */
+       vm_page_fake_buckets = (vm_page_bucket_t *)
+               pmap_steal_memory(vm_page_bucket_count *
+                                 sizeof(vm_page_bucket_t));
+       vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
+       vm_page_fake_buckets_end =
+               vm_map_round_page((vm_page_fake_buckets_start +
+                                  (vm_page_bucket_count *
+                                   sizeof (vm_page_bucket_t))),
+                                 PAGE_MASK);
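+       /* seed the decoy buckets with a pattern; vm_page_buckets_check() panics if it changes */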
+       char *cp;
+       for (cp = (char *)vm_page_fake_buckets_start;
+            cp < (char *)vm_page_fake_buckets_end;
+            cp++) {
+               *cp = 0x5a;
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        vm_page_buckets = (vm_page_bucket_t *)
                pmap_steal_memory(vm_page_bucket_count *
                                  sizeof(vm_page_bucket_t));
        for (i = 0; i < vm_page_bucket_lock_count; i++)
                lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
 
+#if VM_PAGE_BUCKETS_CHECK
+       vm_page_buckets_check_ready = TRUE;
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
        /*
         *      Machine-dependent code allocates the resident page table.
         *      It uses vm_page_init to initialize the page frames.
 #endif /* DEBUG */
        
        if (insert_in_hash == TRUE) {
-#if DEBUG
+#if DEBUG || VM_PAGE_BUCKETS_CHECK
                if (mem->tabled || mem->object != VM_OBJECT_NULL)
                        panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
                              "already in (obj=%p,off=0x%llx)",
                if (++bucket->cur_count > bucket->hi_count)
                        bucket->hi_count = bucket->cur_count;
 #endif /* MACH_PAGE_HASH_STATS */
-
+               mem->hashed = TRUE;
                lck_spin_unlock(bucket_lock);
        }
 
        VM_PAGE_CHECK(mem);
 #endif
        vm_object_lock_assert_exclusive(object);
-#if DEBUG
+#if DEBUG || VM_PAGE_BUCKETS_CHECK
        if (mem->tabled || mem->object != VM_OBJECT_NULL)
                panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
                      "already in (obj=%p,off=0x%llx)",
                                 * Remove old page from hash list
                                 */
                                *mp = m->next;
+                               m->hashed = FALSE;
 
                                found_m = m;
                                break;
         * insert new page at head of hash list
         */
        bucket->pages = mem;
+       mem->hashed = TRUE;
 
        lck_spin_unlock(bucket_lock);
 
 #if     MACH_PAGE_HASH_STATS
                bucket->cur_count--;
 #endif /* MACH_PAGE_HASH_STATS */
-
+               mem->hashed = FALSE;
                lck_spin_unlock(bucket_lock);
        }
        /*
                                        m2->was_dirty   = m1->was_dirty;
                                        m2->compressor  = m1->compressor;
 
+                                       /*
+                                        * page may need to be flushed if
+                                        * it is marshalled into a UPL
+                                        * that is going to be used by a device
+                                        * that doesn't support coherency
+                                        */
+                                       m2->written_by_kernel = TRUE;
+
                                        /*
                                         * make sure we clear the ref/mod state
                                         * from the pmap layer... else we risk
        vm_page_bucket_t *bucket;
        int             hash_id;
 
-       assert(mem->tabled);
+       assert(mem->hashed);
        assert(mem->object);
        assert(mem->offset != (vm_object_offset_t) -1);
 
                        *tmem = *mem;
                        mem = tmem;
                }
-               if (mem->tabled)
+               if (mem->hashed)
                        hibernate_hash_insert_page(mem);
                /*
                 * the 'hole' between this vm_page_t and the previous
        assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
 
        /*
-        * process the list of vm_page_t's that were tabled in the hash,
+        * process the list of vm_page_t's that were entered in the hash,
         * but were not located in the vm_pages array... these are
         * vm_page_t's that were created on the fly (i.e. fictitious)
         */
                bucket = &vm_page_buckets[i];
 
                for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) {
-
-                       assert(mem->tabled);
+                       assert(mem->hashed);
 
                        mem_next = mem->next;
 
        return vm_page_bucket_count;
 }
 #endif /* MACH_VM_DEBUG */
+
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+       unsigned int i;
+       vm_page_t p;
+       unsigned int p_hash;
+       vm_page_bucket_t *bucket;
+       lck_spin_t      *bucket_lock;
+
+       if (!vm_page_buckets_check_ready) {
+               return;
+       }
+
+#if HIBERNATION
+       if (hibernate_rebuild_needed ||
+           hibernate_rebuild_hash_list) {
+               panic("BUCKET_CHECK: hibernation in progress: "
+                     "rebuild_needed=%d rebuild_hash_list=%p\n",
+                     hibernate_rebuild_needed,
+                     hibernate_rebuild_hash_list);
+       }
+#endif /* HIBERNATION */
+
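+       /* the decoy buckets must still hold their 0x5a fill from bootstrap */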
+#if VM_PAGE_FAKE_BUCKETS
+       char *cp;
+       for (cp = (char *) vm_page_fake_buckets_start;
+            cp < (char *) vm_page_fake_buckets_end;
+            cp++) {
+               if (*cp != 0x5a) {
+                       panic("BUCKET_CHECK: corruption at %p in fake buckets "
+                             "[0x%llx:0x%llx]\n",
+                             cp,
+                             vm_page_fake_buckets_start,
+                             vm_page_fake_buckets_end);
+               }
+       }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+       for (i = 0; i < vm_page_bucket_count; i++) {
+               bucket = &vm_page_buckets[i];
+               if (bucket->pages == VM_PAGE_NULL) {
+                       continue;
+               }
+
+               bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+               lck_spin_lock(bucket_lock);
+               p = bucket->pages;
+               while (p != VM_PAGE_NULL) {
+                       /* compute the hash up front so the panic below can report it */
+                       p_hash = vm_page_hash(p->object, p->offset);
+                       if (!p->hashed) {
+                               panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+                                     "hash %d in bucket %d at %p "
+                                     "is not hashed\n",
+                                     p, p->object, p->offset,
+                                     p_hash, i, bucket);
+                       }
+                       if (p_hash != i) {
+                               panic("BUCKET_CHECK: corruption in bucket %d "
+                                     "at %p: page %p object %p offset 0x%llx "
+                                     "hash %d\n",
+                                     i, bucket, p, p->object, p->offset,
+                                     p_hash);
+                       }
+                       p = p->next;
+               }
+               lck_spin_unlock(bucket_lock);
+       }
+
+//     printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
 
                assert(kr2 == KERN_SUCCESS);
        }
 
-       /*
-        * This is how check_np() knows if the shared region
-        * is mapped. So clear it here.
-        */
-       shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
-
        if (reset_shared_region_state) {
                vm_shared_region_lock();
                assert(shared_region->sr_ref_count > 1);
        memory_object_control_t         file_control,
        memory_object_size_t            file_size,
        void                            *root_dir,
-       struct shared_file_mapping_np   *mapping_to_slide)
+       uint32_t                        slide,
+       user_addr_t                     slide_start,
+       user_addr_t                     slide_size)
 {
        kern_return_t           kr;
        vm_object_t             file_object;
        vm_map_offset_t         target_address;
        vm_object_t             object;
        vm_object_size_t        obj_size;
-       boolean_t               found_mapping_to_slide = FALSE;
+       struct shared_file_mapping_np   *mapping_to_slide = NULL;
+       mach_vm_offset_t        first_mapping = (mach_vm_offset_t) -1;
 
 
        kr = KERN_SUCCESS;
                        /*
                         * This is the mapping that needs to be slid.
                         */
-                       if (found_mapping_to_slide == TRUE) {
+                       if (mapping_to_slide != NULL) {
                                SHARED_REGION_TRACE_INFO(
                                        ("shared_region: mapping[%d]: "
                                         "address:0x%016llx size:0x%016llx "
                                         mappings[i].sfm_max_prot,
                                         mappings[i].sfm_init_prot));
                        } else {
-                               if (mapping_to_slide != NULL) {
-                                       mapping_to_slide->sfm_file_offset = mappings[i].sfm_file_offset;
-                                       mapping_to_slide->sfm_size = mappings[i].sfm_size;
-                                       found_mapping_to_slide = TRUE;
-                               }
+                               mapping_to_slide = &mappings[i];
                        }
                }
 
                                VM_INHERIT_DEFAULT);
                }
 
-               if (kr != KERN_SUCCESS) {
+               if (kr == KERN_SUCCESS) {
+                       /*
+                        * Record the first (chronologically) successful
+                        * mapping in this shared region.
+                        * We're protected by "sr_mapping_in_progress" here,
+                        * so no need to lock "shared_region".
+                        */
+                       if (first_mapping == (mach_vm_offset_t) -1) {
+                               first_mapping = target_address;
+                       }
+               } else {
                        if (map_port == MACH_PORT_NULL) {
                                /*
                                 * Get rid of the VM object we just created
 
                }
 
-               /*
-                * Record the first (chronologically) mapping in
-                * this shared region.
-                * We're protected by "sr_mapping_in_progress" here,
-                * so no need to lock "shared_region".
-                */
-               if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
-                       shared_region->sr_first_mapping = target_address;
+       }
+
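+       /* all mappings are in place; apply the requested slide to the designated mapping */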
+       if (kr == KERN_SUCCESS &&
+           slide &&
+           mapping_to_slide != NULL) {
+               kr = vm_shared_region_slide(slide, 
+                                           mapping_to_slide->sfm_file_offset, 
+                                           mapping_to_slide->sfm_size, 
+                                           slide_start, 
+                                           slide_size, 
+                                           file_control);
+               if (kr != KERN_SUCCESS) {
+                       SHARED_REGION_TRACE_ERROR(
+                               ("shared_region: region_slide("
+                                "slide:0x%x start:0x%016llx "
+                                "size:0x%016llx) failed 0x%x\n",
+                                slide,
+                                (long long)slide_start,
+                                (long long)slide_size,
+                                kr));
+                       vm_shared_region_undo_mappings(NULL,
+                                                      0,
+                                                      mappings,
+                                                      mappings_count);
                }
        }
 
        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 1);
        assert(shared_region->sr_mapping_in_progress);
+       /* set "sr_first_mapping"; dyld uses it to validate the shared cache */ 
+       if (kr == KERN_SUCCESS &&
+           shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
+               shared_region->sr_first_mapping = first_mapping;
+       }
        /* we're done working on that shared region */
        shared_region->sr_mapping_in_progress = FALSE;
        thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
 
        memory_object_control_t file_control,
        memory_object_size_t    file_size,
        void                    *root_dir,
-       struct shared_file_mapping_np *mapping_to_slide);
+       uint32_t                slide,
+       user_addr_t             slide_start,
+       user_addr_t             slide_size);
 extern kern_return_t vm_shared_region_sliding_valid(uint32_t slide);
 extern kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_t sr);
 extern kern_return_t vm_shared_region_slide_init(vm_shared_region_t sr,
 
                                            rwlg.lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt, rwlg.lck_grp_name)
     return out_string
 
+#Macro: showlock
+@lldb_type_summary(['lck_mtx_t *'])
+@header("===== Mutex Lock Summary =====")
+def GetMutexLockSummary(mtx):
+    """ Summarize mutex lock with important information.
+        params:
+        mtx: value - obj representing a mutex lock in kernel
+        returns:
+        out_str - summary of the mutex lock
+    """
+    if not mtx:
+        return "Invalid lock value: 0x0"
+
+    if kern.arch == "x86_64":
+        out_str = "Lock Type\t\t: MUTEX\n"
+        mtxd = mtx.lck_mtx_sw.lck_mtxd
+        out_str += "Owner Thread\t\t: {:#x}\n".format(mtxd.lck_mtxd_owner)
+        cmd_str = "p/d ((lck_mtx_t*){:#x})->lck_mtx_sw.lck_mtxd.".format(mtx)
+        cmd_out = lldb_run_command(cmd_str + "lck_mtxd_waiters")
+        out_str += "Number of Waiters\t: {:s}\n".format(cmd_out.split()[-1])
+        cmd_out = lldb_run_command(cmd_str + "lck_mtxd_ilocked")
+        out_str += "ILocked\t\t\t: {:s}\n".format(cmd_out.split()[-1])
+        cmd_out = lldb_run_command(cmd_str + "lck_mtxd_mlocked")
+        out_str += "MLocked\t\t\t: {:s}\n".format(cmd_out.split()[-1])
+        cmd_out = lldb_run_command(cmd_str + "lck_mtxd_promoted")
+        out_str += "Promoted\t\t: {:s}\n".format(cmd_out.split()[-1])
+        cmd_out = lldb_run_command(cmd_str + "lck_mtxd_spin")
+        out_str += "Spin\t\t\t: {:s}\n".format(cmd_out.split()[-1])
+        return out_str
+
+    out_str = "Lock Type\t\t: MUTEX\n"
+    out_str += "Owner Thread\t\t: {:#x}\n".format(mtx.lck_mtx_hdr.lck_mtxd_data & ~0x3)
+    out_str += "Number of Waiters\t: {:d}\n".format(mtx.lck_mtx_sw.lck_mtxd.lck_mtxd_waiters)
+    out_str += "Flags\t\t\t: "
+    if mtx.lck_mtx_hdr.lck_mtxd_data & 0x1:
+        out_str += "[Interlock Locked] "
+    if mtx.lck_mtx_hdr.lck_mtxd_data & 0x2:
+        out_str += "[Wait Flag]"
+    if (mtx.lck_mtx_hdr.lck_mtxd_data & 0x3) == 0:
+        out_str += "None"
+    return out_str
+
+@lldb_type_summary(['lck_spin_t *'])
+@header("===== SpinLock Summary =====")
+def GetSpinLockSummary(spinlock):
+    """ Summarize spinlock with important information.
+        params:
+        spinlock: value - obj representing a spinlock in kernel
+        returns:
+        out_str - summary of the spinlock
+    """
+    if not spinlock:
+        return "Invalid lock value: 0x0"
+
+    out_str = "Lock Type\t\t: SPINLOCK\n"
+    if kern.arch == "x86_64":
+        out_str += "Interlock\t\t: {:#x}\n".format(spinlock.interlock)
+        return out_str 
+
+    out_str += "Owner Thread\t\t: {:#x}\n".format(spinlock.lck_spin_data & ~0x3)
+    out_str += "Flags\t\t\t: "
+    if spinlock.lck_spin_data & 0x1:
+        out_str += "[Interlock Locked] "
+    if spinlock.lck_spin_data & 0x2:
+        out_str += "[Wait Flag]"
+    if (spinlock.lck_spin_data & 0x3) == 0:
+        out_str += "None" 
+    return out_str
+
+@lldb_command('showlock', 'MS')
+def ShowLock(cmd_args=None, cmd_options={}):
+    """ Show info about a lock - its state and owner thread details
+        Usage: showlock <address of a lock>
+        -M : to consider <addr> as lck_mtx_t 
+        -S : to consider <addr> as lck_spin_t 
+    """
+    if not cmd_args:
+        raise ArgumentError("Please specify the address of the lock whose info you want to view.")
+
+    summary_str = ""
+    lock = kern.GetValueFromAddress(cmd_args[0], 'uintptr_t*')
+
+    if kern.arch == "x86_64" and lock:
+        if "-M" in cmd_options:
+            lock_mtx = Cast(lock, 'lck_mtx_t *')
+            summary_str = GetMutexLockSummary(lock_mtx)
+        elif "-S" in cmd_options:
+            lock_spin = Cast(lock, 'lck_spin_t *')
+            summary_str = GetSpinLockSummary(lock_spin)
+        else:
+            summary_str = "Please specify supported lock option(-M/-S)"
+
+        print summary_str
+        return
+
+    if lock:
+        lock_mtx = Cast(lock, 'lck_mtx_t*')
+        if lock_mtx.lck_mtx_type == 0x22:
+            summary_str = GetMutexLockSummary(lock_mtx)
+
+        lock_spin = Cast(lock, 'lck_spin_t*')
+        if lock_spin.lck_spin_type == 0x11:
+            summary_str = GetSpinLockSummary(lock_spin)
+
+    if summary_str == "":
+        summary_str = "Lock Type\t\t: INVALID LOCK" 
+    print summary_str
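+
+# Usage example (lock address hypothetical):
+#   (lldb) showlock -M 0xffffff80145a0b40    # decode as lck_mtx_t
+#   (lldb) showlock -S 0xffffff80145a0b40    # decode as lck_spin_t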
+
+#EndMacro: showlock
+
 @lldb_command('showallrwlck')
 def ShowAllRWLck(cmd_args=None):
     """ Routine to print a summary listing of all read/writer locks