diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c
index 3e4353a97071cc952f11470e917fcca342aa805e..6f09debf794c7940d9068cc7e85761ebfb1d6724 100644
--- a/bsd/kern/ubc_subr.c
+++ b/bsd/kern/ubc_subr.c
@@ -65,6 +65,7 @@
 #include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/thread.h>
+#include <vm/pmap.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_protos.h> /* last */
 
@@ -80,9 +81,10 @@ extern kern_return_t memory_object_pages_resident(memory_object_control_t,
                                                        boolean_t *);
 extern kern_return_t   memory_object_signed(memory_object_control_t control,
                                             boolean_t is_signed);
-extern boolean_t       memory_object_is_slid(memory_object_control_t   control);
 extern boolean_t       memory_object_is_signed(memory_object_control_t);
 
+/* XXX Same for those. */
+
 extern void Debugger(const char *message);
 
 
@@ -110,6 +112,9 @@ static int ubc_umcallback(vnode_t, void *);
 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
 static void ubc_cs_free(struct ubc_info *uip);
 
+static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
+static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
+
 struct zone    *ubc_info_zone;
 static uint32_t        cs_blob_generation_count = 1;
 
@@ -121,9 +126,6 @@ static uint32_t     cs_blob_generation_count = 1;
 extern int cs_debug;
 
 #define        PAGE_SHIFT_4K           (12)
-#define        PAGE_SIZE_4K            ((1<<PAGE_SHIFT_4K))
-#define        PAGE_MASK_4K            ((PAGE_SIZE_4K-1))
-#define round_page_4K(x)       (((vm_offset_t)(x) + PAGE_MASK_4K) & ~((vm_offset_t)PAGE_MASK_4K))
 
 static boolean_t
 cs_valid_range(
@@ -158,7 +160,13 @@ struct cs_hash {
     cs_md_final                cs_final;
 };
 
-static struct cs_hash cs_hash_sha1 = {
+uint8_t cs_hash_type(
+    struct cs_hash const * const cs_hash)
+{
+    return cs_hash->cs_type;
+}
+
+static const struct cs_hash cs_hash_sha1 = {
     .cs_type = CS_HASHTYPE_SHA1,
     .cs_size = CS_SHA1_LEN,
     .cs_digest_size = SHA_DIGEST_LENGTH,
@@ -167,7 +175,7 @@ static struct cs_hash cs_hash_sha1 = {
     .cs_final = (cs_md_final)SHA1Final,
 };
 #if CRYPTO_SHA2
-static struct cs_hash cs_hash_sha256 = {
+static const struct cs_hash cs_hash_sha256 = {
     .cs_type = CS_HASHTYPE_SHA256,
     .cs_size = SHA256_DIGEST_LENGTH,
     .cs_digest_size = SHA256_DIGEST_LENGTH,
@@ -175,7 +183,7 @@ static struct cs_hash cs_hash_sha256 = {
     .cs_update = (cs_md_update)SHA256_Update,
     .cs_final = (cs_md_final)SHA256_Final,
 };
-static struct cs_hash cs_hash_sha256_truncate = {
+static const struct cs_hash cs_hash_sha256_truncate = {
     .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
     .cs_size = CS_SHA256_TRUNCATED_LEN,
     .cs_digest_size = SHA256_DIGEST_LENGTH,
@@ -183,7 +191,7 @@ static struct cs_hash cs_hash_sha256_truncate = {
     .cs_update = (cs_md_update)SHA256_Update,
     .cs_final = (cs_md_final)SHA256_Final,
 };
-static struct cs_hash cs_hash_sha384 = {
+static const struct cs_hash cs_hash_sha384 = {
     .cs_type = CS_HASHTYPE_SHA384,
     .cs_size = SHA384_DIGEST_LENGTH,
     .cs_digest_size = SHA384_DIGEST_LENGTH,
@@ -192,8 +200,8 @@ static struct cs_hash cs_hash_sha384 = {
     .cs_final = (cs_md_final)SHA384_Final,
 };
 #endif
-    
-static struct cs_hash *
+
+static struct cs_hash const *
 cs_find_md(uint8_t type)
 {
        if (type == CS_HASHTYPE_SHA1) {
@@ -221,7 +229,7 @@ union cs_hash_union {
  * Choose among different hash algorithms.
  * Higher is better, 0 => don't use at all.
  */
-static uint32_t hashPriorities[] = {
+static const uint32_t hashPriorities[] = {
        CS_HASHTYPE_SHA1,
        CS_HASHTYPE_SHA256_TRUNCATED,
        CS_HASHTYPE_SHA256,
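
[Editor's note: the ranking used when picking the best code directory is derived
from this array — a hash type's rank is its index in hashPriorities plus one, and
rank 0 means "unknown, don't use". A minimal sketch of that lookup (hypothetical
name; it mirrors the hash_rank() helper the selection loop below relies on):

    static unsigned int
    hash_rank_sketch(const CS_CodeDirectory *cd)
    {
            uint32_t type = cd->hashType;
            unsigned int n;

            /* higher index => preferred hash => higher rank */
            for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
                    if (hashPriorities[n] == type) {
                            return n + 1;
                    }
            }
            return 0;       /* unsupported type */
    }
]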
@@ -354,13 +362,13 @@ hashes(
 static int
 cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
 {
-       struct cs_hash *hashtype;
+       struct cs_hash const *hashtype;
 
        if (length < sizeof(*cd))
                return EBADEXEC;
        if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
                return EBADEXEC;
-       if (cd->pageSize != PAGE_SHIFT_4K)
+       if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT)
                return EBADEXEC;
        hashtype = cs_find_md(cd->hashType);
        if (hashtype == NULL)
@@ -468,18 +476,25 @@ cs_validate_blob(const CS_GenericBlob *blob, size_t length)
  */
 
 static int
-cs_validate_csblob(const uint8_t *addr, size_t length,
-                  const CS_CodeDirectory **rcd)
+cs_validate_csblob(
+       const uint8_t *addr,
+       const size_t blob_size,
+       const CS_CodeDirectory **rcd,
+       const CS_GenericBlob **rentitlements)
 {
-       const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
+       const CS_GenericBlob *blob;
        int error;
+       size_t length;
 
        *rcd = NULL;
+       *rentitlements = NULL;
 
+       blob = (const CS_GenericBlob *)(const void *)addr;
+
+       length = blob_size;
        error = cs_validate_blob(blob, length);
        if (error)
                return error;
-
        length = ntohl(blob->length);
 
        if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
@@ -487,6 +502,9 @@ cs_validate_csblob(const uint8_t *addr, size_t length,
                uint32_t n, count;
                const CS_CodeDirectory *best_cd = NULL;
                unsigned int best_rank = 0;
+#if PLATFORM_WatchOS
+               const CS_CodeDirectory *sha1_cd = NULL;
+#endif
 
                if (length < sizeof(CS_SuperBlob))
                        return EBADEXEC;
@@ -526,16 +544,66 @@ cs_validate_csblob(const uint8_t *addr, size_t length,
                                if (best_cd == NULL || rank > best_rank) {
                                        best_cd = candidate;
                                        best_rank = rank;
+
+                                       if (cs_debug > 2)
+                                               printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
+                                       *rcd = best_cd;
                                } else if (best_cd != NULL && rank == best_rank) {
                                        /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
-                                       printf("multiple hash=%d CodeDirectories in signature; rejecting", best_cd->hashType);
+                                       printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
+                                       return EBADEXEC;
+                               }
+#if PLATFORM_WatchOS
+                               if (candidate->hashType == CS_HASHTYPE_SHA1) {
+                                       if (sha1_cd != NULL) {
+                                               printf("multiple sha1 CodeDirectories in signature; rejecting\n");
+                                               return EBADEXEC;
+                                       }
+                                       sha1_cd = candidate;
+                               }
+#endif
+                       } else if (type == CSSLOT_ENTITLEMENTS) {
+                               if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
                                        return EBADEXEC;
                                }
+                               if (*rentitlements != NULL) {
+                                       printf("multiple entitlements blobs\n");
+                                       return EBADEXEC;
+                               }
+                               *rentitlements = subBlob;
+                       }
+               }
+
+#if PLATFORM_WatchOS
+               /* To keep watchOS fast enough, we have to resort to sha1 for
+                * some code.
+                *
+                * At the time of writing this comment, known sha1 attacks are
+                * collision attacks (not preimage or second preimage
+                * attacks), which do not apply to platform binaries since
+                * they have a fixed hash in the trust cache.  Given this
+                * property, we only prefer sha1 code directories for adhoc
+                * signatures, which always have to be in a trust cache to be
+                * valid (can-load-cdhash does not exist for watchOS). Those
+                * are, incidentally, also the platform binaries, for which we
+                * care about the performance hit that sha256 would bring us.
+                *
+                * Platform binaries may still contain a (not chosen) sha256
+                * code directory, which keeps software updates that switch to
+                * sha256-only small.
+                */
+
+               if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
+                       if (sha1_cd->flags != (*rcd)->flags) {
+                               printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
+                                          (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
+                               *rcd = NULL;
+                               return EBADEXEC;
                        }
-                       if (best_cd && cs_debug > 2)
-                               printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
-                       *rcd = best_cd;
+
+                       *rcd = sha1_cd;
                }
+#endif
 
        } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
 
@@ -636,7 +704,11 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le
 
        code_dir = csblob->csb_cd;
 
-       entitlements = csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
+       if ((csblob->csb_flags & CS_VALID) == 0) {
+               entitlements = NULL;
+       } else {
+               entitlements = csblob->csb_entitlements_blob;
+       }
        embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
 
        if (embedded_hash == NULL) {
@@ -765,7 +837,7 @@ ubc_info_init_withsize(struct vnode *vp, off_t filesize)
 static int
 ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
 {
-       register struct ubc_info        *uip;
+       struct ubc_info *uip;
        void *  pager;
        int error = 0;
        kern_return_t kret;
@@ -1056,7 +1128,7 @@ errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
                 * zero the tail of this page if it's currently
                 * present in the cache
                 */
-               kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE);
+               kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE, VM_KERN_MEMORY_FILE);
 
                if (kret != KERN_SUCCESS)
                        panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
@@ -1393,17 +1465,6 @@ ubc_getobject(struct vnode *vp, __unused int flags)
        return (MEMORY_OBJECT_CONTROL_NULL);
 }
 
-boolean_t
-ubc_strict_uncached_IO(struct vnode *vp)
-{
-        boolean_t result = FALSE;
-
-       if (UBCINFOEXISTS(vp)) {
-               result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
-       }
-       return result;
-}
-
 /*
  * ubc_blktooff
  *
@@ -1766,8 +1827,19 @@ ubc_map(vnode_t vp, int flags)
 
                error = VNOP_MMAP(vp, flags, vfs_context_current());
 
-               if (error != EPERM)
-                       error = 0;
+               /*
+                * rdar://problem/22587101 required that we stop propagating
+                * EPERM up the stack. Otherwise, we would have to funnel up 
+                * the error at all the call sites for memory_object_map().
+                * The risk is in having to undo the map/object/entry state at 
+                * all these call sites. It would also affect more than just mmap()
+                * e.g. vm_remap().
+                *
+                *      if (error != EPERM)
+                *              error = 0;
+                */
+
+               error = 0;
 
                vnode_lock_spin(vp);
 
@@ -1790,8 +1862,13 @@ ubc_map(vnode_t vp, int flags)
                if (need_wakeup)
                        wakeup(&uip->ui_flags);
 
-               if (need_ref)
-                       vnode_ref(vp);
+               if (need_ref) {
+                       /*
+                        * Make sure we get a ref as we can't unwind from here
+                        */
+                       if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE))
+                               panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
+               }
        }
        return (error);
 }
@@ -2245,13 +2322,26 @@ ubc_range_op(
  *             ubc_upl_abort(), or ubc_upl_abort_range().
  */
 kern_return_t
-ubc_create_upl(
+ubc_create_upl_external(
        struct vnode    *vp,
        off_t           f_offset,
        int             bufsize,
        upl_t           *uplp,
        upl_page_info_t **plp,
        int             uplflags)
+{
+    return (ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt()));
+}
+
+kern_return_t
+ubc_create_upl_kernel(
+       struct vnode    *vp,
+       off_t           f_offset,
+       int             bufsize,
+       upl_t           *uplp,
+       upl_page_info_t **plp,
+       int             uplflags,
+       vm_tag_t tag)
 {
        memory_object_control_t         control;
        kern_return_t                   kr;
@@ -2311,7 +2401,7 @@ ubc_create_upl(
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;
 
-       kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
+       kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
        if (kr == KERN_SUCCESS && plp != NULL)
                *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
        return kr;
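
[Editor's note: the split above keeps tagged allocation accounting intact —
external callers go through ubc_create_upl_external(), which derives a tag from
the caller's backtrace via vm_tag_bt(), while in-kernel callers now pass an
explicit tag, as the ubc_setsize_ex() change earlier in this diff does. A hedged
sketch of an in-kernel call site (arguments are illustrative):

    kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
                               UPL_SET_LITE, VM_KERN_MEMORY_FILE);
]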
@@ -2682,7 +2772,6 @@ boolean_t ubc_is_mapped_writable(const struct vnode *vp)
 /*
  * CODE SIGNING
  */
-#define CS_BLOB_PAGEABLE 0
 static volatile SInt32 cs_blob_size = 0;
 static volatile SInt32 cs_blob_count = 0;
 static SInt32 cs_blob_size_peak = 0;
@@ -2733,19 +2822,18 @@ ubc_cs_blob_allocate(
        vm_offset_t     *blob_addr_p,
        vm_size_t       *blob_size_p)
 {
-       kern_return_t   kr;
+       kern_return_t   kr = KERN_FAILURE;
 
-#if CS_BLOB_PAGEABLE
-       *blob_size_p = round_page(*blob_size_p);
-       kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p, VM_KERN_MEMORY_SECURITY);
-#else  /* CS_BLOB_PAGEABLE */
-       *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
-       if (*blob_addr_p == 0) {
-               kr = KERN_NO_SPACE;
-       } else {
-               kr = KERN_SUCCESS;
+       {
+               *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
+
+               if (*blob_addr_p == 0) {
+                       kr = KERN_NO_SPACE;
+               } else {
+                       kr = KERN_SUCCESS;
+               }
        }
-#endif /* CS_BLOB_PAGEABLE */
+
        return kr;
 }
 
@@ -2754,116 +2842,407 @@ ubc_cs_blob_deallocate(
        vm_offset_t     blob_addr,
        vm_size_t       blob_size)
 {
-#if CS_BLOB_PAGEABLE
-       kmem_free(kernel_map, blob_addr, blob_size);
-#else  /* CS_BLOB_PAGEABLE */
-       kfree((void *) blob_addr, blob_size);
-#endif /* CS_BLOB_PAGEABLE */
+#if PMAP_CS
+       if (blob_size > pmap_cs_blob_limit) {
+               kmem_free(kernel_map, blob_addr, blob_size);
+       } else
+#endif
+       {
+               kfree((void *) blob_addr, blob_size);
+       }
+}
+
+/*
+ * Some codesigned files use a lowest common denominator page size of
+ * 4KiB, but can be used on systems that have a runtime page size of
+ * 16KiB. Since faults will only occur on 16KiB ranges in
+ * cs_validate_range(), we can convert the original Code Directory to
+ * a multi-level scheme where groups of 4 hashes are combined to form
+ * a new hash, which represents 16KiB in the on-disk file.  This can
+ * reduce the wired memory requirement for the Code Directory by
+ * 75%. Care must be taken for binaries that use the "fourk" VM pager
+ * for unaligned access, which may still attempt to validate on
+ * non-16KiB multiples for compatibility with 3rd party binaries.
+ */
+static boolean_t
+ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
+{
+       const CS_CodeDirectory *cd;
+
+       /*
+        * Only applies to binaries that ship as part of the OS,
+        * primarily the shared cache.
+        */
+       if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
+               return FALSE;
+       }
+
+       /*
+        * If the runtime page size matches the code signing page
+        * size, there is no work to do.
+        */
+       if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
+               return FALSE;
+       }
+
+       cd = blob->csb_cd;
+
+       /*
+        * The number of hashes must be an integral multiple of the hashes per runtime page
+        */
+       if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+               return FALSE;
+       }
+
+       /*
+        * Each scatter range's base and count must also be an integral multiple of the hashes per runtime page
+        */
+       if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+
+               const SC_Scatter *scatter = (const SC_Scatter*)
+                       ((const char*)cd + ntohl(cd->scatterOffset));
+               /* iterate all scatter structs to make sure they are all aligned */
+               do {
+                       uint32_t sbase = ntohl(scatter->base);
+                       uint32_t scount = ntohl(scatter->count);
+
+                       /* last scatter? */
+                       if (scount == 0) {
+                               break;
+                       }
+
+                       if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+                               return FALSE;
+                       }
+
+                       if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+                               return FALSE;
+                       }
+
+                       scatter++;
+               } while(1);
+       }
+
+       /* Covered range must be a multiple of the new page size */
+       if (ntohl(cd->codeLimit) & PAGE_MASK) {
+               return FALSE;
+       }
+
+       /* All checks pass */
+       return TRUE;
+}
+
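
[Editor's note: a worked example of the checks above, assuming a 16KiB runtime
page (PAGE_SHIFT == 14) and a 4KiB code-signing page (csb_hash_pageshift == 12):
each runtime page then covers 1 << (14 - 12) == 4 signature pages, and
PAGE_MASK >> 12 == 0x3 is the alignment mask, so nCodeSlots and every scatter
base/count must be a multiple of 4. A self-contained sketch:

    #include <assert.h>

    static void
    multilevel_alignment_example(void)
    {
            const unsigned page_shift = 14;         /* assumed runtime PAGE_SHIFT */
            const unsigned hash_pageshift = 12;     /* assumed csb_hash_pageshift */
            const unsigned page_mask = (1u << page_shift) - 1;

            /* four first-level hashes fold into one second-level hash */
            assert((1u << (page_shift - hash_pageshift)) == 4);

            /* 8 code slots are convertible, 6 are not */
            assert((8u & (page_mask >> hash_pageshift)) == 0);
            assert((6u & (page_mask >> hash_pageshift)) != 0);
    }
]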
+/*
+ * Given a cs_blob with an already chosen best code directory, this
+ * function allocates memory and copies into it only the blobs that
+ * will be needed by the kernel, namely the single chosen code
+ * directory (and not any of its alternatives) and the entitlement
+ * blob.
+ *
+ * This saves significant memory with agile signatures, and additional
+ * memory for 3rd Party Code because we also omit the CMS blob.
+ *
+ * To support multilevel and other potential code directory rewriting,
+ * the size of a new code directory can be specified. Since that code
+ * directory will replace the existing code directory,
+ * ubc_cs_reconstitute_code_signature does not copy the original code
+ * directory when a size is given, and the caller must fill it in.
+ */
+static int
+ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
+                                                                  vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
+                                                                  CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
+{
+       const CS_CodeDirectory  *old_cd, *cd;
+       CS_CodeDirectory        *new_cd;
+       const CS_GenericBlob *entitlements;
+       vm_offset_t     new_blob_addr;
+       vm_size_t       new_blob_size;
+       vm_size_t       new_cdsize;
+       kern_return_t   kr;
+       int                             error;
+
+       old_cd = blob->csb_cd;
+
+       new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);
+
+       new_blob_size  = sizeof(CS_SuperBlob);
+       new_blob_size += sizeof(CS_BlobIndex);
+       new_blob_size += new_cdsize;
+
+       if (blob->csb_entitlements_blob) {
+               /* We need to add a slot for the entitlements */
+               new_blob_size += sizeof(CS_BlobIndex);
+               new_blob_size += ntohl(blob->csb_entitlements_blob->length);
+       }
+
+       kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
+       if (kr != KERN_SUCCESS) {
+               if (cs_debug > 1) {
+                       printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
+                                  kr);
+               }
+               return ENOMEM;
+       }
+
+       CS_SuperBlob            *new_superblob;
+
+       new_superblob = (CS_SuperBlob *)new_blob_addr;
+       new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
+       new_superblob->length = htonl((uint32_t)new_blob_size);
+       if (blob->csb_entitlements_blob) {
+               vm_size_t                       ent_offset, cd_offset;
+
+               cd_offset  = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
+               ent_offset = cd_offset +  new_cdsize;
+
+               new_superblob->count = htonl(2);
+               new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
+               new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
+               new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
+               new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
+
+               memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
+
+               new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
+       } else {
+               // Blob is the code directory, directly.
+               new_cd = (CS_CodeDirectory *)new_blob_addr;
+       }
+
+       if (optional_new_cd_size == 0) {
+               // Copy code directory, and revalidate.
+               memcpy(new_cd, old_cd, new_cdsize);
+
+               vm_size_t length = new_blob_size;
+
+               error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
+
+               if (error) {
+                       printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+                                  error);
+
+                       ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+                       return error;
+               }
+               *new_entitlements_p = entitlements;
+       } else {
+               // Caller will fill out and validate code directory.
+               memset(new_cd, 0, new_cdsize);
+               *new_entitlements_p = NULL;
+       }
+
+       *new_blob_addr_p = new_blob_addr;
+       *new_blob_size_p = new_blob_size;
+       *new_cd_p = new_cd;
+
+       return 0;
+}
+
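
[Editor's note: for reference, the blob built above has the following layout when
an entitlements blob is present (a sketch derived from the offsets computed in
the code; all header fields are stored big-endian):

    0                        CS_SuperBlob: CSMAGIC_EMBEDDED_SIGNATURE,
                             length = new_blob_size, count = 2
    sizeof(CS_SuperBlob)     index[0]: CSSLOT_CODEDIRECTORY -> cd_offset
                             index[1]: CSSLOT_ENTITLEMENTS  -> ent_offset
    cd_offset                copied (or caller-filled) CS_CodeDirectory
    ent_offset               copied entitlements CS_GenericBlob

Without an entitlements blob, the code directory is written directly at offset 0
with no CS_SuperBlob wrapper.]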
+static int
+ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
+{
+       const CS_CodeDirectory  *old_cd, *cd;
+       CS_CodeDirectory        *new_cd;
+       const CS_GenericBlob *entitlements;
+       vm_offset_t     new_blob_addr;
+       vm_size_t       new_blob_size;
+       vm_size_t       new_cdsize;
+       int                             error;
+
+       uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
+
+       if (cs_debug > 1) {
+               printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
+                          (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
+       }
+
+       old_cd = blob->csb_cd;
+
+       /* Up to the hashes, we can copy all data */
+       new_cdsize  = ntohl(old_cd->hashOffset);
+       new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
+
+       error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
+                                                                                       &new_blob_addr, &new_blob_size, &new_cd,
+                                                                                       &entitlements);
+       if (error != 0) {
+               printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
+               return error;
+       }
+
+       memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
+
+       /* Update fields in the Code Directory structure */
+       new_cd->length = htonl((uint32_t)new_cdsize);
+
+       uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
+       nCodeSlots >>= hashes_per_new_hash_shift;
+       new_cd->nCodeSlots = htonl(nCodeSlots);
+
+       new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */
+
+       if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
+               SC_Scatter *scatter = (SC_Scatter*)
+                       ((char *)new_cd + ntohl(new_cd->scatterOffset));
+               /* iterate all scatter structs to scale their counts */
+               do {
+                       uint32_t scount = ntohl(scatter->count);
+                       uint32_t sbase  = ntohl(scatter->base);
+
+                       /* last scatter? */
+                       if (scount == 0) {
+                               break;
+                       }
+
+                       scount >>= hashes_per_new_hash_shift;
+                       scatter->count = htonl(scount);
+
+                       sbase >>= hashes_per_new_hash_shift;
+                       scatter->base = htonl(sbase);
+
+                       scatter++;
+               } while(1);
+       }
+
+       /* For each group of hashes, hash them together */
+       const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
+       unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
+
+       uint32_t hash_index;
+       for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
+               union cs_hash_union     mdctx;
+
+               uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
+               const unsigned char *src = src_base + hash_index * source_hash_len;
+               unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
+
+               blob->csb_hashtype->cs_init(&mdctx);
+               blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
+               blob->csb_hashtype->cs_final(dst, &mdctx);
+       }
+
+       error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
+       if (error != 0) {
+
+               printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+                          error);
+
+               ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+               return error;
+       }
+
+    /* The new Code Directory is ready for use; swap it into the blob structure */
+       ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
+
+       blob->csb_mem_size = new_blob_size;
+       blob->csb_mem_kaddr = new_blob_addr;
+       blob->csb_cd = cd;
+       blob->csb_entitlements_blob = entitlements;
+
+       /* The blob has some cached attributes of the Code Directory, so update those */
+
+       blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */
+
+       blob->csb_hash_pagesize = PAGE_SIZE;
+       blob->csb_hash_pagemask = PAGE_MASK;
+       blob->csb_hash_pageshift = PAGE_SHIFT;
+       blob->csb_end_offset = ntohl(cd->codeLimit);
+       if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+               const SC_Scatter *scatter = (const SC_Scatter*)
+                       ((const char*)cd + ntohl(cd->scatterOffset));
+               blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
+       } else {
+               blob->csb_start_offset = 0;
+       }
+
+       return 0;
 }
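
[Editor's note: spelled out, the per-group hashing loop above computes, for a
4-to-1 grouping with digest function H and stored hash width hashSize:

    dst[i] = H( src[4*i] || src[4*i + 1] || src[4*i + 2] || src[4*i + 3] )

i.e. one outer digest over source_hash_len == hashSize << hashes_per_new_hash_shift
bytes of stored first-level hashes. cs_validate_hash() below recomputes the same
value at fault time from page contents, driven by csb_hash_firstlevel_pagesize.]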
 
+/*
+ * Validate the code signature blob, create a struct cs_blob wrapper
+ * and return it together with a pointer to the chosen code directory
+ * and entitlements blob.
+ *
+ * Note that this takes ownership of the memory as addr, mainly because
+ * this function can actually replace the passed in blob with another
+ * one, e.g. when performing multilevel hashing optimization.
+ */
 int
-ubc_cs_blob_add(
-       struct vnode    *vp,
-       cpu_type_t      cputype,
-       off_t           base_offset,
-       vm_address_t    addr,
-       vm_size_t       size,
-       __unused int    flags,
-       struct cs_blob  **ret_blob)
+cs_blob_create_validated(
+       vm_address_t * const            addr,
+       vm_size_t                       size,
+       struct cs_blob ** const         ret_blob,
+    CS_CodeDirectory const ** const    ret_cd)
 {
-       kern_return_t           kr;
-       struct ubc_info         *uip;
-       struct cs_blob          *blob, *oblob;
-       int                     error;
-       ipc_port_t              blob_handle;
-       memory_object_size_t    blob_size;
+       struct cs_blob          *blob;
+       int             error = EINVAL;
        const CS_CodeDirectory *cd;
-       off_t                   blob_start_offset, blob_end_offset;
+       const CS_GenericBlob *entitlements;
        union cs_hash_union     mdctx;
-       boolean_t               record_mtime;
-       int                     cs_flags;
+       size_t                  length;
 
-       record_mtime = FALSE;
-       cs_flags = 0;
        if (ret_blob)
            *ret_blob = NULL;
 
-       blob_handle = IPC_PORT_NULL;
-
        blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
        if (blob == NULL) {
                return ENOMEM;
        }
 
-#if CS_BLOB_PAGEABLE
-       /* get a memory entry on the blob */
-       blob_size = (memory_object_size_t) size;
-       kr = mach_make_memory_entry_64(kernel_map,
-                                      &blob_size,
-                                      addr,
-                                      VM_PROT_READ,
-                                      &blob_handle,
-                                      IPC_PORT_NULL);
-       if (kr != KERN_SUCCESS) {
-               error = ENOMEM;
-               goto out;
-       }
-       if (memory_object_round_page(blob_size) !=
-           (memory_object_size_t) round_page(size)) {
-               printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
-                      blob_size, (size_t)size);
-               panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
-               error = EINVAL;
-               goto out;
-       }
-#else
-       blob_size = (memory_object_size_t) size;
-       blob_handle = IPC_PORT_NULL;
-#endif
-
        /* fill in the new blob */
-       blob->csb_cpu_type = cputype;
-       blob->csb_base_offset = base_offset;
        blob->csb_mem_size = size;
        blob->csb_mem_offset = 0;
-       blob->csb_mem_handle = blob_handle;
-       blob->csb_mem_kaddr = addr;
+       blob->csb_mem_kaddr = *addr;
        blob->csb_flags = 0;
+       blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
        blob->csb_platform_binary = 0;
        blob->csb_platform_path = 0;
        blob->csb_teamid = NULL;
-       
+       blob->csb_entitlements_blob = NULL;
+       blob->csb_entitlements = NULL;
+       blob->csb_reconstituted = false;
+
+       /* Transfer ownership. Even on error, this function will deallocate */
+       *addr = 0;
+
        /*
         * Validate the blob's contents
         */
-
-       error = cs_validate_csblob((const uint8_t *)addr, size, &cd);
+       length = (size_t) size;
+       error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
+                                                          length, &cd, &entitlements);
        if (error) {
 
-        if (cs_debug)
+               if (cs_debug)
                        printf("CODESIGNING: csblob invalid: %d\n", error);
-        /* The vnode checker can't make the rest of this function succeed if csblob validation failed, so bail */
-        goto out;
+               /*
+                * The vnode checker can't make the rest of this function
+                * succeed if csblob validation failed, so bail */
+               goto out;
 
        } else {
                const unsigned char *md_base;
                uint8_t hash[CS_HASH_MAX_SIZE];
                int md_size;
 
-#if CS_BLOB_PAGEABLE
-#error "cd might move under CS_BLOB_PAGEABLE; reconsider this code"
-#endif
                blob->csb_cd = cd;
+               blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
                blob->csb_hashtype = cs_find_md(cd->hashType);
                if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash))
                        panic("validated CodeDirectory but unsupported type");
-                   
+
+               blob->csb_hash_pageshift = cd->pageSize;
+               blob->csb_hash_pagesize = (1U << cd->pageSize);
+               blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
+               blob->csb_hash_firstlevel_pagesize = 0;
                blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
-               blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
+               blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
                if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
                        const SC_Scatter *scatter = (const SC_Scatter*)
                                ((const char*)cd + ntohl(cd->scatterOffset));
-                       blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
+                       blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
                } else {
                        blob->csb_start_offset = 0;
                }
@@ -2878,33 +3257,140 @@ ubc_cs_blob_add(
                memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
        }
 
-       /* 
+    error = 0;
+
+out:
+    if (error != 0) {
+        cs_blob_free(blob);
+        blob = NULL;
+        cd = NULL;
+    }
+
+    if (ret_blob != NULL) {
+        *ret_blob = blob;
+    }
+    if (ret_cd != NULL) {
+        *ret_cd = cd;
+    }
+
+    return error;
+}
+
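
[Editor's note: a hedged usage sketch of the ownership contract above (addr/size
are assumed to come from ubc_cs_blob_allocate()): the backing allocation is
transferred into the blob even on failure, so the caller must never free it
directly and will see *addr zeroed in all cases:

    vm_address_t addr = 0;
    vm_size_t size = 0;
    struct cs_blob *blob = NULL;
    const CS_CodeDirectory *cd = NULL;

    /* ... ubc_cs_blob_allocate(&addr, &size), copy the signature in ... */

    int error = cs_blob_create_validated(&addr, size, &blob, &cd);
    /* here addr == 0 whether or not the call succeeded */
    if (error == 0) {
            /* ... use blob and cd ... */
            cs_blob_free(blob);
    }
]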
+/*
+ * Free a cs_blob previously created by cs_blob_create_validated.
+ */
+void
+cs_blob_free(
+    struct cs_blob * const blob)
+{
+    if (blob != NULL) {
+        if (blob->csb_mem_kaddr) {
+            ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
+            blob->csb_mem_kaddr = 0;
+        }
+        if (blob->csb_entitlements != NULL) {
+            osobject_release(blob->csb_entitlements);
+            blob->csb_entitlements = NULL;
+        }
+        kfree(blob, sizeof (*blob));
+    }
+}
+
+int
+ubc_cs_blob_add(
+       struct vnode    *vp,
+       cpu_type_t      cputype,
+       off_t           base_offset,
+       vm_address_t    *addr,
+       vm_size_t       size,
+       struct image_params *imgp,
+       __unused int    flags,
+       struct cs_blob  **ret_blob)
+{
+       kern_return_t           kr;
+       struct ubc_info         *uip;
+       struct cs_blob          *blob, *oblob;
+       int                     error;
+       CS_CodeDirectory const *cd;
+       off_t                   blob_start_offset, blob_end_offset;
+       boolean_t               record_mtime;
+
+       record_mtime = FALSE;
+       if (ret_blob)
+           *ret_blob = NULL;
+    /* Create the struct cs_blob wrapper that will be attached to the vnode.
+     * Validates the passed in blob in the process. */
+    error = cs_blob_create_validated(addr, size, &blob, &cd);
+
+    if (error != 0) {
+               printf("malform code signature blob: %d\n", error);
+        return error;
+    }
+
+    blob->csb_cpu_type = cputype;
+       blob->csb_base_offset = base_offset;
+
+       /*
         * Let policy module check whether the blob's signature is accepted.
         */
 #if CONFIG_MACF
-       error = mac_vnode_check_signature(vp, 
-                                         base_offset, 
-                                         blob->csb_cdhash, 
-                                         (const void*)addr, size,
-                                         flags, &cs_flags);
+    unsigned int cs_flags = blob->csb_flags;
+       unsigned int signer_type = blob->csb_signer_type;
+       error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
+    blob->csb_flags = cs_flags;
+       blob->csb_signer_type = signer_type;
+
        if (error) {
                if (cs_debug) 
                        printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
                goto out;
        }
-       if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(cs_flags & CS_PLATFORM_BINARY)) {
+       if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
                if (cs_debug)
                        printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
                error = EPERM;
                goto out;
        }
-#endif 
-       
-       if (cs_flags & CS_PLATFORM_BINARY) {
+#endif
+
+#if CONFIG_ENFORCE_SIGNED_CODE
+       /*
+        * Reconstitute code signature
+        */
+       {
+               vm_address_t new_mem_kaddr = 0;
+               vm_size_t new_mem_size = 0;
+
+               CS_CodeDirectory *new_cd = NULL;
+               CS_GenericBlob const *new_entitlements = NULL;
+
+               error = ubc_cs_reconstitute_code_signature(blob, 0,
+                                                                                                  &new_mem_kaddr, &new_mem_size,
+                                                                                                  &new_cd, &new_entitlements);
+
+               if (error != 0) {
+                       printf("failed code signature reconstitution: %d\n", error);
+                       goto out;
+               }
+
+               ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
+
+               blob->csb_mem_kaddr = new_mem_kaddr;
+               blob->csb_mem_size = new_mem_size;
+               blob->csb_cd = new_cd;
+               blob->csb_entitlements_blob = new_entitlements;
+               blob->csb_reconstituted = true;
+       }
+
+#endif
+
+
+       if (blob->csb_flags & CS_PLATFORM_BINARY) {
                if (cs_debug > 1)
                        printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
                blob->csb_platform_binary = 1;
-               blob->csb_platform_path = !!(cs_flags & CS_PLATFORM_PATH);
+               blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
        } else {
                blob->csb_platform_binary = 0;
                blob->csb_platform_path = 0;
@@ -2916,7 +3402,7 @@ ubc_cs_blob_add(
                                printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
                }
        }
-       
+
        /*
         * Validate the blob's coverage
         */
@@ -2931,6 +3417,15 @@ ubc_cs_blob_add(
                goto out;
        }
 
+       if (ubc_cs_supports_multilevel_hash(blob)) {
+               error = ubc_cs_convert_to_multilevel_hash(blob);
+               if (error != 0) {
+                       printf("failed multilevel hash conversion: %d\n", error);
+                       goto out;
+               }
+               blob->csb_reconstituted = true;
+       }
+
        vnode_lock(vp);
        if (! UBCINFOEXISTS(vp)) {
                vnode_unlock(vp);
@@ -2945,8 +3440,11 @@ ubc_cs_blob_add(
             oblob = oblob->csb_next) {
                 off_t oblob_start_offset, oblob_end_offset;
 
-                /* check for conflicting teamid */
-                if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
+                if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
+                        vnode_unlock(vp);
+                        error = EALREADY;
+                        goto out;
+                } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
                         if (!oblob->csb_platform_binary) {
                                 vnode_unlock(vp);
                                 error = EALREADY;
@@ -3002,6 +3500,11 @@ ubc_cs_blob_add(
                                          */
                                         oblob->csb_cpu_type = cputype;
                                 }
+
+                                /* The signature is still accepted, so update the
+                                 * generation count. */
+                                uip->cs_add_gen = cs_blob_generation_count;
+
                                 vnode_unlock(vp);
                                 if (ret_blob)
                                         *ret_blob = oblob;
@@ -3088,15 +3591,7 @@ out:
                if (cs_debug)
                        printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
 
-               /* we failed; release what we allocated */
-               if (blob) {
-                       kfree(blob, sizeof (*blob));
-                       blob = NULL;
-               }
-               if (blob_handle != IPC_PORT_NULL) {
-                       mach_memory_entry_port_release(blob_handle);
-                       blob_handle = IPC_PORT_NULL;
-               }
+        cs_blob_free(blob);
        }
 
        if (error == EAGAIN) {
@@ -3106,10 +3601,6 @@ out:
                 * blob and we want to return success.
                 */
                error = 0;
-               /*
-                * Since we're not failing, consume the data we received.
-                */
-               ubc_cs_blob_deallocate(addr, size);
        }
 
        return error;
@@ -3201,18 +3692,9 @@ ubc_cs_free(
             blob != NULL;
             blob = next_blob) {
                next_blob = blob->csb_next;
-               if (blob->csb_mem_kaddr != 0) {
-                       ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
-                                              blob->csb_mem_size);
-                       blob->csb_mem_kaddr = 0;
-               }
-               if (blob->csb_mem_handle != IPC_PORT_NULL) {
-                       mach_memory_entry_port_release(blob->csb_mem_handle);
-               }
-               blob->csb_mem_handle = IPC_PORT_NULL;
                OSAddAtomic(-1, &cs_blob_count);
                OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
-               kfree(blob, sizeof (*blob));
+               cs_blob_free(blob);
        }
 #if CHECK_CS_VALIDATION_BITMAP
        ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
@@ -3245,19 +3727,20 @@ int
 ubc_cs_blob_revalidate(
        struct vnode    *vp,
        struct cs_blob *blob,
-       __unused int flags
+       struct image_params *imgp,
+       int flags
        )
 {
        int error = 0;
-#if CONFIG_MACF
-       int cs_flags = 0;
-#endif
        const CS_CodeDirectory *cd = NULL;
-       
+       const CS_GenericBlob *entitlements = NULL;
+       size_t size;
        assert(vp != NULL);
        assert(blob != NULL);
 
-       error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &cd);
+       size = blob->csb_mem_size;
+       error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
+                                                          size, &cd, &entitlements);
        if (error) {
                if (cs_debug) {
                        printf("CODESIGNING: csblob invalid: %d\n", error);
@@ -3265,18 +3748,52 @@ ubc_cs_blob_revalidate(
                goto out;
        }
 
+    unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
+    unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
+
+       if (blob->csb_reconstituted) {
+               /*
+                * Code signatures that have been modified after validation
+                * cannot be revalidated inline from their in-memory blob.
+                *
+                * That's okay, though, because the only path left that relies
+                * on revalidation of existing in-memory blobs is the legacy
+                * detached signature database path, which only exists on macOS,
+                * which does not do reconstitution of any kind.
+                */
+               if (cs_debug) {
+                       printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
+               }
+
+               /*
+                * EAGAIN tells the caller that they may reread the code
+                * signature and try attaching it again, which is the same
+                * thing they would do if there was no cs_blob yet in the
+                * first place.
+                *
+                * Conveniently, after ubc_cs_blob_add did a successful
+                * validation, it will detect that a matching cs_blob (cdhash,
+                * offset, arch etc.) already exists, and return success
+                * without re-adding a cs_blob to the vnode.
+                */
+               return EAGAIN;
+       }
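
[Editor's note: a hedged caller-side sketch of the EAGAIN contract described
above (identifiers are illustrative of the load path, not a specific call site):

    error = ubc_cs_blob_revalidate(vp, blob, imgp, flags);
    if (error == EAGAIN) {
            /* Reread the signature from disk and attach it again;
             * ubc_cs_blob_add() will find the matching cs_blob
             * (same cdhash, offset and arch) already on the vnode
             * and report success without adding a duplicate. */
    }
]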
+
        /* callout to mac_vnode_check_signature */
 #if CONFIG_MACF
-       error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_cdhash,
-                                         (const void*)blob->csb_mem_kaddr, (int)blob->csb_mem_size,
-                                         flags, &cs_flags);
+       error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
        if (cs_debug && error) {
                        printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
        }
+#else
+       (void)flags;
+       (void)signer_type;
 #endif
 
        /* update generation number if success */
        vnode_lock_spin(vp);
+    blob->csb_flags = cs_flags;
+       blob->csb_signer_type = signer_type;
        if (UBCINFOEXISTS(vp)) {
                if (error == 0)
                        vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
@@ -3352,20 +3869,21 @@ ubc_get_cs_mtime(
 
 unsigned long cs_validate_page_no_hash = 0;
 unsigned long cs_validate_page_bad_hash = 0;
-boolean_t
-cs_validate_page(
-       void                    *_blobs,
+static boolean_t
+cs_validate_hash(
+       struct cs_blob          *blobs,
        memory_object_t         pager,
        memory_object_offset_t  page_offset,
        const void              *data,
+       vm_size_t               *bytes_processed,
        unsigned                *tainted)
 {
        union cs_hash_union     mdctx;
-       struct cs_hash          *hashtype = NULL;
+       struct cs_hash const    *hashtype = NULL;
        unsigned char           actual_hash[CS_HASH_MAX_SIZE];
        unsigned char           expected_hash[CS_HASH_MAX_SIZE];
        boolean_t               found_hash;
-       struct cs_blob          *blobs, *blob;
+       struct cs_blob          *blob;
        const CS_CodeDirectory  *cd;
        const unsigned char     *hash;
        boolean_t               validated;
@@ -3374,14 +3892,9 @@ cs_validate_page(
        off_t                   codeLimit = 0;
        const char              *lower_bound, *upper_bound;
        vm_offset_t             kaddr, blob_addr;
-       vm_size_t               ksize;
-       kern_return_t           kr;
-
-       offset = page_offset;
 
        /* retrieve the expected hash */
        found_hash = FALSE;
-       blobs = (struct cs_blob *) _blobs;
 
        for (blob = blobs;
             blob != NULL;
@@ -3393,29 +3906,10 @@ cs_validate_page(
                        continue;
                }
 
-               /* map the blob in the kernel address space */
+               /* blob data has been released */
                kaddr = blob->csb_mem_kaddr;
                if (kaddr == 0) {
-                       ksize = (vm_size_t) (blob->csb_mem_size +
-                                            blob->csb_mem_offset);
-                       kr = vm_map(kernel_map,
-                                   &kaddr,
-                                   ksize,
-                                   0,
-                                   VM_FLAGS_ANYWHERE,
-                                   blob->csb_mem_handle,
-                                   0,
-                                   TRUE,
-                                   VM_PROT_READ,
-                                   VM_PROT_READ,
-                                   VM_INHERIT_NONE);
-                       if (kr != KERN_SUCCESS) {
-                               /* XXX FBDP what to do !? */
-                               printf("cs_validate_page: failed to map blob, "
-                                      "size=0x%lx kr=0x%x\n",
-                                      (size_t)blob->csb_mem_size, kr);
-                               break;
-                       }
+                       continue;
                }
 
                blob_addr = kaddr + blob->csb_mem_offset;
@@ -3426,22 +3920,17 @@ cs_validate_page(
                if (cd != NULL) {
                        /* All CDs that have been injected have already been validated */
 
-                       offset = page_offset - blob->csb_base_offset;
-                       if (offset < blob->csb_start_offset ||
-                           offset >= blob->csb_end_offset) {
-                               /* our page is not covered by this blob */
-                               continue;
-                       }
-
                        hashtype = blob->csb_hashtype;
                        if (hashtype == NULL)
                                panic("unknown hash type ?");
                        if (hashtype->cs_digest_size > sizeof(actual_hash))
                                panic("hash size too large");
+                       if (offset & blob->csb_hash_pagemask)
+                               panic("offset not aligned to cshash boundary");
 
                        codeLimit = ntohl(cd->codeLimit);
 
-                       hash = hashes(cd, (uint32_t)(offset>>PAGE_SHIFT_4K),
+                       hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift),
                                      hashtype->cs_size,
                                      lower_bound, upper_bound);
                        if (hash != NULL) {
@@ -3474,17 +3963,39 @@ cs_validate_page(
 
                *tainted = 0;
 
-               size = PAGE_SIZE_4K;
+               size = blob->csb_hash_pagesize;
+               *bytes_processed = size;
+
                const uint32_t *asha1, *esha1;
                if ((off_t)(offset + size) > codeLimit) {
                        /* partial page at end of segment */
                        assert(offset < codeLimit);
-                       size = (size_t) (codeLimit & PAGE_MASK_4K);
+                       size = (size_t) (codeLimit & blob->csb_hash_pagemask);
                        *tainted |= CS_VALIDATE_NX;
                }
 
                hashtype->cs_init(&mdctx);
-               hashtype->cs_update(&mdctx, data, size);
+
+               if (blob->csb_hash_firstlevel_pagesize) {
+                       const unsigned char *partial_data = (const unsigned char *)data;
+                       size_t i;
+                       for (i=0; i < size;) {
+                               union cs_hash_union     partialctx;
+                               unsigned char partial_digest[CS_HASH_MAX_SIZE];
+                               size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize);
+
+                               hashtype->cs_init(&partialctx);
+                               hashtype->cs_update(&partialctx, partial_data, partial_size);
+                               hashtype->cs_final(partial_digest, &partialctx);
+
+                               /* Update cumulative multi-level hash */
+                               hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
+                               partial_data = partial_data + partial_size;
+                               i += partial_size;
+                       }
+               } else {
+                       hashtype->cs_update(&mdctx, data, size);
+               }
                hashtype->cs_final(actual_hash, &mdctx);
 
                asha1 = (const uint32_t *) actual_hash;
@@ -3518,6 +4029,53 @@ cs_validate_page(
        return validated;
 }
 
+boolean_t
+cs_validate_range(
+       struct vnode    *vp,
+       memory_object_t         pager,
+       memory_object_offset_t  page_offset,
+       const void              *data,
+       vm_size_t               dsize,
+       unsigned                *tainted)
+{
+       vm_size_t offset_in_range;
+       boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
+
+       struct cs_blob *blobs = ubc_get_cs_blobs(vp);
+
+       *tainted = 0;
+
+       for (offset_in_range = 0;
+                offset_in_range < dsize;
+                /* offset_in_range updated based on bytes processed */) {
+               unsigned subrange_tainted = 0;
+               boolean_t subrange_validated;
+               vm_size_t bytes_processed = 0;
+
+               subrange_validated = cs_validate_hash(blobs,
+                                                                                         pager,
+                                                                                         page_offset + offset_in_range,
+                                                                                         (const void *)((const char *)data + offset_in_range),
+                                                                                         &bytes_processed,
+                                                                                         &subrange_tainted);
+
+               *tainted |= subrange_tainted;
+
+               if (bytes_processed == 0) {
+                       /* Cannot make forward progress, so return an error */
+                       all_subranges_validated = FALSE;
+                       break;
+               } else if (subrange_validated == FALSE) {
+                       all_subranges_validated = FALSE;
+                       /* Keep going to detect other types of failures in subranges */
+               }
+
+               offset_in_range += bytes_processed;
+       }
+
+       return all_subranges_validated;
+}
+
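
[Editor's note: a hedged sketch of a paging-path consumer of this routine (vp,
pager, data and page_offset are assumed to come from the pager; CS_VALIDATE_TAINTED
and CS_VALIDATE_NX are the bits set by cs_validate_hash() above):

    unsigned tainted = 0;

    if (!cs_validate_range(vp, pager, page_offset, data, PAGE_SIZE, &tainted) ||
        (tainted & CS_VALIDATE_TAINTED)) {
            /* some subrange failed validation or was modified */
    }
    if (tainted & CS_VALIDATE_NX) {
            /* range extends past codeLimit; don't map it executable */
    }
]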
 int
 ubc_cs_getcdhash(
        vnode_t         vp,
@@ -3557,8 +4115,54 @@ ubc_cs_getcdhash(
        return ret;
 }
 
+boolean_t
+ubc_cs_is_range_codesigned(
+       vnode_t                 vp,
+       mach_vm_offset_t        start,
+       mach_vm_size_t          size)
+{
+       struct cs_blob          *csblob;
+       mach_vm_offset_t        blob_start;
+       mach_vm_offset_t        blob_end;
+
+       if (vp == NULL) {
+               /* no file: no code signature */
+               return FALSE;
+       }
+       if (size == 0) {
+               /* no range: no code signature */
+               return FALSE;
+       }
+       if (start + size < start) {
+               /* overflow */
+               return FALSE;
+       }
+
+       csblob = ubc_cs_blob_get(vp, -1, start);
+       if (csblob == NULL) {
+               return FALSE;
+       }
+
+       /*
+        * We currently check if the range is covered by a single blob,
+        * which should always be the case for the dyld shared cache.
+        * If we ever want to make this routine handle other cases, we
+        * would have to iterate if the blob does not cover the full range.
+        */
+       blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
+                                        csblob->csb_start_offset);
+       blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
+                                      csblob->csb_end_offset);
+       if (blob_start > start || blob_end < (start + size)) {
+               /* range not fully covered by this code-signing blob */
+               return FALSE;
+       }
+
+       return TRUE;
+}
+
 #if CHECK_CS_VALIDATION_BITMAP
-#define stob(s)        ((atop_64((s)) + 07) >> 3)
+#define stob(s)        (((atop_64(round_page_64(s))) + 07) >> 3)
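
[Editor's note: the round_page_64() added here matters for sizes that are not
page multiples. A hedged example with 4KiB pages: for a 1-byte file the old
form computed (atop_64(1) + 07) >> 3 == 0 and sized the bitmap at zero bytes,
while the new form rounds up to one page first, giving (1 + 07) >> 3 == 1 byte
of bitmap, i.e. one bit per (partial) page, rounded up to whole bytes.]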
 extern boolean_t       root_fs_upgrade_try;
 
 /*
@@ -3680,3 +4284,66 @@ void     ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
        return;
 }
 #endif /* CHECK_CS_VALIDATION_BITMAP */
+
+#if PMAP_CS
+kern_return_t
+cs_associate_blob_with_mapping(
+       void                    *pmap,
+       vm_map_offset_t         start,
+       vm_map_size_t           size,
+       vm_object_offset_t      offset,
+       void                    *blobs_p)
+{
+       off_t                   blob_start_offset, blob_end_offset;
+       kern_return_t           kr;
+       struct cs_blob          *blobs, *blob;
+       vm_offset_t             kaddr;
+       struct pmap_cs_code_directory *cd_entry = NULL;
+
+       if (!pmap_cs) {
+               return KERN_NOT_SUPPORTED;
+       }
+       
+       blobs = (struct cs_blob *)blobs_p;
+
+       for (blob = blobs;
+            blob != NULL;
+            blob = blob->csb_next) {
+               blob_start_offset = (blob->csb_base_offset +
+                                    blob->csb_start_offset);
+               blob_end_offset = (blob->csb_base_offset +
+                                  blob->csb_end_offset);
+               if ((off_t) offset < blob_start_offset ||
+                   (off_t) offset >= blob_end_offset ||
+                   (off_t) (offset + size) <= blob_start_offset ||
+                   (off_t) (offset + size) > blob_end_offset) {
+                       continue;
+               }
+               kaddr = blob->csb_mem_kaddr;
+               if (kaddr == 0) {
+                       /* blob data has been released */
+                       continue;
+               }
+               cd_entry = blob->csb_pmap_cs_entry;
+               if (cd_entry == NULL) {
+                       continue;
+               }
+
+               break;
+       }
+
+       if (cd_entry != NULL) {
+               kr = pmap_cs_associate(pmap,
+                                      cd_entry,
+                                      start,
+                                      size);
+       } else {
+               kr = KERN_CODESIGN_ERROR;
+       }
+#if 00
+       printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
+       kr = KERN_SUCCESS;
+#endif
+       return kr;
+}
+#endif /* PMAP_CS */