diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c
index 75abf3d3961f1505b26127c8e1a62f5096d85e0c..320e71ef766264e571e5ceb482d1bf4d2136a931 100644
--- a/bsd/kern/ubc_subr.c
+++ b/bsd/kern/ubc_subr.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-/* 
+/*
  *     File:   ubc_subr.c
  *     Author: Umesh Vaishampayan [umeshv@apple.com]
  *             05-Aug-1999     umeshv  Created.
@@ -34,7 +34,7 @@
  *
  * Caller of UBC functions MUST have a valid reference on the vnode.
  *
- */ 
+ */
 
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/buf.h>
 #include <sys/user.h>
 #include <sys/codesign.h>
+#include <sys/codedir_internal.h>
+#include <sys/fsevents.h>
+#include <sys/fcntl.h>
+#include <sys/reboot.h>
 
 #include <mach/mach_types.h>
 #include <mach/memory_object_types.h>
 #include <mach/memory_object_control.h>
 #include <mach/vm_map.h>
+#include <mach/mach_vm.h>
 #include <mach/upl.h>
 
 #include <kern/kern_types.h>
 #include <kern/kalloc.h>
 #include <kern/zalloc.h>
 #include <kern/thread.h>
+#include <vm/pmap.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_protos.h> /* last */
 
 #include <libkern/crypto/sha1.h>
+#include <libkern/crypto/sha2.h>
+#include <libkern/libkern.h>
+#include <libkern/ptrauth_utils.h>
+
+#include <security/mac_framework.h>
+#include <stdbool.h>
+#include <stdatomic.h>
 
 /* XXX These should be in a BSD accessible Mach header, but aren't. */
 extern kern_return_t memory_object_pages_resident(memory_object_control_t,
-                                                       boolean_t *);
-extern kern_return_t   memory_object_signed(memory_object_control_t control,
-                                            boolean_t is_signed);
+    boolean_t *);
+extern kern_return_t    memory_object_signed(memory_object_control_t control,
+    boolean_t is_signed);
+extern boolean_t        memory_object_is_signed(memory_object_control_t);
+extern void             memory_object_mark_trusted(
+       memory_object_control_t         control);
+
+/* XXX Same for those. */
+
 extern void Debugger(const char *message);
 
 
 /* XXX no one uses this interface! */
 kern_return_t ubc_page_op_with_control(
-       memory_object_control_t  control,
-       off_t                    f_offset,
-       int                      ops,
-       ppnum_t                  *phys_entryp,
-       int                      *flagsp);
+       memory_object_control_t  control,
+       off_t                    f_offset,
+       int                      ops,
+       ppnum_t                  *phys_entryp,
+       int                      *flagsp);
 
 
 #if DIAGNOSTIC
 #if defined(assert)
-#undef assert()
+#undef assert
 #endif
 #define assert(cond)    \
     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
@@ -98,13 +117,22 @@ static int ubc_umcallback(vnode_t, void *);
 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
 static void ubc_cs_free(struct ubc_info *uip);
 
-struct zone    *ubc_info_zone;
+static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
+static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
 
+ZONE_DECLARE(ubc_info_zone, "ubc_info zone", sizeof(struct ubc_info),
+    ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
+static uint32_t cs_blob_generation_count = 1;
 
 /*
  * CODESIGNING
  * Routines to navigate code signing data structures in the kernel...
  */
+
+extern int cs_debug;
+
+#define PAGE_SHIFT_4K           (12)
+
 static boolean_t
 cs_valid_range(
        const void *start,
@@ -125,173 +153,654 @@ cs_valid_range(
        return TRUE;
 }
 
-/*
- * Magic numbers used by Code Signing
- */
-enum {
-       CSMAGIC_REQUIREMENT = 0xfade0c00,               /* single Requirement blob */
-       CSMAGIC_REQUIREMENTS = 0xfade0c01,              /* Requirements vector (internal requirements) */
-       CSMAGIC_CODEDIRECTORY = 0xfade0c02,             /* CodeDirectory blob */
-       CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0, /* embedded form of signature data */
-       CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02,    /* XXX */
-       CSMAGIC_DETACHED_SIGNATURE = 0xfade0cc1, /* multi-arch collection of embedded signatures */
-       
-       CSSLOT_CODEDIRECTORY = 0,                               /* slot index for CodeDirectory */
+typedef void (*cs_md_init)(void *ctx);
+typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
+typedef void (*cs_md_final)(void *hash, void *ctx);
+
+struct cs_hash {
+       uint8_t             cs_type;    /* type code as per code signing */
+       size_t              cs_size;    /* size of effective hash (may be truncated) */
+       size_t              cs_digest_size;/* size of native hash */
+       cs_md_init          cs_init;
+       cs_md_update        cs_update;
+       cs_md_final         cs_final;
+};
+
+uint8_t
+cs_hash_type(
+       struct cs_hash const * const cs_hash)
+{
+       return cs_hash->cs_type;
+}
+
+static const struct cs_hash cs_hash_sha1 = {
+       .cs_type = CS_HASHTYPE_SHA1,
+       .cs_size = CS_SHA1_LEN,
+       .cs_digest_size = SHA_DIGEST_LENGTH,
+       .cs_init = (cs_md_init)SHA1Init,
+       .cs_update = (cs_md_update)SHA1Update,
+       .cs_final = (cs_md_final)SHA1Final,
+};
+#if CRYPTO_SHA2
+static const struct cs_hash cs_hash_sha256 = {
+       .cs_type = CS_HASHTYPE_SHA256,
+       .cs_size = SHA256_DIGEST_LENGTH,
+       .cs_digest_size = SHA256_DIGEST_LENGTH,
+       .cs_init = (cs_md_init)SHA256_Init,
+       .cs_update = (cs_md_update)SHA256_Update,
+       .cs_final = (cs_md_final)SHA256_Final,
+};
+static const struct cs_hash cs_hash_sha256_truncate = {
+       .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
+       .cs_size = CS_SHA256_TRUNCATED_LEN,
+       .cs_digest_size = SHA256_DIGEST_LENGTH,
+       .cs_init = (cs_md_init)SHA256_Init,
+       .cs_update = (cs_md_update)SHA256_Update,
+       .cs_final = (cs_md_final)SHA256_Final,
+};
+static const struct cs_hash cs_hash_sha384 = {
+       .cs_type = CS_HASHTYPE_SHA384,
+       .cs_size = SHA384_DIGEST_LENGTH,
+       .cs_digest_size = SHA384_DIGEST_LENGTH,
+       .cs_init = (cs_md_init)SHA384_Init,
+       .cs_update = (cs_md_update)SHA384_Update,
+       .cs_final = (cs_md_final)SHA384_Final,
+};
+#endif
+
+static struct cs_hash const *
+cs_find_md(uint8_t type)
+{
+       if (type == CS_HASHTYPE_SHA1) {
+               return &cs_hash_sha1;
+#if CRYPTO_SHA2
+       } else if (type == CS_HASHTYPE_SHA256) {
+               return &cs_hash_sha256;
+       } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
+               return &cs_hash_sha256_truncate;
+       } else if (type == CS_HASHTYPE_SHA384) {
+               return &cs_hash_sha384;
+#endif
+       }
+       return NULL;
+}
+
+union cs_hash_union {
+       SHA1_CTX                sha1ctxt;
+       SHA256_CTX              sha256ctx;
+       SHA384_CTX              sha384ctx;
 };
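
Editorial aside, not part of the diff: the hash table above is driven generically as an init/update/final sequence over the union context. A minimal sketch of that flow, assuming a hypothetical caller name and page buffer:

static int
hash_one_page_sketch(uint8_t cd_hash_type, const void *page, size_t page_len,
    uint8_t out[CS_HASH_MAX_SIZE])
{
	union cs_hash_union ctx;
	const struct cs_hash *md = cs_find_md(cd_hash_type);

	if (md == NULL || md->cs_digest_size > CS_HASH_MAX_SIZE) {
		return EBADEXEC;        /* unsupported or oversized hash type */
	}
	md->cs_init(&ctx);
	md->cs_update(&ctx, page, page_len);
	md->cs_final(out, &ctx);        /* only cs_size bytes are ever compared */
	return 0;
}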
 
 
 /*
- * Structure of an embedded-signature SuperBlob
+ * Choose among different hash algorithms.
+ * Higher is better, 0 => don't use at all.
  */
-typedef struct __BlobIndex {
-       uint32_t type;                                  /* type of entry */
-       uint32_t offset;                                /* offset of entry */
-} CS_BlobIndex;
+static const uint32_t hashPriorities[] = {
+       CS_HASHTYPE_SHA1,
+       CS_HASHTYPE_SHA256_TRUNCATED,
+       CS_HASHTYPE_SHA256,
+       CS_HASHTYPE_SHA384,
+};
+
+static unsigned int
+hash_rank(const CS_CodeDirectory *cd)
+{
+       uint32_t type = cd->hashType;
+       unsigned int n;
 
-typedef struct __SuperBlob {
-       uint32_t magic;                                 /* magic number */
-       uint32_t length;                                /* total length of SuperBlob */
-       uint32_t count;                                 /* number of index entries following */
-       CS_BlobIndex index[];                   /* (count) entries */
-       /* followed by Blobs in no particular order as indicated by offsets in index */
-} CS_SuperBlob;
+       for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
+               if (hashPriorities[n] == type) {
+                       return n + 1;
+               }
+       }
+       return 0;       /* not supported */
+}
 
 
 /*
- * C form of a CodeDirectory.
+ * Locating a page hash
  */
-typedef struct __CodeDirectory {
-       uint32_t magic;                                 /* magic number (CSMAGIC_CODEDIRECTORY) */
-       uint32_t length;                                /* total length of CodeDirectory blob */
-       uint32_t version;                               /* compatibility version */
-       uint32_t flags;                                 /* setup and mode flags */
-       uint32_t hashOffset;                    /* offset of hash slot element at index zero */
-       uint32_t identOffset;                   /* offset of identifier string */
-       uint32_t nSpecialSlots;                 /* number of special hash slots */
-       uint32_t nCodeSlots;                    /* number of ordinary (code) hash slots */
-       uint32_t codeLimit;                             /* limit to main image signature range */
-       uint8_t hashSize;                               /* size of each hash in bytes */
-       uint8_t hashType;                               /* type of hash (cdHashType* constants) */
-       uint8_t spare1;                                 /* unused (must be zero) */
-       uint8_t pageSize;                               /* log2(page size in bytes); 0 => infinite */
-       uint32_t spare2;                                /* unused (must be zero) */
-       /* followed by dynamic content as located by offset fields above */
-} CS_CodeDirectory;
+static const unsigned char *
+hashes(
+       const CS_CodeDirectory *cd,
+       uint32_t page,
+       size_t hash_len,
+       const char *lower_bound,
+       const char *upper_bound)
+{
+       const unsigned char *base, *top, *hash;
+       uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
+
+       assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
+
+       if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+               /* Get first scatter struct */
+               const SC_Scatter *scatter = (const SC_Scatter*)
+                   ((const char*)cd + ntohl(cd->scatterOffset));
+               uint32_t hashindex = 0, scount, sbase = 0;
+               /* iterate all scatter structs */
+               do {
+                       if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
+                               if (cs_debug) {
+                                       printf("CODE SIGNING: Scatter extends past Code Directory\n");
+                               }
+                               return NULL;
+                       }
+
+                       scount = ntohl(scatter->count);
+                       uint32_t new_base = ntohl(scatter->base);
+
+                       /* last scatter? */
+                       if (scount == 0) {
+                               return NULL;
+                       }
+
+                       if ((hashindex > 0) && (new_base <= sbase)) {
+                               if (cs_debug) {
+                                       printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
+                                           sbase, new_base);
+                               }
+                               return NULL;    /* unordered scatter array */
+                       }
+                       sbase = new_base;
+
+                       /* this scatter beyond page we're looking for? */
+                       if (sbase > page) {
+                               return NULL;
+                       }
+
+                       if (sbase + scount >= page) {
+                               /* Found the scatter struct that is
+                                * referencing our page */
+
+                               /* base = address of first hash covered by scatter */
+                               base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
+                                   hashindex * hash_len;
+                               /* top = address of first hash after this scatter */
+                               top = base + scount * hash_len;
+                               if (!cs_valid_range(base, top, lower_bound,
+                                   upper_bound) ||
+                                   hashindex > nCodeSlots) {
+                                       return NULL;
+                               }
+
+                               break;
+                       }
+
+                       /* this scatter struct is before the page we're looking
+                        * for. Iterate. */
+                       hashindex += scount;
+                       scatter++;
+               } while (1);
+
+               hash = base + (page - sbase) * hash_len;
+       } else {
+               base = (const unsigned char *)cd + ntohl(cd->hashOffset);
+               top = base + nCodeSlots * hash_len;
+               if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
+                   page > nCodeSlots) {
+                       return NULL;
+               }
+               assert(page < nCodeSlots);
 
+               hash = base + page * hash_len;
+       }
+
+       if (!cs_valid_range(hash, hash + hash_len,
+           lower_bound, upper_bound)) {
+               hash = NULL;
+       }
+
+       return hash;
+}
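
For readers new to the scatter format, a worked illustration (editorial, with made-up numbers) of how the lookup above maps a page index to a hash slot:

/*
 * Editorial illustration, made-up numbers:
 *
 *   scatter[0] = { count = 4, base = 0  }  -> hash slots 0..3 cover pages 0..3
 *   scatter[1] = { count = 2, base = 10 }  -> hash slots 4..5 cover pages 10..11
 *   scatter[2] = { count = 0 }             -> terminator
 *
 * Looking up page 11: the loop walks past scatter[0] (hashindex becomes 4),
 * stops at scatter[1] (sbase = 10), and returns
 *   hash = cd + hashOffset + (4 + (11 - 10)) * hash_len,  i.e. hash slot 5.
 */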
 
 /*
- * Locate the CodeDirectory from an embedded signature blob
+ * cs_validate_codedirectory
+ *
+ * Validate the pointers inside the code directory to make sure that
+ * all offsets and lengths are constrained within the buffer.
+ *
+ * Parameters: cd                      Pointer to code directory buffer
+ *             length                  Length of buffer
+ *
+ * Returns:    0                       Success
+ *             EBADEXEC                Invalid code signature
  */
-static const 
-CS_CodeDirectory *findCodeDirectory(
-       const CS_SuperBlob *embedded,
-       char *lower_bound,
-       char *upper_bound)
+
+static int
+cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
 {
-       const CS_CodeDirectory *cd = NULL;
+       struct cs_hash const *hashtype;
+
+       if (length < sizeof(*cd)) {
+               return EBADEXEC;
+       }
+       if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
+               return EBADEXEC;
+       }
+       if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
+               return EBADEXEC;
+       }
+       hashtype = cs_find_md(cd->hashType);
+       if (hashtype == NULL) {
+               return EBADEXEC;
+       }
+
+       if (cd->hashSize != hashtype->cs_size) {
+               return EBADEXEC;
+       }
 
-       if (embedded &&
-           cs_valid_range(embedded, embedded + 1, lower_bound, upper_bound) &&
-           ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
-               const CS_BlobIndex *limit;
-               const CS_BlobIndex *p;
+       if (length < ntohl(cd->hashOffset)) {
+               return EBADEXEC;
+       }
 
-               limit = &embedded->index[ntohl(embedded->count)];
-               if (!cs_valid_range(&embedded->index[0], limit,
-                                   lower_bound, upper_bound)) {
-                       return NULL;
+       /* check that nSpecialSlots fits in the buffer in front of hashOffset */
+       if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
+               return EBADEXEC;
+       }
+
+       /* check that codeslots fits in the buffer */
+       if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
+               return EBADEXEC;
+       }
+
+       if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
+               if (length < ntohl(cd->scatterOffset)) {
+                       return EBADEXEC;
                }
-               for (p = embedded->index; p < limit; ++p) {
-                       if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) {
-                               const unsigned char *base;
 
-                               base = (const unsigned char *)embedded;
-                               cd = (const CS_CodeDirectory *)(base + ntohl(p->offset));
+               const SC_Scatter *scatter = (const SC_Scatter *)
+                   (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
+               uint32_t nPages = 0;
+
+               /*
+                * Check each scatter buffer, since we don't know the
+                * length of the scatter buffer array, we have to
+                * check each entry.
+                */
+               while (1) {
+                       /* check that the end of each scatter buffer is within the length */
+                       if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
+                               return EBADEXEC;
+                       }
+                       uint32_t scount = ntohl(scatter->count);
+                       if (scount == 0) {
                                break;
                        }
+                       if (nPages + scount < nPages) {
+                               return EBADEXEC;
+                       }
+                       nPages += scount;
+                       scatter++;
+
+                       /* XXX check that bases don't overlap */
+                       /* XXX check that targetOffset doesn't overlap */
                }
-       } else {
-               /*
-                * Detached signatures come as a bare CS_CodeDirectory,
-                * without a blob.
-                */
-               cd = (const CS_CodeDirectory *) embedded;
+#if 0 /* rdar://12579439 */
+               if (nPages != ntohl(cd->nCodeSlots)) {
+                       return EBADEXEC;
+               }
+#endif
        }
 
-       if (cd &&
-           cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
-           cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
-                          lower_bound, upper_bound) &&
-           ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
-               return cd;
+       if (length < ntohl(cd->identOffset)) {
+               return EBADEXEC;
        }
 
-       // not found or not a valid code directory
-       return NULL;
+       /* identifier is NUL terminated string */
+       if (cd->identOffset) {
+               const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
+               if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
+                       return EBADEXEC;
+               }
+       }
+
+       /* team identifier is NULL terminated string */
+       if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
+               if (length < ntohl(cd->teamOffset)) {
+                       return EBADEXEC;
+               }
+
+               const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
+               if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
+                       return EBADEXEC;
+               }
+       }
+
+       /* linkage is variable length binary data */
+       if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
+               const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
+               const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);
+
+               if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
+                       return EBADEXEC;
+               }
+       }
+
+
+       return 0;
 }
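
One detail worth calling out in the checks above: nSpecialSlots and nCodeSlots are attacker-controlled 32-bit counts, so the bounds checks are phrased as divisions (hashOffset / cs_size < nSpecialSlots) rather than as multiplications that could overflow. A standalone sketch of that pattern, with hypothetical names:

/*
 * Sketch: do 'count' items of 'item_size' bytes fit in the 'avail' bytes
 * remaining in a buffer, without computing count * item_size (which a
 * hostile count could overflow)?
 */
static bool
counted_items_fit_sketch(size_t avail, size_t item_size, uint32_t count)
{
	return item_size != 0 && avail / item_size >= count;
}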
 
+/*
+ * Validate that a generic blob's advertised length fits within the buffer.
+ */
+
+static int
+cs_validate_blob(const CS_GenericBlob *blob, size_t length)
+{
+       if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
+               return EBADEXEC;
+       }
+       return 0;
+}
 
 /*
- * Locating a page hash
+ * cs_validate_csblob
+ *
+ * Validate the superblob/embedded code directory to make sure that
+ * all internal pointers are valid.
+ *
+ * Will validate both a superblob csblob and a "raw" code directory.
+ *
+ *
+ * Parameters: buffer                  Pointer to code signature
+ *             length                  Length of buffer
+ *             rcd                     returns pointer to code directory
+ *
+ * Returns:    0                       Success
+ *             EBADEXEC                Invalid code signature
  */
-static const unsigned char *
-hashes(
-       const CS_CodeDirectory *cd,
-       unsigned page,
-       char *lower_bound,
-       char *upper_bound)
+
+static int
+cs_validate_csblob(
+       const uint8_t *addr,
+       const size_t blob_size,
+       const CS_CodeDirectory **rcd,
+       const CS_GenericBlob **rentitlements)
 {
-       const unsigned char *base, *top, *hash;
-       uint32_t nCodeSlots;
+       const CS_GenericBlob *blob;
+       int error;
+       size_t length;
 
-       assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
+       *rcd = NULL;
+       *rentitlements = NULL;
 
-       base = (const unsigned char *)cd + ntohl(cd->hashOffset);
-       nCodeSlots = ntohl(cd->nCodeSlots);
-       top = base + nCodeSlots * SHA1_RESULTLEN;
-       if (!cs_valid_range(base, top, 
-                           lower_bound, upper_bound) ||
-           page > nCodeSlots) {
-               return NULL;
+       blob = (const CS_GenericBlob *)(const void *)addr;
+
+       length = blob_size;
+       error = cs_validate_blob(blob, length);
+       if (error) {
+               return error;
        }
-       assert(page < nCodeSlots);
+       length = ntohl(blob->length);
+
+       if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
+               const CS_SuperBlob *sb;
+               uint32_t n, count;
+               const CS_CodeDirectory *best_cd = NULL;
+               unsigned int best_rank = 0;
+#if PLATFORM_WatchOS
+               const CS_CodeDirectory *sha1_cd = NULL;
+#endif
 
-       hash = base + page * SHA1_RESULTLEN;
-       if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
-                           lower_bound, upper_bound)) {
-               hash = NULL;
+               if (length < sizeof(CS_SuperBlob)) {
+                       return EBADEXEC;
+               }
+
+               sb = (const CS_SuperBlob *)blob;
+               count = ntohl(sb->count);
+
+               /* check that the array of BlobIndex fits in the rest of the data */
+               if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
+                       return EBADEXEC;
+               }
+
+               /* now check each BlobIndex */
+               for (n = 0; n < count; n++) {
+                       const CS_BlobIndex *blobIndex = &sb->index[n];
+                       uint32_t type = ntohl(blobIndex->type);
+                       uint32_t offset = ntohl(blobIndex->offset);
+                       if (length < offset) {
+                               return EBADEXEC;
+                       }
+
+                       const CS_GenericBlob *subBlob =
+                           (const CS_GenericBlob *)(const void *)(addr + offset);
+
+                       size_t subLength = length - offset;
+
+                       if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
+                               return error;
+                       }
+                       subLength = ntohl(subBlob->length);
+
+                       /* extra validation for CDs, that is also returned */
+                       if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
+                               const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
+                               if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
+                                       return error;
+                               }
+                               unsigned int rank = hash_rank(candidate);
+                               if (cs_debug > 3) {
+                                       printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
+                               }
+                               if (best_cd == NULL || rank > best_rank) {
+                                       best_cd = candidate;
+                                       best_rank = rank;
+
+                                       if (cs_debug > 2) {
+                                               printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
+                                       }
+                                       *rcd = best_cd;
+                               } else if (best_cd != NULL && rank == best_rank) {
+                                       /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
+                                       printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
+                                       return EBADEXEC;
+                               }
+#if PLATFORM_WatchOS
+                               if (candidate->hashType == CS_HASHTYPE_SHA1) {
+                                       if (sha1_cd != NULL) {
+                                               printf("multiple sha1 CodeDirectories in signature; rejecting\n");
+                                               return EBADEXEC;
+                                       }
+                                       sha1_cd = candidate;
+                               }
+#endif
+                       } else if (type == CSSLOT_ENTITLEMENTS) {
+                               if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
+                                       return EBADEXEC;
+                               }
+                               if (*rentitlements != NULL) {
+                                       printf("multiple entitlements blobs\n");
+                                       return EBADEXEC;
+                               }
+                               *rentitlements = subBlob;
+                       }
+               }
+
+#if PLATFORM_WatchOS
+               /* To keep watchOS fast enough, we have to resort to sha1 for
+                * some code.
+                *
+                * At the time of writing this comment, known sha1 attacks are
+                * collision attacks (not preimage or second preimage
+                * attacks), which do not apply to platform binaries since
+                * they have a fixed hash in the trust cache.  Given this
+                * property, we only prefer sha1 code directories for adhoc
+                * signatures, which always have to be in a trust cache to be
+                * valid (can-load-cdhash does not exist for watchOS). Those
+                * are, incidentally, also the platform binaries, for which we
+                * care about the performance hit that sha256 would bring us.
+                *
+                * Platform binaries may still contain a (not chosen) sha256
+                * code directory, which keeps software updates that switch to
+                * sha256-only small.
+                */
+
+               if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
+                       if (sha1_cd->flags != (*rcd)->flags) {
+                               printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
+                                   (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
+                               *rcd = NULL;
+                               return EBADEXEC;
+                       }
+
+                       *rcd = sha1_cd;
+               }
+#endif
+       } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
+               if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
+                       return error;
+               }
+               *rcd = (const CS_CodeDirectory *)blob;
+       } else {
+               return EBADEXEC;
        }
 
-       return hash;
-}
-/*
- * CODESIGNING
- * End of routines to navigate code signing data structures in the kernel.
- */
+       if (*rcd == NULL) {
+               return EBADEXEC;
+       }
 
+       return 0;
+}
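
A hypothetical caller sketch (editorial; the real call sites are elsewhere in this file) showing how the two out-parameters are consumed:

static int
validate_signature_sketch(const uint8_t *addr, size_t blob_size)
{
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	int error;

	error = cs_validate_csblob(addr, blob_size, &cd, &entitlements);
	if (error) {
		return error;           /* EBADEXEC: reject the signature */
	}
	/* 'cd' is the highest-ranked CodeDirectory; 'entitlements' may be NULL */
	return 0;
}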
 
 /*
- * ubc_init
- * 
- * Initialization of the zone for Unified Buffer Cache.
+ * csblob_find_blob_bytes
  *
- * Parameters: (void)
+ * Find a blob in the superblob/code directory. The blob must have
+ * been validated by cs_validate_csblob() before calling
+ * this. Use csblob_find_blob() instead.
  *
- * Returns:    (void)
+ * Will also find a "raw" code directory if it is stored that way, in
+ * addition to searching the superblob.
  *
- * Implicit returns:
- *             ubc_info_zone(global)   initialized for subsequent allocations
+ * Parameters: buffer                  Pointer to code signature
+ *             length                  Length of buffer
+ *             type                    type of blob to find
+ *             magic                   the magic number for that blob
+ *
+ * Returns:    pointer                 Success
+ *             NULL                    Blob not found
  */
-__private_extern__ void
-ubc_init(void)
+
+const CS_GenericBlob *
+csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
+{
+       const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
+
+       if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
+               const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
+               size_t n, count = ntohl(sb->count);
+
+               for (n = 0; n < count; n++) {
+                       if (ntohl(sb->index[n].type) != type) {
+                               continue;
+                       }
+                       uint32_t offset = ntohl(sb->index[n].offset);
+                       if (length - sizeof(const CS_GenericBlob) < offset) {
+                               return NULL;
+                       }
+                       blob = (const CS_GenericBlob *)(const void *)(addr + offset);
+                       if (ntohl(blob->magic) != magic) {
+                               continue;
+                       }
+                       return blob;
+               }
+       } else if (type == CSSLOT_CODEDIRECTORY
+           && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
+           && magic == CSMAGIC_CODEDIRECTORY) {
+               return blob;
+       }
+       return NULL;
+}
+
+
+const CS_GenericBlob *
+csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
+{
+       if ((csblob->csb_flags & CS_VALID) == 0) {
+               return NULL;
+       }
+       return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
+}
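
As a usage illustration of the type/magic pairing the lookup expects (hypothetical helper, reusing the pair that cs_validate_csblob() checks for above):

/* Hypothetical example: fetch the embedded entitlements blob, if any. */
static const CS_GenericBlob *
find_entitlements_sketch(struct cs_blob *csblob)
{
	return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
	    CSMAGIC_EMBEDDED_ENTITLEMENTS);
}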
+
+static const uint8_t *
+find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
+{
+       /* there is no zero special slot since that is the first code slot */
+       if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
+               return NULL;
+       }
+
+       return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
+}
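
A short editorial note on the layout find_special_slot() relies on:

/*
 * Editorial illustration of the slot layout assumed above:
 *
 *   cd + hashOffset - slot * slotsize   -> special slot 'slot'  (slot >= 1)
 *   cd + hashOffset + page * slotsize   -> code slot 'page'     (page >= 0)
 *
 * which is why a special slot 0 cannot exist: that address is code slot 0.
 */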
+
+static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
+
+int
+csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
 {
-       int     i;
+       uint8_t computed_hash[CS_HASH_MAX_SIZE];
+       const CS_GenericBlob *entitlements;
+       const CS_CodeDirectory *code_dir;
+       const uint8_t *embedded_hash;
+       union cs_hash_union context;
+
+       *out_start = NULL;
+       *out_length = 0;
+
+       if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
+               return EBADEXEC;
+       }
+
+       code_dir = csblob->csb_cd;
+
+       if ((csblob->csb_flags & CS_VALID) == 0) {
+               entitlements = NULL;
+       } else {
+               entitlements = csblob->csb_entitlements_blob;
+       }
+       embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
+
+       if (embedded_hash == NULL) {
+               if (entitlements) {
+                       return EBADEXEC;
+               }
+               return 0;
+       } else if (entitlements == NULL) {
+               if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
+                       return EBADEXEC;
+               } else {
+                       return 0;
+               }
+       }
+
+       csblob->csb_hashtype->cs_init(&context);
+       ptrauth_utils_auth_blob_generic(entitlements,
+           ntohl(entitlements->length),
+           OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+           PTRAUTH_ADDR_DIVERSIFY,
+           csblob->csb_entitlements_blob_signature);
+       csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
+       csblob->csb_hashtype->cs_final(computed_hash, &context);
+
+       if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
+               return EBADEXEC;
+       }
 
-       i = (vm_size_t) sizeof (struct ubc_info);
+       *out_start = __DECONST(void *, entitlements);
+       *out_length = ntohl(entitlements->length);
 
-       ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
+       return 0;
 }
 
+/*
+ * CODESIGNING
+ * End of routines to navigate code signing data structures in the kernel.
+ */
+
+
 
 /*
  * ubc_info_init
@@ -308,7 +817,7 @@ ubc_init(void)
 int
 ubc_info_init(struct vnode *vp)
 {
-       return(ubc_info_init_internal(vp, 0, 0));
+       return ubc_info_init_internal(vp, 0, 0);
 }
 
 
@@ -327,7 +836,7 @@ ubc_info_init(struct vnode *vp)
 int
 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
 {
-       return(ubc_info_init_internal(vp, 1, filesize));
+       return ubc_info_init_internal(vp, 1, filesize);
 }
 
 
@@ -362,7 +871,7 @@ ubc_info_init_withsize(struct vnode *vp, off_t filesize)
 static int
 ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
 {
-       register struct ubc_info        *uip;
+       struct ubc_info *uip;
        void *  pager;
        int error = 0;
        kern_return_t kret;
@@ -375,7 +884,6 @@ ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
         * attach one; otherwise, we will reuse the one that's there.
         */
        if (uip == UBC_INFO_NULL) {
-
                uip = (struct ubc_info *) zalloc(ubc_info_zone);
                bzero((char *)uip, sizeof(struct ubc_info));
 
@@ -423,26 +931,28 @@ ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
         * vnode_pager_setup() returned here.
         */
        kret = memory_object_create_named(pager,
-               (memory_object_size_t)uip->ui_size, &control);
-       vnode_pager_deallocate(pager); 
-       if (kret != KERN_SUCCESS)
+           (memory_object_size_t)uip->ui_size, &control);
+       vnode_pager_deallocate(pager);
+       if (kret != KERN_SUCCESS) {
                panic("ubc_info_init: memory_object_create_named returned %d", kret);
+       }
 
        assert(control);
-       uip->ui_control = control;      /* cache the value of the mo control */
-       SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */
+       uip->ui_control = control;      /* cache the value of the mo control */
+       SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */
 
        if (withfsize == 0) {
                /* initialize the size */
                error = vnode_size(vp, &uip->ui_size, vfs_context_current());
-               if (error)
+               if (error) {
                        uip->ui_size = 0;
+               }
        } else {
                uip->ui_size = filesize;
        }
-       vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */
+       vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */
 
-       return (error);
+       return error;
 }
 
 
@@ -469,9 +979,10 @@ ubc_info_free(struct ubc_info *uip)
                kauth_cred_unref(&uip->ui_ucred);
        }
 
-       if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
+       if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
                memory_object_control_deallocate(uip->ui_control);
-       
+       }
+
        cluster_release(uip);
        ubc_cs_free(uip);
 
@@ -483,64 +994,159 @@ ubc_info_free(struct ubc_info *uip)
 void
 ubc_info_deallocate(struct ubc_info *uip)
 {
-        ubc_info_free(uip);
+       ubc_info_free(uip);
 }
 
+errno_t
+mach_to_bsd_errno(kern_return_t mach_err)
+{
+       switch (mach_err) {
+       case KERN_SUCCESS:
+               return 0;
+
+       case KERN_INVALID_ADDRESS:
+       case KERN_INVALID_ARGUMENT:
+       case KERN_NOT_IN_SET:
+       case KERN_INVALID_NAME:
+       case KERN_INVALID_TASK:
+       case KERN_INVALID_RIGHT:
+       case KERN_INVALID_VALUE:
+       case KERN_INVALID_CAPABILITY:
+       case KERN_INVALID_HOST:
+       case KERN_MEMORY_PRESENT:
+       case KERN_INVALID_PROCESSOR_SET:
+       case KERN_INVALID_POLICY:
+       case KERN_ALREADY_WAITING:
+       case KERN_DEFAULT_SET:
+       case KERN_EXCEPTION_PROTECTED:
+       case KERN_INVALID_LEDGER:
+       case KERN_INVALID_MEMORY_CONTROL:
+       case KERN_INVALID_SECURITY:
+       case KERN_NOT_DEPRESSED:
+       case KERN_LOCK_OWNED:
+       case KERN_LOCK_OWNED_SELF:
+               return EINVAL;
+
+       case KERN_PROTECTION_FAILURE:
+       case KERN_NOT_RECEIVER:
+       case KERN_NO_ACCESS:
+       case KERN_POLICY_STATIC:
+               return EACCES;
+
+       case KERN_NO_SPACE:
+       case KERN_RESOURCE_SHORTAGE:
+       case KERN_UREFS_OVERFLOW:
+       case KERN_INVALID_OBJECT:
+               return ENOMEM;
+
+       case KERN_FAILURE:
+               return EIO;
+
+       case KERN_MEMORY_FAILURE:
+       case KERN_POLICY_LIMIT:
+       case KERN_CODESIGN_ERROR:
+               return EPERM;
+
+       case KERN_MEMORY_ERROR:
+               return EBUSY;
+
+       case KERN_ALREADY_IN_SET:
+       case KERN_NAME_EXISTS:
+       case KERN_RIGHT_EXISTS:
+               return EEXIST;
+
+       case KERN_ABORTED:
+               return EINTR;
+
+       case KERN_TERMINATED:
+       case KERN_LOCK_SET_DESTROYED:
+       case KERN_LOCK_UNSTABLE:
+       case KERN_SEMAPHORE_DESTROYED:
+               return ENOENT;
+
+       case KERN_RPC_SERVER_TERMINATED:
+               return ECONNRESET;
+
+       case KERN_NOT_SUPPORTED:
+               return ENOTSUP;
+
+       case KERN_NODE_DOWN:
+               return ENETDOWN;
+
+       case KERN_NOT_WAITING:
+               return ENOENT;
+
+       case KERN_OPERATION_TIMED_OUT:
+               return ETIMEDOUT;
+
+       default:
+               return EIO;
+       }
+}
 
 /*
- * ubc_setsize
+ * ubc_setsize_ex
  *
- * Tell the  VM that the the size of the file represented by the vnode has
+ * Tell the VM that the size of the file represented by the vnode has
  * changed
  *
- * Parameters: vp                      The vp whose backing file size is
- *                                     being changed
- *             nsize                   The new size of the backing file
- *
- * Returns:    1                       Success
- *             0                       Failure
- *
- * Notes:      This function will indicate failure if the new size that's
- *             being attempted to be set is negative.
- *
- *             This function will fail if there is no ubc_info currently
- *             associated with the vnode.
- *
- *             This function will indicate success it the new size is the
- *             same or larger than the old size (in this case, the remainder
- *             of the file will require modification or use of an existing upl
- *             to access successfully).
- *
- *             This function will fail if the new file size is smaller, and
- *             the memory region being invalidated was unable to actually be
- *             invalidated and/or the last page could not be flushed, if the
- *             new size is not aligned to a page boundary.  This is usually
- *             indicative of an I/O error.
+ * Parameters: vp                      The vp whose backing file size is
+ *                                     being changed
+ *             nsize                   The new size of the backing file
+ *             opts                    Options
+ *
+ * Returns:    EINVAL                  The new size is negative
+ *             ENOENT                  No UBC info exists for the vnode
+ *             EAGAIN                  UBC_SETSIZE_NO_FS_REENTRY is set and new size < old size
+ *             Other errors (mapped to errno_t) returned by VM functions
+ *
+ * Notes:      This function will indicate success if the new size is the
+ *             same or larger than the old size (in this case, the
+ *             remainder of the file will require modification or use of
+ *             an existing upl to access successfully).
+ *
+ *             This function will fail if the new file size is smaller,
+ *             and the memory region being invalidated was unable to
+ *             actually be invalidated and/or the last page could not be
+ *             flushed, if the new size is not aligned to a page
+ *             boundary.  This is usually indicative of an I/O error.
  */
-int
-ubc_setsize(struct vnode *vp, off_t nsize)
+errno_t
+ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
 {
-       off_t osize;    /* ui_size before change */
+       off_t osize;    /* ui_size before change */
        off_t lastpg, olastpgend, lastoff;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret = KERN_SUCCESS;
 
-       if (nsize < (off_t)0)
-               return (0);
+       if (nsize < (off_t)0) {
+               return EINVAL;
+       }
 
-       if (!UBCINFOEXISTS(vp))
-               return (0);
+       if (!UBCINFOEXISTS(vp)) {
+               return ENOENT;
+       }
 
        uip = vp->v_ubcinfo;
        osize = uip->ui_size;
+
+       if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
+               return EAGAIN;
+       }
+
        /*
         * Update the size before flushing the VM
         */
        uip->ui_size = nsize;
 
-       if (nsize >= osize)     /* Nothing more to do */
-               return (1);             /* return success */
+       if (nsize >= osize) {   /* Nothing more to do */
+               if (nsize > osize) {
+                       lock_vnode_and_post(vp, NOTE_EXTEND);
+               }
+
+               return 0;
+       }
 
        /*
         * When the file shrinks, invalidate the pages beyond the
@@ -555,43 +1161,57 @@ ubc_setsize(struct vnode *vp, off_t nsize)
        lastoff = (nsize & PAGE_MASK_64);
 
        if (lastoff) {
-               upl_t           upl;
-               upl_page_info_t *pl;
+               upl_t           upl;
+               upl_page_info_t *pl;
 
-
-               /*
+               /*
                 * new EOF ends up in the middle of a page
-                * zero the tail of this page if its currently
+                * zero the tail of this page if it's currently
                 * present in the cache
                 */
-               kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE);
-               
-               if (kret != KERN_SUCCESS)
-                       panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
+               kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
+
+               if (kret != KERN_SUCCESS) {
+                       panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
+               }
 
-               if (upl_valid_page(pl, 0))
-                       cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
+               if (upl_valid_page(pl, 0)) {
+                       cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
+               }
 
                ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
 
                lastpg += PAGE_SIZE_64;
        }
        if (olastpgend > lastpg) {
-               /*
+               int     flags;
+
+               if (lastpg == 0) {
+                       flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
+               } else {
+                       flags = MEMORY_OBJECT_DATA_FLUSH;
+               }
+               /*
                 * invalidate the pages beyond the new EOF page
                 *
                 */
-               kret = memory_object_lock_request(control,
-                                                 (memory_object_offset_t)lastpg,
-                                                 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
-                                                 MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
-                                                 VM_PROT_NO_CHANGE);
-               if (kret != KERN_SUCCESS)
-                       printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
+               kret = memory_object_lock_request(control,
+                   (memory_object_offset_t)lastpg,
+                   (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
+                   MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
+               if (kret != KERN_SUCCESS) {
+                       printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
+               }
        }
-       return ((kret == KERN_SUCCESS) ? 1 : 0);
+       return mach_to_bsd_errno(kret);
 }
 
+// Returns true for success
+int
+ubc_setsize(vnode_t vp, off_t nsize)
+{
+       return ubc_setsize_ex(vp, nsize, 0) == 0;
+}
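
Note the inverted conventions above: ubc_setsize_ex() returns an errno_t (0 on success) while the legacy ubc_setsize() wrapper keeps its old boolean contract (1 on success). A hypothetical caller, for illustration only:

static errno_t
shrink_backing_file_sketch(vnode_t vp, off_t nsize)
{
	errno_t err = ubc_setsize_ex(vp, nsize, UBC_SETSIZE_NO_FS_REENTRY);

	if (err == EAGAIN) {
		/* shrink refused: filesystem re-entry is not allowed here */
	}
	return err;     /* 0 on success; EINVAL, ENOENT, EIO, ... otherwise */
}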
 
 /*
  * ubc_getsize
@@ -615,18 +1235,19 @@ off_t
 ubc_getsize(struct vnode *vp)
 {
        /* people depend on the side effect of this working this way
-        * as they call this for directory 
+        * as they call this for directories
         */
-       if (!UBCINFOEXISTS(vp))
-               return ((off_t)0);
-       return (vp->v_ubcinfo->ui_size);
+       if (!UBCINFOEXISTS(vp)) {
+               return (off_t)0;
+       }
+       return vp->v_ubcinfo->ui_size;
 }
 
 
 /*
  * ubc_umount
  *
- * Call ubc_sync_range(vp, 0, EOF, UBC_PUSHALL) on all the vnodes for this
+ * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
  * mount point
  *
  * Parameters: mp                      The mount point
@@ -650,7 +1271,7 @@ __private_extern__ int
 ubc_umount(struct mount *mp)
 {
        vnode_iterate(mp, 0, ubc_umcallback, 0);
-       return(0);
+       return 0;
 }
 
 
@@ -663,12 +1284,10 @@ ubc_umount(struct mount *mp)
 static int
 ubc_umcallback(vnode_t vp, __unused void * args)
 {
-
        if (UBCINFOEXISTS(vp)) {
-
                (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
        }
-       return (VNODE_RETURNED);
+       return VNODE_RETURNED;
 }
 
 
@@ -690,10 +1309,11 @@ ubc_umcallback(vnode_t vp, __unused void * args)
 kauth_cred_t
 ubc_getcred(struct vnode *vp)
 {
-        if (UBCINFOEXISTS(vp))
-               return (vp->v_ubcinfo->ui_ucred);
+       if (UBCINFOEXISTS(vp)) {
+               return vp->v_ubcinfo->ui_ucred;
+       }
 
-       return (NOCRED);
+       return NOCRED;
 }
 
 
@@ -725,7 +1345,6 @@ ubc_getcred(struct vnode *vp)
  *             This function is generally used only in the following cases:
  *
  *             o       a memory mapped file via the mmap() system call
- *             o       a memory mapped file via the deprecated map_fd() call
  *             o       a swap store backing file
  *             o       subsequent to a successful write via vn_write()
  *
@@ -741,8 +1360,7 @@ ubc_getcred(struct vnode *vp)
  *
  *             o       Because a page-in may occur prior to a write, the
  *                     credential may not be set at this time, if the page-in
- *                     is not the result of a mapping established via mmap()
- *                     or map_fd().
+ *                     is not the result of a mapping established via mmap().
  *
  *             In both these cases, this will be triggered from the paging
  *             path, which will instead use the credential of the current
@@ -761,8 +1379,9 @@ ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
        kauth_cred_t credp;
        struct uthread  *uthread = get_bsdthread_info(thread);
 
-       if (!UBCINFOEXISTS(vp))
-               return (1); 
+       if (!UBCINFOEXISTS(vp)) {
+               return 1;
+       }
 
        vnode_lock(vp);
 
@@ -777,10 +1396,10 @@ ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
                        uip->ui_ucred = uthread->uu_ucred;
                        kauth_cred_ref(uip->ui_ucred);
                }
-       } 
+       }
        vnode_unlock(vp);
 
-       return (0);
+       return 0;
 }
 
 
@@ -808,7 +1427,7 @@ ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
  *             not be used, as it is incompatible with per-thread credentials;
  *             it exists for legacy KPI reasons.
  *
- * DEPRECATION:        ubc_setcred() is being deprecated. Please use 
+ * DEPRECATION:        ubc_setcred() is being deprecated. Please use
  *             ubc_setthreadcred() instead.
  */
 int
@@ -818,8 +1437,9 @@ ubc_setcred(struct vnode *vp, proc_t p)
        kauth_cred_t credp;
 
        /* If there is no ubc_info, deny the operation */
-       if ( !UBCINFOEXISTS(vp))
-               return (0); 
+       if (!UBCINFOEXISTS(vp)) {
+               return 0;
+       }
 
        /*
         * Check to see if there is already a credential reference in the
@@ -830,13 +1450,12 @@ ubc_setcred(struct vnode *vp, proc_t p)
        credp = uip->ui_ucred;
        if (!IS_VALID_CRED(credp)) {
                uip->ui_ucred = kauth_cred_proc_ref(p);
-       } 
+       }
        vnode_unlock(vp);
 
-       return (1);
+       return 1;
 }
 
-
 /*
  * ubc_getpager
  *
@@ -855,10 +1474,11 @@ ubc_setcred(struct vnode *vp, proc_t p)
 __private_extern__ memory_object_t
 ubc_getpager(struct vnode *vp)
 {
-        if (UBCINFOEXISTS(vp))
-               return (vp->v_ubcinfo->ui_pager);
+       if (UBCINFOEXISTS(vp)) {
+               return vp->v_ubcinfo->ui_pager;
+       }
 
-       return (0);
+       return 0;
 }
 
 
@@ -888,13 +1508,13 @@ ubc_getpager(struct vnode *vp)
 memory_object_control_t
 ubc_getobject(struct vnode *vp, __unused int flags)
 {
-        if (UBCINFOEXISTS(vp))
-               return((vp->v_ubcinfo->ui_control));
+       if (UBCINFOEXISTS(vp)) {
+               return vp->v_ubcinfo->ui_control;
+       }
 
-       return (MEMORY_OBJECT_CONTROL_NULL);
+       return MEMORY_OBJECT_CONTROL_NULL;
 }
 
-
 /*
  * ubc_blktooff
  *
@@ -928,11 +1548,12 @@ ubc_blktooff(vnode_t vp, daddr64_t blkno)
 
        if (UBCINFOEXISTS(vp)) {
                error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
-               if (error)
+               if (error) {
                        file_offset = -1;
+               }
        }
 
-       return (file_offset);
+       return file_offset;
 }
 
 
@@ -971,11 +1592,12 @@ ubc_offtoblk(vnode_t vp, off_t offset)
 
        if (UBCINFOEXISTS(vp)) {
                error = VNOP_OFFTOBLK(vp, offset, &blkno);
-               if (error)
+               if (error) {
                        blkno = -1;
+               }
        }
 
-       return (blkno);
+       return blkno;
 }
 
 
@@ -993,12 +1615,13 @@ ubc_offtoblk(vnode_t vp, off_t offset)
 int
 ubc_pages_resident(vnode_t vp)
 {
-       kern_return_t           kret;
-       boolean_t                       has_pages_resident;
-       
-       if (!UBCINFOEXISTS(vp))
-               return (0);
-                       
+       kern_return_t           kret;
+       boolean_t                       has_pages_resident;
+
+       if (!UBCINFOEXISTS(vp)) {
+               return 0;
+       }
+
        /*
         * The following call may fail if an invalid ui_control is specified,
         * or if there is no VM object associated with the control object.  In
@@ -1006,44 +1629,17 @@ ubc_pages_resident(vnode_t vp)
         * result in correct behavior.
         */
        kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
-       
-       if (kret != KERN_SUCCESS)
-               return (0);
-               
-       if (has_pages_resident == TRUE)
-               return (1);
-               
-       return (0);
-}
 
+       if (kret != KERN_SUCCESS) {
+               return 0;
+       }
 
-/*
- * ubc_sync_range
- *
- * Clean and/or invalidate a range in the memory object that backs this vnode
- *
- * Parameters: vp                      The vnode whose associated ubc_info's
- *                                     associated memory object is to have a
- *                                     range invalidated within it
- *             beg_off                 The start of the range, as an offset
- *             end_off                 The end of the range, as an offset
- *             flags                   See ubc_msync_internal()
- *
- * Returns:    1                       Success
- *             0                       Failure
- *
- * Notes:      see ubc_msync_internal() for more detailed information.
- *
- * DEPRECATED: This interface is obsolete due to a failure to return error
- *             information needed in order to correct failures.  The currently
- *             recommended interface is ubc_msync().
- */
-int
-ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
-{
-        return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL));
-}
+       if (has_pages_resident == TRUE) {
+               return 1;
+       }
 
+       return 0;
+}
 
 /*
  * ubc_msync
@@ -1081,21 +1677,25 @@ ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags)
 errno_t
 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
 {
-        int retval;
+       int retval;
        int io_errno = 0;
-       
-       if (resid_off)
-               *resid_off = beg_off;
 
-        retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
+       if (resid_off) {
+               *resid_off = beg_off;
+       }
+
+       retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
 
-       if (retval == 0 && io_errno == 0)
-               return (EINVAL);
-       return (io_errno);
+       if (retval == 0 && io_errno == 0) {
+               return EINVAL;
+       }
+       return io_errno;
 }
 
 
 /*
+ * ubc_msync_internal
+ *
  * Clean and/or invalidate a range in the memory object that backs this vnode
  *
  * Parameters: vp                      The vnode whose associated ubc_info's
@@ -1159,42 +1759,49 @@ ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
 static int
 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
 {
-       memory_object_size_t    tsize;
-       kern_return_t           kret;
+       memory_object_size_t    tsize;
+       kern_return_t           kret;
        int request_flags = 0;
        int flush_flags   = MEMORY_OBJECT_RETURN_NONE;
-       
-       if ( !UBCINFOEXISTS(vp))
-               return (0);
-       if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
-               return (0);
-       if (end_off <= beg_off)
-               return (1);
-
-       if (flags & UBC_INVALIDATE)
-               /*
+
+       if (!UBCINFOEXISTS(vp)) {
+               return 0;
+       }
+       if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
+               return 0;
+       }
+       if (end_off <= beg_off) {
+               return 1;
+       }
+
+       if (flags & UBC_INVALIDATE) {
+               /*
                 * discard the resident pages
                 */
                request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
+       }
 
-       if (flags & UBC_SYNC)
-               /*
+       if (flags & UBC_SYNC) {
+               /*
                 * wait for all the I/O to complete before returning
                 */
-               request_flags |= MEMORY_OBJECT_IO_SYNC;
+               request_flags |= MEMORY_OBJECT_IO_SYNC;
+       }
 
-       if (flags & UBC_PUSHDIRTY)
-               /*
+       if (flags & UBC_PUSHDIRTY) {
+               /*
                 * we only return the dirty pages in the range
                 */
-               flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
+               flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
+       }
 
-       if (flags & UBC_PUSHALL)
-               /*
+       if (flags & UBC_PUSHALL) {
+               /*
                 * then return all the interesting pages in the range (both
                 * dirty and precious) to the pager
                 */
-               flush_flags = MEMORY_OBJECT_RETURN_ALL;
+               flush_flags = MEMORY_OBJECT_RETURN_ALL;
+       }
 
        beg_off = trunc_page_64(beg_off);
        end_off = round_page_64(end_off);
@@ -1202,17 +1809,17 @@ ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, i
 
        /* flush and/or invalidate pages in the range requested */
        kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
-                                         beg_off, tsize,
-                                         (memory_object_offset_t *)resid_off,
-                                         io_errno, flush_flags, request_flags,
-                                         VM_PROT_NO_CHANGE);
-       
-       return ((kret == KERN_SUCCESS) ? 1 : 0);
+           beg_off, tsize,
+           (memory_object_offset_t *)resid_off,
+           io_errno, flush_flags, request_flags,
+           VM_PROT_NO_CHANGE);
+
+       return (kret == KERN_SUCCESS) ? 1 : 0;
 }
 
 
 /*
- * ubc_msync_internal
+ * ubc_map
  *
  * Explicitly map a vnode that has an associated ubc_info, and add a reference
  * to it for the ubc system, if there isn't one already, so it will not be
@@ -1241,7 +1848,6 @@ ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, i
  *             It is primarily used by:
  *
  *             o       mmap(), when mapping a file
- *             o       The deprecated map_fd() interface, when mapping a file
  *             o       When mapping a shared file (a shared library in the
  *                     shared segment region)
  *             o       When loading a program image during the exec process
@@ -1271,29 +1877,43 @@ ubc_map(vnode_t vp, int flags)
        int need_wakeup = 0;
 
        if (UBCINFOEXISTS(vp)) {
-
                vnode_lock(vp);
                uip = vp->v_ubcinfo;
 
                while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
                        SET(uip->ui_flags, UI_MAPWAITING);
                        (void) msleep(&uip->ui_flags, &vp->v_lock,
-                                     PRIBIO, "ubc_map", NULL);
+                           PRIBIO, "ubc_map", NULL);
                }
                SET(uip->ui_flags, UI_MAPBUSY);
                vnode_unlock(vp);
 
                error = VNOP_MMAP(vp, flags, vfs_context_current());
 
-               if (error != EPERM)
-                       error = 0;
+               /*
+                * rdar://problem/22587101 required that we stop propagating
+                * EPERM up the stack. Otherwise, we would have to funnel up
+                * the error at all the call sites for memory_object_map().
+                * The risk is in having to undo the map/object/entry state at
+                * all these call sites. It would also affect more than just mmap()
+                * e.g. vm_remap().
+                *
+                *      if (error != EPERM)
+                *              error = 0;
+                */
+
+               error = 0;
 
                vnode_lock_spin(vp);
 
                if (error == 0) {
-                       if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
-                               need_ref = 1;
+                       if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
+                               need_ref = 1;
+                       }
                        SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
+                       if (flags & PROT_WRITE) {
+                               SET(uip->ui_flags, UI_MAPPEDWRITE);
+                       }
                }
                CLR(uip->ui_flags, UI_MAPBUSY);
 
@@ -1303,13 +1923,47 @@ ubc_map(vnode_t vp, int flags)
                }
                vnode_unlock(vp);
 
-               if (need_wakeup)
+               if (need_wakeup) {
                        wakeup(&uip->ui_flags);
+               }
 
-               if (need_ref)
-                       vnode_ref(vp);
+               if (need_ref) {
+                       /*
+                        * Make sure we get a ref as we can't unwind from here
+                        */
+                       if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
+                               panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
+                       }
+                       /*
+                        * Vnodes that are on "unreliable" media (like disk
+                        * images, network filesystems, 3rd-party filesystems,
+                        * and possibly external devices) could see their
+                        * contents be changed via the backing store without
+                        * triggering copy-on-write, so we can't fully rely
+                        * on copy-on-write and might have to resort to
+                        * copy-on-read to protect "privileged" processes and
+                        * prevent privilege escalation.
+                        *
+                        * The root filesystem is considered "reliable" because
+                        * there's not much point in trying to protect
+                        * ourselves from such a vulnerability and the extra
+                        * cost of copy-on-read (CPU time and memory pressure)
+                        * could result in some serious regressions.
+                        */
+                       if (vp->v_mount != NULL &&
+                           ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
+                           vnode_on_reliable_media(vp))) {
+                               /*
+                                * This vnode is deemed "reliable" so mark
+                                * its VM object as "trusted".
+                                */
+                               memory_object_mark_trusted(uip->ui_control);
+                       } else {
+//                             printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
+                       }
+               }
        }
-       return (error);
+       return error;
 }
 
 
@@ -1345,14 +1999,15 @@ ubc_destroy_named(vnode_t vp)
        kern_return_t kret;
 
        if (UBCINFOEXISTS(vp)) {
-               uip = vp->v_ubcinfo;
+               uip = vp->v_ubcinfo;
 
                /* Terminate the memory object  */
                control = ubc_getobject(vp, UBC_HOLDOBJECT);
                if (control != MEMORY_OBJECT_CONTROL_NULL) {
-                       kret = memory_object_destroy(control, 0);
-                       if (kret != KERN_SUCCESS)
-                               panic("ubc_destroy_named: memory_object_destroy failed");
+                       kret = memory_object_destroy(control, 0);
+                       if (kret != KERN_SUCCESS) {
+                               panic("ubc_destroy_named: memory_object_destroy failed");
+                       }
                }
        }
 }
@@ -1385,9 +2040,10 @@ ubc_destroy_named(vnode_t vp)
 int
 ubc_isinuse(struct vnode *vp, int busycount)
 {
-       if ( !UBCINFOEXISTS(vp))
-               return (0);
-       return(ubc_isinuse_locked(vp, busycount, 0));
+       if (!UBCINFOEXISTS(vp)) {
+               return 0;
+       }
+       return ubc_isinuse_locked(vp, busycount, 0);
 }
 
 
@@ -1425,15 +2081,18 @@ ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
        int retval = 0;
 
 
-       if (!locked)
-               vnode_lock(vp);
+       if (!locked) {
+               vnode_lock_spin(vp);
+       }
 
-       if ((vp->v_usecount - vp->v_kusecount) > busycount)
+       if ((vp->v_usecount - vp->v_kusecount) > busycount) {
                retval = 1;
+       }
 
-       if (!locked)
+       if (!locked) {
                vnode_unlock(vp);
-       return (retval);
+       }
+       return retval;
 }
 
 
@@ -1460,64 +2119,86 @@ __private_extern__ void
 ubc_unmap(struct vnode *vp)
 {
        struct ubc_info *uip;
-       int     need_rele = 0;
-       int     need_wakeup = 0;
-#if NAMEDRSRCFORK 
-       int     named_fork = 0;
-#endif
+       int     need_rele = 0;
+       int     need_wakeup = 0;
 
-       if (vnode_getwithref(vp))
-               return;
+       if (vnode_getwithref(vp)) {
+               return;
+       }
 
        if (UBCINFOEXISTS(vp)) {
+               bool want_fsevent = false;
+
                vnode_lock(vp);
                uip = vp->v_ubcinfo;
 
                while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
                        SET(uip->ui_flags, UI_MAPWAITING);
                        (void) msleep(&uip->ui_flags, &vp->v_lock,
-                                     PRIBIO, "ubc_unmap", NULL);
+                           PRIBIO, "ubc_unmap", NULL);
                }
                SET(uip->ui_flags, UI_MAPBUSY);
 
-#if NAMEDRSRCFORK
-               if ((vp->v_flag & VISNAMEDSTREAM) &&
-                   (vp->v_parent != NULLVP) && 
-                   !(vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS)) {
-                       named_fork = 1;
-               }
-#endif
-
                if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
-                       CLR(uip->ui_flags, UI_ISMAPPED);
+                       if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
+                               want_fsevent = true;
+                       }
+
                        need_rele = 1;
+
+                       /*
+                        * We want to clear the mapped flags after we've called
+                        * VNOP_MNOMAP to avoid certain races and allow
+                        * VNOP_MNOMAP to call ubc_is_mapped_writable.
+                        */
                }
                vnode_unlock(vp);
-               
-               if (need_rele) {
-                       (void)VNOP_MNOMAP(vp, vfs_context_current());
 
-#if NAMEDRSRCFORK
-                       if (named_fork) {
-                               vnode_relenamedstream(vp->v_parent, vp, vfs_context_current());
+               if (need_rele) {
+                       vfs_context_t ctx = vfs_context_current();
+
+                       (void)VNOP_MNOMAP(vp, ctx);
+
+#if CONFIG_FSE
+                       /*
+                        * Why do we want an fsevent here?  Normally the
+                        * content modified fsevent is posted when a file is
+                        * closed and only if it's written to via conventional
+                        * means.  It's perfectly legal to close a file and
+                        * keep your mappings and we don't currently track
+                        * whether it was written to via a mapping.
+                        * Therefore, we need to post an fsevent here if the
+                        * file was mapped writable.  This may result in false
+                        * events, i.e. we post a notification when nothing
+                        * has really changed.
+                        */
+                       if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
+                               add_fsevent(FSE_CONTENT_MODIFIED, ctx,
+                                   FSE_ARG_VNODE, vp,
+                                   FSE_ARG_DONE);
                        }
 #endif
 
-                       vnode_rele(vp);
+                       vnode_rele(vp);
                }
 
                vnode_lock_spin(vp);
 
+               if (need_rele) {
+                       CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
+               }
+
                CLR(uip->ui_flags, UI_MAPBUSY);
+
                if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
                        CLR(uip->ui_flags, UI_MAPWAITING);
                        need_wakeup = 1;
                }
                vnode_unlock(vp);
 
-               if (need_wakeup)
-                       wakeup(&uip->ui_flags);
-
+               if (need_wakeup) {
+                       wakeup(&uip->ui_flags);
+               }
        }
        /*
         * the drop of the vnode ref will cleanup
@@ -1603,23 +2284,24 @@ ubc_unmap(struct vnode *vp)
  */
 kern_return_t
 ubc_page_op(
-       struct vnode    *vp,
-       off_t           f_offset,
-       int             ops,
-       ppnum_t *phys_entryp,
-       int             *flagsp)
+       struct vnode    *vp,
+       off_t           f_offset,
+       int             ops,
+       ppnum_t *phys_entryp,
+       int             *flagsp)
 {
-       memory_object_control_t         control;
+       memory_object_control_t         control;
 
        control = ubc_getobject(vp, UBC_FLAGS_NONE);
-       if (control == MEMORY_OBJECT_CONTROL_NULL)
+       if (control == MEMORY_OBJECT_CONTROL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       return (memory_object_page_op(control,
-                                     (memory_object_offset_t)f_offset,
-                                     ops,
-                                     phys_entryp,
-                                     flagsp));
+       return memory_object_page_op(control,
+                  (memory_object_offset_t)f_offset,
+                  ops,
+                  phys_entryp,
+                  flagsp);
 }
 
 
@@ -1683,23 +2365,24 @@ ubc_page_op(
  */
 kern_return_t
 ubc_range_op(
-       struct vnode    *vp,
-       off_t           f_offset_beg,
-       off_t           f_offset_end,
+       struct vnode    *vp,
+       off_t           f_offset_beg,
+       off_t           f_offset_end,
        int             ops,
        int             *range)
 {
-       memory_object_control_t         control;
+       memory_object_control_t         control;
 
        control = ubc_getobject(vp, UBC_FLAGS_NONE);
-       if (control == MEMORY_OBJECT_CONTROL_NULL)
+       if (control == MEMORY_OBJECT_CONTROL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       return (memory_object_range_op(control,
-                                     (memory_object_offset_t)f_offset_beg,
-                                     (memory_object_offset_t)f_offset_end,
-                                     ops,
-                                     range));
+       return memory_object_range_op(control,
+                  (memory_object_offset_t)f_offset_beg,
+                  (memory_object_offset_t)f_offset_end,
+                  ops,
+                  range);
 }
 
 
@@ -1725,12 +2408,12 @@ ubc_range_op(
  *                                     multiple of the page size
  *             KERN_INVALID_ARGUMENT   There is no ubc_info associated with
  *                                     the vnode, or there is no memory object
- *                                     control associated with the ubc_info 
+ *                                     control associated with the ubc_info
  *     memory_object_upl_request:KERN_INVALID_VALUE
  *                                     The supplied upl_flags argument is
  *                                     invalid
  * Implicit Returns:
- *             *uplp (modified)        
+ *             *uplp (modified)
  *             *plp (modified)         If non-NULL, the value of *plp will be
  *                                     modified to point to the internal page
  *                                     list; this modification may occur even
@@ -1742,46 +2425,98 @@ ubc_range_op(
  *             ubc_upl_abort(), or ubc_upl_abort_range().
  */
 kern_return_t
-ubc_create_upl(
-       struct vnode    *vp,
-       off_t           f_offset,
-       long            bufsize,
-       upl_t           *uplp,
-       upl_page_info_t **plp,
-       int             uplflags)
-{
-       memory_object_control_t         control;
-       mach_msg_type_number_t          count;
-       int                             ubcflags;
-       kern_return_t                   kr;
-       
-       if (bufsize & 0xfff)
+ubc_create_upl_external(
+       struct vnode    *vp,
+       off_t           f_offset,
+       int             bufsize,
+       upl_t           *uplp,
+       upl_page_info_t **plp,
+       int             uplflags)
+{
+       return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
+}
+
+kern_return_t
+ubc_create_upl_kernel(
+       struct vnode    *vp,
+       off_t           f_offset,
+       int             bufsize,
+       upl_t           *uplp,
+       upl_page_info_t **plp,
+       int             uplflags,
+       vm_tag_t tag)
+{
+       memory_object_control_t         control;
+       kern_return_t                   kr;
+
+       if (plp != NULL) {
+               *plp = NULL;
+       }
+       *uplp = NULL;
+
+       if (bufsize & 0xfff) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (bufsize > MAX_UPL_SIZE_BYTES) {
                return KERN_INVALID_ARGUMENT;
+       }
+
+       if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
+               if (uplflags & UPL_UBC_MSYNC) {
+                       uplflags &= UPL_RET_ONLY_DIRTY;
 
-       if (uplflags & UPL_FOR_PAGEOUT) {
+                       uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
+                           UPL_SET_INTERNAL | UPL_SET_LITE;
+               } else if (uplflags & UPL_UBC_PAGEOUT) {
+                       uplflags &= UPL_RET_ONLY_DIRTY;
+
+                       if (uplflags & UPL_RET_ONLY_DIRTY) {
+                               uplflags |= UPL_NOBLOCK;
+                       }
+
+                       uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
+                           UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
+               } else {
+                       uplflags |= UPL_RET_ONLY_ABSENT |
+                           UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
+                           UPL_SET_INTERNAL | UPL_SET_LITE;
+
+                       /*
+                        * if the requested size == PAGE_SIZE, we don't want to set
+                        * the UPL_NOBLOCK since we may be trying to recover from a
+                        * previous partial pagein I/O that occurred because we were low
+                        * on memory and bailed early in order to honor the UPL_NOBLOCK...
+                        * since we're only asking for a single page, we can block w/o fear
+                        * of tying up pages while waiting for more to become available
+                        */
+                       if (bufsize > PAGE_SIZE) {
+                               uplflags |= UPL_NOBLOCK;
+                       }
+               }
+       } else {
                uplflags &= ~UPL_FOR_PAGEOUT;
-               ubcflags  =  UBC_FOR_PAGEOUT;
-       } else
-               ubcflags = UBC_FLAGS_NONE;
 
-       control = ubc_getobject(vp, ubcflags);
-       if (control == MEMORY_OBJECT_CONTROL_NULL)
+               if (uplflags & UPL_WILL_BE_DUMPED) {
+                       uplflags &= ~UPL_WILL_BE_DUMPED;
+                       uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
+               } else {
+                       uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
+               }
+       }
+       control = ubc_getobject(vp, UBC_FLAGS_NONE);
+       if (control == MEMORY_OBJECT_CONTROL_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (uplflags & UPL_WILL_BE_DUMPED) {
-               uplflags &= ~UPL_WILL_BE_DUMPED;
-               uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
-       } else
-               uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
-       count = 0;
-
-       kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, &count, uplflags);
-       if (plp != NULL)
-                       *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
+       kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
+       if (kr == KERN_SUCCESS && plp != NULL) {
+               *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
+       }
        return kr;
 }
-               
-                                                             
+
+
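+/*
+ * Usage sketch (illustrative only; vp and f_offset stand for a
+ * caller-supplied vnode and page-aligned file offset): a simple pagein
+ * path could create a UPL over one page, map it into the kernel map,
+ * fill it from the backing store, and then commit or abort it.  This is
+ * only an outline of the expected create/map/unmap/commit sequence:
+ *
+ *	upl_t upl = NULL;
+ *	upl_page_info_t *pl = NULL;
+ *	vm_offset_t kaddr;
+ *
+ *	if (ubc_create_upl_external(vp, f_offset, PAGE_SIZE, &upl, &pl,
+ *	    UPL_UBC_PAGEIN) == KERN_SUCCESS) {
+ *		if (ubc_upl_map(upl, &kaddr) == KERN_SUCCESS) {
+ *			// ... copy the file data into the pages at kaddr ...
+ *			(void) ubc_upl_unmap(upl);
+ *			(void) ubc_upl_commit_range(upl, 0, PAGE_SIZE,
+ *			    UPL_COMMIT_FREE_ON_EMPTY);
+ *		} else {
+ *			(void) ubc_upl_abort(upl,
+ *			    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
+ *		}
+ *	}
+ */
+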
 /*
  * ubc_upl_maxbufsize
  *
@@ -1791,11 +2526,11 @@ ubc_create_upl(
  *
  * Returns:    maximum size buffer (in bytes) ubc_create_upl( ) will take.
  */
-upl_size_t 
+upl_size_t
 ubc_upl_maxbufsize(
        void)
 {
-       return(MAX_UPL_TRANSFER * PAGE_SIZE);
+       return MAX_UPL_SIZE_BYTES;
 }
 
 /*
@@ -1817,10 +2552,10 @@ ubc_upl_maxbufsize(
  */
 kern_return_t
 ubc_upl_map(
-       upl_t           upl,
-       vm_offset_t     *dst_addr)
+       upl_t           upl,
+       vm_offset_t     *dst_addr)
 {
-       return (vm_upl_map(kernel_map, upl, dst_addr));
+       return vm_upl_map(kernel_map, upl, dst_addr);
 }
 
 
@@ -1838,9 +2573,9 @@ ubc_upl_map(
  */
 kern_return_t
 ubc_upl_unmap(
-       upl_t   upl)
+       upl_t   upl)
 {
-       return(vm_upl_unmap(kernel_map, upl));
+       return vm_upl_unmap(kernel_map, upl);
 }
 
 
@@ -1869,13 +2604,13 @@ ubc_upl_unmap(
  */
 kern_return_t
 ubc_upl_commit(
-       upl_t                   upl)
+       upl_t                   upl)
 {
-       upl_page_info_t *pl;
-       kern_return_t   kr;
+       upl_page_info_t *pl;
+       kern_return_t   kr;
 
        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
-       kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
+       kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
        upl_deallocate(upl);
        return kr;
 }
@@ -1935,25 +2670,31 @@ ubc_upl_commit(
  */
 kern_return_t
 ubc_upl_commit_range(
-       upl_t                   upl,
-       vm_offset_t             offset,
-       vm_size_t               size,
-       int                             flags)
+       upl_t                   upl,
+       upl_offset_t            offset,
+       upl_size_t              size,
+       int                             flags)
 {
-       upl_page_info_t *pl;
-       boolean_t               empty;
-       kern_return_t   kr;
+       upl_page_info_t *pl;
+       boolean_t               empty;
+       kern_return_t   kr;
 
-       if (flags & UPL_COMMIT_FREE_ON_EMPTY)
+       if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
                flags |= UPL_COMMIT_NOTIFY_EMPTY;
+       }
+
+       if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
 
        kr = upl_commit_range(upl, offset, size, flags,
-                                                 pl, MAX_UPL_TRANSFER, &empty);
+           pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
 
-       if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
+       if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
                upl_deallocate(upl);
+       }
 
        return kr;
 }
@@ -2006,21 +2747,23 @@ ubc_upl_commit_range(
  */
 kern_return_t
 ubc_upl_abort_range(
-       upl_t                   upl,
-       vm_offset_t             offset,
-       vm_size_t               size,
-       int                             abort_flags)
+       upl_t                   upl,
+       upl_offset_t            offset,
+       upl_size_t              size,
+       int                             abort_flags)
 {
-       kern_return_t   kr;
-       boolean_t               empty = FALSE;
+       kern_return_t   kr;
+       boolean_t               empty = FALSE;
 
-       if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
+       if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
                abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
+       }
 
        kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
 
-       if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
+       if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
                upl_deallocate(upl);
+       }
 
        return kr;
 }
@@ -2070,10 +2813,10 @@ ubc_upl_abort_range(
  */
 kern_return_t
 ubc_upl_abort(
-       upl_t                   upl,
-       int                             abort_type)
+       upl_t                   upl,
+       int                             abort_type)
 {
-       kern_return_t   kr;
+       kern_return_t   kr;
 
        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);
@@ -2101,194 +2844,889 @@ ubc_upl_abort(
  */
 upl_page_info_t *
 ubc_upl_pageinfo(
-       upl_t                   upl)
-{             
-       return (UPL_GET_INTERNAL_PAGE_LIST(upl));
+       upl_t                   upl)
+{
+       return UPL_GET_INTERNAL_PAGE_LIST(upl);
 }
 
 
-int 
-UBCINFOEXISTS(struct vnode * vp)
+int
+UBCINFOEXISTS(const struct vnode * vp)
+{
+       return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
+}
+
+
+void
+ubc_upl_range_needed(
+       upl_t           upl,
+       int             index,
+       int             count)
+{
+       upl_range_needed(upl, index, count);
+}
+
+boolean_t
+ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
+{
+       if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
+               return FALSE;
+       }
+       if (writable) {
+               *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
+       }
+       return TRUE;
+}
+
+boolean_t
+ubc_is_mapped_writable(const struct vnode *vp)
 {
-        return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
+       boolean_t writable;
+       return ubc_is_mapped(vp, &writable) && writable;
 }
 
 
 /*
  * CODE SIGNING
  */
-#define CS_BLOB_KEEP_IN_KERNEL 1
-static volatile SInt32 cs_blob_size = 0;
-static volatile SInt32 cs_blob_count = 0;
-static SInt32 cs_blob_size_peak = 0;
-static UInt32 cs_blob_size_max = 0;
-static SInt32 cs_blob_count_peak = 0;
-extern int cs_debug;
-
-int cs_validation = 1;
+static atomic_size_t cs_blob_size = 0;
+static atomic_uint_fast32_t cs_blob_count = 0;
+static atomic_size_t cs_blob_size_peak = 0;
+static atomic_size_t cs_blob_size_max = 0;
+static atomic_uint_fast32_t cs_blob_count_peak = 0;
+
+SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
+SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
+SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
+SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
+SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
 
-SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW, &cs_validation, 0, "Do validate code signatures");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD, &cs_blob_count, 0, "Current number of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD, &cs_blob_size, 0, "Current size of all code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD, &cs_blob_size_max, 0, "Size of biggest code signature blob");
+/*
+ * Function: csblob_parse_teamid
+ *
+ * Description: This function returns a pointer to the team id
+ *               stored within the codedirectory of the csblob.
+ *               If the codedirectory predates team-ids, it returns
+ *               NULL.
+ *               This does not copy the name but returns a pointer to
+ *               it within the CD. Consequently, the CD must be
+ *               available when this is used.
+ */
 
-int
-ubc_cs_blob_add(
-       struct vnode    *vp,
-       cpu_type_t      cputype,
-       off_t           base_offset,
-       vm_address_t    addr,
-       vm_size_t       size)
-{
-       kern_return_t           kr;
-       struct ubc_info         *uip;
-       struct cs_blob          *blob, *oblob;
-       int                     error;
-       ipc_port_t              blob_handle;
-       memory_object_size_t    blob_size;
+static const char *
+csblob_parse_teamid(struct cs_blob *csblob)
+{
        const CS_CodeDirectory *cd;
-       off_t                   blob_start_offset, blob_end_offset;
-       SHA1_CTX                sha1ctxt;
 
-       blob_handle = IPC_PORT_NULL;
+       cd = csblob->csb_cd;
 
-       blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
-       if (blob == NULL) {
-               return ENOMEM;
+       if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
+               return NULL;
        }
 
-       /* get a memory entry on the blob */
-       blob_size = (memory_object_size_t) size;
-       kr = mach_make_memory_entry_64(kernel_map,
-                                      &blob_size,
-                                      addr,
-                                      VM_PROT_READ,
-                                      &blob_handle,
-                                      IPC_PORT_NULL);
-       if (kr != KERN_SUCCESS) {
-               error = ENOMEM;
-               goto out;
+       if (cd->teamOffset == 0) {
+               return NULL;
        }
-       if (memory_object_round_page(blob_size) !=
-           (memory_object_size_t) round_page(size)) {
-               printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%x !?\n",
-                      blob_size, size);
-               panic("XXX FBDP size mismatch 0x%llx 0x%x\n", blob_size, size);
-               error = EINVAL;
-               goto out;
+
+       const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
+       if (cs_debug > 1) {
+               printf("found team-id %s in cdblob\n", name);
        }
 
+       return name;
+}
 
-       /* fill in the new blob */
-       blob->csb_cpu_type = cputype;
-       blob->csb_base_offset = base_offset;
-       blob->csb_mem_size = size;
-       blob->csb_mem_offset = 0;
-       blob->csb_mem_handle = blob_handle;
-       blob->csb_mem_kaddr = addr;
 
-       
-       /*
-        * Validate the blob's contents
-        */
-       cd = findCodeDirectory(
-               (const CS_SuperBlob *) addr, 
-               (char *) addr, 
-               (char *) addr + blob->csb_mem_size);
-       if (cd == NULL) {
-               /* no code directory => useless blob ! */
-               blob->csb_flags = 0;
-               blob->csb_start_offset = 0;
-               blob->csb_end_offset = 0;
-       } else {
-               unsigned char *sha1_base;
-               int sha1_size;
+kern_return_t
+ubc_cs_blob_allocate(
+       vm_offset_t     *blob_addr_p,
+       vm_size_t       *blob_size_p)
+{
+       kern_return_t   kr = KERN_FAILURE;
 
-               blob->csb_flags = ntohl(cd->flags) | CS_VALID;
-               blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
-               blob->csb_start_offset = (blob->csb_end_offset -
-                                         (ntohl(cd->nCodeSlots) * PAGE_SIZE));
-               /* compute the blob's SHA1 hash */
-               sha1_base = (const unsigned char *) cd;
-               sha1_size = ntohl(cd->length);
-               SHA1Init(&sha1ctxt);
-               SHA1Update(&sha1ctxt, sha1_base, sha1_size);
-               SHA1Final(blob->csb_sha1, &sha1ctxt);
-       }
+       {
+               *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
 
+               if (*blob_addr_p == 0) {
+                       kr = KERN_NO_SPACE;
+               } else {
+                       kr = KERN_SUCCESS;
+               }
+       }
 
-       /*
-        * Validate the blob's coverage
-        */
-       blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
-       blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+       return kr;
+}
 
-       if (blob_start_offset >= blob_end_offset) {
-               /* reject empty or backwards blob */
-               error = EINVAL;
-               goto out;
+void
+ubc_cs_blob_deallocate(
+       vm_offset_t     blob_addr,
+       vm_size_t       blob_size)
+{
+       {
+               kfree(blob_addr, blob_size);
        }
+}
 
-       vnode_lock(vp);
-       if (! UBCINFOEXISTS(vp)) {
-               vnode_unlock(vp);
-               error = ENOENT;
-               goto out;
-       }
-       uip = vp->v_ubcinfo;
+/*
+ * Some codesigned files use a lowest common denominator page size of
+ * 4KiB, but can be used on systems that have a runtime page size of
+ * 16KiB. Since faults will only occur on 16KiB ranges in
+ * cs_validate_range(), we can convert the original Code Directory to
+ * a multi-level scheme where groups of 4 hashes are combined to form
+ * a new hash, which represents 16KiB in the on-disk file.  This can
+ * reduce the wired memory requirement for the Code Directory by
+ * 75%. Care must be taken for binaries that use the "fourk" VM pager
+ * for unaligned access, which may still attempt to validate on
+ * non-16KiB multiples for compatibility with 3rd party binaries.
+ */
+static boolean_t
+ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
+{
+       const CS_CodeDirectory *cd;
 
-       /* check if this new blob overlaps with an existing blob */
-       for (oblob = uip->cs_blobs;
-            oblob != NULL;
-            oblob = oblob->csb_next) {
-                off_t oblob_start_offset, oblob_end_offset;
-
-                oblob_start_offset = (oblob->csb_base_offset +
-                                      oblob->csb_start_offset);
-                oblob_end_offset = (oblob->csb_base_offset +
-                                    oblob->csb_end_offset);
-                if (blob_start_offset >= oblob_end_offset ||
-                    blob_end_offset <= oblob_start_offset) {
-                        /* no conflict with this existing blob */
-                } else {
-                        /* conflict ! */
-                        if (blob_start_offset == oblob_start_offset &&
-                            blob_end_offset == oblob_end_offset &&
-                            blob->csb_mem_size == oblob->csb_mem_size &&
-                            blob->csb_flags == oblob->csb_flags &&
-                            (blob->csb_cpu_type == CPU_TYPE_ANY ||
-                             oblob->csb_cpu_type == CPU_TYPE_ANY ||
-                             blob->csb_cpu_type == oblob->csb_cpu_type) &&
-                            !bcmp(blob->csb_sha1,
-                                  oblob->csb_sha1,
-                                  SHA1_RESULTLEN)) {
-                                /*
-                                 * We already have this blob:
-                                 * we'll return success but
-                                 * throw away the new blob.
-                                 */
-                                if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
-                                        /*
-                                         * The old blob matches this one
-                                         * but doesn't have any CPU type.
-                                         * Update it with whatever the caller
-                                         * provided this time.
-                                         */
-                                        oblob->csb_cpu_type = cputype;
-                                }
-                                vnode_unlock(vp);
-                                error = EAGAIN;
-                                goto out;
-                        } else {
-                                /* different blob: reject the new one */
-                                vnode_unlock(vp);
-                                error = EALREADY;
-                                goto out;
-                        }
-                }
 
+       /*
+        * Only applies to binaries that ship as part of the OS,
+        * primarily the shared cache.
+        */
+       if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
+               return FALSE;
+       }
+
+       /*
+        * If the runtime page size matches the code signing page
+        * size, there is no work to do.
+        */
+       if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
+               return FALSE;
+       }
+
+       cd = blob->csb_cd;
+
+       /*
+        * There must be a valid integral multiple of hashes
+        */
+       if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+               return FALSE;
+       }
+
+       /*
+        * Scatter lists must also have ranges that have an integral number of hashes
+        */
+       if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+               const SC_Scatter *scatter = (const SC_Scatter*)
+                   ((const char*)cd + ntohl(cd->scatterOffset));
+               /* iterate all scatter structs to make sure they are all aligned */
+               do {
+                       uint32_t sbase = ntohl(scatter->base);
+                       uint32_t scount = ntohl(scatter->count);
+
+                       /* last scatter? */
+                       if (scount == 0) {
+                               break;
+                       }
+
+                       if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+                               return FALSE;
+                       }
+
+                       if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+                               return FALSE;
+                       }
+
+                       scatter++;
+               } while (1);
+       }
+
+       /* Covered range must be a multiple of the new page size */
+       if (ntohl(cd->codeLimit) & PAGE_MASK) {
+               return FALSE;
+       }
+
+       /* All checks pass */
+       return TRUE;
+}
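+
+/*
+ * Worked example of the reduction described above: with a 4KiB signing
+ * page size (csb_hash_pageshift == 12) on a 16KiB-page system
+ * (PAGE_SHIFT == 14), the conversion below uses a shift of
+ * 14 - 12 == 2, so every group of 1 << 2 == 4 original page hashes is
+ * re-hashed into one slot covering a single 16KiB runtime page.
+ * nCodeSlots (and any scatter counts/bases) shrink by a factor of 4,
+ * which is where the ~75% saving in wired Code Directory memory comes
+ * from.
+ */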
+
+/*
+ * Given a cs_blob with an already chosen best code directory, this
+ * function allocates memory and copies into it only the blobs that
+ * will be needed by the kernel, namely the single chosen code
+ * directory (and not any of its alternatives) and the entitlement
+ * blob.
+ *
+ * This saves significant memory with agile signatures, and additional
+ * memory for 3rd Party Code because we also omit the CMS blob.
+ *
+ * To support multilevel and other potential code directory rewriting,
+ * the size of a new code directory can be specified. Since that code
+ * directory will replace the existing code directory,
+ * ubc_cs_reconstitute_code_signature does not copy the original code
+ * directory when a size is given, and the caller must fill it in.
+ */
+static int
+ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
+    vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
+    CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
+{
+       const CS_CodeDirectory  *old_cd, *cd;
+       CS_CodeDirectory        *new_cd;
+       const CS_GenericBlob *entitlements;
+       vm_offset_t     new_blob_addr;
+       vm_size_t       new_blob_size;
+       vm_size_t       new_cdsize;
+       kern_return_t   kr;
+       int                             error;
+
+       old_cd = blob->csb_cd;
+
+       new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);
+
+       new_blob_size  = sizeof(CS_SuperBlob);
+       new_blob_size += sizeof(CS_BlobIndex);
+       new_blob_size += new_cdsize;
+
+       if (blob->csb_entitlements_blob) {
+               /* We need to add a slot for the entitlements */
+               ptrauth_utils_auth_blob_generic(blob->csb_entitlements_blob,
+                   ntohl(blob->csb_entitlements_blob->length),
+                   OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                   PTRAUTH_ADDR_DIVERSIFY,
+                   blob->csb_entitlements_blob_signature);
+
+               new_blob_size += sizeof(CS_BlobIndex);
+               new_blob_size += ntohl(blob->csb_entitlements_blob->length);
+       }
+
+       kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
+       if (kr != KERN_SUCCESS) {
+               if (cs_debug > 1) {
+                       printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
+                           kr);
+               }
+               return ENOMEM;
+       }
+
+       CS_SuperBlob            *new_superblob;
+
+       new_superblob = (CS_SuperBlob *)new_blob_addr;
+       new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
+       new_superblob->length = htonl((uint32_t)new_blob_size);
+       if (blob->csb_entitlements_blob) {
+               vm_size_t                       ent_offset, cd_offset;
+
+               cd_offset  = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
+               ent_offset = cd_offset +  new_cdsize;
+
+               new_superblob->count = htonl(2);
+               new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
+               new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
+               new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
+               new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
+
+               ptrauth_utils_auth_blob_generic(blob->csb_entitlements_blob,
+                   ntohl(blob->csb_entitlements_blob->length),
+                   OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                   PTRAUTH_ADDR_DIVERSIFY,
+                   blob->csb_entitlements_blob_signature);
+
+               memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
+
+               new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
+       } else {
+               // Blob is the code directory, directly.
+               new_cd = (CS_CodeDirectory *)new_blob_addr;
+       }
+
+       if (optional_new_cd_size == 0) {
+               // Copy code directory, and revalidate.
+               memcpy(new_cd, old_cd, new_cdsize);
+
+               vm_size_t length = new_blob_size;
+
+               error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
+
+               if (error) {
+                       printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+                           error);
+
+                       ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+                       return error;
+               }
+               *new_entitlements_p = entitlements;
+       } else {
+               // Caller will fill out and validate code directory.
+               memset(new_cd, 0, new_cdsize);
+               *new_entitlements_p = NULL;
+       }
+
+       *new_blob_addr_p = new_blob_addr;
+       *new_blob_size_p = new_blob_size;
+       *new_cd_p = new_cd;
+
+       return 0;
+}
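+
+/*
+ * Rough layout of the blob built above when an entitlements blob is
+ * present (offsets as computed in ubc_cs_reconstitute_code_signature();
+ * not to scale):
+ *
+ *	+----------------------+ 0
+ *	| CS_SuperBlob header  |
+ *	| CS_BlobIndex[0]      | -> CSSLOT_CODEDIRECTORY at cd_offset
+ *	| CS_BlobIndex[1]      | -> CSSLOT_ENTITLEMENTS at ent_offset
+ *	+----------------------+ cd_offset
+ *	| code directory       | new_cdsize bytes (copied or caller-filled)
+ *	+----------------------+ ent_offset = cd_offset + new_cdsize
+ *	| entitlements blob    |
+ *	+----------------------+ new_blob_size
+ */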
+
+static int
+ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
+{
+       const CS_CodeDirectory  *old_cd, *cd;
+       CS_CodeDirectory        *new_cd;
+       const CS_GenericBlob *entitlements;
+       vm_offset_t     new_blob_addr;
+       vm_size_t       new_blob_size;
+       vm_size_t       new_cdsize;
+       int                             error;
+
+       uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
+
+       if (cs_debug > 1) {
+               printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
+                   (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
+       }
+
+       old_cd = blob->csb_cd;
+
+       /* Up to the hashes, we can copy all data */
+       new_cdsize  = ntohl(old_cd->hashOffset);
+       new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
+
+       error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
+           &new_blob_addr, &new_blob_size, &new_cd,
+           &entitlements);
+       if (error != 0) {
+               printf("CODE SIGNING: Failed to reconstitute code signature: %d\n", error);
+               return error;
+       }
+
+       memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
+
+       /* Update fields in the Code Directory structure */
+       new_cd->length = htonl((uint32_t)new_cdsize);
+
+       uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
+       nCodeSlots >>= hashes_per_new_hash_shift;
+       new_cd->nCodeSlots = htonl(nCodeSlots);
+
+       new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
+
+       if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
+               SC_Scatter *scatter = (SC_Scatter*)
+                   ((char *)new_cd + ntohl(new_cd->scatterOffset));
+               /* iterate all scatter structs to scale their counts */
+               do {
+                       uint32_t scount = ntohl(scatter->count);
+                       uint32_t sbase  = ntohl(scatter->base);
+
+                       /* last scatter? */
+                       if (scount == 0) {
+                               break;
+                       }
+
+                       scount >>= hashes_per_new_hash_shift;
+                       scatter->count = htonl(scount);
+
+                       sbase >>= hashes_per_new_hash_shift;
+                       scatter->base = htonl(sbase);
+
+                       scatter++;
+               } while (1);
+       }
+
+       /* For each group of hashes, hash them together */
+       const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
+       unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
+
+       uint32_t hash_index;
+       for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
+               union cs_hash_union     mdctx;
+
+               uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
+               const unsigned char *src = src_base + hash_index * source_hash_len;
+               unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
+
+               blob->csb_hashtype->cs_init(&mdctx);
+               blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
+               blob->csb_hashtype->cs_final(dst, &mdctx);
+       }
+
+       error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
+       if (error != 0) {
+               printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+                   error);
+
+               ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+               return error;
+       }
+
+       /* New Code Directory is ready for use, swap it out in the blob structure */
+       ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+
+       blob->csb_mem_size = new_blob_size;
+       blob->csb_mem_kaddr = (void *)new_blob_addr;
+       blob->csb_cd = cd;
+       blob->csb_entitlements_blob = entitlements;
+       if (blob->csb_entitlements_blob != NULL) {
+               blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+                   ntohl(blob->csb_entitlements_blob->length),
+                   OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                   PTRAUTH_ADDR_DIVERSIFY);
+       }
+
+       /* The blob has some cached attributes of the Code Directory, so update those */
+
+       blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
+
+       blob->csb_hash_pageshift = PAGE_SHIFT;
+       blob->csb_end_offset = ntohl(cd->codeLimit);
+       if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+               const SC_Scatter *scatter = (const SC_Scatter*)
+                   ((const char*)cd + ntohl(cd->scatterOffset));
+               blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
+       } else {
+               blob->csb_start_offset = 0;
+       }
+
+       return 0;
+}
+
+/*
+ * Validate the code signature blob, create a struct cs_blob wrapper
+ * and return it together with a pointer to the chosen code directory
+ * and entitlements blob.
+ *
+ * Note that this takes ownership of the memory passed in as *addr, mainly because
+ * this function can actually replace the passed in blob with another
+ * one, e.g. when performing multilevel hashing optimization.
+ */
+int
+cs_blob_create_validated(
+       vm_address_t * const            addr,
+       vm_size_t                       size,
+       struct cs_blob ** const         ret_blob,
+       CS_CodeDirectory const ** const     ret_cd)
+{
+       struct cs_blob          *blob;
+       int             error = EINVAL;
+       const CS_CodeDirectory *cd;
+       const CS_GenericBlob *entitlements;
+       union cs_hash_union     mdctx;
+       size_t                  length;
+
+       if (ret_blob) {
+               *ret_blob = NULL;
+       }
+
+       blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
+       if (blob == NULL) {
+               return ENOMEM;
+       }
+
+       /* fill in the new blob */
+       blob->csb_mem_size = size;
+       blob->csb_mem_offset = 0;
+       blob->csb_mem_kaddr = (void *)*addr;
+       blob->csb_flags = 0;
+       blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
+       blob->csb_platform_binary = 0;
+       blob->csb_platform_path = 0;
+       blob->csb_teamid = NULL;
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+       blob->csb_supplement_teamid = NULL;
+#endif
+       blob->csb_entitlements_blob = NULL;
+       blob->csb_entitlements = NULL;
+       blob->csb_reconstituted = false;
+
+       /* Transfer ownership. Even on error, this function will deallocate */
+       *addr = 0;
+
+       /*
+        * Validate the blob's contents
+        */
+       length = (size_t) size;
+       error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
+           length, &cd, &entitlements);
+       if (error) {
+               if (cs_debug) {
+                       printf("CODESIGNING: csblob invalid: %d\n", error);
+               }
+               /*
+                * The vnode checker can't make the rest of this function
+                * succeed if csblob validation failed, so bail */
+               goto out;
+       } else {
+               const unsigned char *md_base;
+               uint8_t hash[CS_HASH_MAX_SIZE];
+               int md_size;
+               vm_offset_t hash_pagemask;
+
+               blob->csb_cd = cd;
+               blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
+               if (blob->csb_entitlements_blob != NULL) {
+                       blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+                           ntohl(blob->csb_entitlements_blob->length),
+                           OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                           PTRAUTH_ADDR_DIVERSIFY);
+               }
+               blob->csb_hashtype = cs_find_md(cd->hashType);
+               if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
+                       panic("validated CodeDirectory but unsupported type");
+               }
+
+               blob->csb_hash_pageshift = cd->pageSize;
+               hash_pagemask = (1U << cd->pageSize) - 1;
+               blob->csb_hash_firstlevel_pageshift = 0;
+               blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
+               blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
+               if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+                       const SC_Scatter *scatter = (const SC_Scatter*)
+                           ((const char*)cd + ntohl(cd->scatterOffset));
+                       blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
+               } else {
+                       blob->csb_start_offset = 0;
+               }
+               /* compute the blob's cdhash */
+               md_base = (const unsigned char *) cd;
+               md_size = ntohl(cd->length);
+
+               blob->csb_hashtype->cs_init(&mdctx);
+               blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
+               blob->csb_hashtype->cs_final(hash, &mdctx);
+
+               memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
+               blob->csb_cdhash_signature = ptrauth_utils_sign_blob_generic(blob->csb_cdhash,
+                   sizeof(blob->csb_cdhash),
+                   OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
+                   PTRAUTH_ADDR_DIVERSIFY);
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+               blob->csb_linkage_hashtype = NULL;
+               if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
+                   ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
+                       blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
+
+                       if (blob->csb_linkage_hashtype != NULL) {
+                               memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
+                                   CS_CDHASH_LEN);
+                       }
+               }
+#endif
+       }
+
+       error = 0;
+
+out:
+       if (error != 0) {
+               cs_blob_free(blob);
+               blob = NULL;
+               cd = NULL;
+       }
+
+       if (ret_blob != NULL) {
+               *ret_blob = blob;
+       }
+       if (ret_cd != NULL) {
+               *ret_cd = cd;
+       }
+
+       return error;
+}
+
+/*
+ * Free a cs_blob previously created by cs_blob_create_validated.
+ */
+void
+cs_blob_free(
+       struct cs_blob * const blob)
+{
+       if (blob != NULL) {
+               if (blob->csb_mem_kaddr) {
+                       ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+                       blob->csb_mem_kaddr = NULL;
+               }
+               if (blob->csb_entitlements != NULL) {
+                       osobject_release(blob->csb_entitlements);
+                       blob->csb_entitlements = NULL;
+               }
+               (kfree)(blob, sizeof(*blob));
+       }
+}
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+static void
+cs_blob_supplement_free(struct cs_blob * const blob)
+{
+       if (blob != NULL) {
+               if (blob->csb_supplement_teamid != NULL) {
+                       vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
+                       kfree(blob->csb_supplement_teamid, teamid_size);
+                       blob->csb_supplement_teamid = NULL;
+               }
+               cs_blob_free(blob);
+       }
+}
+#endif
+
+static void
+ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
+{
+       /* Note that the atomic ops are not enough to guarantee
+        * correctness: If a blob with an intermediate size is inserted
+        * concurrently, we can lose a peak value assignment. But these
+        * statistics are only advisory anyway, so we're not going to
+        * employ full locking here. (Consequently, we are also okay with
+        * relaxed ordering of those accesses.)
+        */
+
+       unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
+       if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
+               os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
+       }
+
+       size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
+
+       if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
+               os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
+       }
+       if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
+               os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
+       }
+}
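
A minimal user-space sketch of the peak-tracking pattern above, using C11
atomics instead of xnu's os_atomic wrappers (the names below are illustrative,
not kernel APIs). It shows the benign race the comment describes: two callers
can both pass the comparison before either stores, so a peak assignment can be
lost, which is acceptable for advisory statistics.

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic size_t total_size;
    static _Atomic size_t peak_size;

    static void
    record_blob_size(size_t sz)
    {
            /* relaxed add, mirroring os_atomic_add(..., relaxed) */
            size_t new_total = atomic_fetch_add_explicit(&total_size, sz,
                memory_order_relaxed) + sz;
            /* unsynchronized check-then-store: a concurrent caller with a
             * larger total may be overwritten by a smaller one */
            if (new_total > atomic_load_explicit(&peak_size, memory_order_relaxed)) {
                    atomic_store_explicit(&peak_size, new_total, memory_order_relaxed);
            }
    }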
+
+int
+ubc_cs_blob_add(
+       struct vnode    *vp,
+       uint32_t        platform,
+       cpu_type_t      cputype,
+       cpu_subtype_t   cpusubtype,
+       off_t           base_offset,
+       vm_address_t    *addr,
+       vm_size_t       size,
+       struct image_params *imgp,
+       __unused int    flags,
+       struct cs_blob  **ret_blob)
+{
+       kern_return_t           kr;
+       struct ubc_info         *uip;
+       struct cs_blob          *blob = NULL, *oblob = NULL;
+       int                     error;
+       CS_CodeDirectory const *cd;
+       off_t                   blob_start_offset, blob_end_offset;
+       boolean_t               record_mtime;
+
+       record_mtime = FALSE;
+       if (ret_blob) {
+               *ret_blob = NULL;
+       }
+
+       /* Create the struct cs_blob wrapper that will be attached to the vnode.
+        * Validates the passed in blob in the process. */
+       error = cs_blob_create_validated(addr, size, &blob, &cd);
+
+       if (error != 0) {
+               printf("malformed code signature blob: %d\n", error);
+               return error;
+       }
+
+       blob->csb_cpu_type = cputype;
+       blob->csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+       blob->csb_base_offset = base_offset;
+
+       /*
+        * Let policy module check whether the blob's signature is accepted.
+        */
+#if CONFIG_MACF
+       unsigned int cs_flags = blob->csb_flags;
+       unsigned int signer_type = blob->csb_signer_type;
+       error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
+       blob->csb_flags = cs_flags;
+       blob->csb_signer_type = signer_type;
+
+       if (error) {
+               if (cs_debug) {
+                       printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+               }
+               goto out;
+       }
+       if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
+               if (cs_debug) {
+                       printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
+               }
+               error = EPERM;
+               goto out;
+       }
+#endif
+
+#if CONFIG_ENFORCE_SIGNED_CODE
+       /*
+        * Reconstitute code signature
+        */
+       {
+               vm_address_t new_mem_kaddr = 0;
+               vm_size_t new_mem_size = 0;
+
+               CS_CodeDirectory *new_cd = NULL;
+               CS_GenericBlob const *new_entitlements = NULL;
+
+               error = ubc_cs_reconstitute_code_signature(blob, 0,
+                   &new_mem_kaddr, &new_mem_size,
+                   &new_cd, &new_entitlements);
+
+               if (error != 0) {
+                       printf("failed code signature reconstitution: %d\n", error);
+                       goto out;
+               }
+
+               ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+
+               blob->csb_mem_kaddr = (void *)new_mem_kaddr;
+               blob->csb_mem_size = new_mem_size;
+               blob->csb_cd = new_cd;
+               blob->csb_entitlements_blob = new_entitlements;
+               if (blob->csb_entitlements_blob != NULL) {
+                       blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+                           ntohl(blob->csb_entitlements_blob->length),
+                           OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                           PTRAUTH_ADDR_DIVERSIFY);
+               }
+               blob->csb_reconstituted = true;
+       }
+#endif
+
+
+       if (blob->csb_flags & CS_PLATFORM_BINARY) {
+               if (cs_debug > 1) {
+                       printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
+               }
+               blob->csb_platform_binary = 1;
+               blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
+       } else {
+               blob->csb_platform_binary = 0;
+               blob->csb_platform_path = 0;
+               blob->csb_teamid = csblob_parse_teamid(blob);
+               if (cs_debug > 1) {
+                       if (blob->csb_teamid) {
+                               printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
+                       } else {
+                               printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
+                       }
+               }
+       }
+
+       /*
+        * Validate the blob's coverage
+        */
+       blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+       blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+       if (blob_start_offset >= blob_end_offset ||
+           blob_start_offset < 0 ||
+           blob_end_offset <= 0) {
+               /* reject empty or backwards blob */
+               error = EINVAL;
+               goto out;
+       }
+
+       if (ubc_cs_supports_multilevel_hash(blob)) {
+               error = ubc_cs_convert_to_multilevel_hash(blob);
+               if (error != 0) {
+                       printf("failed multilevel hash conversion: %d\n", error);
+                       goto out;
+               }
+               blob->csb_reconstituted = true;
+       }
+
+       vnode_lock(vp);
+       if (!UBCINFOEXISTS(vp)) {
+               vnode_unlock(vp);
+               error = ENOENT;
+               goto out;
+       }
+       uip = vp->v_ubcinfo;
+
+       /* check if this new blob overlaps with an existing blob */
+       for (oblob = uip->cs_blobs;
+           oblob != NULL;
+           oblob = oblob->csb_next) {
+               off_t oblob_start_offset, oblob_end_offset;
+
+               if (blob->csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
+                       vnode_unlock(vp);
+                       error = EALREADY;
+                       goto out;
+               } else if (blob->csb_platform_binary) {  // platform binary needs to be the same for app slices
+                       if (!oblob->csb_platform_binary) {
+                               vnode_unlock(vp);
+                               error = EALREADY;
+                               goto out;
+                       }
+               } else if (blob->csb_teamid) {  // team-id needs to be the same for app slices
+                       if (oblob->csb_platform_binary ||
+                           oblob->csb_teamid == NULL ||
+                           strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
+                               vnode_unlock(vp);
+                               error = EALREADY;
+                               goto out;
+                       }
+               } else {  // non-team-id binary needs to be the same for app slices
+                       if (oblob->csb_platform_binary ||
+                           oblob->csb_teamid != NULL) {
+                               vnode_unlock(vp);
+                               error = EALREADY;
+                               goto out;
+                       }
+               }
+
+               oblob_start_offset = (oblob->csb_base_offset +
+                   oblob->csb_start_offset);
+               oblob_end_offset = (oblob->csb_base_offset +
+                   oblob->csb_end_offset);
+               if (blob_start_offset >= oblob_end_offset ||
+                   blob_end_offset <= oblob_start_offset) {
+                       /* no conflict with this existing blob */
+               } else {
+                       /* conflict ! */
+                       if (blob_start_offset == oblob_start_offset &&
+                           blob_end_offset == oblob_end_offset &&
+                           blob->csb_mem_size == oblob->csb_mem_size &&
+                           blob->csb_flags == oblob->csb_flags &&
+                           (blob->csb_cpu_type == CPU_TYPE_ANY ||
+                           oblob->csb_cpu_type == CPU_TYPE_ANY ||
+                           blob->csb_cpu_type == oblob->csb_cpu_type) &&
+                           !bcmp(blob->csb_cdhash,
+                           oblob->csb_cdhash,
+                           CS_CDHASH_LEN)) {
+                               /*
+                                * We already have this blob:
+                                * we'll return success but
+                                * throw away the new blob.
+                                */
+                               if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
+                                       /*
+                                        * The old blob matches this one
+                                        * but doesn't have any CPU type.
+                                        * Update it with whatever the caller
+                                        * provided this time.
+                                        */
+                                       oblob->csb_cpu_type = cputype;
+                               }
+
+                               /* The signature is still accepted, so update the
+                                * generation count. */
+                               uip->cs_add_gen = cs_blob_generation_count;
+
+                               vnode_unlock(vp);
+                               if (ret_blob) {
+                                       *ret_blob = oblob;
+                               }
+                               error = EAGAIN;
+                               goto out;
+                       } else {
+                               /* different blob: reject the new one */
+                               vnode_unlock(vp);
+                               error = EALREADY;
+                               goto out;
+                       }
+               }
        }
 
 
@@ -2300,6 +3738,14 @@ ubc_cs_blob_add(
                goto out;
        }
 
+       if (uip->cs_blobs == NULL) {
+               /* loading 1st blob: record the file's current "modify time" */
+               record_mtime = TRUE;
+       }
+
+       /* set the generation count for cs_blobs */
+       uip->cs_add_gen = cs_blob_generation_count;
+
        /*
         * Add this blob to the list of blobs for this vnode.
         * We always add at the front of the list and we never remove a
@@ -2310,97 +3756,374 @@ ubc_cs_blob_add(
        blob->csb_next = uip->cs_blobs;
        uip->cs_blobs = blob;
 
-       OSAddAtomic(+1, &cs_blob_count);
-       if (cs_blob_count > cs_blob_count_peak) {
-               cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
-       }
-       OSAddAtomic(+blob->csb_mem_size, &cs_blob_size);
-       if (cs_blob_size > cs_blob_size_peak) {
-               cs_blob_size_peak = cs_blob_size; /* XXX atomic ? */
-       }
-       if (blob->csb_mem_size > cs_blob_size_max) {
-               cs_blob_size_max = blob->csb_mem_size;
-       }
+       ubc_cs_blob_adjust_statistics(blob);
 
-       if (cs_debug) {
+       if (cs_debug > 1) {
                proc_t p;
-
+               const char *name = vnode_getname_printable(vp);
                p = current_proc();
                printf("CODE SIGNING: proc %d(%s) "
-                      "loaded %s signatures for file (%s) "
-                      "range 0x%llx:0x%llx flags 0x%x\n",
-                      p->p_pid, p->p_comm,
-                      blob->csb_cpu_type == -1 ? "detached" : "embedded",
-                      vnode_name(vp),
-                      blob->csb_base_offset + blob->csb_start_offset,
-                      blob->csb_base_offset + blob->csb_end_offset,
-                      blob->csb_flags);
+                   "loaded %s signatures for file (%s) "
+                   "range 0x%llx:0x%llx flags 0x%x\n",
+                   p->p_pid, p->p_comm,
+                   blob->csb_cpu_type == -1 ? "detached" : "embedded",
+                   name,
+                   blob->csb_base_offset + blob->csb_start_offset,
+                   blob->csb_base_offset + blob->csb_end_offset,
+                   blob->csb_flags);
+               vnode_putname_printable(name);
        }
 
-#if !CS_BLOB_KEEP_IN_KERNEL
-       blob->csb_mem_kaddr = 0;
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
-
        vnode_unlock(vp);
 
-       error = 0;      /* success ! */
+       if (record_mtime) {
+               vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
+       }
+
+       if (ret_blob) {
+               *ret_blob = blob;
+       }
+
+       error = 0;      /* success ! */
 
 out:
        if (error) {
-               /* we failed; release what we allocated */
-               if (blob) {
-                       kfree(blob, sizeof (*blob));
-                       blob = NULL;
-               }
-               if (blob_handle != IPC_PORT_NULL) {
-                       mach_memory_entry_port_release(blob_handle);
-                       blob_handle = IPC_PORT_NULL;
+               if (cs_debug) {
+                       printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
                }
-       } else {
-#if !CS_BLOB_KEEP_IN_KERNEL
-               kmem_free(kernel_map, addr, size);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
+
+               cs_blob_free(blob);
        }
 
        if (error == EAGAIN) {
                /*
-                * See above:  error is EAGAIN if we were asked 
+                * See above:  error is EAGAIN if we were asked
                 * to add an existing blob again.  We cleaned the new
                 * blob and we want to return success.
                 */
                error = 0;
-               /*
-                * Since we're not failing, consume the data we received.
-                */
-               kmem_free(kernel_map, addr, size);
        }
 
-       return error;
-}
+       return error;
+}
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+int
+ubc_cs_blob_add_supplement(
+       struct vnode    *vp,
+       struct vnode    *orig_vp,
+       off_t           base_offset,
+       vm_address_t    *addr,
+       vm_size_t       size,
+       struct cs_blob  **ret_blob)
+{
+       kern_return_t           kr;
+       struct ubc_info         *uip, *orig_uip;
+       int                     error;
+       struct cs_blob          *blob, *orig_blob;
+       CS_CodeDirectory const *cd;
+       off_t                   blob_start_offset, blob_end_offset;
+
+       if (ret_blob) {
+               *ret_blob = NULL;
+       }
+
+       /* Create the struct cs_blob wrapper that will be attached to the vnode.
+        * Validates the passed in blob in the process. */
+       error = cs_blob_create_validated(addr, size, &blob, &cd);
+
+       if (error != 0) {
+               printf("malformed code signature supplement blob: %d\n", error);
+               return error;
+       }
+
+       blob->csb_cpu_type = -1;
+       blob->csb_base_offset = base_offset;
+
+       blob->csb_reconstituted = false;
+
+       vnode_lock(orig_vp);
+       if (!UBCINFOEXISTS(orig_vp)) {
+               vnode_unlock(orig_vp);
+               error = ENOENT;
+               goto out;
+       }
+
+       orig_uip = orig_vp->v_ubcinfo;
+
+       /* check that the supplement's linked cdhash matches a cdhash of
+        * the target image.
+        */
+
+       if (blob->csb_linkage_hashtype == NULL) {
+               proc_t p;
+               const char *iname = vnode_getname_printable(vp);
+               p = current_proc();
+
+               printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
+                   "is not a supplemental signature.\n",
+                   p->p_pid, p->p_comm, iname);
+
+               error = EINVAL;
+
+               vnode_putname_printable(iname);
+               vnode_unlock(orig_vp);
+               goto out;
+       }
+
+       for (orig_blob = orig_uip->cs_blobs; orig_blob != NULL;
+           orig_blob = orig_blob->csb_next) {
+               ptrauth_utils_auth_blob_generic(orig_blob->csb_cdhash,
+                   sizeof(orig_blob->csb_cdhash),
+                   OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
+                   PTRAUTH_ADDR_DIVERSIFY,
+                   orig_blob->csb_cdhash_signature);
+               if (orig_blob->csb_hashtype == blob->csb_linkage_hashtype &&
+                   memcmp(orig_blob->csb_cdhash, blob->csb_linkage, CS_CDHASH_LEN) == 0) {
+                       // Found match!
+                       break;
+               }
+       }
+
+       if (orig_blob == NULL) {
+               // Not found.
+
+               proc_t p;
+               const char *iname = vnode_getname_printable(vp);
+               p = current_proc();
+
+               printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
+                   "does not match any attached cdhash.\n",
+                   p->p_pid, p->p_comm, iname);
+
+               error = ESRCH;
+
+               vnode_putname_printable(iname);
+               vnode_unlock(orig_vp);
+               goto out;
+       }
+
+       vnode_unlock(orig_vp);
+
+       // validate the signature against policy!
+#if CONFIG_MACF
+       unsigned int signer_type = blob->csb_signer_type;
+       error = mac_vnode_check_supplemental_signature(vp, blob, orig_vp, orig_blob, &signer_type);
+       blob->csb_signer_type = signer_type;
+
+
+       if (error) {
+               if (cs_debug) {
+                       printf("check_supplemental_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+               }
+               goto out;
+       }
+#endif
+
+       // The supplemental signature blob was accepted, so copy the platform bit or
+       // team-id from the linked signature, along with whether the original is developer code.
+       blob->csb_platform_binary = 0;
+       blob->csb_platform_path = 0;
+       if (orig_blob->csb_platform_binary == 1) {
+               blob->csb_platform_binary = orig_blob->csb_platform_binary;
+               blob->csb_platform_path = orig_blob->csb_platform_path;
+       } else if (orig_blob->csb_teamid != NULL) {
+               vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
+               blob->csb_supplement_teamid = kalloc(teamid_size);
+               if (blob->csb_supplement_teamid == NULL) {
+                       error = ENOMEM;
+                       goto out;
+               }
+               strlcpy(blob->csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
+       }
+       blob->csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
+
+       // Validate the blob's coverage
+       blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+       blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+       if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
+               /* reject empty or backwards blob */
+               error = EINVAL;
+               goto out;
+       }
+
+       vnode_lock(vp);
+       if (!UBCINFOEXISTS(vp)) {
+               vnode_unlock(vp);
+               error = ENOENT;
+               goto out;
+       }
+       uip = vp->v_ubcinfo;
+
+       struct cs_blob *existing = uip->cs_blob_supplement;
+       if (existing != NULL) {
+               if (blob->csb_hashtype == existing->csb_hashtype &&
+                   memcmp(blob->csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
+                       error = EAGAIN; // non-fatal
+               } else {
+                       error = EALREADY; // fatal
+               }
+
+               vnode_unlock(vp);
+               goto out;
+       }
+
+       /* Unlike regular cs_blobs, we only ever support one supplement. */
+       blob->csb_next = NULL;
+       uip->cs_blob_supplement = blob;
+
+       /* mark this vnode's VM object as having "signed pages" */
+       kr = memory_object_signed(uip->ui_control, TRUE);
+       if (kr != KERN_SUCCESS) {
+               vnode_unlock(vp);
+               error = ENOENT;
+               goto out;
+       }
+
+       vnode_unlock(vp);
+
+       /* We still adjust statistics even for supplemental blobs, as they
+        * consume memory just the same. */
+       ubc_cs_blob_adjust_statistics(blob);
+
+       if (cs_debug > 1) {
+               proc_t p;
+               const char *name = vnode_getname_printable(vp);
+               p = current_proc();
+               printf("CODE SIGNING: proc %d(%s) "
+                   "loaded supplemental signature for file (%s) "
+                   "range 0x%llx:0x%llx\n",
+                   p->p_pid, p->p_comm,
+                   name,
+                   blob->csb_base_offset + blob->csb_start_offset,
+                   blob->csb_base_offset + blob->csb_end_offset);
+               vnode_putname_printable(name);
+       }
+
+       if (ret_blob) {
+               *ret_blob = blob;
+       }
+
+       error = 0; // Success!
+out:
+       if (error) {
+               if (cs_debug) {
+                       printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", current_proc()->p_pid, error);
+               }
+
+               cs_blob_supplement_free(blob);
+       }
+
+       if (error == EAGAIN) {
+               /* We were asked to add an existing blob.
+                * We already cleaned up the new one, so ignore the attempt. */
+               error = 0;
+       }
+
+       return error;
+}
+#endif
+
+
+
+void
+csvnode_print_debug(struct vnode *vp)
+{
+       const char      *name = NULL;
+       struct ubc_info *uip;
+       struct cs_blob *blob;
+
+       name = vnode_getname_printable(vp);
+       if (name) {
+               printf("csvnode: name: %s\n", name);
+               vnode_putname_printable(name);
+       }
+
+       vnode_lock_spin(vp);
+
+       if (!UBCINFOEXISTS(vp)) {
+               blob = NULL;
+               goto out;
+       }
+
+       uip = vp->v_ubcinfo;
+       for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
+               printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
+                   (unsigned long)blob->csb_start_offset,
+                   (unsigned long)blob->csb_end_offset,
+                   blob->csb_flags,
+                   blob->csb_platform_binary ? "yes" : "no",
+                   blob->csb_platform_path ? "yes" : "no",
+                   blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
+       }
+
+out:
+       vnode_unlock(vp);
+}
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+struct cs_blob *
+ubc_cs_blob_get_supplement(
+       struct vnode    *vp,
+       off_t           offset)
+{
+       struct cs_blob *blob;
+       off_t offset_in_blob;
+
+       vnode_lock_spin(vp);
+
+       if (!UBCINFOEXISTS(vp)) {
+               blob = NULL;
+               goto out;
+       }
+
+       blob = vp->v_ubcinfo->cs_blob_supplement;
+
+       if (blob == NULL) {
+               // no supplemental blob
+               goto out;
+       }
+
+
+       if (offset != -1) {
+               offset_in_blob = offset - blob->csb_base_offset;
+               if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
+                       // not actually covered by this blob
+                       blob = NULL;
+               }
+       }
+
+out:
+       vnode_unlock(vp);
 
+       return blob;
+}
+#endif
 
 struct cs_blob *
 ubc_cs_blob_get(
-       struct vnode    *vp,
-       cpu_type_t      cputype,
-       off_t           offset)
+       struct vnode    *vp,
+       cpu_type_t      cputype,
+       cpu_subtype_t   cpusubtype,
+       off_t           offset)
 {
-       struct ubc_info *uip;
-       struct cs_blob  *blob;
+       struct ubc_info *uip;
+       struct cs_blob  *blob;
        off_t offset_in_blob;
 
        vnode_lock_spin(vp);
 
-       if (! UBCINFOEXISTS(vp)) {
+       if (!UBCINFOEXISTS(vp)) {
                blob = NULL;
                goto out;
        }
 
        uip = vp->v_ubcinfo;
        for (blob = uip->cs_blobs;
-            blob != NULL;
-            blob = blob->csb_next) {
-               if (cputype != -1 && blob->csb_cpu_type == cputype) {
+           blob != NULL;
+           blob = blob->csb_next) {
+               if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
                        break;
                }
                if (offset != -1) {
@@ -2421,39 +4144,171 @@ out:
 
 static void
 ubc_cs_free(
-       struct ubc_info *uip)
+       struct ubc_info *uip)
 {
-       struct cs_blob  *blob, *next_blob;
+       struct cs_blob  *blob, *next_blob;
 
        for (blob = uip->cs_blobs;
-            blob != NULL;
-            blob = next_blob) {
+           blob != NULL;
+           blob = next_blob) {
                next_blob = blob->csb_next;
-               if (blob->csb_mem_kaddr != 0) {
-                       kmem_free(kernel_map,
-                                 blob->csb_mem_kaddr,
-                                 blob->csb_mem_size);
-                       blob->csb_mem_kaddr = 0;
-               }
-               mach_memory_entry_port_release(blob->csb_mem_handle);
-               blob->csb_mem_handle = IPC_PORT_NULL;
-               OSAddAtomic(-1, &cs_blob_count);
-               OSAddAtomic(-blob->csb_mem_size, &cs_blob_size);
-               kfree(blob, sizeof (*blob));
+               os_atomic_add(&cs_blob_count, -1, relaxed);
+               os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
+               cs_blob_free(blob);
        }
+#if CHECK_CS_VALIDATION_BITMAP
+       ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
+#endif
        uip->cs_blobs = NULL;
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+       if (uip->cs_blob_supplement != NULL) {
+               blob = uip->cs_blob_supplement;
+               os_atomic_add(&cs_blob_count, -1, relaxed);
+               os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
+               cs_blob_supplement_free(uip->cs_blob_supplement);
+               uip->cs_blob_supplement = NULL;
+       }
+#endif
+}
+
+/* check cs blob generation on vnode
+ * returns:
+ *    0         : Success, the cs_blob attached is current
+ *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
+ */
+int
+ubc_cs_generation_check(
+       struct vnode    *vp)
+{
+       int retval = ENEEDAUTH;
+
+       vnode_lock_spin(vp);
+
+       if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
+               retval = 0;
+       }
+
+       vnode_unlock(vp);
+       return retval;
+}
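
A hypothetical caller pattern implied by the return values above (a sketch
only, with an invented helper name; the real call sites live elsewhere in the
kernel): revalidate the attached blob only when the generation count no longer
matches.

    static int
    require_current_signature(struct vnode *vp, struct cs_blob *blob,
        struct image_params *imgp, int flags, uint32_t platform)
    {
            int error = 0;

            if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
                    /* generation moved on: ask the policy module again */
                    error = ubc_cs_blob_revalidate(vp, blob, imgp, flags, platform);
            }
            return error;
    }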
+
+int
+ubc_cs_blob_revalidate(
+       struct vnode    *vp,
+       struct cs_blob *blob,
+       struct image_params *imgp,
+       int flags,
+       uint32_t platform
+       )
+{
+       int error = 0;
+       const CS_CodeDirectory *cd = NULL;
+       const CS_GenericBlob *entitlements = NULL;
+       size_t size;
+       assert(vp != NULL);
+       assert(blob != NULL);
+
+       size = blob->csb_mem_size;
+       error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
+           size, &cd, &entitlements);
+       if (error) {
+               if (cs_debug) {
+                       printf("CODESIGNING: csblob invalid: %d\n", error);
+               }
+               goto out;
+       }
+
+       unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
+       unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
+
+       if (blob->csb_reconstituted) {
+               /*
+                * Code signatures that have been modified after validation
+                * cannot be revalidated inline from their in-memory blob.
+                *
+                * That's okay, though, because the only path left that relies
+                * on revalidation of existing in-memory blobs is the legacy
+                * detached signature database path, which only exists on macOS,
+                * which does not do reconstitution of any kind.
+                */
+               if (cs_debug) {
+                       printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
+               }
+
+               /*
+                * EAGAIN tells the caller that they may reread the code
+                * signature and try attaching it again, which is the same
+                * thing they would do if there was no cs_blob yet in the
+                * first place.
+                *
+                * Conveniently, after ubc_cs_blob_add did a successful
+                * validation, it will detect that a matching cs_blob (cdhash,
+                * offset, arch etc.) already exists, and return success
+                * without re-adding a cs_blob to the vnode.
+                */
+               return EAGAIN;
+       }
+
+       /* callout to mac_vnode_check_signature */
+#if CONFIG_MACF
+       error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
+       if (cs_debug && error) {
+               printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+       }
+#else
+       (void)flags;
+       (void)signer_type;
+#endif
+
+       /* update generation number if success */
+       vnode_lock_spin(vp);
+       blob->csb_flags = cs_flags;
+       blob->csb_signer_type = signer_type;
+       if (UBCINFOEXISTS(vp)) {
+               if (error == 0) {
+                       vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
+               } else {
+                       vp->v_ubcinfo->cs_add_gen = 0;
+               }
+       }
+
+       vnode_unlock(vp);
+
+out:
+       return error;
+}
+
+void
+cs_blob_reset_cache()
+{
+       /* Incrementing an odd number by 2 ensures the count never wraps to 0. */
+       OSAddAtomic(+2, &cs_blob_generation_count);
+       printf("Resetting cs_blob cache from all vnodes.\n");
 }
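
A short note on the counter arithmetic above, assuming the generation count
starts at an odd value: an odd 32-bit counter incremented by 2 stays odd, so it
may wrap around but never lands on 0, leaving 0 free to act as the
"needs revalidation" value that ubc_cs_blob_revalidate stores on failure.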
 
 struct cs_blob *
 ubc_get_cs_blobs(
-       struct vnode    *vp)
+       struct vnode    *vp)
 {
-       struct ubc_info *uip;
-       struct cs_blob  *blobs;
+       struct ubc_info *uip;
+       struct cs_blob  *blobs;
 
-       vnode_lock_spin(vp);
+       /*
+        * No need to take the vnode lock here.  The caller must be holding
+        * a reference on the vnode (via a VM mapping or open file descriptor),
+        * so the vnode will not go away.  The ubc_info stays until the vnode
+        * goes away.  And we only modify "blobs" by adding to the head of the
+        * list.
+        * The ubc_info could go away entirely if the vnode gets reclaimed as
+        * part of a forced unmount.  In the case of a code-signature validation
+        * during a page fault, the "paging_in_progress" reference on the VM
+        * object guarantess that the vnode pager (and the ubc_info) won't go
+        * object guarantees that the vnode pager (and the ubc_info) won't go
+        * Other callers need to protect against vnode reclaim by holding the
+        * vnode lock, for example.
+        */
 
-       if (! UBCINFOEXISTS(vp)) {
+       if (!UBCINFOEXISTS(vp)) {
                blobs = NULL;
                goto out;
        }
@@ -2462,47 +4317,95 @@ ubc_get_cs_blobs(
        blobs = uip->cs_blobs;
 
 out:
-       vnode_unlock(vp);
-
        return blobs;
 }
 
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+struct cs_blob *
+ubc_get_cs_supplement(
+       struct vnode    *vp)
+{
+       struct ubc_info *uip;
+       struct cs_blob  *blob;
+
+       /*
+        * No need to take the vnode lock here.  The caller must be holding
+        * a reference on the vnode (via a VM mapping or open file descriptor),
+        * so the vnode will not go away.  The ubc_info stays until the vnode
+        * goes away.
+        * The ubc_info could go away entirely if the vnode gets reclaimed as
+        * part of a forced unmount.  In the case of a code-signature validation
+        * during a page fault, the "paging_in_progress" reference on the VM
+        * object guarantees that the vnode pager (and the ubc_info) won't go
+        * away during the fault.
+        * Other callers need to protect against vnode reclaim by holding the
+        * vnode lock, for example.
+        */
+
+       if (!UBCINFOEXISTS(vp)) {
+               blob = NULL;
+               goto out;
+       }
+
+       uip = vp->v_ubcinfo;
+       blob = uip->cs_blob_supplement;
+
+out:
+       return blob;
+}
+#endif
+
+
+void
+ubc_get_cs_mtime(
+       struct vnode    *vp,
+       struct timespec *cs_mtime)
+{
+       struct ubc_info *uip;
+
+       if (!UBCINFOEXISTS(vp)) {
+               cs_mtime->tv_sec = 0;
+               cs_mtime->tv_nsec = 0;
+               return;
+       }
+
+       uip = vp->v_ubcinfo;
+       cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
+       cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
+}
+
 unsigned long cs_validate_page_no_hash = 0;
 unsigned long cs_validate_page_bad_hash = 0;
-boolean_t
-cs_validate_page(
-       void                    *_blobs,
-       memory_object_offset_t  page_offset,
-       const void              *data,
-       boolean_t               *tainted)
-{
-       SHA1_CTX                sha1ctxt;
-       unsigned char           actual_hash[SHA1_RESULTLEN];
-       unsigned char           expected_hash[SHA1_RESULTLEN];
-       boolean_t               found_hash;
-       struct cs_blob          *blobs, *blob;
-       const CS_CodeDirectory  *cd;
-       const CS_SuperBlob      *embedded;
-       off_t                   start_offset, end_offset;
-       const unsigned char     *hash;
-       boolean_t               validated;
-       off_t                   offset; /* page offset in the file */
-       size_t                  size;
-       off_t                   codeLimit = 0;
-       char                    *lower_bound, *upper_bound;
-       vm_offset_t             kaddr, blob_addr;
-       vm_size_t               ksize;
-       kern_return_t           kr;
-
-       offset = page_offset;
+static boolean_t
+cs_validate_hash(
+       struct cs_blob          *blobs,
+       memory_object_t         pager,
+       memory_object_offset_t  page_offset,
+       const void              *data,
+       vm_size_t               *bytes_processed,
+       unsigned                *tainted)
+{
+       union cs_hash_union     mdctx;
+       struct cs_hash const    *hashtype = NULL;
+       unsigned char           actual_hash[CS_HASH_MAX_SIZE];
+       unsigned char           expected_hash[CS_HASH_MAX_SIZE];
+       boolean_t               found_hash;
+       struct cs_blob          *blob;
+       const CS_CodeDirectory  *cd;
+       const unsigned char     *hash;
+       boolean_t               validated;
+       off_t                   offset; /* page offset in the file */
+       size_t                  size;
+       off_t                   codeLimit = 0;
+       const char              *lower_bound, *upper_bound;
+       vm_offset_t             kaddr, blob_addr;
 
        /* retrieve the expected hash */
        found_hash = FALSE;
-       blobs = (struct cs_blob *) _blobs;
 
        for (blob = blobs;
-            blob != NULL;
-            blob = blob->csb_next) {
+           blob != NULL;
+           blob = blob->csb_next) {
                offset = page_offset - blob->csb_base_offset;
                if (offset < blob->csb_start_offset ||
                    offset >= blob->csb_end_offset) {
@@ -2510,70 +4413,40 @@ cs_validate_page(
                        continue;
                }
 
-               /* map the blob in the kernel address space */
-               kaddr = blob->csb_mem_kaddr;
+               /* blob data has been released */
+               kaddr = (vm_offset_t)blob->csb_mem_kaddr;
                if (kaddr == 0) {
-                       ksize = (vm_size_t) (blob->csb_mem_size +
-                                            blob->csb_mem_offset);
-                       kr = vm_map(kernel_map,
-                                   &kaddr,
-                                   ksize,
-                                   0,
-                                   VM_FLAGS_ANYWHERE,
-                                   blob->csb_mem_handle,
-                                   0,
-                                   TRUE,
-                                   VM_PROT_READ,
-                                   VM_PROT_READ,
-                                   VM_INHERIT_NONE);
-                       if (kr != KERN_SUCCESS) {
-                               /* XXX FBDP what to do !? */
-                               printf("cs_validate_page: failed to map blob, "
-                                      "size=0x%x kr=0x%x\n",
-                                      blob->csb_mem_size, kr);
-                               break;
-                       }
+                       continue;
                }
+
                blob_addr = kaddr + blob->csb_mem_offset;
-               
                lower_bound = CAST_DOWN(char *, blob_addr);
                upper_bound = lower_bound + blob->csb_mem_size;
 
-               embedded = (const CS_SuperBlob *) blob_addr;
-               cd = findCodeDirectory(embedded, lower_bound, upper_bound);
+               cd = blob->csb_cd;
                if (cd != NULL) {
-                       if (cd->pageSize != PAGE_SHIFT ||
-                           cd->hashType != 0x1 ||
-                           cd->hashSize != SHA1_RESULTLEN) {
-                               /* bogus blob ? */
-#if !CS_BLOB_KEEP_IN_KERNEL
-                               kmem_free(kernel_map, kaddr, ksize);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
-                               continue;
+                       /* all CDs that have been injected are already validated */
+
+                       hashtype = blob->csb_hashtype;
+                       if (hashtype == NULL) {
+                               panic("unknown hash type ?");
                        }
-                           
-                       end_offset = round_page(ntohl(cd->codeLimit));
-                       start_offset = end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE);
-                       offset = page_offset - blob->csb_base_offset;
-                       if (offset < start_offset ||
-                           offset >= end_offset) {
-                               /* our page is not covered by this blob */
-#if !CS_BLOB_KEEP_IN_KERNEL
-                               kmem_free(kernel_map, kaddr, ksize);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
-                               continue;
+                       if (hashtype->cs_digest_size > sizeof(actual_hash)) {
+                               panic("hash size too large");
+                       }
+                       if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
+                               panic("offset not aligned to cshash boundary");
                        }
 
                        codeLimit = ntohl(cd->codeLimit);
-                       hash = hashes(cd, atop(offset),
-                                     lower_bound, upper_bound);
-                       bcopy(hash, expected_hash, sizeof (expected_hash));
-                       found_hash = TRUE;
 
-#if !CS_BLOB_KEEP_IN_KERNEL
-                       /* we no longer need that blob in the kernel map */
-                       kmem_free(kernel_map, kaddr, ksize);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
+                       hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
+                           hashtype->cs_size,
+                           lower_bound, upper_bound);
+                       if (hash != NULL) {
+                               bcopy(hash, expected_hash, hashtype->cs_size);
+                               found_hash = TRUE;
+                       }
 
                        break;
                }
@@ -2591,69 +4464,242 @@ cs_validate_page(
                cs_validate_page_no_hash++;
                if (cs_debug > 1) {
                        printf("CODE SIGNING: cs_validate_page: "
-                              "off 0x%llx: no hash to validate !?\n",
-                              page_offset);
+                           "mobj %p off 0x%llx: no hash to validate !?\n",
+                           pager, page_offset);
                }
                validated = FALSE;
-               *tainted = FALSE;
+               *tainted = 0;
        } else {
-               const uint32_t *asha1, *esha1;
+               *tainted = 0;
+
+               size = (1U << blob->csb_hash_pageshift);
+               *bytes_processed = size;
 
-               size = PAGE_SIZE;
-               if (offset + size > codeLimit) {
+               const uint32_t *asha1, *esha1;
+               if ((off_t)(offset + size) > codeLimit) {
                        /* partial page at end of segment */
                        assert(offset < codeLimit);
-                       size = codeLimit & PAGE_MASK;
+                       size = (size_t) (codeLimit & (size - 1));
+                       *tainted |= CS_VALIDATE_NX;
+               }
+
+               hashtype->cs_init(&mdctx);
+
+               if (blob->csb_hash_firstlevel_pageshift) {
+                       const unsigned char *partial_data = (const unsigned char *)data;
+                       size_t i;
+                       for (i = 0; i < size;) {
+                               union cs_hash_union     partialctx;
+                               unsigned char partial_digest[CS_HASH_MAX_SIZE];
+                               size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));
+
+                               hashtype->cs_init(&partialctx);
+                               hashtype->cs_update(&partialctx, partial_data, partial_size);
+                               hashtype->cs_final(partial_digest, &partialctx);
+
+                               /* Update cumulative multi-level hash */
+                               hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
+                               partial_data = partial_data + partial_size;
+                               i += partial_size;
+                       }
+               } else {
+                       hashtype->cs_update(&mdctx, data, size);
                }
-               /* compute the actual page's SHA1 hash */
-               SHA1Init(&sha1ctxt);
-               SHA1Update(&sha1ctxt, data, size);
-               SHA1Final(actual_hash, &sha1ctxt);
+               hashtype->cs_final(actual_hash, &mdctx);
 
                asha1 = (const uint32_t *) actual_hash;
                esha1 = (const uint32_t *) expected_hash;
 
-               if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) {
+               if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
                        if (cs_debug) {
                                printf("CODE SIGNING: cs_validate_page: "
-                                      "off 0x%llx size 0x%lx: "
-                                      "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
-                                      "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
-                                      page_offset, size,
-                                      asha1[0], asha1[1], asha1[2],
-                                      asha1[3], asha1[4],
-                                      esha1[0], esha1[1], esha1[2],
-                                      esha1[3], esha1[4]);
+                                   "mobj %p off 0x%llx size 0x%lx: "
+                                   "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
+                                   "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
+                                   pager, page_offset, size,
+                                   asha1[0], asha1[1], asha1[2],
+                                   asha1[3], asha1[4],
+                                   esha1[0], esha1[1], esha1[2],
+                                   esha1[3], esha1[4]);
                        }
                        cs_validate_page_bad_hash++;
-                       *tainted = TRUE;
+                       *tainted |= CS_VALIDATE_TAINTED;
                } else {
-                       if (cs_debug > 1) {
+                       if (cs_debug > 10) {
                                printf("CODE SIGNING: cs_validate_page: "
-                                      "off 0x%llx size 0x%lx: SHA1 OK\n",
-                                      page_offset, size);
+                                   "mobj %p off 0x%llx size 0x%lx: "
+                                   "SHA1 OK\n",
+                                   pager, page_offset, size);
                        }
-                       *tainted = FALSE;
                }
                validated = TRUE;
        }
-       
+
        return validated;
 }
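
When multi-level hashing is in effect (csb_hash_firstlevel_pageshift is
non-zero), the loop above computes a hash of sub-page hashes. A worked example,
assuming a 16 KiB code-signing page, a 4 KiB first level and SHA-256:

    /*
     * data[0x0000..0x0fff] -> H0      data[0x1000..0x1fff] -> H1
     * data[0x2000..0x2fff] -> H2      data[0x3000..0x3fff] -> H3
     *
     * actual_hash = SHA-256(H0 || H1 || H2 || H3)
     *
     * and that value is what gets compared against the code directory's
     * slot for the page in expected_hash.
     */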
 
+boolean_t
+cs_validate_range(
+       struct vnode    *vp,
+       memory_object_t         pager,
+       memory_object_offset_t  page_offset,
+       const void              *data,
+       vm_size_t               dsize,
+       unsigned                *tainted)
+{
+       vm_size_t offset_in_range;
+       boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
+
+       struct cs_blob *blobs = ubc_get_cs_blobs(vp);
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+       if (blobs == NULL && proc_is_translated(current_proc())) {
+               struct cs_blob *supp = ubc_get_cs_supplement(vp);
+
+               if (supp != NULL) {
+                       blobs = supp;
+               } else {
+                       return FALSE;
+               }
+       }
+#endif
+
+
+
+       *tainted = 0;
+
+       for (offset_in_range = 0;
+           offset_in_range < dsize;
+           /* offset_in_range updated based on bytes processed */) {
+               unsigned subrange_tainted = 0;
+               boolean_t subrange_validated;
+               vm_size_t bytes_processed = 0;
+
+               subrange_validated = cs_validate_hash(blobs,
+                   pager,
+                   page_offset + offset_in_range,
+                   (const void *)((const char *)data + offset_in_range),
+                   &bytes_processed,
+                   &subrange_tainted);
+
+               *tainted |= subrange_tainted;
+
+               if (bytes_processed == 0) {
+                       /* Cannot make forward progress, so return an error */
+                       all_subranges_validated = FALSE;
+                       break;
+               } else if (subrange_validated == FALSE) {
+                       all_subranges_validated = FALSE;
+                       /* Keep going to detect other types of failures in subranges */
+               }
+
+               offset_in_range += bytes_processed;
+       }
+
+       return all_subranges_validated;
+}
+
+void
+cs_validate_page(
+       struct vnode            *vp,
+       memory_object_t         pager,
+       memory_object_offset_t  page_offset,
+       const void              *data,
+       int                     *validated_p,
+       int                     *tainted_p,
+       int                     *nx_p)
+{
+       vm_size_t offset_in_page;
+       struct cs_blob *blobs;
+
+       blobs = ubc_get_cs_blobs(vp);
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+       if (blobs == NULL && proc_is_translated(current_proc())) {
+               struct cs_blob *supp = ubc_get_cs_supplement(vp);
+
+               if (supp != NULL) {
+                       blobs = supp;
+               }
+       }
+#endif
+
+       *validated_p = VMP_CS_ALL_FALSE;
+       *tainted_p = VMP_CS_ALL_FALSE;
+       *nx_p = VMP_CS_ALL_FALSE;
+
+       for (offset_in_page = 0;
+           offset_in_page < PAGE_SIZE;
+           /* offset_in_page updated based on bytes processed */) {
+               unsigned subrange_tainted = 0;
+               boolean_t subrange_validated;
+               vm_size_t bytes_processed = 0;
+               int sub_bit;
+
+               subrange_validated = cs_validate_hash(blobs,
+                   pager,
+                   page_offset + offset_in_page,
+                   (const void *)((const char *)data + offset_in_page),
+                   &bytes_processed,
+                   &subrange_tainted);
+
+               if (bytes_processed == 0) {
+                       /* 4k chunk not code-signed: try next one */
+                       offset_in_page += FOURK_PAGE_SIZE;
+                       continue;
+               }
+               if (offset_in_page == 0 &&
+                   bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
+                       /* all processed: no 4k granularity */
+                       if (subrange_validated) {
+                               *validated_p = VMP_CS_ALL_TRUE;
+                       }
+                       if (subrange_tainted & CS_VALIDATE_TAINTED) {
+                               *tainted_p = VMP_CS_ALL_TRUE;
+                       }
+                       if (subrange_tainted & CS_VALIDATE_NX) {
+                               *nx_p = VMP_CS_ALL_TRUE;
+                       }
+                       break;
+               }
+               /* we only handle 4k or 16k code-signing granularity... */
+               assertf(bytes_processed <= FOURK_PAGE_SIZE,
+                   "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
+                   vp, blobs, (uint64_t)page_offset,
+                   (uint64_t)offset_in_page, (uint64_t)bytes_processed);
+               sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
+               if (subrange_validated) {
+                       *validated_p |= sub_bit;
+               }
+               if (subrange_tainted & CS_VALIDATE_TAINTED) {
+                       *tainted_p |= sub_bit;
+               }
+               if (subrange_tainted & CS_VALIDATE_NX) {
+                       *nx_p |= sub_bit;
+               }
+               /* go to next 4k chunk */
+               offset_in_page += FOURK_PAGE_SIZE;
+       }
+
+       return;
+}
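
A worked example of the per-chunk bookkeeping above, assuming a 16 KiB kernel
page and 4 KiB code-signing pages (FOURK_PAGE_SHIFT == 12):

    /*
     * offset_in_page   0x0000   0x1000   0x2000   0x3000
     * sub_bit          1 << 0   1 << 1   1 << 2   1 << 3
     *
     * If all four chunks are covered and only the chunk at 0x1000 has a
     * hash mismatch, cs_validate_hash reports it validated but tainted,
     * so the caller sees *validated_p == 0xf, *tainted_p == 0x2 and
     * *nx_p == 0.
     */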
+
 int
 ubc_cs_getcdhash(
-       vnode_t         vp,
-       off_t           offset,
-       unsigned char   *cdhash)
+       vnode_t         vp,
+       off_t           offset,
+       unsigned char   *cdhash)
 {
-       struct cs_blob *blobs, *blob;
-       off_t rel_offset;
+       struct cs_blob  *blobs, *blob;
+       off_t           rel_offset;
+       int             ret;
+
+       vnode_lock(vp);
 
        blobs = ubc_get_cs_blobs(vp);
        for (blob = blobs;
-            blob != NULL;
-            blob = blob->csb_next) {
+           blob != NULL;
+           blob = blob->csb_next) {
                /* compute offset relative to this blob */
                rel_offset = offset - blob->csb_base_offset;
                if (rel_offset >= blob->csb_start_offset &&
@@ -2665,11 +4711,194 @@ ubc_cs_getcdhash(
 
        if (blob == NULL) {
                /* we didn't find a blob covering "offset" */
-               return EBADEXEC; /* XXX any better error ? */
+               ret = EBADEXEC; /* XXX any better error ? */
+       } else {
+               /* get the SHA1 hash of that blob */
+               ptrauth_utils_auth_blob_generic(blob->csb_cdhash,
+                   sizeof(blob->csb_cdhash),
+                   OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
+                   PTRAUTH_ADDR_DIVERSIFY,
+                   blob->csb_cdhash_signature);
+               bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
+               ret = 0;
        }
 
-       /* get the SHA1 hash of that blob */
-       bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
+       vnode_unlock(vp);
+
+       return ret;
+}
 
-       return 0;
+boolean_t
+ubc_cs_is_range_codesigned(
+       vnode_t                 vp,
+       mach_vm_offset_t        start,
+       mach_vm_size_t          size)
+{
+       struct cs_blob          *csblob;
+       mach_vm_offset_t        blob_start;
+       mach_vm_offset_t        blob_end;
+
+       if (vp == NULL) {
+               /* no file: no code signature */
+               return FALSE;
+       }
+       if (size == 0) {
+               /* no range: no code signature */
+               return FALSE;
+       }
+       if (start + size < start) {
+               /* overflow */
+               return FALSE;
+       }
+
+       csblob = ubc_cs_blob_get(vp, -1, -1, start);
+       if (csblob == NULL) {
+               return FALSE;
+       }
+
+       /*
+        * We currently check if the range is covered by a single blob,
+        * which should always be the case for the dyld shared cache.
+        * If we ever want to make this routine handle other cases, we
+        * would have to iterate if the blob does not cover the full range.
+        */
+       blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
+           csblob->csb_start_offset);
+       blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
+           csblob->csb_end_offset);
+       if (blob_start > start || blob_end < (start + size)) {
+               /* range not fully covered by this code-signing blob */
+               return FALSE;
+       }
+
+       return TRUE;
+}
+
+#if CHECK_CS_VALIDATION_BITMAP
+#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
+extern  boolean_t       root_fs_upgrade_try;
+
+/*
+ * Should we use the code-sign bitmap to avoid repeated code-sign validation?
+ * Depends:
+ * a) Is the target vnode on the root filesystem?
+ * b) Has someone tried to mount the root filesystem read-write?
+ * If answers are (a) yes AND (b) no, then we can use the bitmap.
+ */
+#define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
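
A worked example of the stob() sizing above, assuming 4 KiB pages: stob()
rounds the file size up to whole pages and then allocates one bit per page,
rounded up to a byte. A 1 MiB file is 256 pages, so stob(1 MiB) =
(256 + 7) >> 3 = 32 bytes of bitmap; a 5000-byte file rounds to 2 pages and
still needs (2 + 7) >> 3 = 1 byte.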
+kern_return_t
+ubc_cs_validation_bitmap_allocate(
+       vnode_t         vp)
+{
+       kern_return_t   kr = KERN_SUCCESS;
+       struct ubc_info *uip;
+       char            *target_bitmap;
+       vm_object_size_t        bitmap_size;
+
+       if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
+               kr = KERN_INVALID_ARGUMENT;
+       } else {
+               uip = vp->v_ubcinfo;
+
+               if (uip->cs_valid_bitmap == NULL) {
+                       bitmap_size = stob(uip->ui_size);
+                       target_bitmap = (char*) kalloc((vm_size_t)bitmap_size );
+                       if (target_bitmap == 0) {
+                               kr = KERN_NO_SPACE;
+                       } else {
+                               kr = KERN_SUCCESS;
+                       }
+                       if (kr == KERN_SUCCESS) {
+                               memset( target_bitmap, 0, (size_t)bitmap_size);
+                               uip->cs_valid_bitmap = (void*)target_bitmap;
+                               uip->cs_valid_bitmap_size = bitmap_size;
+                       }
+               }
+       }
+       return kr;
+}
+
+kern_return_t
+ubc_cs_check_validation_bitmap(
+       vnode_t                 vp,
+       memory_object_offset_t          offset,
+       int                     optype)
+{
+       kern_return_t   kr = KERN_SUCCESS;
+
+       if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
+               kr = KERN_INVALID_ARGUMENT;
+       } else {
+               struct ubc_info *uip = vp->v_ubcinfo;
+               char            *target_bitmap = uip->cs_valid_bitmap;
+
+               if (target_bitmap == NULL) {
+                       kr = KERN_INVALID_ARGUMENT;
+               } else {
+                       uint64_t        bit, byte;
+                       bit = atop_64( offset );
+                       byte = bit >> 3;
+
+                       if (byte > uip->cs_valid_bitmap_size) {
+                               kr = KERN_INVALID_ARGUMENT;
+                       } else {
+                               if (optype == CS_BITMAP_SET) {
+                                       target_bitmap[byte] |= (1 << (bit & 07));
+                                       kr = KERN_SUCCESS;
+                               } else if (optype == CS_BITMAP_CLEAR) {
+                                       target_bitmap[byte] &= ~(1 << (bit & 07));
+                                       kr = KERN_SUCCESS;
+                               } else if (optype == CS_BITMAP_CHECK) {
+                                       if (target_bitmap[byte] & (1 << (bit & 07))) {
+                                               kr = KERN_SUCCESS;
+                                       } else {
+                                               kr = KERN_FAILURE;
+                                       }
+                               }
+                       }
+               }
+       }
+       return kr;
+}
+
+void
+ubc_cs_validation_bitmap_deallocate(
+       vnode_t         vp)
+{
+       struct ubc_info *uip;
+       void            *target_bitmap;
+       vm_object_size_t        bitmap_size;
+
+       if (UBCINFOEXISTS(vp)) {
+               uip = vp->v_ubcinfo;
+
+               if ((target_bitmap = uip->cs_valid_bitmap) != NULL) {
+                       bitmap_size = uip->cs_valid_bitmap_size;
+                       kfree( target_bitmap, (vm_size_t) bitmap_size );
+                       uip->cs_valid_bitmap = NULL;
+               }
+       }
+}
+#else
+kern_return_t
+ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
+{
+       return KERN_INVALID_ARGUMENT;
+}
+
+kern_return_t
+ubc_cs_check_validation_bitmap(
+       __unused struct vnode *vp,
+       __unused memory_object_offset_t offset,
+       __unused int optype)
+{
+       return KERN_INVALID_ARGUMENT;
+}
+
+void
+ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
+{
+       return;
 }
+#endif /* CHECK_CS_VALIDATION_BITMAP */
+