+ if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
+ SC_Scatter *scatter = (SC_Scatter*)
+ ((char *)new_cd + ntohl(new_cd->scatterOffset));
+ /* iterate all scatter structs to scale their counts */
+ do {
+ uint32_t scount = ntohl(scatter->count);
+ uint32_t sbase = ntohl(scatter->base);
+
+ /* last scatter? */
+ if (scount == 0) {
+ break;
+ }
+
+ scount >>= hashes_per_new_hash_shift;
+ scatter->count = htonl(scount);
+
+ sbase >>= hashes_per_new_hash_shift;
+ scatter->base = htonl(sbase);
+
+ scatter++;
+ } while (1);
+ }
+
+ /* For each group of hashes, hash them together */
+ const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
+ unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
+
+ uint32_t hash_index;
+ for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
+ union cs_hash_union mdctx;
+
+ uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
+ const unsigned char *src = src_base + hash_index * source_hash_len;
+ unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
+
+ blob->csb_hashtype->cs_init(&mdctx);
+ blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
+ blob->csb_hashtype->cs_final(dst, &mdctx);
+ }
+
+ error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
+ if (error != 0) {
+ printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+ error);
+
+ ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+ return error;
+ }
+
+ /* New Code Directory is ready for use, swap it out in the blob structure */
+ ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+
+ blob->csb_mem_size = new_blob_size;
+ blob->csb_mem_kaddr = (void *)new_blob_addr;
+ blob->csb_cd = cd;
+ blob->csb_entitlements_blob = entitlements;
+ if (blob->csb_entitlements_blob != NULL) {
+ blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+ ntohl(blob->csb_entitlements_blob->length),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+ PTRAUTH_ADDR_DIVERSIFY);
+ }
+
+ /* The blob has some cached attributes of the Code Directory, so update those */
+
+ blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
+
+ blob->csb_hash_pageshift = PAGE_SHIFT;
+ blob->csb_end_offset = ntohl(cd->codeLimit);
+ if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+ const SC_Scatter *scatter = (const SC_Scatter*)
+ ((const char*)cd + ntohl(cd->scatterOffset));
+ blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
+ } else {
+ blob->csb_start_offset = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Validate the code signature blob, create a struct cs_blob wrapper
+ * and return it together with a pointer to the chosen code directory
+ * and entitlements blob.
+ *
+ * Note that this takes ownership of the memory at addr, mainly because
+ * this function can actually replace the passed in blob with another
+ * one, e.g. when performing multilevel hashing optimization.
+ *
+ * Returns 0 on success, filling *ret_blob / *ret_cd when those are
+ * non-NULL.  On allocation failure ENOMEM is returned and *addr is
+ * left untouched (caller keeps ownership).  On validation failure the
+ * blob memory is deallocated via cs_blob_free() and the validation
+ * error is returned, with *ret_blob / *ret_cd set to NULL.
+ */
+int
+cs_blob_create_validated(
+    vm_address_t * const addr,
+    vm_size_t size,
+    struct cs_blob ** const ret_blob,
+    CS_CodeDirectory const ** const ret_cd)
+{
+    struct cs_blob *blob;
+    int error = EINVAL;
+    const CS_CodeDirectory *cd;
+    const CS_GenericBlob *entitlements;
+    union cs_hash_union mdctx;           /* scratch context for the cdhash computation */
+    size_t length;
+
+    if (ret_blob) {
+        *ret_blob = NULL;
+    }
+
+    blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
+    if (blob == NULL) {
+        /* *addr not zeroed here: ownership stays with the caller on ENOMEM */
+        return ENOMEM;
+    }
+
+    /* fill in the new blob */
+    blob->csb_mem_size = size;
+    blob->csb_mem_offset = 0;
+    blob->csb_mem_kaddr = (void *)*addr;
+    blob->csb_flags = 0;
+    blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
+    blob->csb_platform_binary = 0;
+    blob->csb_platform_path = 0;
+    blob->csb_teamid = NULL;
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+    blob->csb_supplement_teamid = NULL;
+#endif
+    blob->csb_entitlements_blob = NULL;
+    blob->csb_entitlements = NULL;
+    blob->csb_reconstituted = false;
+
+    /* Transfer ownership. Even on error, this function will deallocate */
+    *addr = 0;
+
+    /*
+     * Validate the blob's contents
+     */
+    length = (size_t) size;
+    error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
+        length, &cd, &entitlements);
+    if (error) {
+        if (cs_debug) {
+            printf("CODESIGNING: csblob invalid: %d\n", error);
+        }
+        /*
+         * The vnode checker can't make the rest of this function
+         * succeed if csblob validation failed, so bail */
+        goto out;
+    } else {
+        const unsigned char *md_base;
+        uint8_t hash[CS_HASH_MAX_SIZE];
+        int md_size;
+        vm_offset_t hash_pagemask;
+
+        blob->csb_cd = cd;
+        blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
+        if (blob->csb_entitlements_blob != NULL) {
+            /* Sign the entitlements pointer+length so later readers can
+             * detect tampering with the cached blob reference. */
+            blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+                ntohl(blob->csb_entitlements_blob->length),
+                OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                PTRAUTH_ADDR_DIVERSIFY);
+        }
+        blob->csb_hashtype = cs_find_md(cd->hashType);
+        if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
+            /* cs_validate_csblob() accepted the directory, so an unknown
+             * hash type here indicates an internal inconsistency. */
+            panic("validated CodeDirectory but unsupported type");
+        }
+
+        blob->csb_hash_pageshift = cd->pageSize;
+        hash_pagemask = (1U << cd->pageSize) - 1;
+        blob->csb_hash_firstlevel_pageshift = 0;
+        blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
+        /* round the code limit up to a full signing page */
+        blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
+        if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+            /* scattered signature: coverage starts at the first scatter base */
+            const SC_Scatter *scatter = (const SC_Scatter*)
+                ((const char*)cd + ntohl(cd->scatterOffset));
+            blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
+        } else {
+            blob->csb_start_offset = 0;
+        }
+        /* compute the blob's cdhash */
+        md_base = (const unsigned char *) cd;
+        md_size = ntohl(cd->length);
+
+        blob->csb_hashtype->cs_init(&mdctx);
+        blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
+        blob->csb_hashtype->cs_final(hash, &mdctx);
+
+        memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
+        /* sign the cached cdhash as well */
+        blob->csb_cdhash_signature = ptrauth_utils_sign_blob_generic(blob->csb_cdhash,
+            sizeof(blob->csb_cdhash),
+            OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
+            PTRAUTH_ADDR_DIVERSIFY);
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+        /* remember the linkage (target) cdhash for supplemental signatures */
+        blob->csb_linkage_hashtype = NULL;
+        if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
+            ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
+            blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
+
+            if (blob->csb_linkage_hashtype != NULL) {
+                memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
+                    CS_CDHASH_LEN);
+            }
+        }
+#endif
+    }
+
+    error = 0;
+
+out:
+    if (error != 0) {
+        /* frees both the wrapper and the signature memory it owns */
+        cs_blob_free(blob);
+        blob = NULL;
+        cd = NULL;
+    }
+
+    if (ret_blob != NULL) {
+        *ret_blob = blob;
+    }
+    if (ret_cd != NULL) {
+        *ret_cd = cd;
+    }
+
+    return error;
+}
+
+/*
+ * Free a cs_blob previously created by cs_blob_create_validated.
+ * Releases the backing signature memory, drops the entitlements
+ * object reference and then frees the wrapper itself.
+ * Passing NULL is a no-op.
+ */
+void
+cs_blob_free(
+    struct cs_blob * const blob)
+{
+    if (blob == NULL) {
+        return;
+    }
+
+    if (blob->csb_mem_kaddr) {
+        ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+        blob->csb_mem_kaddr = NULL;
+    }
+
+    if (blob->csb_entitlements != NULL) {
+        osobject_release(blob->csb_entitlements);
+        blob->csb_entitlements = NULL;
+    }
+
+    /* parenthesized to bypass any kfree() macro wrapper */
+    (kfree)(blob, sizeof(*blob));
+}
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+/*
+ * Free a supplemental cs_blob: releases the copied team identifier
+ * (if any) and then the blob itself via cs_blob_free().
+ * Passing NULL is a no-op.
+ */
+static void
+cs_blob_supplement_free(struct cs_blob * const blob)
+{
+    if (blob == NULL) {
+        return;
+    }
+
+    char *teamid = blob->csb_supplement_teamid;
+    if (teamid != NULL) {
+        blob->csb_supplement_teamid = NULL;
+        /* length includes the NUL terminator, matching the kalloc size */
+        kfree(teamid, strlen(teamid) + 1);
+    }
+
+    cs_blob_free(blob);
+}
+#endif
+
+/*
+ * Account a newly attached blob in the global code-signing statistics
+ * (count, total size, and their respective peaks / maximum).
+ */
+static void
+ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
+{
+    /* The atomics alone do not make the peak tracking exact: a
+     * concurrent insert of an intermediate size can cause a peak
+     * assignment to be lost.  These counters are advisory only, so
+     * full locking is deliberately not used and relaxed ordering is
+     * acceptable for every access below.
+     */
+
+    unsigned int count_now = os_atomic_add(&cs_blob_count, 1, relaxed);
+    size_t size_now = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
+
+    if (os_atomic_load(&cs_blob_count_peak, relaxed) < count_now) {
+        os_atomic_store(&cs_blob_count_peak, count_now, relaxed);
+    }
+
+    if (os_atomic_load(&cs_blob_size_peak, relaxed) < size_now) {
+        os_atomic_store(&cs_blob_size_peak, size_now, relaxed);
+    }
+
+    if (os_atomic_load(&cs_blob_size_max, relaxed) < blob->csb_mem_size) {
+        os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
+    }
+}
+
+/*
+ * ubc_cs_blob_add
+ *
+ * Validate a code signature blob (taking ownership of *addr), run it
+ * past the MAC policy, optionally reconstitute / convert it to a
+ * multilevel hash, and attach it to the vnode's list of cs_blobs.
+ *
+ * Returns 0 on success (including the case where an identical blob was
+ * already attached, in which case the new copy is discarded).  On any
+ * error the blob and its memory are freed before returning.
+ */
+int
+ubc_cs_blob_add(
+    struct vnode *vp,
+    uint32_t platform,
+    cpu_type_t cputype,
+    cpu_subtype_t cpusubtype,
+    off_t base_offset,
+    vm_address_t *addr,
+    vm_size_t size,
+    struct image_params *imgp,
+    __unused int flags,
+    struct cs_blob **ret_blob)
+{
+    kern_return_t kr;
+    struct ubc_info *uip;
+    struct cs_blob *blob = NULL, *oblob = NULL;
+    int error;
+    CS_CodeDirectory const *cd;
+    off_t blob_start_offset, blob_end_offset;
+    boolean_t record_mtime;
+
+    record_mtime = FALSE;
+    if (ret_blob) {
+        *ret_blob = NULL;
+    }
+
+    /* Create the struct cs_blob wrapper that will be attached to the vnode.
+     * Validates the passed in blob in the process. */
+    error = cs_blob_create_validated(addr, size, &blob, &cd);
+
+    if (error != 0) {
+        /* fixed typo: was "malform code signature blob" */
+        printf("malformed code signature blob: %d\n", error);
+        return error;
+    }
+
+    blob->csb_cpu_type = cputype;
+    blob->csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+    blob->csb_base_offset = base_offset;
+
+    /*
+     * Let policy module check whether the blob's signature is accepted.
+     */
+#if CONFIG_MACF
+    unsigned int cs_flags = blob->csb_flags;
+    unsigned int signer_type = blob->csb_signer_type;
+    error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
+    blob->csb_flags = cs_flags;
+    blob->csb_signer_type = signer_type;
+
+    if (error) {
+        if (cs_debug) {
+            printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+        }
+        goto out;
+    }
+    if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
+        if (cs_debug) {
+            printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
+        }
+        error = EPERM;
+        goto out;
+    }
+#endif
+
+#if CONFIG_ENFORCE_SIGNED_CODE
+    /*
+     * Reconstitute code signature
+     */
+    {
+        vm_address_t new_mem_kaddr = 0;
+        vm_size_t new_mem_size = 0;
+
+        CS_CodeDirectory *new_cd = NULL;
+        CS_GenericBlob const *new_entitlements = NULL;
+
+        error = ubc_cs_reconstitute_code_signature(blob, 0,
+            &new_mem_kaddr, &new_mem_size,
+            &new_cd, &new_entitlements);
+
+        if (error != 0) {
+            printf("failed code signature reconstitution: %d\n", error);
+            goto out;
+        }
+
+        /* swap the blob's backing memory for the reconstituted copy */
+        ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+
+        blob->csb_mem_kaddr = (void *)new_mem_kaddr;
+        blob->csb_mem_size = new_mem_size;
+        blob->csb_cd = new_cd;
+        blob->csb_entitlements_blob = new_entitlements;
+        if (blob->csb_entitlements_blob != NULL) {
+            /* re-sign the new entitlements reference */
+            blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+                ntohl(blob->csb_entitlements_blob->length),
+                OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+                PTRAUTH_ADDR_DIVERSIFY);
+        }
+        blob->csb_reconstituted = true;
+    }
+#endif
+
+
+    if (blob->csb_flags & CS_PLATFORM_BINARY) {
+        if (cs_debug > 1) {
+            printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
+        }
+        blob->csb_platform_binary = 1;
+        blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
+    } else {
+        blob->csb_platform_binary = 0;
+        blob->csb_platform_path = 0;
+        blob->csb_teamid = csblob_parse_teamid(blob);
+        if (cs_debug > 1) {
+            if (blob->csb_teamid) {
+                printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
+            } else {
+                printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
+            }
+        }
+    }
+
+    /*
+     * Validate the blob's coverage
+     */
+    blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+    blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+    if (blob_start_offset >= blob_end_offset ||
+        blob_start_offset < 0 ||
+        blob_end_offset <= 0) {
+        /* reject empty or backwards blob */
+        error = EINVAL;
+        goto out;
+    }
+
+    if (ubc_cs_supports_multilevel_hash(blob)) {
+        error = ubc_cs_convert_to_multilevel_hash(blob);
+        if (error != 0) {
+            printf("failed multilevel hash conversion: %d\n", error);
+            goto out;
+        }
+        blob->csb_reconstituted = true;
+    }
+
+    vnode_lock(vp);
+    if (!UBCINFOEXISTS(vp)) {
+        vnode_unlock(vp);
+        error = ENOENT;
+        goto out;
+    }
+    uip = vp->v_ubcinfo;
+
+    /* check if this new blob overlaps with an existing blob */
+    for (oblob = uip->cs_blobs;
+        oblob != NULL;
+        oblob = oblob->csb_next) {
+        off_t oblob_start_offset, oblob_end_offset;
+
+        if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
+            vnode_unlock(vp);
+            error = EALREADY;
+            goto out;
+        } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
+            if (!oblob->csb_platform_binary) {
+                vnode_unlock(vp);
+                error = EALREADY;
+                goto out;
+            }
+        } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
+            if (oblob->csb_platform_binary ||
+                oblob->csb_teamid == NULL ||
+                strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
+                vnode_unlock(vp);
+                error = EALREADY;
+                goto out;
+            }
+        } else { // non teamid binary needs to be the same for app slices
+            if (oblob->csb_platform_binary ||
+                oblob->csb_teamid != NULL) {
+                vnode_unlock(vp);
+                error = EALREADY;
+                goto out;
+            }
+        }
+
+        oblob_start_offset = (oblob->csb_base_offset +
+            oblob->csb_start_offset);
+        oblob_end_offset = (oblob->csb_base_offset +
+            oblob->csb_end_offset);
+        if (blob_start_offset >= oblob_end_offset ||
+            blob_end_offset <= oblob_start_offset) {
+            /* no conflict with this existing blob */
+        } else {
+            /* conflict ! */
+            if (blob_start_offset == oblob_start_offset &&
+                blob_end_offset == oblob_end_offset &&
+                blob->csb_mem_size == oblob->csb_mem_size &&
+                blob->csb_flags == oblob->csb_flags &&
+                (blob->csb_cpu_type == CPU_TYPE_ANY ||
+                oblob->csb_cpu_type == CPU_TYPE_ANY ||
+                blob->csb_cpu_type == oblob->csb_cpu_type) &&
+                !bcmp(blob->csb_cdhash,
+                oblob->csb_cdhash,
+                CS_CDHASH_LEN)) {
+                /*
+                 * We already have this blob:
+                 * we'll return success but
+                 * throw away the new blob.
+                 */
+                if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
+                    /*
+                     * The old blob matches this one
+                     * but doesn't have any CPU type.
+                     * Update it with whatever the caller
+                     * provided this time.
+                     */
+                    oblob->csb_cpu_type = cputype;
+                }
+
+                /* The signature is still accepted, so update the
+                 * generation count. */
+                uip->cs_add_gen = cs_blob_generation_count;
+
+                vnode_unlock(vp);
+                if (ret_blob) {
+                    *ret_blob = oblob;
+                }
+                error = EAGAIN;
+                goto out;
+            } else {
+                /* different blob: reject the new one */
+                vnode_unlock(vp);
+                error = EALREADY;
+                goto out;
+            }
+        }
+    }
+
+
+    /* mark this vnode's VM object as having "signed pages" */
+    kr = memory_object_signed(uip->ui_control, TRUE);
+    if (kr != KERN_SUCCESS) {
+        vnode_unlock(vp);
+        error = ENOENT;
+        goto out;
+    }
+
+    if (uip->cs_blobs == NULL) {
+        /* loading 1st blob: record the file's current "modify time" */
+        record_mtime = TRUE;
+    }
+
+    /* set the generation count for cs_blobs */
+    uip->cs_add_gen = cs_blob_generation_count;
+
+    /*
+     * Add this blob to the list of blobs for this vnode.
+     * We always add at the front of the list and we never remove a
+     * blob from the list, so ubc_cs_get_blobs() can return whatever
+     * the top of the list was and that list will remain valid
+     * while we validate a page, even after we release the vnode's lock.
+     */
+    blob->csb_next = uip->cs_blobs;
+    uip->cs_blobs = blob;
+
+    ubc_cs_blob_adjust_statistics(blob);
+
+    if (cs_debug > 1) {
+        proc_t p;
+        const char *name = vnode_getname_printable(vp);
+        p = current_proc();
+        printf("CODE SIGNING: proc %d(%s) "
+            "loaded %s signatures for file (%s) "
+            "range 0x%llx:0x%llx flags 0x%x\n",
+            p->p_pid, p->p_comm,
+            blob->csb_cpu_type == -1 ? "detached" : "embedded",
+            name,
+            blob->csb_base_offset + blob->csb_start_offset,
+            blob->csb_base_offset + blob->csb_end_offset,
+            blob->csb_flags);
+        vnode_putname_printable(name);
+    }
+
+    vnode_unlock(vp);
+
+    if (record_mtime) {
+        vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
+    }
+
+    if (ret_blob) {
+        *ret_blob = blob;
+    }
+
+    error = 0; /* success ! */
+
+out:
+    if (error) {
+        if (cs_debug) {
+            printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
+        }
+
+        cs_blob_free(blob);
+    }
+
+    if (error == EAGAIN) {
+        /*
+         * See above: error is EAGAIN if we were asked
+         * to add an existing blob again. We cleaned the new
+         * blob and we want to return success.
+         */
+        error = 0;
+    }
+
+    return error;
+}
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+/*
+ * ubc_cs_blob_add_supplement
+ *
+ * Validate a supplemental signature blob (taking ownership of *addr),
+ * match its linkage cdhash against the blobs attached to orig_vp, run
+ * it past the MAC policy, and attach it as the single supplement blob
+ * of vp.  Returns 0 on success (including the case where the same
+ * supplement was already attached); on error the blob is freed via
+ * cs_blob_supplement_free().
+ *
+ * Fix: memory_object_signed() is now checked BEFORE publishing the
+ * blob in uip->cs_blob_supplement.  Previously the blob was attached
+ * first, so a memory_object_signed() failure hit the error path, freed
+ * the blob, and left uip->cs_blob_supplement dangling (use-after-free).
+ * This also matches the ordering used by ubc_cs_blob_add().
+ */
+int
+ubc_cs_blob_add_supplement(
+    struct vnode *vp,
+    struct vnode *orig_vp,
+    off_t base_offset,
+    vm_address_t *addr,
+    vm_size_t size,
+    struct cs_blob **ret_blob)
+{
+    kern_return_t kr;
+    struct ubc_info *uip, *orig_uip;
+    int error;
+    struct cs_blob *blob, *orig_blob;
+    CS_CodeDirectory const *cd;
+    off_t blob_start_offset, blob_end_offset;
+
+    if (ret_blob) {
+        *ret_blob = NULL;
+    }
+
+    /* Create the struct cs_blob wrapper that will be attached to the vnode.
+     * Validates the passed in blob in the process. */
+    error = cs_blob_create_validated(addr, size, &blob, &cd);
+
+    if (error != 0) {
+        printf("malformed code signature supplement blob: %d\n", error);
+        return error;
+    }
+
+    blob->csb_cpu_type = -1;
+    blob->csb_base_offset = base_offset;
+
+    blob->csb_reconstituted = false;
+
+    vnode_lock(orig_vp);
+    if (!UBCINFOEXISTS(orig_vp)) {
+        vnode_unlock(orig_vp);
+        error = ENOENT;
+        goto out;
+    }
+
+    orig_uip = orig_vp->v_ubcinfo;
+
+    /* check that the supplement's linked cdhash matches a cdhash of
+     * the target image.
+     */
+
+    if (blob->csb_linkage_hashtype == NULL) {
+        proc_t p;
+        const char *iname = vnode_getname_printable(vp);
+        p = current_proc();
+
+        printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
+            "is not a supplemental.\n",
+            p->p_pid, p->p_comm, iname);
+
+        error = EINVAL;
+
+        vnode_putname_printable(iname);
+        vnode_unlock(orig_vp);
+        goto out;
+    }
+
+    for (orig_blob = orig_uip->cs_blobs; orig_blob != NULL;
+        orig_blob = orig_blob->csb_next) {
+        /* authenticate the cached cdhash before trusting it */
+        ptrauth_utils_auth_blob_generic(orig_blob->csb_cdhash,
+            sizeof(orig_blob->csb_cdhash),
+            OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
+            PTRAUTH_ADDR_DIVERSIFY,
+            orig_blob->csb_cdhash_signature);
+        if (orig_blob->csb_hashtype == blob->csb_linkage_hashtype &&
+            memcmp(orig_blob->csb_cdhash, blob->csb_linkage, CS_CDHASH_LEN) == 0) {
+            // Found match!
+            break;
+        }
+    }
+
+    if (orig_blob == NULL) {
+        // Not found.
+
+        proc_t p;
+        const char *iname = vnode_getname_printable(vp);
+        p = current_proc();
+
+        printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
+            "does not match any attached cdhash.\n",
+            p->p_pid, p->p_comm, iname);
+
+        error = ESRCH;
+
+        vnode_putname_printable(iname);
+        vnode_unlock(orig_vp);
+        goto out;
+    }
+
+    vnode_unlock(orig_vp);
+
+    // validate the signature against policy!
+#if CONFIG_MACF
+    unsigned int signer_type = blob->csb_signer_type;
+    error = mac_vnode_check_supplemental_signature(vp, blob, orig_vp, orig_blob, &signer_type);
+    blob->csb_signer_type = signer_type;
+
+
+    if (error) {
+        if (cs_debug) {
+            printf("check_supplemental_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+        }
+        goto out;
+    }
+#endif
+
+    // We allowed the supplemental signature blob so
+    // copy the platform bit or team-id from the linked signature and whether or not the original is developer code
+    blob->csb_platform_binary = 0;
+    blob->csb_platform_path = 0;
+    if (orig_blob->csb_platform_binary == 1) {
+        blob->csb_platform_binary = orig_blob->csb_platform_binary;
+        blob->csb_platform_path = orig_blob->csb_platform_path;
+    } else if (orig_blob->csb_teamid != NULL) {
+        vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
+        blob->csb_supplement_teamid = kalloc(teamid_size);
+        if (blob->csb_supplement_teamid == NULL) {
+            error = ENOMEM;
+            goto out;
+        }
+        strlcpy(blob->csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
+    }
+    blob->csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
+
+    // Validate the blob's coverage
+    blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+    blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+    if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
+        /* reject empty or backwards blob */
+        error = EINVAL;
+        goto out;
+    }
+
+    vnode_lock(vp);
+    if (!UBCINFOEXISTS(vp)) {
+        vnode_unlock(vp);
+        error = ENOENT;
+        goto out;
+    }
+    uip = vp->v_ubcinfo;
+
+    struct cs_blob *existing = uip->cs_blob_supplement;
+    if (existing != NULL) {
+        if (blob->csb_hashtype == existing->csb_hashtype &&
+            memcmp(blob->csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
+            error = EAGAIN; // non-fatal
+        } else {
+            error = EALREADY; // fatal
+        }
+
+        vnode_unlock(vp);
+        goto out;
+    }
+
+    /* mark this vnode's VM object as having "signed pages".
+     * Done BEFORE publishing the blob so that a failure here does not
+     * leave uip->cs_blob_supplement pointing at memory the error path
+     * is about to free. */
+    kr = memory_object_signed(uip->ui_control, TRUE);
+    if (kr != KERN_SUCCESS) {
+        vnode_unlock(vp);
+        error = ENOENT;
+        goto out;
+    }
+
+    /* Unlike regular cs_blobs, we only ever support one supplement. */
+    blob->csb_next = NULL;
+    uip->cs_blob_supplement = blob;
+
+    vnode_unlock(vp);
+
+    /* We still adjust statistics even for supplemental blobs, as they
+     * consume memory just the same. */
+    ubc_cs_blob_adjust_statistics(blob);
+
+    if (cs_debug > 1) {
+        proc_t p;
+        const char *name = vnode_getname_printable(vp);
+        p = current_proc();
+        printf("CODE SIGNING: proc %d(%s) "
+            "loaded supplemental signature for file (%s) "
+            "range 0x%llx:0x%llx\n",
+            p->p_pid, p->p_comm,
+            name,
+            blob->csb_base_offset + blob->csb_start_offset,
+            blob->csb_base_offset + blob->csb_end_offset);
+        vnode_putname_printable(name);
+    }
+
+    if (ret_blob) {
+        *ret_blob = blob;
+    }
+
+    error = 0; // Success!
+out:
+    if (error) {
+        if (cs_debug) {
+            printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", current_proc()->p_pid, error);
+        }
+
+        cs_blob_supplement_free(blob);
+    }
+
+    if (error == EAGAIN) {
+        /* We were asked to add an existing blob.
+         * We cleaned up and ignore the attempt. */
+        error = 0;
+    }
+
+    return error;
+}
+#endif
+
+
+
+/*
+ * Print debugging information about the code-signing blobs attached to
+ * a vnode: its printable name followed by one line per blob (range,
+ * flags, platform/path bits and team identifier).
+ */
+void
+csvnode_print_debug(struct vnode *vp)
+{
+    struct ubc_info *uip;
+    struct cs_blob *blob;
+    const char *name = vnode_getname_printable(vp);
+
+    if (name) {
+        printf("csvnode: name: %s\n", name);
+        vnode_putname_printable(name);
+    }
+
+    vnode_lock_spin(vp);
+
+    if (UBCINFOEXISTS(vp)) {
+        uip = vp->v_ubcinfo;
+        for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
+            printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
+                (unsigned long)blob->csb_start_offset,
+                (unsigned long)blob->csb_end_offset,
+                blob->csb_flags,
+                blob->csb_platform_binary ? "yes" : "no",
+                blob->csb_platform_path ? "yes" : "no",
+                blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
+        }
+    }
+
+    vnode_unlock(vp);
+}
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+/*
+ * Return the supplemental signature blob attached to a vnode, or NULL
+ * if there is none.  When offset is not -1, additionally require that
+ * the offset falls inside the blob's covered range (relative to its
+ * base offset); otherwise NULL is returned.
+ */
+struct cs_blob *
+ubc_cs_blob_get_supplement(
+    struct vnode *vp,
+    off_t offset)
+{
+    struct cs_blob *result = NULL;
+
+    vnode_lock_spin(vp);
+
+    if (UBCINFOEXISTS(vp)) {
+        result = vp->v_ubcinfo->cs_blob_supplement;
+    }
+
+    if (result != NULL && offset != -1) {
+        off_t rel_offset = offset - result->csb_base_offset;
+        if (rel_offset < result->csb_start_offset ||
+            rel_offset >= result->csb_end_offset) {
+            /* not actually covered by this blob */
+            result = NULL;
+        }
+    }
+
+    vnode_unlock(vp);
+
+    return result;
+}
+#endif
+
+struct cs_blob *
+ubc_cs_blob_get(
+ struct vnode *vp,
+ cpu_type_t cputype,
+ cpu_subtype_t cpusubtype,
+ off_t offset)
+{
+ struct ubc_info *uip;
+ struct cs_blob *blob;
+ off_t offset_in_blob;
+
+ vnode_lock_spin(vp);
+
+ if (!UBCINFOEXISTS(vp)) {