+ cd = blob->csb_cd;
+
+ /*
+ * The number of code slot hashes must be an integral multiple of the hashes per kernel page
+ */
+ if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+ return FALSE;
+ }
+
+ /*
+ * Each scatter entry's base and count must also be an integral multiple of the hashes per kernel page
+ */
+ if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+ const SC_Scatter *scatter = (const SC_Scatter*)
+ ((const char*)cd + ntohl(cd->scatterOffset));
+ /* iterate all scatter structs to make sure they are all aligned */
+ do {
+ uint32_t sbase = ntohl(scatter->base);
+ uint32_t scount = ntohl(scatter->count);
+
+ /* last scatter? */
+ if (scount == 0) {
+ break;
+ }
+
+ if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+ return FALSE;
+ }
+
+ if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
+ return FALSE;
+ }
+
+ scatter++;
+ } while (1);
+ }
+
+ /* Covered range must be a multiple of the new page size */
+ if (ntohl(cd->codeLimit) & PAGE_MASK) {
+ return FALSE;
+ }
+
+ /* All checks pass */
+ return TRUE;
+}
+
+/*
+ * Given a cs_blob with an already chosen best code directory, this
+ * function allocates memory and copies into it only the blobs that
+ * will be needed by the kernel, namely the single chosen code
+ * directory (and not any of its alternatives) and the entitlement
+ * blob.
+ *
+ * This saves significant memory with agile signatures, and additional
+ * memory for 3rd Party Code because we also omit the CMS blob.
+ *
+ * To support multilevel and other potential code directory rewriting,
+ * the size of a new code directory can be specified. Since that code
+ * directory will replace the existing code directory,
+ * ubc_cs_reconstitute_code_signature does not copy the original code
+ * directory when a size is given, and the caller must fill it in.
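+ *
+ * Illustrative layout of the reconstituted blob when an entitlements
+ * blob is present (see the construction below; without entitlements
+ * the allocation ends up holding the bare code directory instead):
+ *
+ *   offset 0                               CS_SuperBlob header
+ *   sizeof(CS_SuperBlob)                   CS_BlobIndex[0] -> code directory
+ *                                          CS_BlobIndex[1] -> entitlements
+ *   cd_offset                              CS_CodeDirectory (new_cdsize bytes)
+ *   ent_offset = cd_offset + new_cdsize    entitlements blob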
+ */
+static int
+ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
+ vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
+ CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
+{
+ const CS_CodeDirectory *old_cd, *cd;
+ CS_CodeDirectory *new_cd;
+ const CS_GenericBlob *entitlements;
+ vm_offset_t new_blob_addr;
+ vm_size_t new_blob_size;
+ vm_size_t new_cdsize;
+ kern_return_t kr;
+ int error;
+
+ old_cd = blob->csb_cd;
+
+ new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : ntohl(old_cd->length);
+
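+ /* Space for the superblob header, one blob index, and the code directory itself */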
+ new_blob_size = sizeof(CS_SuperBlob);
+ new_blob_size += sizeof(CS_BlobIndex);
+ new_blob_size += new_cdsize;
+
+ if (blob->csb_entitlements_blob) {
+ /* We need to add a slot for the entitlements */
+ ptrauth_utils_auth_blob_generic(blob->csb_entitlements_blob,
+ ntohl(blob->csb_entitlements_blob->length),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+ PTRAUTH_ADDR_DIVERSIFY,
+ blob->csb_entitlements_blob_signature);
+
+ new_blob_size += sizeof(CS_BlobIndex);
+ new_blob_size += ntohl(blob->csb_entitlements_blob->length);
+ }
+
+ kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
+ if (kr != KERN_SUCCESS) {
+ if (cs_debug > 1) {
+ printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
+ kr);
+ }
+ return ENOMEM;
+ }
+
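+ /* Write the superblob header; index entries are added only if the entitlements blob is kept */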
+ CS_SuperBlob *new_superblob;
+
+ new_superblob = (CS_SuperBlob *)new_blob_addr;
+ new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
+ new_superblob->length = htonl((uint32_t)new_blob_size);
+ if (blob->csb_entitlements_blob) {
+ vm_size_t ent_offset, cd_offset;
+
+ cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
+ ent_offset = cd_offset + new_cdsize;
+
+ new_superblob->count = htonl(2);
+ new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
+ new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
+ new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
+ new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
+
+ ptrauth_utils_auth_blob_generic(blob->csb_entitlements_blob,
+ ntohl(blob->csb_entitlements_blob->length),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+ PTRAUTH_ADDR_DIVERSIFY,
+ blob->csb_entitlements_blob_signature);
+
+ memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
+
+ new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
+ } else {
+ // Blob is the code directory, directly.
+ new_cd = (CS_CodeDirectory *)new_blob_addr;
+ }
+
+ if (optional_new_cd_size == 0) {
+ // Copy code directory, and revalidate.
+ memcpy(new_cd, old_cd, new_cdsize);
+
+ vm_size_t length = new_blob_size;
+
+ error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
+
+ if (error) {
+ printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+ error);
+
+ ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+ return error;
+ }
+ *new_entitlements_p = entitlements;
+ } else {
+ // Caller will fill out and validate code directory.
+ memset(new_cd, 0, new_cdsize);
+ *new_entitlements_p = NULL;
+ }
+
+ *new_blob_addr_p = new_blob_addr;
+ *new_blob_size_p = new_blob_size;
+ *new_cd_p = new_cd;
+
+ return 0;
+}
+
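+/*
+ * Rewrite the blob's code directory so that each code slot hash covers a
+ * full kernel page (PAGE_SIZE) instead of the signature's smaller native
+ * page size: each group of adjacent first-level hashes is hashed again to
+ * form one new top-level hash.  The original page shift is preserved in
+ * csb_hash_firstlevel_pageshift for later page validation.
+ */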
+static int
+ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
+{
+ const CS_CodeDirectory *old_cd, *cd;
+ CS_CodeDirectory *new_cd;
+ const CS_GenericBlob *entitlements;
+ vm_offset_t new_blob_addr;
+ vm_size_t new_blob_size;
+ vm_size_t new_cdsize;
+ int error;
+
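+ /*
+ * Number of first-level hashes folded into each new hash, expressed as a
+ * shift.  For example, a signature hashed in 4K units (csb_hash_pageshift
+ * of 12) on a 16K-page kernel (PAGE_SHIFT of 14) gives a shift of 2,
+ * i.e. four source hashes per new hash.
+ */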
+ uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
+
+ if (cs_debug > 1) {
+ printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
+ (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
+ }
+
+ old_cd = blob->csb_cd;
+
+ /* Up to the hashes, we can copy all data */
+ new_cdsize = ntohl(old_cd->hashOffset);
+ new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
+
+ error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
+ &new_blob_addr, &new_blob_size, &new_cd,
+ &entitlements);
+ if (error != 0) {
+ printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
+ return error;
+ }
+
+ memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
+
+ /* Update fields in the Code Directory structure */
+ new_cd->length = htonl((uint32_t)new_cdsize);
+
+ uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
+ nCodeSlots >>= hashes_per_new_hash_shift;
+ new_cd->nCodeSlots = htonl(nCodeSlots);
+
+ new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
+
+ if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
+ SC_Scatter *scatter = (SC_Scatter*)
+ ((char *)new_cd + ntohl(new_cd->scatterOffset));
+ /* iterate all scatter structs to scale their counts */
+ do {
+ uint32_t scount = ntohl(scatter->count);
+ uint32_t sbase = ntohl(scatter->base);
+
+ /* last scatter? */
+ if (scount == 0) {
+ break;
+ }
+
+ scount >>= hashes_per_new_hash_shift;
+ scatter->count = htonl(scount);
+
+ sbase >>= hashes_per_new_hash_shift;
+ scatter->base = htonl(sbase);
+
+ scatter++;
+ } while (1);
+ }
+
+ /* For each group of hashes, hash them together */
+ const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
+ unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
+
+ uint32_t hash_index;
+ for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
+ union cs_hash_union mdctx;
+
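+ /* Combined length of the group of first-level hashes covered by this new hash */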
+ uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
+ const unsigned char *src = src_base + hash_index * source_hash_len;
+ unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
+
+ blob->csb_hashtype->cs_init(&mdctx);
+ blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
+ blob->csb_hashtype->cs_final(dst, &mdctx);
+ }
+
+ error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
+ if (error != 0) {
+ printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
+ error);
+
+ ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
+ return error;
+ }
+
+ /* New Code Directory is ready for use, swap it out in the blob structure */
+ ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+
+ blob->csb_mem_size = new_blob_size;
+ blob->csb_mem_kaddr = (void *)new_blob_addr;
+ blob->csb_cd = cd;
+ blob->csb_entitlements_blob = entitlements;
+ if (blob->csb_entitlements_blob != NULL) {
+ blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+ ntohl(blob->csb_entitlements_blob->length),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+ PTRAUTH_ADDR_DIVERSIFY);
+ }
+
+ /* The blob has some cached attributes of the Code Directory, so update those */
+
+ blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
+
+ blob->csb_hash_pageshift = PAGE_SHIFT;
+ blob->csb_end_offset = ntohl(cd->codeLimit);
+ if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+ const SC_Scatter *scatter = (const SC_Scatter*)
+ ((const char*)cd + ntohl(cd->scatterOffset));
+ blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
+ } else {
+ blob->csb_start_offset = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Validate the code signature blob, create a struct cs_blob wrapper
+ * and return it together with a pointer to the chosen code directory
+ * and entitlements blob.
+ *
+ * Note that this takes ownership of the memory at addr, mainly because
+ * this function can actually replace the passed in blob with another
+ * one, e.g. when performing multilevel hashing optimization.
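+ *
+ * Once the wrapper is allocated, *addr is cleared and the mapping is
+ * owned by the blob (csb_mem_kaddr); cs_blob_free() releases it, both
+ * on the error paths here and when the blob is eventually torn down.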
+ */
+int
+cs_blob_create_validated(
+ vm_address_t * const addr,
+ vm_size_t size,
+ struct cs_blob ** const ret_blob,
+ CS_CodeDirectory const ** const ret_cd)
+{
+ struct cs_blob *blob;
+ int error = EINVAL;
+ const CS_CodeDirectory *cd;
+ const CS_GenericBlob *entitlements;
+ union cs_hash_union mdctx;
+ size_t length;
+
+ if (ret_blob) {
+ *ret_blob = NULL;
+ }
+
+ blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
+ if (blob == NULL) {
+ return ENOMEM;
+ }
+
+ /* fill in the new blob */
+ blob->csb_mem_size = size;
+ blob->csb_mem_offset = 0;
+ blob->csb_mem_kaddr = (void *)*addr;
+ blob->csb_flags = 0;
+ blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
+ blob->csb_platform_binary = 0;
+ blob->csb_platform_path = 0;
+ blob->csb_teamid = NULL;
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+ blob->csb_supplement_teamid = NULL;
+#endif
+ blob->csb_entitlements_blob = NULL;
+ blob->csb_entitlements = NULL;
+ blob->csb_reconstituted = false;
+
+ /* Transfer ownership. Even on error, this function will deallocate */
+ *addr = 0;
+
+ /*
+ * Validate the blob's contents
+ */
+ length = (size_t) size;
+ error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
+ length, &cd, &entitlements);
+ if (error) {
+ if (cs_debug) {
+ printf("CODESIGNING: csblob invalid: %d\n", error);
+ }
+ /*
+ * The vnode checker can't make the rest of this function
+ * succeed if csblob validation failed, so bail
+ */
+ goto out;
+ } else {
+ const unsigned char *md_base;
+ uint8_t hash[CS_HASH_MAX_SIZE];
+ int md_size;
+ vm_offset_t hash_pagemask;
+
+ blob->csb_cd = cd;
+ blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
+ if (blob->csb_entitlements_blob != NULL) {
+ blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+ ntohl(blob->csb_entitlements_blob->length),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+ PTRAUTH_ADDR_DIVERSIFY);
+ }
+ blob->csb_hashtype = cs_find_md(cd->hashType);
+ if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
+ panic("validated CodeDirectory but unsupported type");
+ }
+
+ blob->csb_hash_pageshift = cd->pageSize;
+ hash_pagemask = (1U << cd->pageSize) - 1;
+ blob->csb_hash_firstlevel_pageshift = 0;
+ blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
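+ /*
+ * csb_end_offset: the code limit rounded up to the hash page size;
+ * csb_start_offset: the base of the first scatter range, if any.
+ */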
+ blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
+ if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
+ const SC_Scatter *scatter = (const SC_Scatter*)
+ ((const char*)cd + ntohl(cd->scatterOffset));
+ blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
+ } else {
+ blob->csb_start_offset = 0;
+ }
+ /* compute the blob's cdhash */
+ md_base = (const unsigned char *) cd;
+ md_size = ntohl(cd->length);
+
+ blob->csb_hashtype->cs_init(&mdctx);
+ blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
+ blob->csb_hashtype->cs_final(hash, &mdctx);
+
+ memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
+ blob->csb_cdhash_signature = ptrauth_utils_sign_blob_generic(blob->csb_cdhash,
+ sizeof(blob->csb_cdhash),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
+ PTRAUTH_ADDR_DIVERSIFY);
+
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
+ blob->csb_linkage_hashtype = NULL;
+ if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
+ ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
+ blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
+
+ if (blob->csb_linkage_hashtype != NULL) {
+ memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
+ CS_CDHASH_LEN);
+ }
+ }
+#endif
+ }
+
+ error = 0;
+
+out:
+ if (error != 0) {
+ cs_blob_free(blob);
+ blob = NULL;
+ cd = NULL;
+ }
+
+ if (ret_blob != NULL) {
+ *ret_blob = blob;
+ }
+ if (ret_cd != NULL) {
+ *ret_cd = cd;
+ }
+
+ return error;
+}
+
+/*
+ * Free a cs_blob previously created by cs_blob_create_validated.
+ */
+void
+cs_blob_free(
+ struct cs_blob * const blob)
+{
+ if (blob != NULL) {
+ if (blob->csb_mem_kaddr) {
+ ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+ blob->csb_mem_kaddr = NULL;
+ }
+ if (blob->csb_entitlements != NULL) {
+ osobject_release(blob->csb_entitlements);
+ blob->csb_entitlements = NULL;
+ }
+ (kfree)(blob, sizeof(*blob));
+ }
+}
+#if CONFIG_SUPPLEMENTAL_SIGNATURES
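+/*
+ * Free a supplemental cs_blob, releasing its copied team identifier
+ * before handing the rest to cs_blob_free().
+ */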
+static void
+cs_blob_supplement_free(struct cs_blob * const blob)
+{
+ if (blob != NULL) {
+ if (blob->csb_supplement_teamid != NULL) {
+ vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
+ kfree(blob->csb_supplement_teamid, teamid_size);
+ blob->csb_supplement_teamid = NULL;
+ }
+ cs_blob_free(blob);
+ }
+}
+#endif
+
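+/*
+ * Account for a newly attached blob in the global code-signing
+ * statistics: blob count, total size, their peaks, and the largest
+ * single blob seen.
+ */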
+static void
+ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
+{
+ /* Note that the atomic ops are not enough to guarantee
+ * correctness: If a blob with an intermediate size is inserted
+ * concurrently, we can lose a peak value assignment. But these
+ * statistics are only advisory anyway, so we're not going to
+ * employ full locking here. (Consequently, we are also okay with
+ * relaxed ordering of those accesses.)
+ */
+
+ unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
+ if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
+ os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
+ }
+
+ size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
+
+ if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
+ os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
+ }
+ if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
+ os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
+ }
+}
+
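+/*
+ * Validate the code signature at *addr, wrap it in a struct cs_blob,
+ * let the MAC policy vet it, reconstitute it and/or convert it to the
+ * kernel page size where applicable, and attach the result to the
+ * vnode.  Ownership of the memory at *addr is transferred to the blob.
+ */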
+int
+ubc_cs_blob_add(
+ struct vnode *vp,
+ uint32_t platform,
+ cpu_type_t cputype,
+ cpu_subtype_t cpusubtype,
+ off_t base_offset,
+ vm_address_t *addr,
+ vm_size_t size,
+ struct image_params *imgp,
+ __unused int flags,
+ struct cs_blob **ret_blob)
+{
+ kern_return_t kr;
+ struct ubc_info *uip;
+ struct cs_blob *blob = NULL, *oblob = NULL;
+ int error;
+ CS_CodeDirectory const *cd;
+ off_t blob_start_offset, blob_end_offset;
+ boolean_t record_mtime;
+
+ record_mtime = FALSE;
+ if (ret_blob) {
+ *ret_blob = NULL;
+ }
+
+ /* Create the struct cs_blob wrapper that will be attached to the vnode.
+ * Validates the passed in blob in the process. */
+ error = cs_blob_create_validated(addr, size, &blob, &cd);
+
+ if (error != 0) {
+ printf("malform code signature blob: %d\n", error);
+ return error;
+ }
+
+ blob->csb_cpu_type = cputype;
+ blob->csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+ blob->csb_base_offset = base_offset;
+
+ /*
+ * Let policy module check whether the blob's signature is accepted.
+ */
+#if CONFIG_MACF
+ unsigned int cs_flags = blob->csb_flags;
+ unsigned int signer_type = blob->csb_signer_type;
+ error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
+ blob->csb_flags = cs_flags;
+ blob->csb_signer_type = signer_type;
+
+ if (error) {
+ if (cs_debug) {
+ printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+ }
+ goto out;
+ }
+ if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
+ if (cs_debug) {
+ printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
+ }
+ error = EPERM;
+ goto out;
+ }
+#endif
+
+#if CONFIG_ENFORCE_SIGNED_CODE
+ /*
+ * Reconstitute the code signature, keeping only the chosen code
+ * directory and the entitlements blob that the kernel needs.
+ */
+ {
+ vm_address_t new_mem_kaddr = 0;
+ vm_size_t new_mem_size = 0;
+
+ CS_CodeDirectory *new_cd = NULL;
+ CS_GenericBlob const *new_entitlements = NULL;
+
+ error = ubc_cs_reconstitute_code_signature(blob, 0,
+ &new_mem_kaddr, &new_mem_size,
+ &new_cd, &new_entitlements);
+
+ if (error != 0) {
+ printf("failed code signature reconstitution: %d\n", error);
+ goto out;
+ }
+
+ ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
+
+ blob->csb_mem_kaddr = (void *)new_mem_kaddr;
+ blob->csb_mem_size = new_mem_size;
+ blob->csb_cd = new_cd;
+ blob->csb_entitlements_blob = new_entitlements;
+ if (blob->csb_entitlements_blob != NULL) {
+ blob->csb_entitlements_blob_signature = ptrauth_utils_sign_blob_generic(blob->csb_entitlements_blob,
+ ntohl(blob->csb_entitlements_blob->length),
+ OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements_blob_signature"),
+ PTRAUTH_ADDR_DIVERSIFY);
+ }
+ blob->csb_reconstituted = true;
+ }
+#endif
+
+
+ if (blob->csb_flags & CS_PLATFORM_BINARY) {
+ if (cs_debug > 1) {
+ printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
+ }
+ blob->csb_platform_binary = 1;
+ blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
+ } else {
+ blob->csb_platform_binary = 0;
+ blob->csb_platform_path = 0;
+ blob->csb_teamid = csblob_parse_teamid(blob);
+ if (cs_debug > 1) {
+ if (blob->csb_teamid) {
+ printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
+ } else {
+ printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
+ }
+ }
+ }
+
+ /*
+ * Validate the blob's coverage
+ */
+ blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+ blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+ if (blob_start_offset >= blob_end_offset ||
+ blob_start_offset < 0 ||
+ blob_end_offset <= 0) {
+ /* reject empty or backwards blob */
+ error = EINVAL;
+ goto out;
+ }
+
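+ /*
+ * If the blob's hash page size is smaller than the kernel page size
+ * and the layout allows it, collapse the hash array so that each hash
+ * covers a full kernel page.
+ */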
+ if (ubc_cs_supports_multilevel_hash(blob)) {
+ error = ubc_cs_convert_to_multilevel_hash(blob);
+ if (error != 0) {
+ printf("failed multilevel hash conversion: %d\n", error);
+ goto out;
+ }
+ blob->csb_reconstituted = true;
+ }
+
+ vnode_lock(vp);
+ if (!UBCINFOEXISTS(vp)) {
+ vnode_unlock(vp);
+ error = ENOENT;