+
+
+/*
+ * Return non-zero iff the vnode is a regular file (VREG) that has
+ * UBC info attached (v_ubcinfo set up).
+ */
+int
+UBCINFOEXISTS(struct vnode * vp)
+{
+	if (vp == NULL)
+		return (0);
+	if (vp->v_type != VREG)
+		return (0);
+	return (vp->v_ubcinfo != UBC_INFO_NULL);
+}
+
+
+/*
+ * CODE SIGNING
+ */
+#define CS_BLOB_PAGEABLE 0
+/*
+ * Global accounting for loaded code-signature blobs.
+ * Current counters are updated with OSAddAtomic; the peak/max values are
+ * updated without atomicity (see "XXX atomic ?" at the update sites).
+ */
+static volatile SInt32 cs_blob_size = 0;	/* bytes currently held by all blobs */
+static volatile SInt32 cs_blob_count = 0;	/* number of blobs currently loaded */
+static SInt32 cs_blob_size_peak = 0;		/* high-water mark of cs_blob_size */
+static UInt32 cs_blob_size_max = 0;		/* largest single blob seen */
+static SInt32 cs_blob_count_peak = 0;		/* high-water mark of cs_blob_count */
+extern int cs_debug;
+
+int cs_validation = 1;	/* global switch: validate code signatures when non-zero */
+
+/* Expose the knobs/counters above under the "vm" sysctl tree. */
+SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW, &cs_validation, 0, "Do validate code signatures");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD, &cs_blob_count, 0, "Current number of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD, &cs_blob_size, 0, "Current size of all code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD, &cs_blob_size_max, 0, "Size of biggest code signature blob");
+
+/*
+ * Allocate kernel memory to hold a code-signature blob.
+ * On return, *blob_addr_p holds the allocation's address and
+ * *blob_size_p its (possibly page-rounded) size.
+ * Returns KERN_SUCCESS, or KERN_NO_SPACE if the allocation failed.
+ */
+kern_return_t
+ubc_cs_blob_allocate(
+	vm_offset_t	*blob_addr_p,
+	vm_size_t	*blob_size_p)
+{
+	kern_return_t	kr;
+
+#if CS_BLOB_PAGEABLE
+	/* pageable: round up to a whole number of pages and use kmem */
+	*blob_size_p = round_page(*blob_size_p);
+	kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p);
+#else	/* CS_BLOB_PAGEABLE */
+	/* wired: plain kalloc of the requested size */
+	*blob_addr_p = (vm_offset_t) kalloc(*blob_size_p);
+	kr = (*blob_addr_p == 0) ? KERN_NO_SPACE : KERN_SUCCESS;
+#endif	/* CS_BLOB_PAGEABLE */
+	return kr;
+}
+
+/*
+ * Release blob memory obtained from ubc_cs_blob_allocate().
+ * "blob_size" must be the size ubc_cs_blob_allocate() returned in
+ * *blob_size_p, so the kmem_free/kfree matches the original allocation.
+ */
+void
+ubc_cs_blob_deallocate(
+ vm_offset_t blob_addr,
+ vm_size_t blob_size)
+{
+#if CS_BLOB_PAGEABLE
+ kmem_free(kernel_map, blob_addr, blob_size);
+#else /* CS_BLOB_PAGEABLE */
+ kfree((void *) blob_addr, blob_size);
+#endif /* CS_BLOB_PAGEABLE */
+}
+
+/*
+ * ubc_cs_blob_add
+ *
+ * Attach a code-signature blob to vnode "vp".  "addr"/"size" describe the
+ * blob's kernel buffer (from ubc_cs_blob_allocate()), "cputype" identifies
+ * the CPU slice it signs (-1 == detached signature) and "base_offset" is
+ * the file offset the blob's code directory offsets are relative to.
+ *
+ * Returns:
+ *	0	 success: the blob is on the vnode's list and "addr" is owned
+ *		 by the vnode -- or, if the identical blob was already loaded,
+ *		 "addr" was deallocated here (the internal EAGAIN case below).
+ *	ENOMEM	 allocation or memory-entry creation failed.
+ *	ENOENT	 vnode has no UBC info, or the pager could not be marked signed.
+ *	EINVAL	 blob coverage is empty or backwards.
+ *	EALREADY a different blob already covers this range.
+ * On any error return the caller keeps ownership of "addr"; only the
+ * cs_blob wrapper and memory-entry port allocated here are released.
+ */
+int
+ubc_cs_blob_add(
+	struct vnode	*vp,
+	cpu_type_t	cputype,
+	off_t		base_offset,
+	vm_address_t	addr,
+	vm_size_t	size)
+{
+	kern_return_t		kr;
+	struct ubc_info		*uip;
+	struct cs_blob		*blob, *oblob;
+	int			error;
+	ipc_port_t		blob_handle;
+	memory_object_size_t	blob_size;
+	const CS_CodeDirectory	*cd;
+	off_t			blob_start_offset, blob_end_offset;
+	SHA1_CTX		sha1ctxt;
+
+	blob_handle = IPC_PORT_NULL;
+
+	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
+	if (blob == NULL) {
+		return ENOMEM;
+	}
+
+#if CS_BLOB_PAGEABLE
+	/* get a memory entry on the blob */
+	blob_size = (memory_object_size_t) size;
+	kr = mach_make_memory_entry_64(kernel_map,
+				       &blob_size,
+				       addr,
+				       VM_PROT_READ,
+				       &blob_handle,
+				       IPC_PORT_NULL);
+	if (kr != KERN_SUCCESS) {
+		error = ENOMEM;
+		goto out;
+	}
+	if (memory_object_round_page(blob_size) !=
+	    (memory_object_size_t) round_page(size)) {
+		printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%x !?\n",
+		       blob_size, size);
+		panic("XXX FBDP size mismatch 0x%llx 0x%x\n", blob_size, size);
+		error = EINVAL;
+		goto out;
+	}
+#else
+	blob_size = (memory_object_size_t) size;
+	blob_handle = IPC_PORT_NULL;
+#endif
+
+	/* fill in the new blob */
+	blob->csb_cpu_type = cputype;
+	blob->csb_base_offset = base_offset;
+	blob->csb_mem_size = size;
+	blob->csb_mem_offset = 0;
+	blob->csb_mem_handle = blob_handle;
+	blob->csb_mem_kaddr = addr;
+
+	/*
+	 * Validate the blob's contents: locate its code directory and
+	 * derive the file range it covers plus its SHA1 hash.
+	 */
+	cd = findCodeDirectory(
+		(const CS_SuperBlob *) addr,
+		(char *) addr,
+		(char *) addr + blob->csb_mem_size);
+	if (cd == NULL) {
+		/* no code directory => useless blob ! */
+		blob->csb_flags = 0;
+		blob->csb_start_offset = 0;
+		blob->csb_end_offset = 0;
+	} else {
+		/*
+		 * "const" here: cd points at read-only signature data;
+		 * the previous non-const declaration discarded the
+		 * qualifier in the assignment below.
+		 */
+		const unsigned char *sha1_base;
+		int sha1_size;
+
+		blob->csb_flags = ntohl(cd->flags) | CS_VALID;
+		/* coverage: nCodeSlots pages ending at the rounded codeLimit */
+		blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
+		blob->csb_start_offset = (blob->csb_end_offset -
+					  (ntohl(cd->nCodeSlots) * PAGE_SIZE));
+		/* compute the blob's SHA1 hash */
+		sha1_base = (const unsigned char *) cd;
+		sha1_size = ntohl(cd->length);
+		SHA1Init(&sha1ctxt);
+		SHA1Update(&sha1ctxt, sha1_base, sha1_size);
+		SHA1Final(blob->csb_sha1, &sha1ctxt);
+	}
+
+	/*
+	 * Let policy module check whether the blob's signature is accepted.
+	 */
+#if CONFIG_MACF
+	error = mac_vnode_check_signature(vp, blob->csb_sha1, (void*)addr, size);
+	if (error)
+		goto out;
+#endif
+
+	/*
+	 * Validate the blob's coverage
+	 */
+	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+	if (blob_start_offset >= blob_end_offset ||
+	    blob_start_offset < 0 ||
+	    blob_end_offset <= 0) {
+		/* reject empty or backwards blob */
+		error = EINVAL;
+		goto out;
+	}
+
+	vnode_lock(vp);
+	if (! UBCINFOEXISTS(vp)) {
+		vnode_unlock(vp);
+		error = ENOENT;
+		goto out;
+	}
+	uip = vp->v_ubcinfo;
+
+	/* check if this new blob overlaps with an existing blob */
+	for (oblob = uip->cs_blobs;
+	     oblob != NULL;
+	     oblob = oblob->csb_next) {
+		off_t oblob_start_offset, oblob_end_offset;
+
+		oblob_start_offset = (oblob->csb_base_offset +
+				      oblob->csb_start_offset);
+		oblob_end_offset = (oblob->csb_base_offset +
+				    oblob->csb_end_offset);
+		if (blob_start_offset >= oblob_end_offset ||
+		    blob_end_offset <= oblob_start_offset) {
+			/* no conflict with this existing blob */
+		} else {
+			/* conflict ! */
+			if (blob_start_offset == oblob_start_offset &&
+			    blob_end_offset == oblob_end_offset &&
+			    blob->csb_mem_size == oblob->csb_mem_size &&
+			    blob->csb_flags == oblob->csb_flags &&
+			    (blob->csb_cpu_type == CPU_TYPE_ANY ||
+			     oblob->csb_cpu_type == CPU_TYPE_ANY ||
+			     blob->csb_cpu_type == oblob->csb_cpu_type) &&
+			    !bcmp(blob->csb_sha1,
+				  oblob->csb_sha1,
+				  SHA1_RESULTLEN)) {
+				/*
+				 * We already have this blob:
+				 * we'll return success but
+				 * throw away the new blob.
+				 */
+				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
+					/*
+					 * The old blob matches this one
+					 * but doesn't have any CPU type.
+					 * Update it with whatever the caller
+					 * provided this time.
+					 */
+					oblob->csb_cpu_type = cputype;
+				}
+				vnode_unlock(vp);
+				error = EAGAIN;
+				goto out;
+			} else {
+				/* different blob: reject the new one */
+				vnode_unlock(vp);
+				error = EALREADY;
+				goto out;
+			}
+		}
+
+	}
+
+
+	/* mark this vnode's VM object as having "signed pages" */
+	kr = memory_object_signed(uip->ui_control, TRUE);
+	if (kr != KERN_SUCCESS) {
+		vnode_unlock(vp);
+		error = ENOENT;
+		goto out;
+	}
+
+	/*
+	 * Add this blob to the list of blobs for this vnode.
+	 * We always add at the front of the list and we never remove a
+	 * blob from the list, so ubc_cs_get_blobs() can return whatever
+	 * the top of the list was and that list will remain valid
+	 * while we validate a page, even after we release the vnode's lock.
+	 */
+	blob->csb_next = uip->cs_blobs;
+	uip->cs_blobs = blob;
+
+	/* update the global accounting (peaks updated non-atomically) */
+	OSAddAtomic(+1, &cs_blob_count);
+	if (cs_blob_count > cs_blob_count_peak) {
+		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
+	}
+	OSAddAtomic(+blob->csb_mem_size, &cs_blob_size);
+	if (cs_blob_size > cs_blob_size_peak) {
+		cs_blob_size_peak = cs_blob_size; /* XXX atomic ? */
+	}
+	if (blob->csb_mem_size > cs_blob_size_max) {
+		cs_blob_size_max = blob->csb_mem_size;
+	}
+
+	if (cs_debug) {
+		proc_t p;
+
+		p = current_proc();
+		printf("CODE SIGNING: proc %d(%s) "
+		       "loaded %s signatures for file (%s) "
+		       "range 0x%llx:0x%llx flags 0x%x\n",
+		       p->p_pid, p->p_comm,
+		       blob->csb_cpu_type == -1 ? "detached" : "embedded",
+		       vnode_name(vp),
+		       blob->csb_base_offset + blob->csb_start_offset,
+		       blob->csb_base_offset + blob->csb_end_offset,
+		       blob->csb_flags);
+	}
+
+	vnode_unlock(vp);
+
+	error = 0;	/* success ! */
+
+out:
+	if (error) {
+		/* we failed; release what we allocated */
+		if (blob) {
+			kfree(blob, sizeof (*blob));
+			blob = NULL;
+		}
+		if (blob_handle != IPC_PORT_NULL) {
+			mach_memory_entry_port_release(blob_handle);
+			blob_handle = IPC_PORT_NULL;
+		}
+	}
+
+	if (error == EAGAIN) {
+		/*
+		 * See above: error is EAGAIN if we were asked
+		 * to add an existing blob again. We cleaned the new
+		 * blob and we want to return success.
+		 */
+		error = 0;
+		/*
+		 * Since we're not failing, consume the data we received.
+		 */
+		ubc_cs_blob_deallocate(addr, size);
+	}
+
+	return error;
+}
+
+
+/*
+ * ubc_cs_blob_get
+ *
+ * Find a code-signature blob on vnode "vp" that either matches CPU type
+ * "cputype" (when cputype != -1) or covers file offset "offset" (when
+ * offset != -1).  Returns the blob, or NULL if the vnode has no UBC info
+ * or no blob matches.  The vnode lock is held only during the lookup.
+ *
+ * NOTE(review): the cputype test breaks out of the loop without also
+ * checking "offset", so a blob matching the CPU type is returned even if
+ * it does not cover the offset -- presumably callers pass -1 for
+ * whichever criterion they don't want; confirm against callers.
+ */
+struct cs_blob *
+ubc_cs_blob_get(
+ struct vnode *vp,
+ cpu_type_t cputype,
+ off_t offset)
+{
+ struct ubc_info *uip;
+ struct cs_blob *blob;
+ off_t offset_in_blob;
+
+ vnode_lock_spin(vp);
+
+ if (! UBCINFOEXISTS(vp)) {
+ blob = NULL;
+ goto out;
+ }
+
+ uip = vp->v_ubcinfo;
+ for (blob = uip->cs_blobs;
+ blob != NULL;
+ blob = blob->csb_next) {
+ /* CPU-type match wins outright (see NOTE above) */
+ if (cputype != -1 && blob->csb_cpu_type == cputype) {
+ break;
+ }
+ if (offset != -1) {
+ offset_in_blob = offset - blob->csb_base_offset;
+ if (offset_in_blob >= blob->csb_start_offset &&
+ offset_in_blob < blob->csb_end_offset) {
+ /* our offset is covered by this blob */
+ break;
+ }
+ }
+ }
+
+out:
+ vnode_unlock(vp);
+
+ return blob;
+}
+
+/*
+ * ubc_cs_free
+ *
+ * Release every code-signature blob attached to "uip": free each blob's
+ * kernel buffer, release its memory-entry port, decrement the global
+ * blob accounting, and free the cs_blob wrapper itself.  Leaves
+ * uip->cs_blobs NULL.
+ *
+ * NOTE(review): no locking is done here -- presumably this runs when the
+ * ubc_info is being torn down and no other thread can reach the list;
+ * confirm against callers.
+ */
+static void
+ubc_cs_free(
+ struct ubc_info *uip)
+{
+ struct cs_blob *blob, *next_blob;
+
+ for (blob = uip->cs_blobs;
+ blob != NULL;
+ blob = next_blob) {
+ /* grab the link before freeing the current blob */
+ next_blob = blob->csb_next;
+ if (blob->csb_mem_kaddr != 0) {
+ ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
+ blob->csb_mem_size);
+ blob->csb_mem_kaddr = 0;
+ }
+ if (blob->csb_mem_handle != IPC_PORT_NULL) {
+ mach_memory_entry_port_release(blob->csb_mem_handle);
+ }
+ blob->csb_mem_handle = IPC_PORT_NULL;
+ OSAddAtomic(-1, &cs_blob_count);
+ OSAddAtomic(-blob->csb_mem_size, &cs_blob_size);
+ kfree(blob, sizeof (*blob));
+ }
+ uip->cs_blobs = NULL;
+}
+
+/*
+ * Return the head of vnode "vp"'s list of code-signature blobs, or NULL
+ * if the vnode has no UBC info.  The returned list stays valid after the
+ * lock is dropped because blobs are only ever added at the front and
+ * never removed while the vnode is in use (see ubc_cs_blob_add()).
+ */
+struct cs_blob *
+ubc_get_cs_blobs(
+	struct vnode	*vp)
+{
+	struct cs_blob	*blobs = NULL;
+
+	vnode_lock_spin(vp);
+	if (UBCINFOEXISTS(vp)) {
+		blobs = vp->v_ubcinfo->cs_blobs;
+	}
+	vnode_unlock(vp);
+
+	return blobs;
+}
+
+/* statistics: pages with no hash available / pages whose hash mismatched */
+unsigned long cs_validate_page_no_hash = 0;
+unsigned long cs_validate_page_bad_hash = 0;
+/*
+ * cs_validate_page
+ *
+ * Validate one page of file data against the code-signature blobs in
+ * "_blobs" (a struct cs_blob list, as returned by ubc_get_cs_blobs()).
+ * "page_offset" is the page's offset in the file; "data" points at the
+ * page's contents.
+ *
+ * Returns TRUE if an expected hash covering the page was found (i.e. the
+ * page could be validated), FALSE otherwise.  *tainted is set to TRUE
+ * only when an expected hash was found and the page's actual SHA1 does
+ * not match it.
+ */
+boolean_t
+cs_validate_page(
+ void *_blobs,
+ memory_object_offset_t page_offset,
+ const void *data,
+ boolean_t *tainted)
+{
+ SHA1_CTX sha1ctxt;
+ unsigned char actual_hash[SHA1_RESULTLEN];
+ unsigned char expected_hash[SHA1_RESULTLEN];
+ boolean_t found_hash;
+ struct cs_blob *blobs, *blob;
+ const CS_CodeDirectory *cd;
+ const CS_SuperBlob *embedded;
+ off_t start_offset, end_offset;
+ const unsigned char *hash;
+ boolean_t validated;
+ off_t offset; /* page offset in the file */
+ size_t size;
+ off_t codeLimit = 0;
+ char *lower_bound, *upper_bound;
+ vm_offset_t kaddr, blob_addr;
+ vm_size_t ksize;
+ kern_return_t kr;
+
+ offset = page_offset;
+
+ /* retrieve the expected hash */
+ found_hash = FALSE;
+ blobs = (struct cs_blob *) _blobs;
+
+ for (blob = blobs;
+ blob != NULL;
+ blob = blob->csb_next) {
+ /* quick range check against this blob's recorded coverage */
+ offset = page_offset - blob->csb_base_offset;
+ if (offset < blob->csb_start_offset ||
+ offset >= blob->csb_end_offset) {
+ /* our page is not covered by this blob */
+ continue;
+ }
+
+ /* map the blob in the kernel address space */
+ kaddr = blob->csb_mem_kaddr;
+ if (kaddr == 0) {
+ /* not resident: map it read-only via its memory entry */
+ ksize = (vm_size_t) (blob->csb_mem_size +
+ blob->csb_mem_offset);
+ kr = vm_map(kernel_map,
+ &kaddr,
+ ksize,
+ 0,
+ VM_FLAGS_ANYWHERE,
+ blob->csb_mem_handle,
+ 0,
+ TRUE,
+ VM_PROT_READ,
+ VM_PROT_READ,
+ VM_INHERIT_NONE);
+ if (kr != KERN_SUCCESS) {
+ /* XXX FBDP what to do !? */
+ printf("cs_validate_page: failed to map blob, "
+ "size=0x%x kr=0x%x\n",
+ blob->csb_mem_size, kr);
+ break;
+ }
+ }
+ blob_addr = kaddr + blob->csb_mem_offset;
+
+ lower_bound = CAST_DOWN(char *, blob_addr);
+ upper_bound = lower_bound + blob->csb_mem_size;
+
+ embedded = (const CS_SuperBlob *) blob_addr;
+ cd = findCodeDirectory(embedded, lower_bound, upper_bound);
+ if (cd != NULL) {
+ /* sanity-check the code directory's hash parameters */
+ if (cd->pageSize != PAGE_SHIFT ||
+ cd->hashType != 0x1 ||
+ cd->hashSize != SHA1_RESULTLEN) {
+ /* bogus blob ? */
+ continue;
+ }
+
+ /* re-derive coverage from the code directory itself */
+ end_offset = round_page(ntohl(cd->codeLimit));
+ start_offset = end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE);
+ offset = page_offset - blob->csb_base_offset;
+ if (offset < start_offset ||
+ offset >= end_offset) {
+ /* our page is not covered by this blob */
+ continue;
+ }
+
+ codeLimit = ntohl(cd->codeLimit);
+ /*
+ * hashes() presumably returns the stored hash slot for
+ * this page index within [lower_bound, upper_bound), or
+ * NULL -- defined elsewhere; confirm its contract there.
+ */
+ hash = hashes(cd, atop(offset),
+ lower_bound, upper_bound);
+ if (hash != NULL) {
+ bcopy(hash, expected_hash,
+ sizeof (expected_hash));
+ found_hash = TRUE;
+ }
+
+ break;
+ }
+ }
+
+ if (found_hash == FALSE) {
+ /*
+ * We can't verify this page because there is no signature
+ * for it (yet). It's possible that this part of the object
+ * is not signed, or that signatures for that part have not
+ * been loaded yet.
+ * Report that the page has not been validated and let the
+ * caller decide if it wants to accept it or not.
+ */
+ cs_validate_page_no_hash++;
+ if (cs_debug > 1) {
+ printf("CODE SIGNING: cs_validate_page: "
+ "off 0x%llx: no hash to validate !?\n",
+ page_offset);
+ }
+ validated = FALSE;
+ *tainted = FALSE;
+ } else {
+
+ size = PAGE_SIZE;
+ const uint32_t *asha1, *esha1;
+ if (offset + size > codeLimit) {
+ /* partial page at end of segment */
+ assert(offset < codeLimit);
+ size = codeLimit & PAGE_MASK;
+ }
+ /* compute the actual page's SHA1 hash */
+ SHA1Init(&sha1ctxt);
+ SHA1UpdateUsePhysicalAddress(&sha1ctxt, data, size);
+ SHA1Final(actual_hash, &sha1ctxt);
+
+ /* word views of both hashes, used only for debug printing */
+ asha1 = (const uint32_t *) actual_hash;
+ esha1 = (const uint32_t *) expected_hash;
+
+ if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) {
+ if (cs_debug) {
+ printf("CODE SIGNING: cs_validate_page: "
+ "off 0x%llx size 0x%lx: "
+ "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
+ "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
+ page_offset, size,
+ asha1[0], asha1[1], asha1[2],
+ asha1[3], asha1[4],
+ esha1[0], esha1[1], esha1[2],
+ esha1[3], esha1[4]);
+ }
+ cs_validate_page_bad_hash++;
+ *tainted = TRUE;
+ } else {
+ if (cs_debug > 1) {
+ printf("CODE SIGNING: cs_validate_page: "
+ "off 0x%llx size 0x%lx: SHA1 OK\n",
+ page_offset, size);
+ }
+ *tainted = FALSE;
+ }
+ validated = TRUE;
+ }
+
+ return validated;
+}
+
+/*
+ * Look up the code-signature blob covering file offset "offset" on vnode
+ * "vp" and copy that blob's SHA1 hash into "cdhash".
+ * Returns 0 on success, or EBADEXEC when no blob covers the offset.
+ */
+int
+ubc_cs_getcdhash(
+	vnode_t		vp,
+	off_t		offset,
+	unsigned char	*cdhash)
+{
+	struct cs_blob	*blob;
+	off_t		rel_offset;
+
+	for (blob = ubc_get_cs_blobs(vp);
+	     blob != NULL;
+	     blob = blob->csb_next) {
+		/* offset relative to this blob's base */
+		rel_offset = offset - blob->csb_base_offset;
+		if (rel_offset >= blob->csb_start_offset &&
+		    rel_offset < blob->csb_end_offset) {
+			/* this blob covers "offset": hand back its SHA1 hash */
+			bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
+			return 0;
+		}
+	}
+
+	/* we didn't find a blob covering "offset" */
+	return EBADEXEC;	/* XXX any better error ? */
+}