+#if CS_BLOB_PAGEABLE
+ /* get a memory entry on the blob */
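+ /*
+ * The memory entry handle lets the blob's pages be mapped and
+ * paged on demand instead of staying permanently wired in the
+ * kernel map.
+ */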
+ blob_size = (memory_object_size_t) size;
+ kr = mach_make_memory_entry_64(kernel_map,
+ &blob_size,
+ addr,
+ VM_PROT_READ,
+ &blob_handle,
+ IPC_PORT_NULL);
+ if (kr != KERN_SUCCESS) {
+ error = ENOMEM;
+ goto out;
+ }
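+ /*
+ * mach_make_memory_entry_64() returns, in "blob_size", the size
+ * the entry actually covers; if that doesn't match the
+ * page-rounded blob size, the entry doesn't cover the whole
+ * blob. The panic below is debugging scaffolding (note the
+ * "XXX"); the EINVAL path is the real error handling.
+ */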
+ if (memory_object_round_page(blob_size) !=
+ (memory_object_size_t) round_page(size)) {
+ printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%x !?\n",
+ blob_size, size);
+ panic("XXX FBDP size mismatch 0x%llx 0x%x\n", blob_size, size);
+ error = EINVAL;
+ goto out;
+ }
+#else
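+ /* non-pageable: the blob stays wired in kernel memory */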
+ blob_size = (memory_object_size_t) size;
+ blob_handle = IPC_PORT_NULL;
+#endif
+
+ /* fill in the new blob */
+ blob->csb_cpu_type = cputype;
+ blob->csb_base_offset = base_offset;
+ blob->csb_mem_size = size;
+ blob->csb_mem_offset = 0;
+ blob->csb_mem_handle = blob_handle;
+ blob->csb_mem_kaddr = addr;
+
+ /*
+ * Validate the blob's contents
+ */
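+ /*
+ * The blob should be either a bare code directory or a
+ * CS_SuperBlob: a header followed by (type, offset) index
+ * entries locating the contained blobs, one of which is the
+ * CS_CodeDirectory with the codeLimit and the per-page hashes.
+ */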
+ cd = findCodeDirectory(
+ (const CS_SuperBlob *) addr,
+ (char *) addr,
+ (char *) addr + blob->csb_mem_size);
+ if (cd == NULL) {
+ /* no code directory => useless blob ! */
+ blob->csb_flags = 0;
+ blob->csb_start_offset = 0;
+ blob->csb_end_offset = 0;
+ } else {
+ const unsigned char *sha1_base;
+ int sha1_size;
+
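+ /*
+ * Each code slot holds the hash of one page, so the signature
+ * covers the nCodeSlots pages that end at the page-rounded
+ * codeLimit.
+ */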
+ blob->csb_flags = ntohl(cd->flags) | CS_VALID;
+ blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
+ blob->csb_start_offset = (blob->csb_end_offset -
+ (ntohl(cd->nCodeSlots) * PAGE_SIZE));
+ /* compute the blob's SHA1 hash */
+ sha1_base = (const unsigned char *) cd;
+ sha1_size = ntohl(cd->length);
+ SHA1Init(&sha1ctxt);
+ SHA1Update(&sha1ctxt, sha1_base, sha1_size);
+ SHA1Final(blob->csb_sha1, &sha1ctxt);
+ }
+
+ /*
+ * Let policy module check whether the blob's signature is accepted.
+ */
+#if CONFIG_MACF
+ error = mac_vnode_check_signature(vp, blob->csb_sha1, (void*)addr, size);
+ if (error)
+ goto out;
+#endif
+
+ /*
+ * Validate the blob's coverage
+ */
+ blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
+ blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
+
+ if (blob_start_offset >= blob_end_offset ||
+ blob_start_offset < 0 ||
+ blob_end_offset <= 0) {
+ /* reject empty or backwards blob */
+ error = EINVAL;
+ goto out;
+ }
+
+ vnode_lock(vp);
+ if (! UBCINFOEXISTS(vp)) {
+ vnode_unlock(vp);
+ error = ENOENT;
+ goto out;
+ }
+ uip = vp->v_ubcinfo;
+
+ /* check if this new blob overlaps with an existing blob */
+ for (oblob = uip->cs_blobs;
+ oblob != NULL;
+ oblob = oblob->csb_next) {
+ off_t oblob_start_offset, oblob_end_offset;
+
+ oblob_start_offset = (oblob->csb_base_offset +
+ oblob->csb_start_offset);
+ oblob_end_offset = (oblob->csb_base_offset +
+ oblob->csb_end_offset);
+ if (blob_start_offset >= oblob_end_offset ||
+ blob_end_offset <= oblob_start_offset) {
+ /* no conflict with this existing blob */
+ } else {
+ /* conflict ! */
+ if (blob_start_offset == oblob_start_offset &&
+ blob_end_offset == oblob_end_offset &&
+ blob->csb_mem_size == oblob->csb_mem_size &&
+ blob->csb_flags == oblob->csb_flags &&
+ (blob->csb_cpu_type == CPU_TYPE_ANY ||
+ oblob->csb_cpu_type == CPU_TYPE_ANY ||
+ blob->csb_cpu_type == oblob->csb_cpu_type) &&
+ !bcmp(blob->csb_sha1,
+ oblob->csb_sha1,
+ SHA1_RESULTLEN)) {
+ /*
+ * We already have this blob:
+ * we'll return success but
+ * throw away the new blob.
+ */
+ if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
+ /*
+ * The old blob matches this one
+ * but doesn't have any CPU type.
+ * Update it with whatever the caller
+ * provided this time.
+ */
+ oblob->csb_cpu_type = cputype;
+ }
+ vnode_unlock(vp);
+ error = EAGAIN;
+ goto out;
+ } else {
+ /* different blob: reject the new one */
+ vnode_unlock(vp);
+ error = EALREADY;
+ goto out;
+ }
+ }
+
+ }
+
+
+ /* mark this vnode's VM object as having "signed pages" */
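+ /*
+ * The VM layer relies on this flag to know that pages faulted
+ * in from this file must be validated against the registered
+ * signatures.
+ */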
+ kr = memory_object_signed(uip->ui_control, TRUE);
+ if (kr != KERN_SUCCESS) {
+ vnode_unlock(vp);
+ error = ENOENT;
+ goto out;
+ }
+
+ /*
+ * Add this blob to the list of blobs for this vnode.
+ * We always add at the front of the list and we never remove a
+ * blob from the list, so ubc_cs_get_blobs() can return whatever
+ * the top of the list was and that list will remain valid
+ * while we validate a page, even after we release the vnode's lock.
+ */
+ blob->csb_next = uip->cs_blobs;
+ uip->cs_blobs = blob;
+
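+ /* account for this blob in the global code-signing statistics */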
+ OSAddAtomic(+1, &cs_blob_count);
+ if (cs_blob_count > cs_blob_count_peak) {
+ cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
+ }
+ OSAddAtomic(+blob->csb_mem_size, &cs_blob_size);
+ if (cs_blob_size > cs_blob_size_peak) {
+ cs_blob_size_peak = cs_blob_size; /* XXX atomic ? */
+ }
+ if (blob->csb_mem_size > cs_blob_size_max) {
+ cs_blob_size_max = blob->csb_mem_size;
+ }
+
+ if (cs_debug) {
+ proc_t p;
+
+ p = current_proc();
+ printf("CODE SIGNING: proc %d(%s) "
+ "loaded %s signatures for file (%s) "
+ "range 0x%llx:0x%llx flags 0x%x\n",
+ p->p_pid, p->p_comm,
+ blob->csb_cpu_type == -1 ? "detached" : "embedded",
+ vnode_name(vp),
+ blob->csb_base_offset + blob->csb_start_offset,
+ blob->csb_base_offset + blob->csb_end_offset,
+ blob->csb_flags);
+ }
+
+ vnode_unlock(vp);
+
+ error = 0; /* success ! */
+
+out:
+ if (error) {
+ /* we failed; release what we allocated */
+ if (blob) {
+ kfree(blob, sizeof (*blob));
+ blob = NULL;
+ }
+ if (blob_handle != IPC_PORT_NULL) {
+ mach_memory_entry_port_release(blob_handle);
+ blob_handle = IPC_PORT_NULL;
+ }
+ }
+
+ if (error == EAGAIN) {
+ /*
+ * See above: error is EAGAIN if we were asked to add an
+ * existing blob again. The duplicate blob was already freed
+ * at "out:" above, so return success.
+ */
+ error = 0;
+ /*
+ * Since we're not failing, consume the data we received.
+ */
+ ubc_cs_blob_deallocate(addr, size);
+ }
+
+ return error;
+}
+
+
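+ /*
+ * Look up a signature blob registered on this vnode that matches
+ * the given CPU type or covers the given file offset.
+ */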
+struct cs_blob *
+ubc_cs_blob_get(
+ struct vnode *vp,
+ cpu_type_t cputype,
+ off_t offset)