#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
+#include <mach/mach_vm.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
						  boolean_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
boolean_t is_signed);
+extern boolean_t memory_object_is_slid(memory_object_control_t control);
+
extern void Debugger(const char *message);
#if DIAGNOSTIC
#if defined(assert)
-#undef assert()
+#undef assert
#endif
#define assert(cond) \
((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
* CODESIGNING
* Routines to navigate code signing data structures in the kernel...
*/
+
+extern int cs_debug;
+
static boolean_t
cs_valid_range(
const void *start,
CSMAGIC_CODEDIRECTORY = 0xfade0c02, /* CodeDirectory blob */
CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0, /* embedded form of signature data */
CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02, /* XXX */
+ CSMAGIC_EMBEDDED_ENTITLEMENTS = 0xfade7171, /* embedded entitlements */
CSMAGIC_DETACHED_SIGNATURE = 0xfade0cc1, /* multi-arch collection of embedded signatures */
CSSLOT_CODEDIRECTORY = 0, /* slot index for CodeDirectory */
+	CSSLOT_ENTITLEMENTS = 5			/* slot index for entitlements blob */
};
+static const uint32_t supportsScatter = 0x20100; // first version to support scatter option
/*
* Structure of an embedded-signature SuperBlob
/* followed by Blobs in no particular order as indicated by offsets in index */
} CS_SuperBlob;
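+/* Generic blob header shared by all blob kinds. "length" counts the whole
+ * blob including this eight-byte header, so data[] carries
+ * length - sizeof(CS_GenericBlob) payload bytes. As with the other code
+ * signing structures, the fields are stored big-endian.
+ */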
+typedef struct __GenericBlob {
+ uint32_t magic; /* magic number */
+ uint32_t length; /* total length of blob */
+ char data[];
+} CS_GenericBlob;
+
+struct Scatter {
+ uint32_t count; // number of pages; zero for sentinel (only)
+ uint32_t base; // first page number
+ uint64_t targetOffset; // offset in target
+ uint64_t spare; // reserved
+};
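+/* A scatter vector describes a non-contiguous code layout: each entry says
+ * that "count" pages starting at page "base" are covered, entries must be
+ * sorted by increasing base, and an entry with count == 0 terminates the
+ * array. The hash array itself stays densely packed in scatter order; see
+ * the lookup below.
+ */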
/*
* C form of a CodeDirectory.
uint8_t spare1; /* unused (must be zero) */
uint8_t pageSize; /* log2(page size in bytes); 0 => infinite */
uint32_t spare2; /* unused (must be zero) */
+ /* Version 0x20100 */
+ uint32_t scatterOffset; /* offset of optional scatter vector */
/* followed by dynamic content as located by offset fields above */
} CS_CodeDirectory;
*/
cd = (const CS_CodeDirectory *) embedded;
}
+
if (cd &&
cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
char *upper_bound)
{
const unsigned char *base, *top, *hash;
- uint32_t nCodeSlots;
+ uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
- base = (const unsigned char *)cd + ntohl(cd->hashOffset);
- nCodeSlots = ntohl(cd->nCodeSlots);
- top = base + nCodeSlots * SHA1_RESULTLEN;
- if (!cs_valid_range(base, top,
- lower_bound, upper_bound) ||
- page > nCodeSlots) {
- return NULL;
- }
- assert(page < nCodeSlots);
+ if((ntohl(cd->version) >= supportsScatter) && (ntohl(cd->scatterOffset))) {
+ /* Get first scatter struct */
+ const struct Scatter *scatter = (const struct Scatter*)
+ ((const char*)cd + ntohl(cd->scatterOffset));
+ uint32_t hashindex=0, scount, sbase=0;
+ /* iterate all scatter structs */
+ do {
+ if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
+ if(cs_debug) {
+ printf("CODE SIGNING: Scatter extends past Code Directory\n");
+ }
+ return NULL;
+ }
+
+ scount = ntohl(scatter->count);
+ uint32_t new_base = ntohl(scatter->base);
+
+ /* last scatter? */
+ if (scount == 0) {
+ return NULL;
+ }
+
+ if((hashindex > 0) && (new_base <= sbase)) {
+ if(cs_debug) {
+ printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
+ sbase, new_base);
+ }
+ return NULL; /* unordered scatter array */
+ }
+ sbase = new_base;
+
+ /* this scatter beyond page we're looking for? */
+ if (sbase > page) {
+ return NULL;
+ }
+
+ if (sbase+scount >= page) {
+ /* Found the scatter struct that is
+ * referencing our page */
+
+ /* base = address of first hash covered by scatter */
+ base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
+ hashindex * SHA1_RESULTLEN;
+ /* top = address of first hash after this scatter */
+ top = base + scount * SHA1_RESULTLEN;
+ if (!cs_valid_range(base, top, lower_bound,
+ upper_bound) ||
+ hashindex > nCodeSlots) {
+ return NULL;
+ }
+
+ break;
+ }
+
+ /* this scatter struct is before the page we're looking
+ * for. Iterate. */
+ hashindex+=scount;
+ scatter++;
+ } while(1);
+
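+	/* The hash array is dense: "hashindex" hashes cover all pages from
+	 * earlier scatter entries, so the hash for "page" sits
+	 * (page - sbase) slots past "base" computed above.
+	 */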
+ hash = base + (page - sbase) * SHA1_RESULTLEN;
+ } else {
+ base = (const unsigned char *)cd + ntohl(cd->hashOffset);
+ top = base + nCodeSlots * SHA1_RESULTLEN;
+ if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
+ page > nCodeSlots) {
+ return NULL;
+ }
+ assert(page < nCodeSlots);
- hash = base + page * SHA1_RESULTLEN;
+ hash = base + page * SHA1_RESULTLEN;
+ }
+
if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
lower_bound, upper_bound)) {
hash = NULL;
* End of routines to navigate code signing data structures in the kernel.
*/
+/*
+ * ENTITLEMENTS
+ * Routines to navigate entitlements in the kernel.
+ */
+
+/* Retrieve the entitlements blob for a process.
+ * Returns:
+ * EINVAL no text vnode associated with the process
+ * EBADEXEC invalid code signing data
+ * ENOMEM you should reboot
+ * 0 no error occurred
+ *
+ * On success, out_start and out_length will point to the
+ * entitlements blob if found; or will be set to NULL/zero
+ * if there were no entitlements.
+ */
+int
+cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length)
+{
+ SHA1_CTX context; /* XXX hash agility */
+ int error = 0;
+ struct cs_blob *blob_list_entry;
+ CS_SuperBlob *super_blob;
+ CS_BlobIndex *blob_index;
+ CS_GenericBlob *blob;
+ CS_CodeDirectory *code_dir;
+ unsigned char *computed_hash = NULL;
+ unsigned char *embedded_hash = NULL;
+ void *start = NULL;
+ size_t length = 0;
+ size_t hash_size = 0;
+ unsigned int i, count;
+
+ if (NULL == p->p_textvp) {
+ error = EINVAL;
+ goto out;
+ }
+ if (NULL == (blob_list_entry = ubc_cs_blob_get(p->p_textvp, -1,
+ p->p_textoff)))
+ goto out;
+ super_blob = (void *)blob_list_entry->csb_mem_kaddr;
+ if (CSMAGIC_EMBEDDED_SIGNATURE != ntohl(super_blob->magic)) {
+ error = EBADEXEC;
+ goto out;
+ }
+ count = ntohl(super_blob->count);
+ for (i = 0; i < count; ++i) {
+ blob_index = &super_blob->index[i];
+ blob = (void *)((char *)super_blob + ntohl(blob_index->offset));
+ switch (ntohl(blob_index->type)) {
+ case CSSLOT_CODEDIRECTORY:
+ if (CSMAGIC_CODEDIRECTORY != ntohl(blob->magic))
+ break;
+ code_dir = (void *)blob;
+ hash_size = code_dir->hashSize;
+ if (CSSLOT_ENTITLEMENTS <=
+ ntohl(code_dir->nSpecialSlots)) {
+ embedded_hash = (void *)((char *)code_dir +
+ ntohl(code_dir->hashOffset) -
+ (hash_size * CSSLOT_ENTITLEMENTS));
+ }
+ break;
+ case CSSLOT_ENTITLEMENTS:
+ if (CSMAGIC_EMBEDDED_ENTITLEMENTS != ntohl(blob->magic))
+ break;
+ start = (void *)blob;
+ length = ntohl(blob->length);
+ break;
+ default:
+ break;
+ }
+ }
+ if (NULL == start && NULL == embedded_hash) {
+ error = 0;
+ goto out;
+ } else if (NULL == start || NULL == embedded_hash) {
+ error = EBADEXEC;
+ goto out;
+ }
+ if (NULL == (computed_hash = kalloc(hash_size))) {
+ error = ENOMEM;
+ goto out;
+ }
+ SHA1Init(&context);
+ SHA1Update(&context, start, length);
+ SHA1Final(computed_hash, &context);
+ if (0 != memcmp(computed_hash, embedded_hash, hash_size)) {
+ error = EBADEXEC;
+ goto out;
+ }
+ error = 0;
+out:
+ if (NULL != computed_hash)
+ kfree(computed_hash, hash_size);
+ if (0 == error) {
+ *out_start = start;
+ *out_length = length;
+ }
+ return error;
+}
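+
+/* Minimal usage sketch (hypothetical caller, error handling elided):
+ *
+ *	void *ents = NULL;
+ *	size_t ents_len = 0;
+ *
+ *	if (cs_entitlements_blob_get(p, &ents, &ents_len) == 0 &&
+ *	    ents != NULL) {
+ *		CS_GenericBlob *eb = ents;
+ *		// eb->data holds ntohl(eb->length) - sizeof(*eb) bytes of
+ *		// entitlements plist data, already hash-checked above.
+ *	}
+ */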
+
+/*
+ * ENTITLEMENTS
+ * End of routines to navigate entitlements in the kernel.
+ */
+
+
/*
* ubc_init
i = (vm_size_t) sizeof (struct ubc_info);
ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
+
+ zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
*/
uip->ui_size = nsize;
- if (nsize >= osize) /* Nothing more to do */
+ if (nsize >= osize) { /* Nothing more to do */
+ if (nsize > osize) {
+ lock_vnode_and_post(vp, NOTE_EXTEND);
+ }
+
return (1); /* return success */
+ }
/*
* When the file shrinks, invalidate the pages beyond the
lastpg += PAGE_SIZE_64;
}
if (olastpgend > lastpg) {
+ int flags;
+
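+		/* Truncating into the first page leaves nothing of the old
+		 * contents worth keeping, so use the FLUSH_ALL variant and
+		 * let the pager drop its entire range rather than flushing
+		 * just the tail.
+		 */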
+ if (lastpg == 0)
+ flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
+ else
+ flags = MEMORY_OBJECT_DATA_FLUSH;
/*
* invalidate the pages beyond the new EOF page
*
kret = memory_object_lock_request(control,
(memory_object_offset_t)lastpg,
(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
- MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
- VM_PROT_NO_CHANGE);
+ MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
if (kret != KERN_SUCCESS)
printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
}
return (1);
}
-
/*
* ubc_getpager
*
return (MEMORY_OBJECT_CONTROL_NULL);
}
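+/*
+ * ubc_strict_uncached_IO
+ *
+ * Returns TRUE if the vnode's memory object has been slid (the dyld shared
+ * cache case), in which case callers must bypass cached I/O paths so the
+ * on-disk bytes are never mixed with slid in-memory copies.
+ */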
+boolean_t
+ubc_strict_uncached_IO(struct vnode *vp)
+{
+ boolean_t result = FALSE;
+
+ if (UBCINFOEXISTS(vp)) {
+ result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
+ }
+ return result;
+}
/*
* ubc_blktooff
if (!locked)
- vnode_lock(vp);
+ vnode_lock_spin(vp);
if ((vp->v_usecount - vp->v_kusecount) > busycount)
retval = 1;
struct ubc_info *uip;
int need_rele = 0;
int need_wakeup = 0;
-
+
if (vnode_getwithref(vp))
return;
SET(uip->ui_flags, UI_MAPBUSY);
if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
- CLR(uip->ui_flags, UI_ISMAPPED);
+ CLR(uip->ui_flags, UI_ISMAPPED);
need_rele = 1;
}
vnode_unlock(vp);
-
+
if (need_rele) {
- (void) VNOP_MNOMAP(vp, vfs_context_current());
- vnode_rele(vp);
+ (void)VNOP_MNOMAP(vp, vfs_context_current());
+ vnode_rele(vp);
}
vnode_lock_spin(vp);
vnode_unlock(vp);
if (need_wakeup)
- wakeup(&uip->ui_flags);
+ wakeup(&uip->ui_flags);
}
/*
ubc_create_upl(
struct vnode *vp,
off_t f_offset,
- long bufsize,
+ int bufsize,
upl_t *uplp,
upl_page_info_t **plp,
int uplflags)
{
memory_object_control_t control;
- mach_msg_type_number_t count;
- int ubcflags;
kern_return_t kr;
+
+ if (plp != NULL)
+ *plp = NULL;
+ *uplp = NULL;
if (bufsize & 0xfff)
return KERN_INVALID_ARGUMENT;
- if (uplflags & UPL_FOR_PAGEOUT) {
+ if (bufsize > MAX_UPL_SIZE * PAGE_SIZE)
+ return KERN_INVALID_ARGUMENT;
+
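+	/* New-style callers pass a single intent flag (msync, pageout or
+	 * pagein) and let this routine derive the detailed UPL flags;
+	 * everything else falls through to the legacy flag handling below.
+	 */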
+ if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
+
+ if (uplflags & UPL_UBC_MSYNC) {
+ uplflags &= UPL_RET_ONLY_DIRTY;
+
+ uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
+ UPL_SET_INTERNAL | UPL_SET_LITE;
+
+ } else if (uplflags & UPL_UBC_PAGEOUT) {
+ uplflags &= UPL_RET_ONLY_DIRTY;
+
+ if (uplflags & UPL_RET_ONLY_DIRTY)
+ uplflags |= UPL_NOBLOCK;
+
+ uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
+ UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
+ } else {
+ uplflags |= UPL_RET_ONLY_ABSENT | UPL_NOBLOCK |
+ UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
+ UPL_SET_INTERNAL | UPL_SET_LITE;
+ }
+ } else {
uplflags &= ~UPL_FOR_PAGEOUT;
- ubcflags = UBC_FOR_PAGEOUT;
- } else
- ubcflags = UBC_FLAGS_NONE;
- control = ubc_getobject(vp, ubcflags);
+ if (uplflags & UPL_WILL_BE_DUMPED) {
+ uplflags &= ~UPL_WILL_BE_DUMPED;
+ uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
+ } else
+ uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
+ }
+ control = ubc_getobject(vp, UBC_FLAGS_NONE);
if (control == MEMORY_OBJECT_CONTROL_NULL)
return KERN_INVALID_ARGUMENT;
- if (uplflags & UPL_WILL_BE_DUMPED) {
- uplflags &= ~UPL_WILL_BE_DUMPED;
- uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
- } else
- uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
- count = 0;
-
- kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, &count, uplflags);
- if (plp != NULL)
- *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
+ kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
+ if (kr == KERN_SUCCESS && plp != NULL)
+ *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
return kr;
}
kern_return_t
ubc_upl_commit_range(
upl_t upl,
- vm_offset_t offset,
- vm_size_t size,
+ upl_offset_t offset,
+ upl_size_t size,
int flags)
{
upl_page_info_t *pl;
kern_return_t
ubc_upl_abort_range(
upl_t upl,
- vm_offset_t offset,
- vm_size_t size,
+ upl_offset_t offset,
+ upl_size_t size,
int abort_flags)
{
kern_return_t kr;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;
-extern int cs_debug;
int cs_validation = 1;
-SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW, &cs_validation, 0, "Do validate code signatures");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD, &cs_blob_count, 0, "Current number of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD, &cs_blob_size, 0, "Current size of all code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD, &cs_blob_size_max, 0, "Size of biggest code signature blob");
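+/* CTLFLAG_LOCKED marks each handler as doing its own locking, so sysctl
+ * need not serialize these entries behind the global sysctl lock.
+ */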
+SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
kern_return_t
ubc_cs_blob_allocate(
}
if (memory_object_round_page(blob_size) !=
(memory_object_size_t) round_page(size)) {
- printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%x !?\n",
- blob_size, size);
- panic("XXX FBDP size mismatch 0x%llx 0x%x\n", blob_size, size);
+ printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
+ blob_size, (size_t)size);
+ panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
error = EINVAL;
goto out;
}
blob->csb_start_offset = 0;
blob->csb_end_offset = 0;
} else {
- unsigned char *sha1_base;
+ const unsigned char *sha1_base;
int sha1_size;
blob->csb_flags = ntohl(cd->flags) | CS_VALID;
blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
- blob->csb_start_offset = (blob->csb_end_offset -
- (ntohl(cd->nCodeSlots) * PAGE_SIZE));
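+	/* With a scatter vector, the covered range starts at the first page
+	 * named by the first scatter entry instead of being derived from
+	 * codeLimit and nCodeSlots.
+	 */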
+ if((ntohl(cd->version) >= supportsScatter) && (ntohl(cd->scatterOffset))) {
+ const struct Scatter *scatter = (const struct Scatter*)
+ ((const char*)cd + ntohl(cd->scatterOffset));
+ blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE;
+ } else {
+ blob->csb_start_offset = (blob->csb_end_offset -
+ (ntohl(cd->nCodeSlots) * PAGE_SIZE));
+ }
/* compute the blob's SHA1 hash */
sha1_base = (const unsigned char *) cd;
sha1_size = ntohl(cd->length);
if (cs_blob_count > cs_blob_count_peak) {
cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
}
- OSAddAtomic(+blob->csb_mem_size, &cs_blob_size);
- if (cs_blob_size > cs_blob_size_peak) {
- cs_blob_size_peak = cs_blob_size; /* XXX atomic ? */
+ OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
+ if ((SInt32) cs_blob_size > cs_blob_size_peak) {
+ cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
}
- if (blob->csb_mem_size > cs_blob_size_max) {
- cs_blob_size_max = blob->csb_mem_size;
+ if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
+ cs_blob_size_max = (UInt32) blob->csb_mem_size;
}
- if (cs_debug) {
+ if (cs_debug > 1) {
proc_t p;
p = current_proc();
}
blob->csb_mem_handle = IPC_PORT_NULL;
OSAddAtomic(-1, &cs_blob_count);
- OSAddAtomic(-blob->csb_mem_size, &cs_blob_size);
+ OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
kfree(blob, sizeof (*blob));
}
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
+#endif
uip->cs_blobs = NULL;
}
struct ubc_info *uip;
struct cs_blob *blobs;
- vnode_lock_spin(vp);
+ /*
+ * No need to take the vnode lock here. The caller must be holding
+ * a reference on the vnode (via a VM mapping or open file descriptor),
+ * so the vnode will not go away. The ubc_info stays until the vnode
+ * goes away. And we only modify "blobs" by adding to the head of the
+ * list.
+ * The ubc_info could go away entirely if the vnode gets reclaimed as
+ * part of a forced unmount. In the case of a code-signature validation
+ * during a page fault, the "paging_in_progress" reference on the VM
+ * object guarantees that the vnode pager (and the ubc_info) won't go
+ * away during the fault.
+ * Other callers need to protect against vnode reclaim by holding the
+ * vnode lock, for example.
+ */
if (! UBCINFOEXISTS(vp)) {
blobs = NULL;
blobs = uip->cs_blobs;
out:
- vnode_unlock(vp);
-
return blobs;
}
struct cs_blob *blobs, *blob;
const CS_CodeDirectory *cd;
const CS_SuperBlob *embedded;
- off_t start_offset, end_offset;
const unsigned char *hash;
boolean_t validated;
off_t offset; /* page offset in the file */
if (kr != KERN_SUCCESS) {
/* XXX FBDP what to do !? */
printf("cs_validate_page: failed to map blob, "
- "size=0x%x kr=0x%x\n",
- blob->csb_mem_size, kr);
+ "size=0x%lx kr=0x%x\n",
+ (size_t)blob->csb_mem_size, kr);
break;
}
}
/* bogus blob ? */
continue;
}
-
- end_offset = round_page(ntohl(cd->codeLimit));
- start_offset = end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE);
+
offset = page_offset - blob->csb_base_offset;
- if (offset < start_offset ||
- offset >= end_offset) {
+ if (offset < blob->csb_start_offset ||
+ offset >= blob->csb_end_offset) {
/* our page is not covered by this blob */
continue;
}
size = PAGE_SIZE;
const uint32_t *asha1, *esha1;
- if (offset + size > codeLimit) {
+ if ((off_t)(offset + size) > codeLimit) {
/* partial page at end of segment */
assert(offset < codeLimit);
- size = codeLimit & PAGE_MASK;
+ size = (size_t) (codeLimit & PAGE_MASK);
}
/* compute the actual page's SHA1 hash */
SHA1Init(&sha1ctxt);
off_t offset,
unsigned char *cdhash)
{
- struct cs_blob *blobs, *blob;
- off_t rel_offset;
+ struct cs_blob *blobs, *blob;
+ off_t rel_offset;
+ int ret;
+
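+	/* ubc_get_cs_blobs() no longer locks the vnode itself, so take the
+	 * lock here to keep the blob list stable while the hash is copied.
+	 */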
+ vnode_lock(vp);
blobs = ubc_get_cs_blobs(vp);
for (blob = blobs;
if (blob == NULL) {
/* we didn't find a blob covering "offset" */
- return EBADEXEC; /* XXX any better error ? */
+ ret = EBADEXEC; /* XXX any better error ? */
+ } else {
+ /* get the SHA1 hash of that blob */
+ bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
+ ret = 0;
+ }
+
+ vnode_unlock(vp);
+
+ return ret;
+}
+
+#if CHECK_CS_VALIDATION_BITMAP
+#define stob(s) ((atop_64((s)) + 07) >> 3)	/* size in bytes -> bitmap bytes: one bit per page, rounded up */
+extern boolean_t root_fs_upgrade_try;
+
+/*
+ * Should we use the code-sign bitmap to avoid repeated code-sign validation?
+ * Depends:
+ * a) Is the target vnode on the root filesystem?
+ * b) Has someone tried to mount the root filesystem read-write?
+ * If answers are (a) yes AND (b) no, then we can use the bitmap.
+ */
+#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
+kern_return_t
+ubc_cs_validation_bitmap_allocate(
+ vnode_t vp)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ struct ubc_info *uip;
+ char *target_bitmap;
+ vm_object_size_t bitmap_size;
+
+ if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
+ kr = KERN_INVALID_ARGUMENT;
+ } else {
+ uip = vp->v_ubcinfo;
+
+ if ( uip->cs_valid_bitmap == NULL ) {
+ bitmap_size = stob(uip->ui_size);
+ target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
+ if (target_bitmap == 0) {
+ kr = KERN_NO_SPACE;
+ } else {
+ kr = KERN_SUCCESS;
+ }
+ if( kr == KERN_SUCCESS ) {
+ memset( target_bitmap, 0, (size_t)bitmap_size);
+ uip->cs_valid_bitmap = (void*)target_bitmap;
+ uip->cs_valid_bitmap_size = bitmap_size;
+ }
+ }
+ }
+ return kr;
+}
+
+kern_return_t
+ubc_cs_check_validation_bitmap (
+ vnode_t vp,
+ memory_object_offset_t offset,
+ int optype)
+{
+ kern_return_t kr = KERN_SUCCESS;
+
+ if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
+ kr = KERN_INVALID_ARGUMENT;
+ } else {
+ struct ubc_info *uip = vp->v_ubcinfo;
+ char *target_bitmap = uip->cs_valid_bitmap;
+
+ if ( target_bitmap == NULL ) {
+ kr = KERN_INVALID_ARGUMENT;
+ } else {
+ uint64_t bit, byte;
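+			/* One bit per page: atop_64() converts the byte
+			 * offset to a page number, which is the bit index;
+			 * dividing by eight gives the byte index.
+			 */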
+ bit = atop_64( offset );
+ byte = bit >> 3;
+
+ if ( byte > uip->cs_valid_bitmap_size ) {
+ kr = KERN_INVALID_ARGUMENT;
+ } else {
+
+ if (optype == CS_BITMAP_SET) {
+ target_bitmap[byte] |= (1 << (bit & 07));
+ kr = KERN_SUCCESS;
+ } else if (optype == CS_BITMAP_CLEAR) {
+ target_bitmap[byte] &= ~(1 << (bit & 07));
+ kr = KERN_SUCCESS;
+ } else if (optype == CS_BITMAP_CHECK) {
+ if ( target_bitmap[byte] & (1 << (bit & 07))) {
+ kr = KERN_SUCCESS;
+ } else {
+ kr = KERN_FAILURE;
+ }
+ }
+ }
+ }
+ }
+ return kr;
+}
+
+void
+ubc_cs_validation_bitmap_deallocate(
+ vnode_t vp)
+{
+ struct ubc_info *uip;
+ void *target_bitmap;
+ vm_object_size_t bitmap_size;
+
+ if ( UBCINFOEXISTS(vp)) {
+ uip = vp->v_ubcinfo;
+
+ if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
+ bitmap_size = uip->cs_valid_bitmap_size;
+ kfree( target_bitmap, (vm_size_t) bitmap_size );
+ uip->cs_valid_bitmap = NULL;
+ }
}
+}
+#else
+kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
+ return KERN_INVALID_ARGUMENT;
+}
- /* get the SHA1 hash of that blob */
- bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
+kern_return_t ubc_cs_check_validation_bitmap(
+ __unused struct vnode *vp,
+ __unused memory_object_offset_t offset,
+ __unused int optype){
- return 0;
+ return KERN_INVALID_ARGUMENT;
+}
+
+void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
+ return;
}
+#endif /* CHECK_CS_VALIDATION_BITMAP */