X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cf7d32b81c573a0536dc4da4157f9c26f8d0bed3..13f56ec4e58bf8687e2a68032c093c0213dd519b:/bsd/kern/ubc_subr.c

diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c
index 43cd45431..c7661e41b 100644
--- a/bsd/kern/ubc_subr.c
+++ b/bsd/kern/ubc_subr.c
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -66,11 +67,15 @@
 #include
 
+#include
+
 /* XXX These should be in a BSD accessible Mach header, but aren't. */
 extern kern_return_t memory_object_pages_resident(memory_object_control_t, boolean_t *);
 extern kern_return_t memory_object_signed(memory_object_control_t control, boolean_t is_signed);
+extern boolean_t memory_object_is_slid(memory_object_control_t control);
+
 extern void Debugger(const char *message);
 
@@ -85,7 +90,7 @@ kern_return_t ubc_page_op_with_control(
 #if DIAGNOSTIC
 #if defined(assert)
-#undef assert()
+#undef assert
 #endif
 #define assert(cond)    \
     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
@@ -105,6 +110,9 @@ struct zone *ubc_info_zone;
  * CODESIGNING
  * Routines to navigate code signing data structures in the kernel...
  */
+
+extern int cs_debug;
+
 static boolean_t
 cs_valid_range(
     const void *start,
@@ -134,11 +142,14 @@ enum {
     CSMAGIC_CODEDIRECTORY = 0xfade0c02,          /* CodeDirectory blob */
     CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0,     /* embedded form of signature data */
     CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02, /* XXX */
+    CSMAGIC_EMBEDDED_ENTITLEMENTS = 0xfade7171,  /* embedded entitlements */
     CSMAGIC_DETACHED_SIGNATURE = 0xfade0cc1,     /* multi-arch collection of embedded signatures */
     CSSLOT_CODEDIRECTORY = 0,                    /* slot index for CodeDirectory */
+    CSSLOT_ENTITLEMENTS = 5
 };
 
+static const uint32_t supportsScatter = 0x20100;    // first version to support scatter option
 
 /*
  * Structure of an embedded-signature SuperBlob
@@ -156,6 +167,18 @@ typedef struct __SuperBlob {
     /* followed by Blobs in no particular order as indicated by offsets in index */
 } CS_SuperBlob;
 
+typedef struct __GenericBlob {
+    uint32_t magic;        /* magic number */
+    uint32_t length;       /* total length of blob */
+    char data[];
+} CS_GenericBlob;
+
+struct Scatter {
+    uint32_t count;        // number of pages; zero for sentinel (only)
+    uint32_t base;         // first page number
+    uint64_t targetOffset; // offset in target
+    uint64_t spare;        // reserved
+};
 
 /*
  * C form of a CodeDirectory.
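
For context, here is a minimal standalone sketch of how an embedded-signature SuperBlob like the one declared above is walked: the index array maps slot types to offsets, and every offset is bounds-checked before use (the role cs_valid_range() plays in the kernel). This is illustrative userspace code under assumed layouts, not xnu's implementation; the helper name find_code_directory is invented for the example.

    /* Sketch: locate the CodeDirectory inside a signature buffer. */
    #include <arpa/inet.h>   /* ntohl */
    #include <stddef.h>
    #include <stdint.h>

    #define CSMAGIC_EMBEDDED_SIGNATURE 0xfade0cc0
    #define CSMAGIC_CODEDIRECTORY      0xfade0c02
    #define CSSLOT_CODEDIRECTORY       0

    typedef struct { uint32_t type; uint32_t offset; } CS_BlobIndex;
    typedef struct { uint32_t magic; uint32_t length; uint32_t count;
                     CS_BlobIndex index[]; } CS_SuperBlob;

    /* Returns a pointer to the CodeDirectory blob inside "buf", or NULL.
     * "len" bounds every offset before it is dereferenced. */
    static const void *
    find_code_directory(const uint8_t *buf, size_t len)
    {
        const CS_SuperBlob *sb = (const void *)buf;
        uint32_t i, count, magic;

        if (len < sizeof(*sb) || ntohl(sb->magic) != CSMAGIC_EMBEDDED_SIGNATURE)
            return NULL;
        count = ntohl(sb->count);
        if (sizeof(*sb) + (uint64_t)count * sizeof(CS_BlobIndex) > len)
            return NULL;
        for (i = 0; i < count; i++) {
            uint32_t off = ntohl(sb->index[i].offset);
            if (ntohl(sb->index[i].type) != CSSLOT_CODEDIRECTORY)
                continue;
            if (off > len - 8)          /* need at least magic + length */
                return NULL;
            memcpy(&magic, buf + off, sizeof(magic));   /* avoid unaligned read */
            if (ntohl(magic) != CSMAGIC_CODEDIRECTORY)
                return NULL;
            return buf + off;
        }
        return NULL;
    }

(The memcpy needs <string.h>; every field is big-endian on disk, hence the ntohl() on each access, exactly as in the kernel code below.)
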
@@ -175,6 +198,8 @@ typedef struct __CodeDirectory {
     uint8_t spare1;         /* unused (must be zero) */
     uint8_t pageSize;       /* log2(page size in bytes); 0 => infinite */
     uint32_t spare2;        /* unused (must be zero) */
+    /* Version 0x20100 */
+    uint32_t scatterOffset; /* offset of optional scatter vector */
     /* followed by dynamic content as located by offset fields above */
 } CS_CodeDirectory;
 
@@ -249,21 +274,83 @@ hashes(
     char *upper_bound)
 {
     const unsigned char *base, *top, *hash;
-    uint32_t nCodeSlots;
+    uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
 
     assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
 
-    base = (const unsigned char *)cd + ntohl(cd->hashOffset);
-    nCodeSlots = ntohl(cd->nCodeSlots);
-    top = base + nCodeSlots * SHA1_RESULTLEN;
-    if (!cs_valid_range(base, top,
-                        lower_bound, upper_bound) ||
-        page > nCodeSlots) {
-        return NULL;
-    }
-    assert(page < nCodeSlots);
+    if((ntohl(cd->version) >= supportsScatter) && (ntohl(cd->scatterOffset))) {
+        /* Get first scatter struct */
+        const struct Scatter *scatter = (const struct Scatter*)
+            ((const char*)cd + ntohl(cd->scatterOffset));
+        uint32_t hashindex=0, scount, sbase=0;
+        /* iterate all scatter structs */
+        do {
+            if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
+                if(cs_debug) {
+                    printf("CODE SIGNING: Scatter extends past Code Directory\n");
+                }
+                return NULL;
+            }
+
+            scount = ntohl(scatter->count);
+            uint32_t new_base = ntohl(scatter->base);
+
+            /* last scatter? */
+            if (scount == 0) {
+                return NULL;
+            }
+
+            if((hashindex > 0) && (new_base <= sbase)) {
+                if(cs_debug) {
+                    printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
+                           sbase, new_base);
+                }
+                return NULL;    /* unordered scatter array */
+            }
+            sbase = new_base;
+
+            /* this scatter beyond page we're looking for? */
+            if (sbase > page) {
+                return NULL;
+            }
+
+            if (sbase+scount >= page) {
+                /* Found the scatter struct that is
+                 * referencing our page */
+
+                /* base = address of first hash covered by scatter */
+                base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
+                    hashindex * SHA1_RESULTLEN;
+                /* top = address of first hash after this scatter */
+                top = base + scount * SHA1_RESULTLEN;
+                if (!cs_valid_range(base, top, lower_bound,
+                                    upper_bound) ||
+                    hashindex > nCodeSlots) {
+                    return NULL;
+                }
+
+                break;
+            }
+
+            /* this scatter struct is before the page we're looking
+             * for. Iterate. */
+            hashindex+=scount;
+            scatter++;
+        } while(1);
+
+        hash = base + (page - sbase) * SHA1_RESULTLEN;
+    } else {
+        base = (const unsigned char *)cd + ntohl(cd->hashOffset);
+        top = base + nCodeSlots * SHA1_RESULTLEN;
+        if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
+            page > nCodeSlots) {
+            return NULL;
+        }
+        assert(page < nCodeSlots);
 
-    hash = base + page * SHA1_RESULTLEN;
+        hash = base + page * SHA1_RESULTLEN;
+    }
+
     if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
                         lower_bound, upper_bound)) {
         hash = NULL;
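
The scatter walk above reduces to simple arithmetic: advance a running hash index by each scatter's count until the scatter covering the requested page is found. A standalone model of that arithmetic follows; it can be unit-tested outside the kernel. Names mirror the diff but are illustrative, and the model uses a strict page < base + count bound, where the kernel's sbase+scount >= page test is slightly more permissive.

    #include <stdint.h>
    #include <stdio.h>

    struct scatter { uint32_t count; uint32_t base; }; /* host byte order here */

    /* Returns the hash-slot index for "page", or -1 if no scatter covers it.
     * The vector is terminated by a sentinel with count == 0 and must be
     * sorted by ascending base, exactly as hashes() requires. */
    static long
    scatter_hash_index(const struct scatter *sc, uint32_t page)
    {
        uint32_t hashindex = 0, prev_base = 0;

        for (; sc->count != 0; hashindex += sc->count, sc++) {
            if (hashindex > 0 && sc->base <= prev_base)
                return -1;                     /* unordered vector: reject */
            prev_base = sc->base;
            if (sc->base > page)
                return -1;                     /* gap: page not covered */
            if (page < sc->base + sc->count)
                return hashindex + (page - sc->base);
        }
        return -1;                             /* hit the sentinel */
    }

    int main(void)
    {
        /* pages 0-3 and 10-11 are covered; hashes are stored contiguously */
        struct scatter sc[] = { {4, 0}, {2, 10}, {0, 0} };
        printf("%ld %ld %ld\n",
               scatter_hash_index(sc, 2),    /* 2  */
               scatter_hash_index(sc, 11),   /* 5  */
               scatter_hash_index(sc, 7));   /* -1 */
        return 0;
    }
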
@@ -276,6 +363,113 @@ hashes(
  * End of routines to navigate code signing data structures in the kernel.
  */
 
+/*
+ * ENTITLEMENTS
+ * Routines to navigate entitlements in the kernel.
+ */
+
+/* Retrieve the entitlements blob for a process.
+ * Returns:
+ *   EINVAL     no text vnode associated with the process
+ *   EBADEXEC   invalid code signing data
+ *   ENOMEM     you should reboot
+ *   0          no error occurred
+ *
+ * On success, out_start and out_length will point to the
+ * entitlements blob if found; or will be set to NULL/zero
+ * if there were no entitlements.
+ */
+int
+cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length)
+{
+    SHA1_CTX context;            /* XXX hash agility */
+    int error = 0;
+    struct cs_blob *blob_list_entry;
+    CS_SuperBlob *super_blob;
+    CS_BlobIndex *blob_index;
+    CS_GenericBlob *blob;
+    CS_CodeDirectory *code_dir;
+    unsigned char *computed_hash = NULL;
+    unsigned char *embedded_hash = NULL;
+    void *start = NULL;
+    size_t length = 0;
+    size_t hash_size = 0;
+    unsigned int i, count;
+
+    if (NULL == p->p_textvp) {
+        error = EINVAL;
+        goto out;
+    }
+    if (NULL == (blob_list_entry = ubc_cs_blob_get(p->p_textvp, -1,
+        p->p_textoff)))
+        goto out;
+    super_blob = (void *)blob_list_entry->csb_mem_kaddr;
+    if (CSMAGIC_EMBEDDED_SIGNATURE != ntohl(super_blob->magic)) {
+        error = EBADEXEC;
+        goto out;
+    }
+    count = ntohl(super_blob->count);
+    for (i = 0; i < count; ++i) {
+        blob_index = &super_blob->index[i];
+        blob = (void *)((char *)super_blob + ntohl(blob_index->offset));
+        switch (ntohl(blob_index->type)) {
+        case CSSLOT_CODEDIRECTORY:
+            if (CSMAGIC_CODEDIRECTORY != ntohl(blob->magic))
+                break;
+            code_dir = (void *)blob;
+            hash_size = code_dir->hashSize;
+            if (CSSLOT_ENTITLEMENTS <=
+                ntohl(code_dir->nSpecialSlots)) {
+                embedded_hash = (void *)((char *)code_dir +
+                    ntohl(code_dir->hashOffset) -
+                    (hash_size * CSSLOT_ENTITLEMENTS));
+            }
+            break;
+        case CSSLOT_ENTITLEMENTS:
+            if (CSMAGIC_EMBEDDED_ENTITLEMENTS != ntohl(blob->magic))
+                break;
+            start = (void *)blob;
+            length = ntohl(blob->length);
+            break;
+        default:
+            break;
+        }
+    }
+    if (NULL == start && NULL == embedded_hash) {
+        error = 0;
+        goto out;
+    } else if (NULL == start || NULL == embedded_hash) {
+        error = EBADEXEC;
+        goto out;
+    }
+    if (NULL == (computed_hash = kalloc(hash_size))) {
+        error = ENOMEM;
+        goto out;
+    }
+    SHA1Init(&context);
+    SHA1Update(&context, start, length);
+    SHA1Final(computed_hash, &context);
+    if (0 != memcmp(computed_hash, embedded_hash, hash_size)) {
+        error = EBADEXEC;
+        goto out;
+    }
+    error = 0;
+out:
+    if (NULL != computed_hash)
+        kfree(computed_hash, hash_size);
+    if (0 == error) {
+        *out_start = start;
+        *out_length = length;
+    }
+    return error;
+}
+
+/*
+ * ENTITLEMENTS
+ * End of routines to navigate entitlements in the kernel.
+ */
+
+
 /*
  * ubc_init
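
What cs_entitlements_blob_get() verifies is the special-slot convention: special-slot hashes are stored immediately before hashOffset, so the entitlements hash sits at hashOffset - 5 * hashSize (slot CSSLOT_ENTITLEMENTS), and that hash must cover the entire entitlements GenericBlob including its magic and length fields. A hedged standalone sketch of that check, using OpenSSL's SHA1 in place of the kernel's SHA1Init/Update/Final, with fields assumed already bounds-checked and converted to host byte order:

    #include <openssl/sha.h>     /* SHA1, SHA_DIGEST_LENGTH; link -lcrypto */
    #include <stdint.h>
    #include <string.h>

    #define CSSLOT_ENTITLEMENTS 5

    /* cd_base: start of the CodeDirectory; ent: entitlements GenericBlob.
     * Returns 0 on match, -1 on mismatch or if the slot is absent. */
    static int
    check_entitlements_slot(const uint8_t *cd_base, uint32_t hash_offset,
                            uint32_t n_special_slots, uint8_t hash_size,
                            const uint8_t *ent, uint32_t ent_len)
    {
        unsigned char computed[SHA_DIGEST_LENGTH];
        const uint8_t *embedded;

        if (n_special_slots < CSSLOT_ENTITLEMENTS ||
            hash_size != SHA_DIGEST_LENGTH)
            return -1;
        /* special slots grow downward from hashOffset */
        embedded = cd_base + hash_offset -
                   (size_t)hash_size * CSSLOT_ENTITLEMENTS;
        SHA1(ent, ent_len, computed);    /* hash covers the whole blob,
                                          * including magic and length */
        return memcmp(computed, embedded, hash_size) == 0 ? 0 : -1;
    }
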
@@ -297,6 +491,8 @@ ubc_init(void)
     i = (vm_size_t) sizeof (struct ubc_info);
 
     ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
+
+    zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
 }
 
@@ -546,8 +742,13 @@ ubc_setsize(struct vnode *vp, off_t nsize)
      */
     uip->ui_size = nsize;
 
-    if (nsize >= osize)    /* Nothing more to do */
+    if (nsize >= osize) {  /* Nothing more to do */
+        if (nsize > osize) {
+            lock_vnode_and_post(vp, NOTE_EXTEND);
+        }
+
         return (1);        /* return success */
+    }
 
     /*
      * When the file shrinks, invalidate the pages beyond the
@@ -584,6 +785,12 @@ ubc_setsize(struct vnode *vp, off_t nsize)
             lastpg += PAGE_SIZE_64;
         }
         if (olastpgend > lastpg) {
+            int flags;
+
+            if (lastpg == 0)
+                flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
+            else
+                flags = MEMORY_OBJECT_DATA_FLUSH;
             /*
              * invalidate the pages beyond the new EOF page
              *
@@ -591,8 +798,7 @@ ubc_setsize(struct vnode *vp, off_t nsize)
             kret = memory_object_lock_request(control,
                 (memory_object_offset_t)lastpg,
                 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
-                MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH,
-                VM_PROT_NO_CHANGE);
+                MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
             if (kret != KERN_SUCCESS)
                 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
         }
@@ -843,7 +1049,6 @@ ubc_setcred(struct vnode *vp, proc_t p)
     return (1);
 }
 
-
 /*
  * ubc_getpager
  *
@@ -901,6 +1106,16 @@ ubc_getobject(struct vnode *vp, __unused int flags)
     return (MEMORY_OBJECT_CONTROL_NULL);
 }
 
+boolean_t
+ubc_strict_uncached_IO(struct vnode *vp)
+{
+    boolean_t result = FALSE;
+
+    if (UBCINFOEXISTS(vp)) {
+        result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
+    }
+    return result;
+}
 
 /*
  * ubc_blktooff
@@ -1433,7 +1648,7 @@ ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
 
     if (!locked)
-        vnode_lock(vp);
+        vnode_lock_spin(vp);
 
     if ((vp->v_usecount - vp->v_kusecount) > busycount)
         retval = 1;
@@ -1469,7 +1684,7 @@ ubc_unmap(struct vnode *vp)
     struct ubc_info *uip;
     int    need_rele = 0;
     int    need_wakeup = 0;
-    
+
     if (vnode_getwithref(vp))
         return;
@@ -1485,14 +1700,14 @@ ubc_unmap(struct vnode *vp)
         SET(uip->ui_flags, UI_MAPBUSY);
 
         if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
-                CLR(uip->ui_flags, UI_ISMAPPED);
+            CLR(uip->ui_flags, UI_ISMAPPED);
             need_rele = 1;
         }
         vnode_unlock(vp);
-        
+
         if (need_rele) {
-                (void) VNOP_MNOMAP(vp, vfs_context_current());
-            vnode_rele(vp);
+            (void)VNOP_MNOMAP(vp, vfs_context_current());
+            vnode_rele(vp);
         }
 
         vnode_lock_spin(vp);
@@ -1505,7 +1720,7 @@ ubc_unmap(struct vnode *vp)
         vnode_unlock(vp);
 
         if (need_wakeup)
-                wakeup(&uip->ui_flags);
+            wakeup(&uip->ui_flags);
 }
 
 /*
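
The shrink path of ubc_setsize() now distinguishes truncation to within the first page (flush everything, MEMORY_OBJECT_DATA_FLUSH_ALL) from a partial shrink (flush only pages past the new EOF page). A standalone model of that page arithmetic, with an illustrative 4 KB page size; the kernel performs the actual invalidation through memory_object_lock_request():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_64 4096ULL
    #define PAGE_MASK_64 (PAGE_SIZE_64 - 1)

    enum { DATA_FLUSH, DATA_FLUSH_ALL };    /* illustrative stand-ins */

    static void
    shrink_plan(uint64_t osize, uint64_t nsize)
    {
        uint64_t lastpg     = nsize & ~PAGE_MASK_64;  /* page holding new EOF */
        uint64_t olastpgend = (osize + PAGE_MASK_64) & ~PAGE_MASK_64;

        if (nsize & PAGE_MASK_64)
            lastpg += PAGE_SIZE_64;  /* partial EOF page stays; flush after it */

        if (olastpgend > lastpg) {
            int flags = (lastpg == 0) ? DATA_FLUSH_ALL : DATA_FLUSH;
            printf("flush [%llu, %llu) flags=%s\n",
                   (unsigned long long)lastpg, (unsigned long long)olastpgend,
                   flags == DATA_FLUSH_ALL ? "FLUSH_ALL" : "FLUSH");
        }
    }

    int main(void)
    {
        shrink_plan(3 * 4096 + 100, 4096 + 10); /* keep partial page 1, flush rest */
        shrink_plan(2 * 4096, 0);               /* truncate to zero: FLUSH_ALL    */
        return 0;
    }
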
@@ -1734,39 +1949,61 @@ kern_return_t
 ubc_create_upl(
     struct vnode    *vp,
     off_t           f_offset,
-    long            bufsize,
+    int             bufsize,
     upl_t           *uplp,
     upl_page_info_t **plp,
     int             uplflags)
 {
     memory_object_control_t    control;
-    mach_msg_type_number_t     count;
-    int                        ubcflags;
     kern_return_t              kr;
+
+    if (plp != NULL)
+        *plp = NULL;
+    *uplp = NULL;
 
     if (bufsize & 0xfff)
         return KERN_INVALID_ARGUMENT;
 
-    if (uplflags & UPL_FOR_PAGEOUT) {
+    if (bufsize > MAX_UPL_SIZE * PAGE_SIZE)
+        return KERN_INVALID_ARGUMENT;
+
+    if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
+
+        if (uplflags & UPL_UBC_MSYNC) {
+            uplflags &= UPL_RET_ONLY_DIRTY;
+
+            uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
+                        UPL_SET_INTERNAL | UPL_SET_LITE;
+
+        } else if (uplflags & UPL_UBC_PAGEOUT) {
+            uplflags &= UPL_RET_ONLY_DIRTY;
+
+            if (uplflags & UPL_RET_ONLY_DIRTY)
+                uplflags |= UPL_NOBLOCK;
+
+            uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
+                        UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
+        } else {
+            uplflags |= UPL_RET_ONLY_ABSENT | UPL_NOBLOCK |
+                        UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
+                        UPL_SET_INTERNAL | UPL_SET_LITE;
+        }
+    } else {
         uplflags &= ~UPL_FOR_PAGEOUT;
-        ubcflags  =  UBC_FOR_PAGEOUT;
-    } else
-        ubcflags = UBC_FLAGS_NONE;
 
-    control = ubc_getobject(vp, ubcflags);
+        if (uplflags & UPL_WILL_BE_DUMPED) {
+            uplflags &= ~UPL_WILL_BE_DUMPED;
+            uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
+        } else
+            uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
+    }
+    control = ubc_getobject(vp, UBC_FLAGS_NONE);
     if (control == MEMORY_OBJECT_CONTROL_NULL)
         return KERN_INVALID_ARGUMENT;
 
-    if (uplflags & UPL_WILL_BE_DUMPED) {
-        uplflags &= ~UPL_WILL_BE_DUMPED;
-        uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
-    } else
-        uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
-    count = 0;
-
-    kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, &count, uplflags);
-    if (plp != NULL)
-        *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
+    kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
+    if (kr == KERN_SUCCESS && plp != NULL)
+        *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
     return kr;
 }
 
@@ -1925,8 +2162,8 @@ ubc_upl_commit(
 kern_return_t
 ubc_upl_commit_range(
     upl_t           upl,
-    vm_offset_t     offset,
-    vm_size_t       size,
+    upl_offset_t    offset,
+    upl_size_t      size,
     int             flags)
 {
     upl_page_info_t *pl;
@@ -1936,6 +2173,10 @@ ubc_upl_commit_range(
     if (flags & UPL_COMMIT_FREE_ON_EMPTY)
         flags |= UPL_COMMIT_NOTIFY_EMPTY;
 
+    if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
     pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
 
     kr = upl_commit_range(upl, offset, size, flags,
@@ -1996,8 +2237,8 @@ ubc_upl_commit_range(
 kern_return_t
 ubc_upl_abort_range(
     upl_t           upl,
-    vm_offset_t     offset,
-    vm_size_t       size,
+    upl_offset_t    offset,
+    upl_size_t      size,
     int             abort_flags)
 {
     kern_return_t   kr;
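
The rewritten ubc_create_upl() folds the old UBC_FOR_PAGEOUT handling into three caller intents (msync, pageout, pagein) that it translates into internal UPL flags before issuing the request. A standalone model of that translation; the flag values below are illustrative placeholders, not the kernel's definitions:

    #include <stdio.h>

    enum {
        UBC_MSYNC = 1 << 0, UBC_PAGEOUT = 1 << 1, UBC_PAGEIN = 1 << 2,
        RET_ONLY_DIRTY = 1 << 3,
        /* derived flags */
        COPYOUT_FROM = 1 << 4, CLEAN_IN_PLACE = 1 << 5, SET_INTERNAL = 1 << 6,
        SET_LITE = 1 << 7, NOBLOCK = 1 << 8, FOR_PAGEOUT = 1 << 9,
        RET_ONLY_ABSENT = 1 << 10, NO_SYNC = 1 << 11,
    };

    static int translate(int f)
    {
        if (f & UBC_MSYNC) {
            f &= RET_ONLY_DIRTY;          /* keep only the dirty filter */
            f |= COPYOUT_FROM | CLEAN_IN_PLACE | SET_INTERNAL | SET_LITE;
        } else if (f & UBC_PAGEOUT) {
            f &= RET_ONLY_DIRTY;
            if (f & RET_ONLY_DIRTY)
                f |= NOBLOCK;             /* don't wait on busy pages */
            f |= FOR_PAGEOUT | CLEAN_IN_PLACE | COPYOUT_FROM |
                 SET_INTERNAL | SET_LITE;
        } else {                          /* UBC_PAGEIN */
            f |= RET_ONLY_ABSENT | NOBLOCK | NO_SYNC | CLEAN_IN_PLACE |
                 SET_INTERNAL | SET_LITE;
        }
        return f;
    }

    int main(void)
    {
        printf("pageout+dirty -> %#x\n", translate(UBC_PAGEOUT | RET_ONLY_DIRTY));
        printf("pagein        -> %#x\n", translate(UBC_PAGEIN));
        return 0;
    }
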
@@ -2106,23 +2347,55 @@ UBCINFOEXISTS(struct vnode * vp)
 /*
  * CODE SIGNING
  */
-#define CS_BLOB_KEEP_IN_KERNEL 1
+#define CS_BLOB_PAGEABLE 0
 static volatile SInt32 cs_blob_size = 0;
 static volatile SInt32 cs_blob_count = 0;
 static SInt32 cs_blob_size_peak = 0;
 static UInt32 cs_blob_size_max = 0;
 static SInt32 cs_blob_count_peak = 0;
-extern int cs_debug;
 
 int cs_validation = 1;
 
-SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW, &cs_validation, 0, "Do validate code signatures");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD, &cs_blob_count, 0, "Current number of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD, &cs_blob_size, 0, "Current size of all code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
-SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD, &cs_blob_size_max, 0, "Size of biggest code signature blob");
+SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
+SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
+
+kern_return_t
+ubc_cs_blob_allocate(
+    vm_offset_t *blob_addr_p,
+    vm_size_t   *blob_size_p)
+{
+    kern_return_t kr;
+
+#if CS_BLOB_PAGEABLE
+    *blob_size_p = round_page(*blob_size_p);
+    kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p);
+#else  /* CS_BLOB_PAGEABLE */
+    *blob_addr_p = (vm_offset_t) kalloc(*blob_size_p);
+    if (*blob_addr_p == 0) {
+        kr = KERN_NO_SPACE;
+    } else {
+        kr = KERN_SUCCESS;
+    }
+#endif /* CS_BLOB_PAGEABLE */
+    return kr;
+}
+
+void
+ubc_cs_blob_deallocate(
+    vm_offset_t blob_addr,
+    vm_size_t   blob_size)
+{
+#if CS_BLOB_PAGEABLE
+    kmem_free(kernel_map, blob_addr, blob_size);
+#else  /* CS_BLOB_PAGEABLE */
+    kfree((void *) blob_addr, blob_size);
+#endif /* CS_BLOB_PAGEABLE */
+}
 
 int
 ubc_cs_blob_add(
     struct vnode    *vp,
@@ -2148,6 +2421,7 @@ ubc_cs_blob_add(
         return ENOMEM;
     }
 
+#if CS_BLOB_PAGEABLE
     /* get a memory entry on the blob */
     blob_size = (memory_object_size_t) size;
     kr = mach_make_memory_entry_64(kernel_map,
@@ -2162,13 +2436,16 @@ ubc_cs_blob_add(
     }
     if (memory_object_round_page(blob_size) !=
         (memory_object_size_t) round_page(size)) {
-        printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%x !?\n",
-               blob_size, size);
-        panic("XXX FBDP size mismatch 0x%llx 0x%x\n", blob_size, size);
+        printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
+               blob_size, (size_t)size);
+        panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
         error = EINVAL;
         goto out;
     }
-
+#else
+    blob_size = (memory_object_size_t) size;
+    blob_handle = IPC_PORT_NULL;
+#endif
 
     /* fill in the new blob */
     blob->csb_cpu_type = cputype;
@@ -2177,7 +2454,6 @@ ubc_cs_blob_add(
     blob->csb_mem_offset = 0;
     blob->csb_mem_handle = blob_handle;
     blob->csb_mem_kaddr = addr;
-
 
     /*
      * Validate the blob's contents
@@ -2192,13 +2468,19 @@ ubc_cs_blob_add(
         blob->csb_start_offset = 0;
         blob->csb_end_offset = 0;
     } else {
-        unsigned char *sha1_base;
+        const unsigned char *sha1_base;
         int sha1_size;
 
         blob->csb_flags = ntohl(cd->flags) | CS_VALID;
         blob->csb_end_offset = round_page(ntohl(cd->codeLimit));
-        blob->csb_start_offset = (blob->csb_end_offset -
-                                  (ntohl(cd->nCodeSlots) * PAGE_SIZE));
+        if((ntohl(cd->version) >= supportsScatter) && (ntohl(cd->scatterOffset))) {
+            const struct Scatter *scatter = (const struct Scatter*)
+                ((const char*)cd + ntohl(cd->scatterOffset));
+            blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE;
+        } else {
+            blob->csb_start_offset = (blob->csb_end_offset -
+                                      (ntohl(cd->nCodeSlots) * PAGE_SIZE));
+        }
         /* compute the blob's SHA1 hash */
         sha1_base = (const unsigned char *) cd;
         sha1_size = ntohl(cd->length);
@@ -2207,7 +2489,15 @@ ubc_cs_blob_add(
         SHA1Final(blob->csb_sha1, &sha1ctxt);
     }
 
-
+    /*
+     * Let policy module check whether the blob's signature is accepted.
+     */
+#if CONFIG_MACF
+    error = mac_vnode_check_signature(vp, blob->csb_sha1, (void*)addr, size);
+    if (error)
+        goto out;
+#endif
+
     /*
      * Validate the blob's coverage
      */
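
ubc_cs_blob_add() now derives the blob's coverage range two ways: scattered signatures start at the first scatter's base page, while contiguous ones are computed backward from codeLimit. A standalone model of that computation (host byte order, illustrative page size, invented helper name):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_    4096ULL
    #define round_page_(x) (((x) + PAGE_SIZE_ - 1) & ~(PAGE_SIZE_ - 1))

    static void coverage(uint32_t code_limit, uint32_t n_code_slots,
                         int has_scatter, uint32_t scatter_base)
    {
        uint64_t end   = round_page_(code_limit);
        uint64_t start = has_scatter ? (uint64_t)scatter_base * PAGE_SIZE_
                                     : end - (uint64_t)n_code_slots * PAGE_SIZE_;
        printf("covers [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
    }

    int main(void)
    {
        coverage(5 * 4096 + 123, 6, 0, 0);  /* contiguous: [0, 6 pages)   */
        coverage(5 * 4096 + 123, 6, 1, 4);  /* scatter starting at page 4 */
        return 0;
    }
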
@@ -2305,15 +2595,15 @@ ubc_cs_blob_add(
     if (cs_blob_count > cs_blob_count_peak) {
         cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
     }
-    OSAddAtomic(+blob->csb_mem_size, &cs_blob_size);
-    if (cs_blob_size > cs_blob_size_peak) {
-        cs_blob_size_peak = cs_blob_size; /* XXX atomic ? */
+    OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
+    if ((SInt32) cs_blob_size > cs_blob_size_peak) {
+        cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
     }
-    if (blob->csb_mem_size > cs_blob_size_max) {
-        cs_blob_size_max = blob->csb_mem_size;
+    if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
+        cs_blob_size_max = (UInt32) blob->csb_mem_size;
     }
 
-    if (cs_debug) {
+    if (cs_debug > 1) {
         proc_t p;
 
         p = current_proc();
@@ -2328,10 +2618,6 @@ ubc_cs_blob_add(
                blob->csb_flags);
     }
 
-#if !CS_BLOB_KEEP_IN_KERNEL
-    blob->csb_mem_kaddr = 0;
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
-
     vnode_unlock(vp);
 
     error = 0;    /* success ! */
@@ -2347,10 +2633,6 @@ out:
             mach_memory_entry_port_release(blob_handle);
             blob_handle = IPC_PORT_NULL;
         }
-    } else {
-#if !CS_BLOB_KEEP_IN_KERNEL
-        kmem_free(kernel_map, addr, size);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
     }
 
     if (error == EAGAIN) {
@@ -2363,7 +2645,7 @@ out:
         /*
         * Since we're not failing, consume the data we received.
         */
-        kmem_free(kernel_map, addr, size);
+        ubc_cs_blob_deallocate(addr, size);
     }
 
     return error;
@@ -2421,17 +2703,21 @@ ubc_cs_free(
          blob = next_blob) {
         next_blob = blob->csb_next;
         if (blob->csb_mem_kaddr != 0) {
-            kmem_free(kernel_map,
-                      blob->csb_mem_kaddr,
-                      blob->csb_mem_size);
+            ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
+                                   blob->csb_mem_size);
            blob->csb_mem_kaddr = 0;
         }
-        mach_memory_entry_port_release(blob->csb_mem_handle);
+        if (blob->csb_mem_handle != IPC_PORT_NULL) {
+            mach_memory_entry_port_release(blob->csb_mem_handle);
+        }
         blob->csb_mem_handle = IPC_PORT_NULL;
         OSAddAtomic(-1, &cs_blob_count);
-        OSAddAtomic(-blob->csb_mem_size, &cs_blob_size);
+        OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
         kfree(blob, sizeof (*blob));
     }
+#if CHECK_CS_VALIDATION_BITMAP
+    ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
+#endif
     uip->cs_blobs = NULL;
 }
 
@@ -2442,7 +2728,20 @@ ubc_get_cs_blobs(
     struct ubc_info    *uip;
     struct cs_blob     *blobs;
 
-    vnode_lock_spin(vp);
+    /*
+     * No need to take the vnode lock here.  The caller must be holding
+     * a reference on the vnode (via a VM mapping or open file descriptor),
+     * so the vnode will not go away.  The ubc_info stays until the vnode
+     * goes away.  And we only modify "blobs" by adding to the head of the
+     * list.
+     * The ubc_info could go away entirely if the vnode gets reclaimed as
+     * part of a forced unmount.  In the case of a code-signature validation
+     * during a page fault, the "paging_in_progress" reference on the VM
+     * object guarantess that the vnode pager (and the ubc_info) won't go
+     * away during the fault.
+     * Other callers need to protect against vnode reclaim by holding the
+     * vnode lock, for example.
+     */
 
     if (! UBCINFOEXISTS(vp)) {
         blobs = NULL;
@@ -2453,8 +2752,6 @@ ubc_get_cs_blobs(
     blobs = uip->cs_blobs;
 
 out:
-    vnode_unlock(vp);
-
     return blobs;
 }
@@ -2474,7 +2771,6 @@ cs_validate_page(
     struct cs_blob *blobs, *blob;
     const CS_CodeDirectory *cd;
     const CS_SuperBlob *embedded;
-    off_t start_offset, end_offset;
     const unsigned char *hash;
     boolean_t validated;
     off_t offset;    /* page offset in the file */
@@ -2520,8 +2816,8 @@ cs_validate_page(
             if (kr != KERN_SUCCESS) {
                 /* XXX FBDP what to do !? */
                 printf("cs_validate_page: failed to map blob, "
-                       "size=0x%x kr=0x%x\n",
-                       blob->csb_mem_size, kr);
+                       "size=0x%lx kr=0x%x\n",
+                       (size_t)blob->csb_mem_size, kr);
                 break;
             }
         }
@@ -2537,21 +2833,13 @@ cs_validate_page(
             cd->hashType != 0x1 ||
             cd->hashSize != SHA1_RESULTLEN) {
             /* bogus blob ? */
-#if !CS_BLOB_KEEP_IN_KERNEL
-            kmem_free(kernel_map, kaddr, ksize);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
             continue;
         }
-
-        end_offset = round_page(ntohl(cd->codeLimit));
-        start_offset = end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE);
+
         offset = page_offset - blob->csb_base_offset;
-        if (offset < start_offset ||
-            offset >= end_offset) {
+        if (offset < blob->csb_start_offset ||
+            offset >= blob->csb_end_offset) {
             /* our page is not covered by this blob */
-#if !CS_BLOB_KEEP_IN_KERNEL
-            kmem_free(kernel_map, kaddr, ksize);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
             continue;
         }
 
@@ -2564,11 +2852,6 @@ cs_validate_page(
             found_hash = TRUE;
         }
 
-#if !CS_BLOB_KEEP_IN_KERNEL
-        /* we no longer need that blob in the kernel map */
-        kmem_free(kernel_map, kaddr, ksize);
-#endif /* CS_BLOB_KEEP_IN_KERNEL */
-
         break;
     }
 
@@ -2591,17 +2874,17 @@ cs_validate_page(
         validated = FALSE;
         *tainted = FALSE;
     } else {
-        const uint32_t *asha1, *esha1;
 
         size = PAGE_SIZE;
-        if (offset + size > codeLimit) {
+        const uint32_t *asha1, *esha1;
+        if ((off_t)(offset + size) > codeLimit) {
            /* partial page at end of segment */
            assert(offset < codeLimit);
-           size = codeLimit & PAGE_MASK;
+           size = (size_t) (codeLimit & PAGE_MASK);
        }
        /* compute the actual page's SHA1 hash */
        SHA1Init(&sha1ctxt);
-       SHA1Update(&sha1ctxt, data, size);
+       SHA1UpdateUsePhysicalAddress(&sha1ctxt, data, size);
        SHA1Final(actual_hash, &sha1ctxt);
 
        asha1 = (const uint32_t *) actual_hash;
@@ -2641,8 +2924,11 @@ ubc_cs_getcdhash(
     off_t           offset,
     unsigned char   *cdhash)
 {
-    struct cs_blob  *blobs, *blob;
-    off_t           rel_offset;
+    struct cs_blob  *blobs, *blob;
+    off_t           rel_offset;
+    int             ret;
+
+    vnode_lock(vp);
 
     blobs = ubc_get_cs_blobs(vp);
     for (blob = blobs;
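
The per-page check in cs_validate_page() reduces to: hash PAGE_SIZE bytes, except for the final partial page, which is hashed only up to codeLimit, then compare against the expected hash from the CodeDirectory. A standalone sketch of that step, using OpenSSL's SHA1 in place of the kernel's SHA1UpdateUsePhysicalAddress() path:

    #include <openssl/sha.h>     /* SHA1; link -lcrypto */
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SZ  4096u
    #define PAGE_MSK (PAGE_SZ - 1)

    /* Returns 1 if "page_data" at file offset "offset" matches "expected". */
    static int
    validate_page(const void *page_data, uint64_t offset, uint64_t code_limit,
                  const unsigned char expected[SHA_DIGEST_LENGTH])
    {
        unsigned char actual[SHA_DIGEST_LENGTH];
        size_t size = PAGE_SZ;

        if (offset + size > code_limit)     /* partial page at end of segment */
            size = (size_t)(code_limit & PAGE_MSK);
        SHA1((const unsigned char *)page_data, size, actual);
        return memcmp(actual, expected, sizeof(actual)) == 0;
    }
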
@@ -2659,11 +2945,138 @@ ubc_cs_getcdhash(
 
     if (blob == NULL) {
         /* we didn't find a blob covering "offset" */
-        return EBADEXEC; /* XXX any better error ? */
+        ret = EBADEXEC; /* XXX any better error ? */
+    } else {
+        /* get the SHA1 hash of that blob */
+        bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
+        ret = 0;
     }
 
-    /* get the SHA1 hash of that blob */
-    bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
+    vnode_unlock(vp);
+
+    return ret;
+}
+
+#if CHECK_CS_VALIDATION_BITMAP
+#define stob(s)    ((atop_64((s)) + 07) >> 3)
+extern boolean_t    root_fs_upgrade_try;
+
+/*
+ * Should we use the code-sign bitmap to avoid repeated code-sign validation?
+ * Depends:
+ * a) Is the target vnode on the root filesystem?
+ * b) Has someone tried to mount the root filesystem read-write?
+ * If answers are (a) yes AND (b) no, then we can use the bitmap.
+ */
+#define USE_CODE_SIGN_BITMAP(vp)    ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
+
+kern_return_t
+ubc_cs_validation_bitmap_allocate(
+    vnode_t        vp)
+{
+    kern_return_t    kr = KERN_SUCCESS;
+    struct ubc_info *uip;
+    char            *target_bitmap;
+    vm_object_size_t bitmap_size;
+
+    if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
+        kr = KERN_INVALID_ARGUMENT;
+    } else {
+        uip = vp->v_ubcinfo;
+
+        if ( uip->cs_valid_bitmap == NULL ) {
+            bitmap_size = stob(uip->ui_size);
+            target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
+            if (target_bitmap == 0) {
+                kr = KERN_NO_SPACE;
+            } else {
+                kr = KERN_SUCCESS;
+            }
+            if( kr == KERN_SUCCESS ) {
+                memset( target_bitmap, 0, (size_t)bitmap_size);
+                uip->cs_valid_bitmap = (void*)target_bitmap;
+                uip->cs_valid_bitmap_size = bitmap_size;
+            }
+        }
+    }
+    return kr;
+}
+
+kern_return_t
+ubc_cs_check_validation_bitmap (
+    vnode_t                 vp,
+    memory_object_offset_t  offset,
+    int                     optype)
+{
+    kern_return_t    kr = KERN_SUCCESS;
+
+    if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
+        kr = KERN_INVALID_ARGUMENT;
+    } else {
+        struct ubc_info *uip = vp->v_ubcinfo;
+        char            *target_bitmap = uip->cs_valid_bitmap;
+
+        if ( target_bitmap == NULL ) {
+            kr = KERN_INVALID_ARGUMENT;
+        } else {
+            uint64_t    bit, byte;
+            bit = atop_64( offset );
+            byte = bit >> 3;
+
+            if ( byte > uip->cs_valid_bitmap_size ) {
+                kr = KERN_INVALID_ARGUMENT;
+            } else {
+
+                if (optype == CS_BITMAP_SET) {
+                    target_bitmap[byte] |= (1 << (bit & 07));
+                    kr = KERN_SUCCESS;
+                } else if (optype == CS_BITMAP_CLEAR) {
+                    target_bitmap[byte] &= ~(1 << (bit & 07));
+                    kr = KERN_SUCCESS;
+                } else if (optype == CS_BITMAP_CHECK) {
+                    if ( target_bitmap[byte] & (1 << (bit & 07))) {
+                        kr = KERN_SUCCESS;
+                    } else {
+                        kr = KERN_FAILURE;
+                    }
+                }
+            }
+        }
+    }
+    return kr;
+}
+
+void
+ubc_cs_validation_bitmap_deallocate(
+    vnode_t        vp)
+{
+    struct ubc_info *uip;
+    void            *target_bitmap;
+    vm_object_size_t bitmap_size;
 
-    return 0;
+    if ( UBCINFOEXISTS(vp)) {
+        uip = vp->v_ubcinfo;
+
+        if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
+            bitmap_size = uip->cs_valid_bitmap_size;
+            kfree( target_bitmap, (vm_size_t) bitmap_size );
+            uip->cs_valid_bitmap = NULL;
+        }
+    }
+}
+#else
+kern_return_t    ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
+    return KERN_INVALID_ARGUMENT;
+}
+
+kern_return_t ubc_cs_check_validation_bitmap(
+    __unused struct vnode *vp,
+    __unused memory_object_offset_t offset,
+    __unused int optype){
+
+    return KERN_INVALID_ARGUMENT;
+}
+
+void    ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
+    return;
+}
+#endif /* CHECK_CS_VALIDATION_BITMAP */
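
The validation bitmap keeps one bit per page: stob() converts a file size to bitmap bytes, and set/clear/check all share the same bit arithmetic. A standalone model follows (illustrative page size; it also uses a strict byte >= size bound where the kernel checks byte > cs_valid_bitmap_size):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT_ 12
    #define atop_(s)    ((uint64_t)(s) >> PAGE_SHIFT_)
    #define stob_(s)    ((atop_(s) + 07) >> 3)   /* page bits -> bytes, rounded up */

    enum { BITMAP_SET, BITMAP_CLEAR, BITMAP_CHECK };

    /* Returns 0 on success / bit set, 1 when CHECK finds the bit clear,
     * -1 when the offset lies past the bitmap. */
    static int bitmap_op(char *bm, uint64_t bm_size, uint64_t offset, int op)
    {
        uint64_t bit = atop_(offset), byte = bit >> 3;

        if (byte >= bm_size)
            return -1;
        switch (op) {
        case BITMAP_SET:   bm[byte] |= (1 << (bit & 07));  return 0;
        case BITMAP_CLEAR: bm[byte] &= ~(1 << (bit & 07)); return 0;
        default:           return (bm[byte] & (1 << (bit & 07))) ? 0 : 1;
        }
    }

    int main(void)
    {
        uint64_t file_size = 10 * 4096 + 1, bm_size = stob_(file_size);
        char *bm = calloc(1, (size_t)bm_size);

        bitmap_op(bm, bm_size, 3 * 4096, BITMAP_SET);
        printf("page 3: %d, page 4: %d\n",
               bitmap_op(bm, bm_size, 3 * 4096, BITMAP_CHECK),   /* 0: validated */
               bitmap_op(bm, bm_size, 4 * 4096, BITMAP_CHECK));  /* 1: not set   */
        free(bm);
        return 0;
    }
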