X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..ecc0ceb4089d506a0b8d16686a95817b331af9cb:/bsd/kern/ubc_subr.c

diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c
index 75abf3d39..3e4353a97 100644
--- a/bsd/kern/ubc_subr.c
+++ b/bsd/kern/ubc_subr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -50,11 +50,15 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
@@ -65,12 +69,20 @@
 #include	/* last */
 #include

+#include
+#include
+
+#include
+#include

 /* XXX These should be in a BSD accessible Mach header, but aren't. */
 extern kern_return_t memory_object_pages_resident(memory_object_control_t, boolean_t *);
 extern kern_return_t memory_object_signed(memory_object_control_t control, boolean_t is_signed);
+extern boolean_t memory_object_is_slid(memory_object_control_t control);
+extern boolean_t memory_object_is_signed(memory_object_control_t);
+
 extern void Debugger(const char *message);

@@ -85,7 +97,7 @@ kern_return_t ubc_page_op_with_control(
 #if DIAGNOSTIC
 #if defined(assert)
-#undef assert()
+#undef assert
 #endif
 #define assert(cond)    \
     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
@@ -99,12 +111,20 @@ static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
 static void ubc_cs_free(struct ubc_info *uip);

 struct zone	*ubc_info_zone;
-
+static uint32_t cs_blob_generation_count = 1;

 /*
  * CODESIGNING
  * Routines to navigate code signing data structures in the kernel...
  */
+
+extern int cs_debug;
+
+#define PAGE_SHIFT_4K	(12)
+#define PAGE_SIZE_4K	((1<<PAGE_SHIFT_4K))
+
+/*
+ * Choose among different hash algorithms.
+ * Higher is better, 0 => don't use at all.
*/ -enum { - CSMAGIC_REQUIREMENT = 0xfade0c00, /* single Requirement blob */ - CSMAGIC_REQUIREMENTS = 0xfade0c01, /* Requirements vector (internal requirements) */ - CSMAGIC_CODEDIRECTORY = 0xfade0c02, /* CodeDirectory blob */ - CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0, /* embedded form of signature data */ - CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02, /* XXX */ - CSMAGIC_DETACHED_SIGNATURE = 0xfade0cc1, /* multi-arch collection of embedded signatures */ - - CSSLOT_CODEDIRECTORY = 0, /* slot index for CodeDirectory */ +static uint32_t hashPriorities[] = { + CS_HASHTYPE_SHA1, + CS_HASHTYPE_SHA256_TRUNCATED, + CS_HASHTYPE_SHA256, + CS_HASHTYPE_SHA384, }; +static unsigned int +hash_rank(const CS_CodeDirectory *cd) +{ + uint32_t type = cd->hashType; + unsigned int n; + + for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) + if (hashPriorities[n] == type) + return n + 1; + return 0; /* not supported */ +} + /* - * Structure of an embedded-signature SuperBlob + * Locating a page hash */ -typedef struct __BlobIndex { - uint32_t type; /* type of entry */ - uint32_t offset; /* offset of entry */ -} CS_BlobIndex; +static const unsigned char * +hashes( + const CS_CodeDirectory *cd, + uint32_t page, + size_t hash_len, + const char *lower_bound, + const char *upper_bound) +{ + const unsigned char *base, *top, *hash; + uint32_t nCodeSlots = ntohl(cd->nCodeSlots); + + assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound)); -typedef struct __SuperBlob { - uint32_t magic; /* magic number */ - uint32_t length; /* total length of SuperBlob */ - uint32_t count; /* number of index entries following */ - CS_BlobIndex index[]; /* (count) entries */ - /* followed by Blobs in no particular order as indicated by offsets in index */ -} CS_SuperBlob; + if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { + /* Get first scatter struct */ + const SC_Scatter *scatter = (const SC_Scatter*) + ((const char*)cd + ntohl(cd->scatterOffset)); + uint32_t hashindex=0, scount, sbase=0; + /* iterate all scatter structs */ + do { + if((const char*)scatter > (const char*)cd + ntohl(cd->length)) { + if(cs_debug) { + printf("CODE SIGNING: Scatter extends past Code Directory\n"); + } + return NULL; + } + + scount = ntohl(scatter->count); + uint32_t new_base = ntohl(scatter->base); + /* last scatter? */ + if (scount == 0) { + return NULL; + } + + if((hashindex > 0) && (new_base <= sbase)) { + if(cs_debug) { + printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n", + sbase, new_base); + } + return NULL; /* unordered scatter array */ + } + sbase = new_base; + + /* this scatter beyond page we're looking for? */ + if (sbase > page) { + return NULL; + } + + if (sbase+scount >= page) { + /* Found the scatter struct that is + * referencing our page */ + + /* base = address of first hash covered by scatter */ + base = (const unsigned char *)cd + ntohl(cd->hashOffset) + + hashindex * hash_len; + /* top = address of first hash after this scatter */ + top = base + scount * hash_len; + if (!cs_valid_range(base, top, lower_bound, + upper_bound) || + hashindex > nCodeSlots) { + return NULL; + } + + break; + } + + /* this scatter struct is before the page we're looking + * for. Iterate. 
+			 */
+			hashindex+=scount;
+			scatter++;
+		} while(1);
+
+		hash = base + (page - sbase) * hash_len;
+	} else {
+		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
+		top = base + nCodeSlots * hash_len;
+		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
+		    page > nCodeSlots) {
+			return NULL;
+		}
+		assert(page < nCodeSlots);
+
+		hash = base + page * hash_len;
+	}
+
+	if (!cs_valid_range(hash, hash + hash_len,
+			    lower_bound, upper_bound)) {
+		hash = NULL;
+	}
+
+	return hash;
+}

 /*
- * C form of a CodeDirectory.
+ * cs_validate_codedirectory
+ *
+ * Validate the pointers inside the code directory to make sure that
+ * all offsets and lengths are constrained within the buffer.
+ *
+ * Parameters:	cd			Pointer to code directory buffer
+ *		length			Length of buffer
+ *
+ * Returns:	0			Success
+ *		EBADEXEC		Invalid code signature
  */
-typedef struct __CodeDirectory {
-	uint32_t magic;		/* magic number (CSMAGIC_CODEDIRECTORY) */
-	uint32_t length;	/* total length of CodeDirectory blob */
-	uint32_t version;	/* compatibility version */
-	uint32_t flags;		/* setup and mode flags */
-	uint32_t hashOffset;	/* offset of hash slot element at index zero */
-	uint32_t identOffset;	/* offset of identifier string */
-	uint32_t nSpecialSlots;	/* number of special hash slots */
-	uint32_t nCodeSlots;	/* number of ordinary (code) hash slots */
-	uint32_t codeLimit;	/* limit to main image signature range */
-	uint8_t hashSize;	/* size of each hash in bytes */
-	uint8_t hashType;	/* type of hash (cdHashType* constants) */
-	uint8_t spare1;		/* unused (must be zero) */
-	uint8_t pageSize;	/* log2(page size in bytes); 0 => infinite */
-	uint32_t spare2;	/* unused (must be zero) */
-	/* followed by dynamic content as located by offset fields above */
-} CS_CodeDirectory;
+static int
+cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
+{
+	struct cs_hash *hashtype;
+
+	if (length < sizeof(*cd))
+		return EBADEXEC;
+	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
+		return EBADEXEC;
+	if (cd->pageSize != PAGE_SHIFT_4K)
+		return EBADEXEC;
+	hashtype = cs_find_md(cd->hashType);
+	if (hashtype == NULL)
+		return EBADEXEC;
+
+	if (cd->hashSize != hashtype->cs_size)
+		return EBADEXEC;
+
+	if (length < ntohl(cd->hashOffset))
+		return EBADEXEC;
+
+	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
+	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots))
+		return EBADEXEC;
+
+	/* check that the code slots fit in the buffer */
+	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots))
+		return EBADEXEC;
+
+	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
+
+		if (length < ntohl(cd->scatterOffset))
+			return EBADEXEC;
+
+		const SC_Scatter *scatter = (const SC_Scatter *)
+			(((const uint8_t *)cd) + ntohl(cd->scatterOffset));
+		uint32_t nPages = 0;
+
+		/*
+		 * Check each scatter buffer; since we don't know the
+		 * length of the scatter buffer array, we have to
+		 * check each entry.
+		 */
+		while(1) {
+			/* check that the end of each scatter buffer is within the length */
+			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
+				return EBADEXEC;
+			uint32_t scount = ntohl(scatter->count);
+			if (scount == 0)
+				break;
+			if (nPages + scount < nPages)
+				return EBADEXEC;
+			nPages += scount;
+			scatter++;
+
+			/* XXX check that bases don't overlap */
+			/* XXX check that targetOffsets don't overlap */
+		}
+#if 0 /* rdar://12579439 */
+		if (nPages != ntohl(cd->nCodeSlots))
+			return EBADEXEC;
+#endif
+	}
+
+	if (length < ntohl(cd->identOffset))
+		return EBADEXEC;
+
+	/* identifier is a NUL-terminated string */
+	if (cd->identOffset) {
+		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
+		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
+			return EBADEXEC;
+	}
+
+	/* team identifier is a NUL-terminated string */
+	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
+		if (length < ntohl(cd->teamOffset))
+			return EBADEXEC;
+
+		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
+		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
+			return EBADEXEC;
+	}
+
+	return 0;
+}

 /*
- * Locate the CodeDirectory from an embedded signature blob
+ *
  */
-static const
-CS_CodeDirectory *findCodeDirectory(
-	const CS_SuperBlob *embedded,
-	char *lower_bound,
-	char *upper_bound)
+
+static int
+cs_validate_blob(const CS_GenericBlob *blob, size_t length)
 {
-	const CS_CodeDirectory *cd = NULL;
+	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
+		return EBADEXEC;
+	return 0;
+}

-	if (embedded &&
-	    cs_valid_range(embedded, embedded + 1, lower_bound, upper_bound) &&
-	    ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
-		const CS_BlobIndex *limit;
-		const CS_BlobIndex *p;
+/*
+ * cs_validate_csblob
+ *
+ * Validate the superblob/embedded code directory to make sure that
+ * all internal pointers are valid.
+ *
+ * Will validate both a superblob csblob and a "raw" code directory.
+ * + * + * Parameters: buffer Pointer to code signature + * length Length of buffer + * rcd returns pointer to code directory + * + * Returns: 0 Success + * EBADEXEC Invalid code signature + */ - limit = &embedded->index[ntohl(embedded->count)]; - if (!cs_valid_range(&embedded->index[0], limit, - lower_bound, upper_bound)) { - return NULL; - } - for (p = embedded->index; p < limit; ++p) { - if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) { - const unsigned char *base; +static int +cs_validate_csblob(const uint8_t *addr, size_t length, + const CS_CodeDirectory **rcd) +{ + const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr; + int error; - base = (const unsigned char *)embedded; - cd = (const CS_CodeDirectory *)(base + ntohl(p->offset)); - break; + *rcd = NULL; + + error = cs_validate_blob(blob, length); + if (error) + return error; + + length = ntohl(blob->length); + + if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) { + const CS_SuperBlob *sb; + uint32_t n, count; + const CS_CodeDirectory *best_cd = NULL; + unsigned int best_rank = 0; + + if (length < sizeof(CS_SuperBlob)) + return EBADEXEC; + + sb = (const CS_SuperBlob *)blob; + count = ntohl(sb->count); + + /* check that the array of BlobIndex fits in the rest of the data */ + if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) + return EBADEXEC; + + /* now check each BlobIndex */ + for (n = 0; n < count; n++) { + const CS_BlobIndex *blobIndex = &sb->index[n]; + uint32_t type = ntohl(blobIndex->type); + uint32_t offset = ntohl(blobIndex->offset); + if (length < offset) + return EBADEXEC; + + const CS_GenericBlob *subBlob = + (const CS_GenericBlob *)(const void *)(addr + offset); + + size_t subLength = length - offset; + + if ((error = cs_validate_blob(subBlob, subLength)) != 0) + return error; + subLength = ntohl(subBlob->length); + + /* extra validation for CDs, that is also returned */ + if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) { + const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob; + if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) + return error; + unsigned int rank = hash_rank(candidate); + if (cs_debug > 3) + printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n); + if (best_cd == NULL || rank > best_rank) { + best_cd = candidate; + best_rank = rank; + } else if (best_cd != NULL && rank == best_rank) { + /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */ + printf("multiple hash=%d CodeDirectories in signature; rejecting", best_cd->hashType); + return EBADEXEC; + } } + if (best_cd && cs_debug > 2) + printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank); + *rcd = best_cd; } + + } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) { + + if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) + return error; + *rcd = (const CS_CodeDirectory *)blob; } else { - /* - * Detached signatures come as a bare CS_CodeDirectory, - * without a blob. 
-	 */
-	cd = (const CS_CodeDirectory *) embedded;
+		return EBADEXEC;
 	}

-	if (cd &&
-	    cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
-	    cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
-			   lower_bound, upper_bound) &&
-	    ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
-		return cd;
-	}
+	if (*rcd == NULL)
+		return EBADEXEC;

-	// not found or not a valid code directory
-	return NULL;
+	return 0;
 }

-
 /*
- * Locating a page hash
+ * csblob_find_blob_bytes
+ *
+ * Find a blob from the superblob/code directory. The blob must have
+ * been validated by cs_validate_csblob() before calling
+ * this. Use csblob_find_blob() instead.
+ *
+ * Will also find a "raw" code directory if it's stored as well as
+ * searching the superblob.
+ *
+ * Parameters:	buffer			Pointer to code signature
+ *		length			Length of buffer
+ *		type			type of blob to find
+ *		magic			the magic number for that blob
+ *
+ * Returns:	pointer			Success
+ *		NULL			Buffer not found
  */
-static const unsigned char *
-hashes(
-	const CS_CodeDirectory *cd,
-	unsigned page,
-	char *lower_bound,
-	char *upper_bound)
+
+const CS_GenericBlob *
+csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
 {
-	const unsigned char *base, *top, *hash;
-	uint32_t nCodeSlots;
+	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

-	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
+	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
+		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
+		size_t n, count = ntohl(sb->count);

-	base = (const unsigned char *)cd + ntohl(cd->hashOffset);
-	nCodeSlots = ntohl(cd->nCodeSlots);
-	top = base + nCodeSlots * SHA1_RESULTLEN;
-	if (!cs_valid_range(base, top,
-			    lower_bound, upper_bound) ||
-	    page > nCodeSlots) {
+		for (n = 0; n < count; n++) {
+			if (ntohl(sb->index[n].type) != type)
+				continue;
+			uint32_t offset = ntohl(sb->index[n].offset);
+			if (length - sizeof(const CS_GenericBlob) < offset)
+				return NULL;
+			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
+			if (ntohl(blob->magic) != magic)
+				continue;
+			return blob;
+		}
+	} else if (type == CSSLOT_CODEDIRECTORY
+		   && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
+		   && magic == CSMAGIC_CODEDIRECTORY)
+		return blob;
+	return NULL;
+}
+
+
+const CS_GenericBlob *
+csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
+{
+	if ((csblob->csb_flags & CS_VALID) == 0)
 		return NULL;
-	}
-	assert(page < nCodeSlots);
+	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
+}

-	hash = base + page * SHA1_RESULTLEN;
-	if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
-			    lower_bound, upper_bound)) {
-		hash = NULL;
+static const uint8_t *
+find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
+{
+	/* there is no zero special slot since that is the first code slot */
+	if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
+		return NULL;
+
+	return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot));
+}
+
+static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
+
+int
+csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
+{
+	uint8_t computed_hash[CS_HASH_MAX_SIZE];
+	const CS_GenericBlob *entitlements;
+	const CS_CodeDirectory *code_dir;
+	const uint8_t *embedded_hash;
+	union cs_hash_union context;
+
+	*out_start = NULL;
+	*out_length = 0;
+
+	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash))
+		return EBADEXEC;
+
+	code_dir
= csblob->csb_cd; + + entitlements = csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS); + embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS); + + if (embedded_hash == NULL) { + if (entitlements) + return EBADEXEC; + return 0; + } else if (entitlements == NULL) { + if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) { + return EBADEXEC; + } else { + return 0; + } } - return hash; + csblob->csb_hashtype->cs_init(&context); + csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length)); + csblob->csb_hashtype->cs_final(computed_hash, &context); + + if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) + return EBADEXEC; + + *out_start = __DECONST(void *, entitlements); + *out_length = ntohl(entitlements->length); + + return 0; } + /* * CODESIGNING * End of routines to navigate code signing data structures in the kernel. */ + /* * ubc_init * @@ -290,6 +691,8 @@ ubc_init(void) i = (vm_size_t) sizeof (struct ubc_info); ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone"); + + zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE); } @@ -486,39 +889,120 @@ ubc_info_deallocate(struct ubc_info *uip) ubc_info_free(uip); } +errno_t mach_to_bsd_errno(kern_return_t mach_err) +{ + switch (mach_err) { + case KERN_SUCCESS: + return 0; + + case KERN_INVALID_ADDRESS: + case KERN_INVALID_ARGUMENT: + case KERN_NOT_IN_SET: + case KERN_INVALID_NAME: + case KERN_INVALID_TASK: + case KERN_INVALID_RIGHT: + case KERN_INVALID_VALUE: + case KERN_INVALID_CAPABILITY: + case KERN_INVALID_HOST: + case KERN_MEMORY_PRESENT: + case KERN_INVALID_PROCESSOR_SET: + case KERN_INVALID_POLICY: + case KERN_ALREADY_WAITING: + case KERN_DEFAULT_SET: + case KERN_EXCEPTION_PROTECTED: + case KERN_INVALID_LEDGER: + case KERN_INVALID_MEMORY_CONTROL: + case KERN_INVALID_SECURITY: + case KERN_NOT_DEPRESSED: + case KERN_LOCK_OWNED: + case KERN_LOCK_OWNED_SELF: + return EINVAL; + + case KERN_PROTECTION_FAILURE: + case KERN_NOT_RECEIVER: + case KERN_NO_ACCESS: + case KERN_POLICY_STATIC: + return EACCES; + + case KERN_NO_SPACE: + case KERN_RESOURCE_SHORTAGE: + case KERN_UREFS_OVERFLOW: + case KERN_INVALID_OBJECT: + return ENOMEM; + + case KERN_FAILURE: + return EIO; + + case KERN_MEMORY_FAILURE: + case KERN_POLICY_LIMIT: + case KERN_CODESIGN_ERROR: + return EPERM; + + case KERN_MEMORY_ERROR: + return EBUSY; + + case KERN_ALREADY_IN_SET: + case KERN_NAME_EXISTS: + case KERN_RIGHT_EXISTS: + return EEXIST; + + case KERN_ABORTED: + return EINTR; + + case KERN_TERMINATED: + case KERN_LOCK_SET_DESTROYED: + case KERN_LOCK_UNSTABLE: + case KERN_SEMAPHORE_DESTROYED: + return ENOENT; + + case KERN_RPC_SERVER_TERMINATED: + return ECONNRESET; + + case KERN_NOT_SUPPORTED: + return ENOTSUP; + + case KERN_NODE_DOWN: + return ENETDOWN; + + case KERN_NOT_WAITING: + return ENOENT; + + case KERN_OPERATION_TIMED_OUT: + return ETIMEDOUT; + + default: + return EIO; + } +} /* - * ubc_setsize + * ubc_setsize_ex * - * Tell the VM that the the size of the file represented by the vnode has + * Tell the VM that the the size of the file represented by the vnode has * changed * - * Parameters: vp The vp whose backing file size is - * being changed - * nsize The new size of the backing file - * - * Returns: 1 Success - * 0 Failure - * - * Notes: This function will indicate failure if the new size that's - * being attempted to be set is negative. 
- * - * This function will fail if there is no ubc_info currently - * associated with the vnode. - * - * This function will indicate success it the new size is the - * same or larger than the old size (in this case, the remainder - * of the file will require modification or use of an existing upl - * to access successfully). - * - * This function will fail if the new file size is smaller, and - * the memory region being invalidated was unable to actually be - * invalidated and/or the last page could not be flushed, if the - * new size is not aligned to a page boundary. This is usually - * indicative of an I/O error. + * Parameters: vp The vp whose backing file size is + * being changed + * nsize The new size of the backing file + * opts Options + * + * Returns: EINVAL for new size < 0 + * ENOENT if no UBC info exists + * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size + * Other errors (mapped to errno_t) returned by VM functions + * + * Notes: This function will indicate success if the new size is the + * same or larger than the old size (in this case, the + * remainder of the file will require modification or use of + * an existing upl to access successfully). + * + * This function will fail if the new file size is smaller, + * and the memory region being invalidated was unable to + * actually be invalidated and/or the last page could not be + * flushed, if the new size is not aligned to a page + * boundary. This is usually indicative of an I/O error. */ -int -ubc_setsize(struct vnode *vp, off_t nsize) +errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts) { off_t osize; /* ui_size before change */ off_t lastpg, olastpgend, lastoff; @@ -527,20 +1011,29 @@ ubc_setsize(struct vnode *vp, off_t nsize) kern_return_t kret = KERN_SUCCESS; if (nsize < (off_t)0) - return (0); + return EINVAL; if (!UBCINFOEXISTS(vp)) - return (0); + return ENOENT; uip = vp->v_ubcinfo; osize = uip->ui_size; + + if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) + return EAGAIN; + /* * Update the size before flushing the VM */ uip->ui_size = nsize; - if (nsize >= osize) /* Nothing more to do */ - return (1); /* return success */ + if (nsize >= osize) { /* Nothing more to do */ + if (nsize > osize) { + lock_vnode_and_post(vp, NOTE_EXTEND); + } + + return 0; + } /* * When the file shrinks, invalidate the pages beyond the @@ -555,17 +1048,16 @@ ubc_setsize(struct vnode *vp, off_t nsize) lastoff = (nsize & PAGE_MASK_64); if (lastoff) { - upl_t upl; + upl_t upl; upl_page_info_t *pl; - - /* + /* * new EOF ends up in the middle of a page - * zero the tail of this page if its currently + * zero the tail of this page if it's currently * present in the cache */ - kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE); - + kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE); + if (kret != KERN_SUCCESS) panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret); @@ -577,21 +1069,31 @@ ubc_setsize(struct vnode *vp, off_t nsize) lastpg += PAGE_SIZE_64; } if (olastpgend > lastpg) { - /* + int flags; + + if (lastpg == 0) + flags = MEMORY_OBJECT_DATA_FLUSH_ALL; + else + flags = MEMORY_OBJECT_DATA_FLUSH; + /* * invalidate the pages beyond the new EOF page * */ - kret = memory_object_lock_request(control, - (memory_object_offset_t)lastpg, - (memory_object_size_t)(olastpgend - lastpg), NULL, NULL, - MEMORY_OBJECT_RETURN_NONE, MEMORY_OBJECT_DATA_FLUSH, - VM_PROT_NO_CHANGE); + kret = memory_object_lock_request(control, + 
(memory_object_offset_t)lastpg, + (memory_object_size_t)(olastpgend - lastpg), NULL, NULL, + MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE); if (kret != KERN_SUCCESS) printf("ubc_setsize: invalidate failed (error = %d)\n", kret); } - return ((kret == KERN_SUCCESS) ? 1 : 0); + return mach_to_bsd_errno(kret); } +// Returns true for success +int ubc_setsize(vnode_t vp, off_t nsize) +{ + return ubc_setsize_ex(vp, nsize, 0) == 0; +} /* * ubc_getsize @@ -626,7 +1128,7 @@ ubc_getsize(struct vnode *vp) /* * ubc_umount * - * Call ubc_sync_range(vp, 0, EOF, UBC_PUSHALL) on all the vnodes for this + * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this * mount point * * Parameters: mp The mount point @@ -725,7 +1227,6 @@ ubc_getcred(struct vnode *vp) * This function is generally used only in the following cases: * * o a memory mapped file via the mmap() system call - * o a memory mapped file via the deprecated map_fd() call * o a swap store backing file * o subsequent to a successful write via vn_write() * @@ -741,8 +1242,7 @@ ubc_getcred(struct vnode *vp) * * o Because a page-in may occur prior to a write, the * credential may not be set at this time, if the page-in - * is not the result of a mapping established via mmap() - * or map_fd(). + * is not the result of a mapping established via mmap(). * * In both these cases, this will be triggered from the paging * path, which will instead use the credential of the current @@ -836,7 +1336,6 @@ ubc_setcred(struct vnode *vp, proc_t p) return (1); } - /* * ubc_getpager * @@ -894,6 +1393,16 @@ ubc_getobject(struct vnode *vp, __unused int flags) return (MEMORY_OBJECT_CONTROL_NULL); } +boolean_t +ubc_strict_uncached_IO(struct vnode *vp) +{ + boolean_t result = FALSE; + + if (UBCINFOEXISTS(vp)) { + result = memory_object_is_slid(vp->v_ubcinfo->ui_control); + } + return result; +} /* * ubc_blktooff @@ -1016,35 +1525,6 @@ ubc_pages_resident(vnode_t vp) return (0); } - -/* - * ubc_sync_range - * - * Clean and/or invalidate a range in the memory object that backs this vnode - * - * Parameters: vp The vnode whose associated ubc_info's - * associated memory object is to have a - * range invalidated within it - * beg_off The start of the range, as an offset - * end_off The end of the range, as an offset - * flags See ubc_msync_internal() - * - * Returns: 1 Success - * 0 Failure - * - * Notes: see ubc_msync_internal() for more detailed information. - * - * DEPRECATED: This interface is obsolete due to a failure to return error - * information needed in order to correct failures. The currently - * recommended interface is ubc_msync(). 
- */ -int -ubc_sync_range(vnode_t vp, off_t beg_off, off_t end_off, int flags) -{ - return (ubc_msync_internal(vp, beg_off, end_off, NULL, flags, NULL)); -} - - /* * ubc_msync * @@ -1096,6 +1576,8 @@ ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags) /* + * ubc_msync_internal + * * Clean and/or invalidate a range in the memory object that backs this vnode * * Parameters: vp The vnode whose associated ubc_info's @@ -1212,7 +1694,7 @@ ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, i /* - * ubc_msync_internal + * ubc_map * * Explicitly map a vnode that has an associate ubc_info, and add a reference * to it for the ubc system, if there isn't one already, so it will not be @@ -1241,7 +1723,6 @@ ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, i * It is primarily used by: * * o mmap(), when mapping a file - * o The deprecated map_fd() interface, when mapping a file * o When mapping a shared file (a shared library in the * shared segment region) * o When loading a program image during the exec process @@ -1294,6 +1775,9 @@ ubc_map(vnode_t vp, int flags) if ( !ISSET(uip->ui_flags, UI_ISMAPPED)) need_ref = 1; SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED)); + if (flags & PROT_WRITE) { + SET(uip->ui_flags, UI_MAPPEDWRITE); + } } CLR(uip->ui_flags, UI_MAPBUSY); @@ -1426,7 +1910,7 @@ ubc_isinuse_locked(struct vnode *vp, int busycount, int locked) if (!locked) - vnode_lock(vp); + vnode_lock_spin(vp); if ((vp->v_usecount - vp->v_kusecount) > busycount) retval = 1; @@ -1462,14 +1946,13 @@ ubc_unmap(struct vnode *vp) struct ubc_info *uip; int need_rele = 0; int need_wakeup = 0; -#if NAMEDRSRCFORK - int named_fork = 0; -#endif if (vnode_getwithref(vp)) return; if (UBCINFOEXISTS(vp)) { + bool want_fsevent = false; + vnode_lock(vp); uip = vp->v_ubcinfo; @@ -1480,27 +1963,43 @@ ubc_unmap(struct vnode *vp) } SET(uip->ui_flags, UI_MAPBUSY); -#if NAMEDRSRCFORK - if ((vp->v_flag & VISNAMEDSTREAM) && - (vp->v_parent != NULLVP) && - !(vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS)) { - named_fork = 1; - } -#endif - if (ISSET(uip->ui_flags, UI_ISMAPPED)) { - CLR(uip->ui_flags, UI_ISMAPPED); + if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) + want_fsevent = true; + need_rele = 1; + + /* + * We want to clear the mapped flags after we've called + * VNOP_MNOMAP to avoid certain races and allow + * VNOP_MNOMAP to call ubc_is_mapped_writable. + */ } vnode_unlock(vp); - - if (need_rele) { - (void)VNOP_MNOMAP(vp, vfs_context_current()); -#if NAMEDRSRCFORK - if (named_fork) { - vnode_relenamedstream(vp->v_parent, vp, vfs_context_current()); - } + if (need_rele) { + vfs_context_t ctx = vfs_context_current(); + + (void)VNOP_MNOMAP(vp, ctx); + +#if CONFIG_FSE + /* + * Why do we want an fsevent here? Normally the + * content modified fsevent is posted when a file is + * closed and only if it's written to via conventional + * means. It's perfectly legal to close a file and + * keep your mappings and we don't currently track + * whether it was written to via a mapping. + * Therefore, we need to post an fsevent here if the + * file was mapped writable. This may result in false + * events, i.e. we post a notification when nothing + * has really changed. 
+ */ + if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) { + add_fsevent(FSE_CONTENT_MODIFIED, ctx, + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); + } #endif vnode_rele(vp); @@ -1508,7 +2007,11 @@ ubc_unmap(struct vnode *vp) vnode_lock_spin(vp); + if (need_rele) + CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE); + CLR(uip->ui_flags, UI_MAPBUSY); + if (ISSET(uip->ui_flags, UI_MAPWAITING)) { CLR(uip->ui_flags, UI_MAPWAITING); need_wakeup = 1; @@ -1745,39 +2248,72 @@ kern_return_t ubc_create_upl( struct vnode *vp, off_t f_offset, - long bufsize, + int bufsize, upl_t *uplp, upl_page_info_t **plp, int uplflags) { memory_object_control_t control; - mach_msg_type_number_t count; - int ubcflags; kern_return_t kr; + + if (plp != NULL) + *plp = NULL; + *uplp = NULL; if (bufsize & 0xfff) return KERN_INVALID_ARGUMENT; - if (uplflags & UPL_FOR_PAGEOUT) { + if (bufsize > MAX_UPL_SIZE_BYTES) + return KERN_INVALID_ARGUMENT; + + if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) { + + if (uplflags & UPL_UBC_MSYNC) { + uplflags &= UPL_RET_ONLY_DIRTY; + + uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE | + UPL_SET_INTERNAL | UPL_SET_LITE; + + } else if (uplflags & UPL_UBC_PAGEOUT) { + uplflags &= UPL_RET_ONLY_DIRTY; + + if (uplflags & UPL_RET_ONLY_DIRTY) + uplflags |= UPL_NOBLOCK; + + uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE | + UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE; + } else { + uplflags |= UPL_RET_ONLY_ABSENT | + UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | + UPL_SET_INTERNAL | UPL_SET_LITE; + + /* + * if the requested size == PAGE_SIZE, we don't want to set + * the UPL_NOBLOCK since we may be trying to recover from a + * previous partial pagein I/O that occurred because we were low + * on memory and bailed early in order to honor the UPL_NOBLOCK... 
+ * since we're only asking for a single page, we can block w/o fear + * of tying up pages while waiting for more to become available + */ + if (bufsize > PAGE_SIZE) + uplflags |= UPL_NOBLOCK; + } + } else { uplflags &= ~UPL_FOR_PAGEOUT; - ubcflags = UBC_FOR_PAGEOUT; - } else - ubcflags = UBC_FLAGS_NONE; - control = ubc_getobject(vp, ubcflags); + if (uplflags & UPL_WILL_BE_DUMPED) { + uplflags &= ~UPL_WILL_BE_DUMPED; + uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL); + } else + uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL); + } + control = ubc_getobject(vp, UBC_FLAGS_NONE); if (control == MEMORY_OBJECT_CONTROL_NULL) return KERN_INVALID_ARGUMENT; - if (uplflags & UPL_WILL_BE_DUMPED) { - uplflags &= ~UPL_WILL_BE_DUMPED; - uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL); - } else - uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL); - count = 0; - - kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, &count, uplflags); - if (plp != NULL) - *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp); + kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags); + if (kr == KERN_SUCCESS && plp != NULL) + *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp); return kr; } @@ -1795,7 +2331,7 @@ upl_size_t ubc_upl_maxbufsize( void) { - return(MAX_UPL_TRANSFER * PAGE_SIZE); + return(MAX_UPL_SIZE_BYTES); } /* @@ -1875,7 +2411,7 @@ ubc_upl_commit( kern_return_t kr; pl = UPL_GET_INTERNAL_PAGE_LIST(upl); - kr = upl_commit(upl, pl, MAX_UPL_TRANSFER); + kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT); upl_deallocate(upl); return kr; } @@ -1936,8 +2472,8 @@ ubc_upl_commit( kern_return_t ubc_upl_commit_range( upl_t upl, - vm_offset_t offset, - vm_size_t size, + upl_offset_t offset, + upl_size_t size, int flags) { upl_page_info_t *pl; @@ -1947,10 +2483,14 @@ ubc_upl_commit_range( if (flags & UPL_COMMIT_FREE_ON_EMPTY) flags |= UPL_COMMIT_NOTIFY_EMPTY; + if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { + return KERN_INVALID_ARGUMENT; + } + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); kr = upl_commit_range(upl, offset, size, flags, - pl, MAX_UPL_TRANSFER, &empty); + pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty); if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) upl_deallocate(upl); @@ -2007,8 +2547,8 @@ ubc_upl_commit_range( kern_return_t ubc_upl_abort_range( upl_t upl, - vm_offset_t offset, - vm_size_t size, + upl_offset_t offset, + upl_size_t size, int abort_flags) { kern_return_t kr; @@ -2108,31 +2648,118 @@ ubc_upl_pageinfo( int -UBCINFOEXISTS(struct vnode * vp) +UBCINFOEXISTS(const struct vnode * vp) { return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL)); } +void +ubc_upl_range_needed( + upl_t upl, + int index, + int count) +{ + upl_range_needed(upl, index, count); +} + +boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable) +{ + if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) + return FALSE; + if (writable) + *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE); + return TRUE; +} + +boolean_t ubc_is_mapped_writable(const struct vnode *vp) +{ + boolean_t writable; + return ubc_is_mapped(vp, &writable) && writable; +} + + /* * CODE SIGNING */ -#define CS_BLOB_KEEP_IN_KERNEL 1 +#define CS_BLOB_PAGEABLE 0 static volatile SInt32 cs_blob_size = 0; static volatile SInt32 cs_blob_count = 0; static SInt32 cs_blob_size_peak = 0; static UInt32 cs_blob_size_max = 0; static SInt32 cs_blob_count_peak = 0; -extern int cs_debug; -int cs_validation = 1; +SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, 
(int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs"); +SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs"); +SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs"); +SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs"); +SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob"); -SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW, &cs_validation, 0, "Do validate code signatures"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD, &cs_blob_count, 0, "Current number of code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD, &cs_blob_size, 0, "Current size of all code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD, &cs_blob_count_peak, 0, "Peak number of code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD, &cs_blob_size_peak, 0, "Peak size of code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD, &cs_blob_size_max, 0, "Size of biggest code signature blob"); +/* + * Function: csblob_parse_teamid + * + * Description: This function returns a pointer to the team id + stored within the codedirectory of the csblob. + If the codedirectory predates team-ids, it returns + NULL. + This does not copy the name but returns a pointer to + it within the CD. Subsequently, the CD must be + available when this is used. +*/ + +static const char * +csblob_parse_teamid(struct cs_blob *csblob) +{ + const CS_CodeDirectory *cd; + + cd = csblob->csb_cd; + + if (ntohl(cd->version) < CS_SUPPORTSTEAMID) + return NULL; + + if (cd->teamOffset == 0) + return NULL; + + const char *name = ((const char *)cd) + ntohl(cd->teamOffset); + if (cs_debug > 1) + printf("found team-id %s in cdblob\n", name); + + return name; +} + + +kern_return_t +ubc_cs_blob_allocate( + vm_offset_t *blob_addr_p, + vm_size_t *blob_size_p) +{ + kern_return_t kr; + +#if CS_BLOB_PAGEABLE + *blob_size_p = round_page(*blob_size_p); + kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p, VM_KERN_MEMORY_SECURITY); +#else /* CS_BLOB_PAGEABLE */ + *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY); + if (*blob_addr_p == 0) { + kr = KERN_NO_SPACE; + } else { + kr = KERN_SUCCESS; + } +#endif /* CS_BLOB_PAGEABLE */ + return kr; +} + +void +ubc_cs_blob_deallocate( + vm_offset_t blob_addr, + vm_size_t blob_size) +{ +#if CS_BLOB_PAGEABLE + kmem_free(kernel_map, blob_addr, blob_size); +#else /* CS_BLOB_PAGEABLE */ + kfree((void *) blob_addr, blob_size); +#endif /* CS_BLOB_PAGEABLE */ +} int ubc_cs_blob_add( @@ -2140,7 +2767,9 @@ ubc_cs_blob_add( cpu_type_t cputype, off_t base_offset, vm_address_t addr, - vm_size_t size) + vm_size_t size, + __unused int flags, + struct cs_blob **ret_blob) { kern_return_t kr; struct ubc_info *uip; @@ -2150,7 +2779,14 @@ ubc_cs_blob_add( memory_object_size_t blob_size; const CS_CodeDirectory *cd; off_t blob_start_offset, blob_end_offset; - SHA1_CTX sha1ctxt; + union cs_hash_union mdctx; + boolean_t record_mtime; + int cs_flags; + + record_mtime = FALSE; + cs_flags = 0; + if (ret_blob) + *ret_blob = NULL; blob_handle = IPC_PORT_NULL; @@ -2159,6 +2795,7 @@ ubc_cs_blob_add( return ENOMEM; } +#if CS_BLOB_PAGEABLE /* get a memory entry on the blob 
*/ blob_size = (memory_object_size_t) size; kr = mach_make_memory_entry_64(kernel_map, @@ -2173,13 +2810,16 @@ ubc_cs_blob_add( } if (memory_object_round_page(blob_size) != (memory_object_size_t) round_page(size)) { - printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%x !?\n", - blob_size, size); - panic("XXX FBDP size mismatch 0x%llx 0x%x\n", blob_size, size); + printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n", + blob_size, (size_t)size); + panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size); error = EINVAL; goto out; } - +#else + blob_size = (memory_object_size_t) size; + blob_handle = IPC_PORT_NULL; +#endif /* fill in the new blob */ blob->csb_cpu_type = cputype; @@ -2188,44 +2828,104 @@ ubc_cs_blob_add( blob->csb_mem_offset = 0; blob->csb_mem_handle = blob_handle; blob->csb_mem_kaddr = addr; - + blob->csb_flags = 0; + blob->csb_platform_binary = 0; + blob->csb_platform_path = 0; + blob->csb_teamid = NULL; /* * Validate the blob's contents */ - cd = findCodeDirectory( - (const CS_SuperBlob *) addr, - (char *) addr, - (char *) addr + blob->csb_mem_size); - if (cd == NULL) { - /* no code directory => useless blob ! */ - blob->csb_flags = 0; - blob->csb_start_offset = 0; - blob->csb_end_offset = 0; + + error = cs_validate_csblob((const uint8_t *)addr, size, &cd); + if (error) { + + if (cs_debug) + printf("CODESIGNING: csblob invalid: %d\n", error); + /* The vnode checker can't make the rest of this function succeed if csblob validation failed, so bail */ + goto out; + } else { - unsigned char *sha1_base; - int sha1_size; + const unsigned char *md_base; + uint8_t hash[CS_HASH_MAX_SIZE]; + int md_size; - blob->csb_flags = ntohl(cd->flags) | CS_VALID; - blob->csb_end_offset = round_page(ntohl(cd->codeLimit)); - blob->csb_start_offset = (blob->csb_end_offset - - (ntohl(cd->nCodeSlots) * PAGE_SIZE)); - /* compute the blob's SHA1 hash */ - sha1_base = (const unsigned char *) cd; - sha1_size = ntohl(cd->length); - SHA1Init(&sha1ctxt); - SHA1Update(&sha1ctxt, sha1_base, sha1_size); - SHA1Final(blob->csb_sha1, &sha1ctxt); - } +#if CS_BLOB_PAGEABLE +#error "cd might move under CS_BLOB_PAGEABLE; reconsider this code" +#endif + blob->csb_cd = cd; + blob->csb_hashtype = cs_find_md(cd->hashType); + if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) + panic("validated CodeDirectory but unsupported type"); + + blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; + blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit)); + if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { + const SC_Scatter *scatter = (const SC_Scatter*) + ((const char*)cd + ntohl(cd->scatterOffset)); + blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K; + } else { + blob->csb_start_offset = 0; + } + /* compute the blob's cdhash */ + md_base = (const unsigned char *) cd; + md_size = ntohl(cd->length); + blob->csb_hashtype->cs_init(&mdctx); + blob->csb_hashtype->cs_update(&mdctx, md_base, md_size); + blob->csb_hashtype->cs_final(hash, &mdctx); + memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN); + } + + /* + * Let policy module check whether the blob's signature is accepted. 
+ */ +#if CONFIG_MACF + error = mac_vnode_check_signature(vp, + base_offset, + blob->csb_cdhash, + (const void*)addr, size, + flags, &cs_flags); + if (error) { + if (cs_debug) + printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); + goto out; + } + if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(cs_flags & CS_PLATFORM_BINARY)) { + if (cs_debug) + printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid); + error = EPERM; + goto out; + } +#endif + + if (cs_flags & CS_PLATFORM_BINARY) { + if (cs_debug > 1) + printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid); + blob->csb_platform_binary = 1; + blob->csb_platform_path = !!(cs_flags & CS_PLATFORM_PATH); + } else { + blob->csb_platform_binary = 0; + blob->csb_platform_path = 0; + blob->csb_teamid = csblob_parse_teamid(blob); + if (cs_debug > 1) { + if (blob->csb_teamid) + printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid); + else + printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid); + } + } + /* * Validate the blob's coverage */ blob_start_offset = blob->csb_base_offset + blob->csb_start_offset; blob_end_offset = blob->csb_base_offset + blob->csb_end_offset; - if (blob_start_offset >= blob_end_offset) { + if (blob_start_offset >= blob_end_offset || + blob_start_offset < 0 || + blob_end_offset <= 0) { /* reject empty or backwards blob */ error = EINVAL; goto out; @@ -2245,6 +2945,30 @@ ubc_cs_blob_add( oblob = oblob->csb_next) { off_t oblob_start_offset, oblob_end_offset; + /* check for conflicting teamid */ + if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices + if (!oblob->csb_platform_binary) { + vnode_unlock(vp); + error = EALREADY; + goto out; + } + } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices + if (oblob->csb_platform_binary || + oblob->csb_teamid == NULL || + strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) { + vnode_unlock(vp); + error = EALREADY; + goto out; + } + } else { // non teamid binary needs to be the same for app slices + if (oblob->csb_platform_binary || + oblob->csb_teamid != NULL) { + vnode_unlock(vp); + error = EALREADY; + goto out; + } + } + oblob_start_offset = (oblob->csb_base_offset + oblob->csb_start_offset); oblob_end_offset = (oblob->csb_base_offset + @@ -2261,9 +2985,9 @@ ubc_cs_blob_add( (blob->csb_cpu_type == CPU_TYPE_ANY || oblob->csb_cpu_type == CPU_TYPE_ANY || blob->csb_cpu_type == oblob->csb_cpu_type) && - !bcmp(blob->csb_sha1, - oblob->csb_sha1, - SHA1_RESULTLEN)) { + !bcmp(blob->csb_cdhash, + oblob->csb_cdhash, + CS_CDHASH_LEN)) { /* * We already have this blob: * we'll return success but @@ -2279,6 +3003,8 @@ ubc_cs_blob_add( oblob->csb_cpu_type = cputype; } vnode_unlock(vp); + if (ret_blob) + *ret_blob = oblob; error = EAGAIN; goto out; } else { @@ -2300,6 +3026,14 @@ ubc_cs_blob_add( goto out; } + if (uip->cs_blobs == NULL) { + /* loading 1st blob: record the file's current "modify time" */ + record_mtime = TRUE; + } + + /* set the generation count for cs_blobs */ + uip->cs_add_gen = cs_blob_generation_count; + /* * Add this blob to the list of blobs for this vnode. * We always add at the front of the list and we never remove a @@ -2314,39 +3048,46 @@ ubc_cs_blob_add( if (cs_blob_count > cs_blob_count_peak) { cs_blob_count_peak = cs_blob_count; /* XXX atomic ? 
*/ } - OSAddAtomic(+blob->csb_mem_size, &cs_blob_size); - if (cs_blob_size > cs_blob_size_peak) { - cs_blob_size_peak = cs_blob_size; /* XXX atomic ? */ + OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size); + if ((SInt32) cs_blob_size > cs_blob_size_peak) { + cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */ } - if (blob->csb_mem_size > cs_blob_size_max) { - cs_blob_size_max = blob->csb_mem_size; + if ((UInt32) blob->csb_mem_size > cs_blob_size_max) { + cs_blob_size_max = (UInt32) blob->csb_mem_size; } - if (cs_debug) { + if (cs_debug > 1) { proc_t p; - + const char *name = vnode_getname_printable(vp); p = current_proc(); printf("CODE SIGNING: proc %d(%s) " "loaded %s signatures for file (%s) " "range 0x%llx:0x%llx flags 0x%x\n", p->p_pid, p->p_comm, blob->csb_cpu_type == -1 ? "detached" : "embedded", - vnode_name(vp), + name, blob->csb_base_offset + blob->csb_start_offset, blob->csb_base_offset + blob->csb_end_offset, blob->csb_flags); + vnode_putname_printable(name); } -#if !CS_BLOB_KEEP_IN_KERNEL - blob->csb_mem_kaddr = 0; -#endif /* CS_BLOB_KEEP_IN_KERNEL */ - vnode_unlock(vp); + if (record_mtime) { + vnode_mtime(vp, &uip->cs_mtime, vfs_context_current()); + } + + if (ret_blob) + *ret_blob = blob; + error = 0; /* success ! */ out: if (error) { + if (cs_debug) + printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error); + /* we failed; release what we allocated */ if (blob) { kfree(blob, sizeof (*blob)); @@ -2356,10 +3097,6 @@ out: mach_memory_entry_port_release(blob_handle); blob_handle = IPC_PORT_NULL; } - } else { -#if !CS_BLOB_KEEP_IN_KERNEL - kmem_free(kernel_map, addr, size); -#endif /* CS_BLOB_KEEP_IN_KERNEL */ } if (error == EAGAIN) { @@ -2372,12 +3109,47 @@ out: /* * Since we're not failing, consume the data we received. */ - kmem_free(kernel_map, addr, size); + ubc_cs_blob_deallocate(addr, size); } return error; } +void +csvnode_print_debug(struct vnode *vp) +{ + const char *name = NULL; + struct ubc_info *uip; + struct cs_blob *blob; + + name = vnode_getname_printable(vp); + if (name) { + printf("csvnode: name: %s\n", name); + vnode_putname_printable(name); + } + + vnode_lock_spin(vp); + + if (! UBCINFOEXISTS(vp)) { + blob = NULL; + goto out; + } + + uip = vp->v_ubcinfo; + for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) { + printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n", + (unsigned long)blob->csb_start_offset, + (unsigned long)blob->csb_end_offset, + blob->csb_flags, + blob->csb_platform_binary ? "yes" : "no", + blob->csb_platform_path ? "yes" : "no", + blob->csb_teamid ? 
blob->csb_teamid : "");
+	}
+
+out:
+	vnode_unlock(vp);
+
+}

 struct cs_blob *
 ubc_cs_blob_get(
@@ -2430,20 +3202,102 @@ ubc_cs_free(
 	     blob = next_blob) {
 		next_blob = blob->csb_next;
 		if (blob->csb_mem_kaddr != 0) {
-			kmem_free(kernel_map,
-				  blob->csb_mem_kaddr,
-				  blob->csb_mem_size);
+			ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
+					       blob->csb_mem_size);
 			blob->csb_mem_kaddr = 0;
 		}
-		mach_memory_entry_port_release(blob->csb_mem_handle);
+		if (blob->csb_mem_handle != IPC_PORT_NULL) {
+			mach_memory_entry_port_release(blob->csb_mem_handle);
+		}
 		blob->csb_mem_handle = IPC_PORT_NULL;
 		OSAddAtomic(-1, &cs_blob_count);
-		OSAddAtomic(-blob->csb_mem_size, &cs_blob_size);
+		OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
 		kfree(blob, sizeof (*blob));
 	}
+#if CHECK_CS_VALIDATION_BITMAP
+	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
+#endif
 	uip->cs_blobs = NULL;
 }

+/* check cs blob generation on vnode
+ * returns:
+ *    0         : Success, the cs_blob attached is current
+ *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
+ */
+int
+ubc_cs_generation_check(
+	struct vnode *vp)
+{
+	int retval = ENEEDAUTH;
+
+	vnode_lock_spin(vp);
+
+	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
+		retval = 0;
+	}
+
+	vnode_unlock(vp);
+	return retval;
+}
+
+int
+ubc_cs_blob_revalidate(
+	struct vnode	*vp,
+	struct cs_blob *blob,
+	__unused int flags
+	)
+{
+	int error = 0;
+#if CONFIG_MACF
+	int cs_flags = 0;
+#endif
+	const CS_CodeDirectory *cd = NULL;
+
+	assert(vp != NULL);
+	assert(blob != NULL);
+
+	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &cd);
+	if (error) {
+		if (cs_debug) {
+			printf("CODESIGNING: csblob invalid: %d\n", error);
+		}
+		goto out;
+	}
+
+	/* callout to mac_vnode_check_signature */
+#if CONFIG_MACF
+	error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_cdhash,
+					  (const void*)blob->csb_mem_kaddr, (int)blob->csb_mem_size,
+					  flags, &cs_flags);
+	if (cs_debug && error) {
+		printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
+	}
+#endif
+
+	/* update generation number if success */
+	vnode_lock_spin(vp);
+	if (UBCINFOEXISTS(vp)) {
+		if (error == 0)
+			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
+		else
+			vp->v_ubcinfo->cs_add_gen = 0;
+	}
+
+	vnode_unlock(vp);
+
+out:
+	return error;
+}
+
+void
+cs_blob_reset_cache()
+{
+	/* incrementing an odd number by 2 makes sure '0' is never reached. */
+	OSAddAtomic(+2, &cs_blob_generation_count);
+	printf("Resetting cs_blob cache from all vnodes.\n");
+}

 struct cs_blob *
 ubc_get_cs_blobs(
@@ -2451,7 +3305,20 @@
 	struct ubc_info	*uip;
 	struct cs_blob	*blobs;

-	vnode_lock_spin(vp);
+	/*
+	 * No need to take the vnode lock here.  The caller must be holding
+	 * a reference on the vnode (via a VM mapping or open file descriptor),
+	 * so the vnode will not go away.  The ubc_info stays until the vnode
+	 * goes away.  And we only modify "blobs" by adding to the head of the
+	 * list.
+	 * The ubc_info could go away entirely if the vnode gets reclaimed as
+	 * part of a forced unmount.  In the case of a code-signature validation
+	 * during a page fault, the "paging_in_progress" reference on the VM
+	 * object guarantees that the vnode pager (and the ubc_info) won't go
+	 * away during the fault.
+	 * Other callers need to protect against vnode reclaim by holding the
+	 * vnode lock, for example.
+	 */
 	if (!
UBCINFOEXISTS(vp)) { blobs = NULL; @@ -2462,34 +3329,50 @@ ubc_get_cs_blobs( blobs = uip->cs_blobs; out: - vnode_unlock(vp); - return blobs; } +void +ubc_get_cs_mtime( + struct vnode *vp, + struct timespec *cs_mtime) +{ + struct ubc_info *uip; + + if (! UBCINFOEXISTS(vp)) { + cs_mtime->tv_sec = 0; + cs_mtime->tv_nsec = 0; + return; + } + + uip = vp->v_ubcinfo; + cs_mtime->tv_sec = uip->cs_mtime.tv_sec; + cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec; +} + unsigned long cs_validate_page_no_hash = 0; unsigned long cs_validate_page_bad_hash = 0; boolean_t cs_validate_page( void *_blobs, + memory_object_t pager, memory_object_offset_t page_offset, const void *data, - boolean_t *tainted) + unsigned *tainted) { - SHA1_CTX sha1ctxt; - unsigned char actual_hash[SHA1_RESULTLEN]; - unsigned char expected_hash[SHA1_RESULTLEN]; + union cs_hash_union mdctx; + struct cs_hash *hashtype = NULL; + unsigned char actual_hash[CS_HASH_MAX_SIZE]; + unsigned char expected_hash[CS_HASH_MAX_SIZE]; boolean_t found_hash; struct cs_blob *blobs, *blob; const CS_CodeDirectory *cd; - const CS_SuperBlob *embedded; - off_t start_offset, end_offset; const unsigned char *hash; boolean_t validated; off_t offset; /* page offset in the file */ size_t size; off_t codeLimit = 0; - char *lower_bound, *upper_bound; + const char *lower_bound, *upper_bound; vm_offset_t kaddr, blob_addr; vm_size_t ksize; kern_return_t kr; @@ -2529,51 +3412,42 @@ cs_validate_page( if (kr != KERN_SUCCESS) { /* XXX FBDP what to do !? */ printf("cs_validate_page: failed to map blob, " - "size=0x%x kr=0x%x\n", - blob->csb_mem_size, kr); + "size=0x%lx kr=0x%x\n", + (size_t)blob->csb_mem_size, kr); break; } } + blob_addr = kaddr + blob->csb_mem_offset; - lower_bound = CAST_DOWN(char *, blob_addr); upper_bound = lower_bound + blob->csb_mem_size; - - embedded = (const CS_SuperBlob *) blob_addr; - cd = findCodeDirectory(embedded, lower_bound, upper_bound); + + cd = blob->csb_cd; if (cd != NULL) { - if (cd->pageSize != PAGE_SHIFT || - cd->hashType != 0x1 || - cd->hashSize != SHA1_RESULTLEN) { - /* bogus blob ? 
*/ -#if !CS_BLOB_KEEP_IN_KERNEL - kmem_free(kernel_map, kaddr, ksize); -#endif /* CS_BLOB_KEEP_IN_KERNEL */ - continue; - } - - end_offset = round_page(ntohl(cd->codeLimit)); - start_offset = end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE); + /* all CD's that have been injected is already validated */ + offset = page_offset - blob->csb_base_offset; - if (offset < start_offset || - offset >= end_offset) { + if (offset < blob->csb_start_offset || + offset >= blob->csb_end_offset) { /* our page is not covered by this blob */ -#if !CS_BLOB_KEEP_IN_KERNEL - kmem_free(kernel_map, kaddr, ksize); -#endif /* CS_BLOB_KEEP_IN_KERNEL */ continue; } + hashtype = blob->csb_hashtype; + if (hashtype == NULL) + panic("unknown hash type ?"); + if (hashtype->cs_digest_size > sizeof(actual_hash)) + panic("hash size too large"); + codeLimit = ntohl(cd->codeLimit); - hash = hashes(cd, atop(offset), - lower_bound, upper_bound); - bcopy(hash, expected_hash, sizeof (expected_hash)); - found_hash = TRUE; -#if !CS_BLOB_KEEP_IN_KERNEL - /* we no longer need that blob in the kernel map */ - kmem_free(kernel_map, kaddr, ksize); -#endif /* CS_BLOB_KEEP_IN_KERNEL */ + hash = hashes(cd, (uint32_t)(offset>>PAGE_SHIFT_4K), + hashtype->cs_size, + lower_bound, upper_bound); + if (hash != NULL) { + bcopy(hash, expected_hash, hashtype->cs_size); + found_hash = TRUE; + } break; } @@ -2591,49 +3465,52 @@ cs_validate_page( cs_validate_page_no_hash++; if (cs_debug > 1) { printf("CODE SIGNING: cs_validate_page: " - "off 0x%llx: no hash to validate !?\n", - page_offset); + "mobj %p off 0x%llx: no hash to validate !?\n", + pager, page_offset); } validated = FALSE; - *tainted = FALSE; + *tainted = 0; } else { - const uint32_t *asha1, *esha1; - size = PAGE_SIZE; - if (offset + size > codeLimit) { + *tainted = 0; + + size = PAGE_SIZE_4K; + const uint32_t *asha1, *esha1; + if ((off_t)(offset + size) > codeLimit) { /* partial page at end of segment */ assert(offset < codeLimit); - size = codeLimit & PAGE_MASK; + size = (size_t) (codeLimit & PAGE_MASK_4K); + *tainted |= CS_VALIDATE_NX; } - /* compute the actual page's SHA1 hash */ - SHA1Init(&sha1ctxt); - SHA1Update(&sha1ctxt, data, size); - SHA1Final(actual_hash, &sha1ctxt); + + hashtype->cs_init(&mdctx); + hashtype->cs_update(&mdctx, data, size); + hashtype->cs_final(actual_hash, &mdctx); asha1 = (const uint32_t *) actual_hash; esha1 = (const uint32_t *) expected_hash; - if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) { + if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) { if (cs_debug) { printf("CODE SIGNING: cs_validate_page: " - "off 0x%llx size 0x%lx: " + "mobj %p off 0x%llx size 0x%lx: " "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != " "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n", - page_offset, size, + pager, page_offset, size, asha1[0], asha1[1], asha1[2], asha1[3], asha1[4], esha1[0], esha1[1], esha1[2], esha1[3], esha1[4]); } cs_validate_page_bad_hash++; - *tainted = TRUE; + *tainted |= CS_VALIDATE_TAINTED; } else { - if (cs_debug > 1) { + if (cs_debug > 10) { printf("CODE SIGNING: cs_validate_page: " - "off 0x%llx size 0x%lx: SHA1 OK\n", - page_offset, size); + "mobj %p off 0x%llx size 0x%lx: " + "SHA1 OK\n", + pager, page_offset, size); } - *tainted = FALSE; } validated = TRUE; } @@ -2647,8 +3524,11 @@ ubc_cs_getcdhash( off_t offset, unsigned char *cdhash) { - struct cs_blob *blobs, *blob; - off_t rel_offset; + struct cs_blob *blobs, *blob; + off_t rel_offset; + int ret; + + vnode_lock(vp); blobs = ubc_get_cs_blobs(vp); for (blob = blobs; @@ -2665,11 +3545,138 @@ 
ubc_cs_getcdhash( if (blob == NULL) { /* we didn't find a blob covering "offset" */ - return EBADEXEC; /* XXX any better error ? */ + ret = EBADEXEC; /* XXX any better error ? */ + } else { + /* get the SHA1 hash of that blob */ + bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash)); + ret = 0; } - /* get the SHA1 hash of that blob */ - bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1)); + vnode_unlock(vp); - return 0; + return ret; +} + +#if CHECK_CS_VALIDATION_BITMAP +#define stob(s) ((atop_64((s)) + 07) >> 3) +extern boolean_t root_fs_upgrade_try; + +/* + * Should we use the code-sign bitmap to avoid repeated code-sign validation? + * Depends: + * a) Is the target vnode on the root filesystem? + * b) Has someone tried to mount the root filesystem read-write? + * If answers are (a) yes AND (b) no, then we can use the bitmap. + */ +#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try) +kern_return_t +ubc_cs_validation_bitmap_allocate( + vnode_t vp) +{ + kern_return_t kr = KERN_SUCCESS; + struct ubc_info *uip; + char *target_bitmap; + vm_object_size_t bitmap_size; + + if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) { + kr = KERN_INVALID_ARGUMENT; + } else { + uip = vp->v_ubcinfo; + + if ( uip->cs_valid_bitmap == NULL ) { + bitmap_size = stob(uip->ui_size); + target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size ); + if (target_bitmap == 0) { + kr = KERN_NO_SPACE; + } else { + kr = KERN_SUCCESS; + } + if( kr == KERN_SUCCESS ) { + memset( target_bitmap, 0, (size_t)bitmap_size); + uip->cs_valid_bitmap = (void*)target_bitmap; + uip->cs_valid_bitmap_size = bitmap_size; + } + } + } + return kr; +} + +kern_return_t +ubc_cs_check_validation_bitmap ( + vnode_t vp, + memory_object_offset_t offset, + int optype) +{ + kern_return_t kr = KERN_SUCCESS; + + if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) { + kr = KERN_INVALID_ARGUMENT; + } else { + struct ubc_info *uip = vp->v_ubcinfo; + char *target_bitmap = uip->cs_valid_bitmap; + + if ( target_bitmap == NULL ) { + kr = KERN_INVALID_ARGUMENT; + } else { + uint64_t bit, byte; + bit = atop_64( offset ); + byte = bit >> 3; + + if ( byte > uip->cs_valid_bitmap_size ) { + kr = KERN_INVALID_ARGUMENT; + } else { + + if (optype == CS_BITMAP_SET) { + target_bitmap[byte] |= (1 << (bit & 07)); + kr = KERN_SUCCESS; + } else if (optype == CS_BITMAP_CLEAR) { + target_bitmap[byte] &= ~(1 << (bit & 07)); + kr = KERN_SUCCESS; + } else if (optype == CS_BITMAP_CHECK) { + if ( target_bitmap[byte] & (1 << (bit & 07))) { + kr = KERN_SUCCESS; + } else { + kr = KERN_FAILURE; + } + } + } + } + } + return kr; +} + +void +ubc_cs_validation_bitmap_deallocate( + vnode_t vp) +{ + struct ubc_info *uip; + void *target_bitmap; + vm_object_size_t bitmap_size; + + if ( UBCINFOEXISTS(vp)) { + uip = vp->v_ubcinfo; + + if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) { + bitmap_size = uip->cs_valid_bitmap_size; + kfree( target_bitmap, (vm_size_t) bitmap_size ); + uip->cs_valid_bitmap = NULL; + } + } +} +#else +kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){ + return KERN_INVALID_ARGUMENT; +} + +kern_return_t ubc_cs_check_validation_bitmap( + __unused struct vnode *vp, + __unused memory_object_offset_t offset, + __unused int optype){ + + return KERN_INVALID_ARGUMENT; +} + +void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){ + return; } +#endif /* CHECK_CS_VALIDATION_BITMAP */
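
To make the new validation paths above concrete, a few self-contained userspace sketches follow; they illustrate the data structures the diff manipulates and are not part of the change itself. First, locating a CodeDirectory inside an embedded-signature SuperBlob, mirroring the index walk in cs_validate_csblob() and csblob_find_blob_bytes(). The struct layouts and magic constants are taken from the old definitions this diff removes; the function name and the reduced error checking are illustrative only.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* ntohl() */

#define CSMAGIC_EMBEDDED_SIGNATURE	0xfade0cc0
#define CSSLOT_CODEDIRECTORY		0

typedef struct __BlobIndex {
	uint32_t type;			/* type of entry */
	uint32_t offset;		/* offset of entry */
} CS_BlobIndex;

typedef struct __SuperBlob {
	uint32_t magic;			/* CSMAGIC_EMBEDDED_SIGNATURE */
	uint32_t length;		/* total length of SuperBlob */
	uint32_t count;			/* number of index entries following */
	CS_BlobIndex index[];		/* (count) entries */
} CS_SuperBlob;

/* Return the byte offset of the first CodeDirectory slot, or 0 if the
 * buffer is not a well-formed SuperBlob or carries no CodeDirectory. */
static uint32_t
find_code_directory_offset(const uint8_t *addr, size_t length)
{
	const CS_SuperBlob *sb = (const CS_SuperBlob *)(const void *)addr;
	uint32_t n, count;

	if (length < sizeof(*sb) ||
	    ntohl(sb->magic) != CSMAGIC_EMBEDDED_SIGNATURE)
		return 0;
	count = ntohl(sb->count);
	/* the index array must fit in the remaining bytes */
	if ((length - sizeof(*sb)) / sizeof(CS_BlobIndex) < count)
		return 0;
	for (n = 0; n < count; n++) {
		if (ntohl(sb->index[n].type) == CSSLOT_CODEDIRECTORY &&
		    ntohl(sb->index[n].offset) < length)
			return ntohl(sb->index[n].offset);
	}
	return 0;
}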
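
hash_rank() turns the hashPriorities[] table into a total order, and cs_validate_csblob() keeps the best-ranked CodeDirectory while rejecting rank ties. A standalone sketch of that selection; the numeric CS_HASHTYPE_* values are assumed from the kernel headers and should be treated as illustrative.

#include <stdio.h>
#include <stdint.h>

/* assumed values, per the kernel's code-signing headers */
#define CS_HASHTYPE_SHA1             1
#define CS_HASHTYPE_SHA256           2
#define CS_HASHTYPE_SHA256_TRUNCATED 3
#define CS_HASHTYPE_SHA384           4

/* same priority table as the kernel: later entries are preferred */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,
};

static unsigned int
hash_rank(uint32_t type)
{
	unsigned int n;

	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); n++)
		if (hashPriorities[n] == type)
			return n + 1;
	return 0;	/* unsupported => never chosen */
}

int
main(void)
{
	/* a fat signature carrying SHA-1 and SHA-256 directories:
	 * the SHA-256 one wins because its rank is higher */
	uint32_t cds[] = { CS_HASHTYPE_SHA1, CS_HASHTYPE_SHA256 };
	uint32_t best = 0;
	unsigned int best_rank = 0, i, r;

	for (i = 0; i < 2; i++) {
		r = hash_rank(cds[i]);
		if (r > best_rank) {
			best_rank = r;
			best = cds[i];
		}
	}
	printf("chosen hashType = %u (rank %u)\n", best, best_rank);
	return 0;
}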
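
The scatter path in hashes() maps a page number to a hash slot when the signature covers discontiguous page runs. A sketch of the same walk; the 64-bit SC_Scatter fields are assumed from the kernel headers (the diff only reads count and base), and the coverage test here uses a strict bound where the kernel checks sbase + scount >= page and relies on its later range validation.

#include <stdint.h>
#include <arpa/inet.h>

/* assumed layout; only count and base are used below */
typedef struct {
	uint32_t count;		/* pages covered; 0 terminates the array */
	uint32_t base;		/* first page number covered */
	uint64_t targetOffset;
	uint64_t spare;
} SC_Scatter;

/* Return the code-slot index holding the hash for 'page', or -1 if no
 * scatter covers it. Slots are handed out in array order, so we
 * accumulate them as we skip earlier scatters, as hashes() does. */
static long
scatter_slot_for_page(const SC_Scatter *sc, uint32_t page)
{
	uint32_t slot = 0, scount, sbase, prev_base = 0;
	int first = 1;

	for (;; sc++) {
		scount = ntohl(sc->count);
		if (scount == 0)
			return -1;		/* sentinel: page not covered */
		sbase = ntohl(sc->base);
		if (!first && sbase <= prev_base)
			return -1;		/* unordered array: reject */
		first = 0;
		prev_base = sbase;
		if (sbase > page)
			return -1;		/* gaps are not covered */
		if (page < sbase + scount)
			return slot + (page - sbase);
		slot += scount;			/* skip this scatter's hashes */
	}
}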
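
ubc_cs_blob_add() computes the cdhash by running the CodeDirectory's own digest over the entire CD blob and truncating the result to CS_CDHASH_LEN bytes. A sketch for the SHA-256 case using CommonCrypto on macOS; the kernel uses its internal cs_hash ops instead, and the value 20 for CS_CDHASH_LEN is an assumption (the diff uses the constant without defining it).

#include <stdint.h>
#include <string.h>
#include <CommonCrypto/CommonDigest.h>

#define CS_CDHASH_LEN	20	/* assumed: cdhash stays 20 bytes even for SHA-256 CDs */

/* 'cd' points at the raw CodeDirectory blob; 'cd_length' is
 * ntohl(cd->length), i.e. the whole blob including dynamic content. */
static void
compute_cdhash_sha256(const void *cd, uint32_t cd_length,
    uint8_t out[CS_CDHASH_LEN])
{
	uint8_t md[CC_SHA256_DIGEST_LENGTH];

	CC_SHA256(cd, (CC_LONG)cd_length, md);
	memcpy(out, md, CS_CDHASH_LEN);	/* truncate, as the kernel's memcpy does */
}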
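
Special slots sit immediately before hashOffset and grow downward: slot k lives at cd + hashOffset - k * hash_len, which is what find_special_slot() computes. Combining that with the digest comparison in csblob_get_entitlements() gives the following sketch; CSSLOT_ENTITLEMENTS = 5 is assumed from the kernel headers, and the all-zero-slot convention for a missing blob follows the kernel code above.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>
#include <CommonCrypto/CommonDigest.h>

#define CSSLOT_ENTITLEMENTS	5	/* assumed special-slot index */

typedef struct {
	uint32_t magic;
	uint32_t length;
	char data[];
} CS_GenericBlob;

/* 'cd' is the raw CodeDirectory, 'hash_off' = ntohl(cd->hashOffset),
 * 'n_special' = ntohl(cd->nSpecialSlots), 'ents' the entitlements blob
 * (or NULL). Returns nonzero when blob and slot agree. */
static int
entitlements_hash_ok(const uint8_t *cd, uint32_t hash_off, uint32_t n_special,
    const CS_GenericBlob *ents, size_t hash_len)
{
	uint8_t md[CC_SHA256_DIGEST_LENGTH];
	const uint8_t *slot;

	if (n_special < CSSLOT_ENTITLEMENTS)
		return ents == NULL;	/* no slot: a blob would be unsigned */
	slot = cd + hash_off - CSSLOT_ENTITLEMENTS * hash_len;
	if (ents == NULL) {
		/* slot must be all-zero when the blob is absent */
		static const uint8_t zero[CC_SHA256_DIGEST_LENGTH];
		return memcmp(slot, zero, hash_len) == 0;
	}
	/* the hash covers the whole GenericBlob, header included */
	CC_SHA256(ents, (CC_LONG)ntohl(ents->length), md);
	return memcmp(md, slot, hash_len) == 0;
}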
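
cs_validate_page() hashes fixed 4K pages and compares against the slot hash; on the final partial page only the bytes below codeLimit are digested (the kernel additionally flags such pages CS_VALIDATE_NX). A per-page check restated with CommonCrypto's SHA-256; a truncated-SHA-256 directory simply compares fewer bytes.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <CommonCrypto/CommonDigest.h>

#define PAGE_SIZE_4K	4096
#define PAGE_MASK_4K	(PAGE_SIZE_4K - 1)

/* 'offset' is the page's byte offset in the file (caller ensures
 * offset < code_limit), 'expected' points at the CD slot hash,
 * 'hash_len' is cd->hashSize (32, or 20 when truncated). */
static int
page_hash_ok(const uint8_t *page, uint64_t offset, uint64_t code_limit,
    const uint8_t *expected, size_t hash_len)
{
	uint8_t md[CC_SHA256_DIGEST_LENGTH];
	size_t size = PAGE_SIZE_4K;

	if (offset + size > code_limit)		/* partial page at EOF */
		size = (size_t)(code_limit & PAGE_MASK_4K);

	CC_SHA256(page, (CC_LONG)size, md);
	return memcmp(md, expected, hash_len) == 0;
}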
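
The CHECK_CS_VALIDATION_BITMAP code keeps one bit per 4K page, and stob() sizes the bitmap as (pages + 7) / 8 bytes. The bit arithmetic from ubc_cs_check_validation_bitmap(), restated:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT_4K	12

/* bytes needed for one bit per page of a 'size'-byte file; mirrors
 * stob(), which truncates rather than rounds up the page count */
static size_t
bitmap_bytes(uint64_t size)
{
	return (size_t)(((size >> PAGE_SHIFT_4K) + 7) >> 3);
}

static void
bitmap_set(uint8_t *bm, uint64_t offset)
{
	uint64_t bit = offset >> PAGE_SHIFT_4K;	/* page number */

	bm[bit >> 3] |= (uint8_t)(1u << (bit & 7));
}

static int
bitmap_check(const uint8_t *bm, uint64_t offset)
{
	uint64_t bit = offset >> PAGE_SHIFT_4K;

	return (bm[bit >> 3] >> (bit & 7)) & 1;
}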
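
Finally, csblob_parse_teamid() returns a pointer into the CodeDirectory itself, so the CD must stay mapped while the string is used. A sketch of the same lookup; the struct extends the old CS_CodeDirectory layout removed by this diff with the scatterOffset/teamOffset fields the new code reads, and the CS_SUPPORTSTEAMID value is assumed from the kernel headers.

#include <stdint.h>
#include <arpa/inet.h>

#define CS_SUPPORTSTEAMID	0x20200		/* assumed version threshold */

typedef struct __CodeDirectory {
	uint32_t magic;		/* magic number (CSMAGIC_CODEDIRECTORY) */
	uint32_t length;	/* total length of CodeDirectory blob */
	uint32_t version;	/* compatibility version */
	uint32_t flags;		/* setup and mode flags */
	uint32_t hashOffset;	/* offset of hash slot element at index zero */
	uint32_t identOffset;	/* offset of identifier string */
	uint32_t nSpecialSlots;	/* number of special hash slots */
	uint32_t nCodeSlots;	/* number of ordinary (code) hash slots */
	uint32_t codeLimit;	/* limit to main image signature range */
	uint8_t hashSize;	/* size of each hash in bytes */
	uint8_t hashType;	/* type of hash (cdHashType* constants) */
	uint8_t spare1;		/* unused (must be zero) */
	uint8_t pageSize;	/* log2(page size in bytes); 0 => infinite */
	uint32_t spare2;	/* unused (must be zero) */
	uint32_t scatterOffset;	/* assumed: present when version >= CS_SUPPORTSSCATTER */
	uint32_t teamOffset;	/* assumed: present when version >= CS_SUPPORTSTEAMID */
} CS_CodeDirectory;

/* Return the NUL-terminated team ID inside the CD, or NULL if the
 * directory predates team IDs or carries none. */
static const char *
parse_teamid(const CS_CodeDirectory *cd)
{
	if (ntohl(cd->version) < CS_SUPPORTSTEAMID || cd->teamOffset == 0)
		return NULL;
	return (const char *)cd + ntohl(cd->teamOffset);
}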