/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Author:  Umesh Vaishampayan [umeshv@apple.com]
 *          05-Aug-1999 umeshv  Created.
 *
 * Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 *
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>

#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>

#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#include <libkern/crypto/sha1.h>
#include <libkern/crypto/sha2.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
    boolean_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
    boolean_t is_signed);
extern boolean_t memory_object_is_signed(memory_object_control_t);
extern void memory_object_mark_trusted(
    memory_object_control_t control);

/* XXX Same for those. */

extern void Debugger(const char *message);

/* XXX no one uses this interface! */
kern_return_t ubc_page_op_with_control(
    memory_object_control_t control,
    off_t                   f_offset,
    int                     ops,
    ppnum_t                 *phys_entryp,
    int                     *flagsp);

#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);

struct zone *ubc_info_zone;
static uint32_t cs_blob_generation_count = 1;

/*
 * Routines to navigate code signing data structures in the kernel...
 */

#define PAGE_SHIFT_4K           (12)
static boolean_t
cs_valid_range(
    const void *start,
    const void *end,
    const void *lower_bound,
    const void *upper_bound)
{
    if (upper_bound < lower_bound ||
        end < start) {
        return FALSE;
    }

    if (start < lower_bound ||
        end > upper_bound) {
        return FALSE;
    }

    return TRUE;
}
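/*
 * Illustrative sketch (not part of the original source): the typical
 * cs_valid_range() pattern is to check that a structure parsed out of an
 * untrusted blob lies entirely inside that blob before it is dereferenced.
 * The helper name and the offset parameter below are hypothetical.
 */
#if 0 /* example only */
static int
example_check_embedded_blob(const void *buf, size_t buf_len, size_t offset)
{
    const char *lower = (const char *)buf;
    const char *upper = lower + buf_len;
    const CS_GenericBlob *gb = (const CS_GenericBlob *)(const void *)(lower + offset);

    /* reject unless [gb, gb + 1) is fully contained in [lower, upper) */
    if (!cs_valid_range(gb, gb + 1, lower, upper)) {
        return EBADEXEC;
    }
    return 0;
}
#endif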
typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

struct cs_hash {
    uint8_t         cs_type;        /* type code as per code signing */
    size_t          cs_size;        /* size of effective hash (may be truncated) */
    size_t          cs_digest_size; /* size of native hash */
    cs_md_init      cs_init;
    cs_md_update    cs_update;
    cs_md_final     cs_final;
};

uint8_t
cs_hash_type(
    struct cs_hash const * const cs_hash)
{
    return cs_hash->cs_type;
}
static const struct cs_hash cs_hash_sha1 = {
    .cs_type = CS_HASHTYPE_SHA1,
    .cs_size = CS_SHA1_LEN,
    .cs_digest_size = SHA_DIGEST_LENGTH,
    .cs_init = (cs_md_init)SHA1Init,
    .cs_update = (cs_md_update)SHA1Update,
    .cs_final = (cs_md_final)SHA1Final,
};

static const struct cs_hash cs_hash_sha256 = {
    .cs_type = CS_HASHTYPE_SHA256,
    .cs_size = SHA256_DIGEST_LENGTH,
    .cs_digest_size = SHA256_DIGEST_LENGTH,
    .cs_init = (cs_md_init)SHA256_Init,
    .cs_update = (cs_md_update)SHA256_Update,
    .cs_final = (cs_md_final)SHA256_Final,
};

static const struct cs_hash cs_hash_sha256_truncate = {
    .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
    .cs_size = CS_SHA256_TRUNCATED_LEN,
    .cs_digest_size = SHA256_DIGEST_LENGTH,
    .cs_init = (cs_md_init)SHA256_Init,
    .cs_update = (cs_md_update)SHA256_Update,
    .cs_final = (cs_md_final)SHA256_Final,
};

static const struct cs_hash cs_hash_sha384 = {
    .cs_type = CS_HASHTYPE_SHA384,
    .cs_size = SHA384_DIGEST_LENGTH,
    .cs_digest_size = SHA384_DIGEST_LENGTH,
    .cs_init = (cs_md_init)SHA384_Init,
    .cs_update = (cs_md_update)SHA384_Update,
    .cs_final = (cs_md_final)SHA384_Final,
};
static struct cs_hash const *
cs_find_md(uint8_t type)
{
    if (type == CS_HASHTYPE_SHA1) {
        return &cs_hash_sha1;
    } else if (type == CS_HASHTYPE_SHA256) {
        return &cs_hash_sha256;
    } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
        return &cs_hash_sha256_truncate;
    } else if (type == CS_HASHTYPE_SHA384) {
        return &cs_hash_sha384;
    }
    return NULL;
}
union cs_hash_union {
    SHA1_CTX        sha1ctx;
    SHA256_CTX      sha256ctx;
    SHA384_CTX      sha384ctx;
};
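/*
 * Illustrative sketch (not part of the original source): how the cs_hash
 * descriptors returned by cs_find_md() are meant to be driven.  The same
 * init/update/final sequence is used by csblob_get_entitlements() below.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_hash_buffer(uint8_t cs_hashtype, const void *data, size_t size,
    uint8_t out[CS_HASH_MAX_SIZE])
{
    struct cs_hash const *hashtype = cs_find_md(cs_hashtype);
    union cs_hash_union context;

    if (hashtype == NULL || hashtype->cs_digest_size > CS_HASH_MAX_SIZE) {
        return EBADEXEC;
    }
    hashtype->cs_init(&context);
    hashtype->cs_update(&context, data, size);
    hashtype->cs_final(out, &context);
    return 0;
}
#endif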
/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 */
static const uint32_t hashPriorities[] = {
    CS_HASHTYPE_SHA1,
    CS_HASHTYPE_SHA256_TRUNCATED,
    CS_HASHTYPE_SHA256,
    CS_HASHTYPE_SHA384,
};

static unsigned int
hash_rank(const CS_CodeDirectory *cd)
{
    uint32_t type = cd->hashType;
    unsigned int n;

    for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
        if (hashPriorities[n] == type) {
            return n + 1;
        }
    }
    return 0;       /* not supported */
}
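/*
 * Illustrative sketch (not part of the original source): hash_rank() gives a
 * total order over supported hash types, so picking the "best" of two code
 * directories is a simple comparison (rank 0 means unsupported).  This is how
 * cs_validate_csblob() below chooses among alternate code directories.  The
 * helper name is hypothetical.
 */
#if 0 /* example only */
static const CS_CodeDirectory *
example_pick_best_cd(const CS_CodeDirectory *a, const CS_CodeDirectory *b)
{
    return (hash_rank(a) >= hash_rank(b)) ? a : b;
}
#endif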
/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
    const CS_CodeDirectory *cd,
    uint32_t page,
    size_t hash_len,
    const char *lower_bound,
    const char *upper_bound)
{
    const unsigned char *base, *top, *hash;
    uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

    assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

    if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
        /* Get first scatter struct */
        const SC_Scatter *scatter = (const SC_Scatter *)
            ((const char *)cd + ntohl(cd->scatterOffset));
        uint32_t hashindex = 0, scount, sbase = 0;
        /* iterate all scatter structs */
        do {
            if ((const char *)scatter > (const char *)cd + ntohl(cd->length)) {
                if (cs_debug) {
                    printf("CODE SIGNING: Scatter extends past Code Directory\n");
                }
                return NULL;
            }

            scount = ntohl(scatter->count);
            uint32_t new_base = ntohl(scatter->base);

            /* last scatter? */
            if (scount == 0) {
                return NULL;
            }

            if ((hashindex > 0) && (new_base <= sbase)) {
                if (cs_debug) {
                    printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
                        sbase, new_base);
                }
                return NULL;    /* unordered scatter array */
            }
            sbase = new_base;

            /* this scatter beyond page we're looking for? */
            if (sbase > page) {
                return NULL;
            }

            if (sbase + scount >= page) {
                /* Found the scatter struct that is
                 * referencing our page */

                /* base = address of first hash covered by scatter */
                base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
                    hashindex * hash_len;
                /* top = address of first hash after this scatter */
                top = base + scount * hash_len;
                if (!cs_valid_range(base, top, lower_bound,
                    upper_bound) ||
                    hashindex > nCodeSlots) {
                    return NULL;
                }

                break;
            }

            /* this scatter struct is before the page we're looking
             * for. Iterate. */
            hashindex += scount;
            scatter++;
        } while (1);

        hash = base + (page - sbase) * hash_len;
    } else {
        base = (const unsigned char *)cd + ntohl(cd->hashOffset);
        top = base + nCodeSlots * hash_len;
        if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
            page > nCodeSlots) {
            return NULL;
        }
        assert(page < nCodeSlots);

        hash = base + page * hash_len;
    }

    if (!cs_valid_range(hash, hash + hash_len,
        lower_bound, upper_bound)) {
        hash = NULL;
    }

    return hash;
}
/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:  cd                      Pointer to code directory buffer
 *              length                  Length of buffer
 *
 * Returns:     0                       Success
 *              EBADEXEC                Invalid code signature
 */
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
    struct cs_hash const *hashtype;

    if (length < sizeof(*cd)) {
        return EBADEXEC;
    }
    if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
        return EBADEXEC;
    }
    if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
        return EBADEXEC;
    }
    hashtype = cs_find_md(cd->hashType);
    if (hashtype == NULL) {
        return EBADEXEC;
    }

    if (cd->hashSize != hashtype->cs_size) {
        return EBADEXEC;
    }

    if (length < ntohl(cd->hashOffset)) {
        return EBADEXEC;
    }

    /* check that nSpecialSlots fits in the buffer in front of hashOffset */
    if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
        return EBADEXEC;
    }

    /* check that codeslots fits in the buffer */
    if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
        return EBADEXEC;
    }

    if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
        if (length < ntohl(cd->scatterOffset)) {
            return EBADEXEC;
        }

        const SC_Scatter *scatter = (const SC_Scatter *)
            (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
        uint32_t nPages = 0;

        /*
         * Check each scatter buffer, since we don't know the
         * length of the scatter buffer array, we have to
         * check each entry.
         */
        while (1) {
            /* check that the end of each scatter buffer is within the length */
            if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
                return EBADEXEC;
            }
            uint32_t scount = ntohl(scatter->count);
            /* a zero count terminates the scatter array */
            if (scount == 0) {
                break;
            }
            if (nPages + scount < nPages) {
                return EBADEXEC;
            }
            nPages += scount;
            scatter++;

            /* XXX check that bases doesn't overlap */
            /* XXX check that targetOffset doesn't overlap */
        }
#if 0 /* rdar://12579439 */
        if (nPages != ntohl(cd->nCodeSlots)) {
            return EBADEXEC;
        }
#endif
    }

    if (length < ntohl(cd->identOffset)) {
        return EBADEXEC;
    }

    /* identifier is NUL terminated string */
    if (cd->identOffset) {
        const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
        if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
            return EBADEXEC;
        }
    }

    /* team identifier is NULL terminated string */
    if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
        if (length < ntohl(cd->teamOffset)) {
            return EBADEXEC;
        }

        const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
        if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
            return EBADEXEC;
        }
    }

    return 0;
}
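/*
 * Illustrative sketch (not part of the original source): callers hand
 * cs_validate_codedirectory() an untrusted buffer and rely on it to verify
 * every embedded offset/count before the directory is used.  Note that the
 * slot checks above are written as divisions (e.g. hashOffset / cs_size <
 * nSpecialSlots) so an attacker-chosen count can never overflow a size
 * computation.  The helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_validate_raw_cd(const uint8_t *addr, size_t length)
{
    const CS_CodeDirectory *cd = (const CS_CodeDirectory *)(const void *)addr;

    /* checks magic, page size, hash type, and all embedded offsets/counts */
    return cs_validate_codedirectory(cd, length);
}
#endif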
static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
    if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
        return EBADEXEC;
    }
    return 0;
}
/*
 * Validate the superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 * Parameters:  buffer                  Pointer to code signature
 *              length                  Length of buffer
 *              rcd                     returns pointer to code directory
 *
 * Returns:     0                       Success
 *              EBADEXEC                Invalid code signature
 */
503 const size_t blob_size
,
504 const CS_CodeDirectory
**rcd
,
505 const CS_GenericBlob
**rentitlements
)
507 const CS_GenericBlob
*blob
;
512 *rentitlements
= NULL
;
514 blob
= (const CS_GenericBlob
*)(const void *)addr
;
517 error
= cs_validate_blob(blob
, length
);
521 length
= ntohl(blob
->length
);
523 if (ntohl(blob
->magic
) == CSMAGIC_EMBEDDED_SIGNATURE
) {
524 const CS_SuperBlob
*sb
;
526 const CS_CodeDirectory
*best_cd
= NULL
;
527 unsigned int best_rank
= 0;
529 const CS_CodeDirectory
*sha1_cd
= NULL
;
532 if (length
< sizeof(CS_SuperBlob
)) {
536 sb
= (const CS_SuperBlob
*)blob
;
537 count
= ntohl(sb
->count
);
539 /* check that the array of BlobIndex fits in the rest of the data */
540 if ((length
- sizeof(CS_SuperBlob
)) / sizeof(CS_BlobIndex
) < count
) {
544 /* now check each BlobIndex */
545 for (n
= 0; n
< count
; n
++) {
546 const CS_BlobIndex
*blobIndex
= &sb
->index
[n
];
547 uint32_t type
= ntohl(blobIndex
->type
);
548 uint32_t offset
= ntohl(blobIndex
->offset
);
549 if (length
< offset
) {
553 const CS_GenericBlob
*subBlob
=
554 (const CS_GenericBlob
*)(const void *)(addr
+ offset
);
556 size_t subLength
= length
- offset
;
558 if ((error
= cs_validate_blob(subBlob
, subLength
)) != 0) {
561 subLength
= ntohl(subBlob
->length
);
563 /* extra validation for CDs, that is also returned */
564 if (type
== CSSLOT_CODEDIRECTORY
|| (type
>= CSSLOT_ALTERNATE_CODEDIRECTORIES
&& type
< CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT
)) {
565 const CS_CodeDirectory
*candidate
= (const CS_CodeDirectory
*)subBlob
;
566 if ((error
= cs_validate_codedirectory(candidate
, subLength
)) != 0) {
569 unsigned int rank
= hash_rank(candidate
);
571 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate
->hashType
, (int)rank
, (int)type
, (int)n
);
573 if (best_cd
== NULL
|| rank
> best_rank
) {
578 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd
->hashType
, best_rank
);
581 } else if (best_cd
!= NULL
&& rank
== best_rank
) {
582 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
583 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd
->hashType
);
587 if (candidate
->hashType
== CS_HASHTYPE_SHA1
) {
588 if (sha1_cd
!= NULL
) {
589 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
595 } else if (type
== CSSLOT_ENTITLEMENTS
) {
596 if (ntohl(subBlob
->magic
) != CSMAGIC_EMBEDDED_ENTITLEMENTS
) {
599 if (*rentitlements
!= NULL
) {
600 printf("multiple entitlements blobs\n");
603 *rentitlements
= subBlob
;
608 /* To keep watchOS fast enough, we have to resort to sha1 for
611 * At the time of writing this comment, known sha1 attacks are
612 * collision attacks (not preimage or second preimage
613 * attacks), which do not apply to platform binaries since
614 * they have a fixed hash in the trust cache. Given this
615 * property, we only prefer sha1 code directories for adhoc
616 * signatures, which always have to be in a trust cache to be
617 * valid (can-load-cdhash does not exist for watchOS). Those
618 * are, incidentally, also the platform binaries, for which we
619 * care about the performance hit that sha256 would bring us.
621 * Platform binaries may still contain a (not chosen) sha256
622 * code directory, which keeps software updates that switch to
626 if (*rcd
!= NULL
&& sha1_cd
!= NULL
&& (ntohl(sha1_cd
->flags
) & CS_ADHOC
)) {
627 if (sha1_cd
->flags
!= (*rcd
)->flags
) {
628 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
629 (int)(*rcd
)->hashType
, (*rcd
)->flags
, sha1_cd
->flags
);
637 } else if (ntohl(blob
->magic
) == CSMAGIC_CODEDIRECTORY
) {
638 if ((error
= cs_validate_codedirectory((const CS_CodeDirectory
*)(const void *)addr
, length
)) != 0) {
641 *rcd
= (const CS_CodeDirectory
*)blob
;
/*
 * csblob_find_blob_bytes
 *
 * Find a blob in the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory if it is stored as well as
 * searching the superblob.
 *
 * Parameters:  buffer                  Pointer to code signature
 *              length                  Length of buffer
 *              type                    type of blob to find
 *              magic                   the magic number for that blob
 *
 * Returns:     pointer                 Success
 *              NULL                    Buffer not found
 */
const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
    const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

    if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
        const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
        size_t n, count = ntohl(sb->count);

        for (n = 0; n < count; n++) {
            if (ntohl(sb->index[n].type) != type) {
                continue;
            }
            uint32_t offset = ntohl(sb->index[n].offset);
            if (length - sizeof(const CS_GenericBlob) < offset) {
                return NULL;
            }
            blob = (const CS_GenericBlob *)(const void *)(addr + offset);
            if (ntohl(blob->magic) != magic) {
                continue;
            }
            return blob;
        }
    } else if (type == CSSLOT_CODEDIRECTORY
        && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
        && magic == CSMAGIC_CODEDIRECTORY) {
        return blob;
    }
    return NULL;
}
const CS_GenericBlob *
csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
    if ((csblob->csb_flags & CS_VALID) == 0) {
        return NULL;
    }

    return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr,
               csblob->csb_mem_size, type, magic);
}
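/*
 * Illustrative sketch (not part of the original source): fetching a specific
 * sub-blob from an already-validated cs_blob, here the embedded entitlements.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static const CS_GenericBlob *
example_lookup_entitlements_blob(struct cs_blob *csblob)
{
    /* returns NULL if the blob is not CS_VALID or the slot is absent */
    return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
               CSMAGIC_EMBEDDED_ENTITLEMENTS);
}
#endif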
static const uint8_t *
find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
{
    /* there is no zero special slot since that is the first code slot */
    if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
        return NULL;
    }

    return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
}

static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
    uint8_t computed_hash[CS_HASH_MAX_SIZE];
    const CS_GenericBlob *entitlements;
    const CS_CodeDirectory *code_dir;
    const uint8_t *embedded_hash;
    union cs_hash_union context;

    *out_start = NULL;
    *out_length = 0;

    if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
        return EBADEXEC;
    }

    code_dir = csblob->csb_cd;

    if ((csblob->csb_flags & CS_VALID) == 0) {
        return 0;
    }
    entitlements = csblob->csb_entitlements_blob;

    embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

    if (embedded_hash == NULL) {
        if (entitlements) {
            return EBADEXEC;
        }
        return 0;
    } else if (entitlements == NULL) {
        if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
            return EBADEXEC;
        } else {
            return 0;
        }
    }

    csblob->csb_hashtype->cs_init(&context);
    csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
    csblob->csb_hashtype->cs_final(computed_hash, &context);

    if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
        return EBADEXEC;
    }

    *out_start = __DECONST(void *, entitlements);
    *out_length = ntohl(entitlements->length);

    return 0;
}
/*
 * End of routines to navigate code signing data structures in the kernel.
 */

/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Implicit returns:
 *      ubc_info_zone(global)   initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
    vm_size_t i;

    i = (vm_size_t) sizeof(struct ubc_info);

    ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");

    zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:  vp                      Pointer to the vnode
 *
 * Returns:     0                       Success
 *      vnode_size:ENOMEM               Not enough space
 *      vnode_size:???                  Other error from vnode_getattr
 */
int
ubc_info_init(struct vnode *vp)
{
    return ubc_info_init_internal(vp, 0, 0);
}
/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:  vp                      Pointer to the vnode
 *              filesize                The size of the file
 *
 * Returns:     0                       Success
 *      vnode_size:ENOMEM               Not enough space
 *      vnode_size:???                  Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
    return ubc_info_init_internal(vp, 1, filesize);
}
849 * ubc_info_init_internal
851 * Allocate and attach a ubc_info structure to a vnode
853 * Parameters: vp Pointer to the vnode
854 * withfsize{0,1} Zero if the size should be obtained
855 * from the vnode; otherwise, use filesize
856 * filesize The size of the file, if withfsize == 1
859 * vnode_size:ENOMEM Not enough space
860 * vnode_size:??? Other error from vnode_getattr
862 * Notes: We call a blocking zalloc(), and the zone was created as an
863 * expandable and collectable zone, so if no memory is available,
864 * it is possible for zalloc() to block indefinitely. zalloc()
865 * may also panic if the zone of zones is exhausted, since it's
868 * We unconditionally call vnode_pager_setup(), even if this is
869 * a reuse of a ubc_info; in that case, we should probably assert
870 * that it does not already have a pager association, but do not.
872 * Since memory_object_create_named() can only fail from receiving
873 * an invalid pager argument, the explicit check and panic is
874 * merely precautionary.
877 ubc_info_init_internal(vnode_t vp
, int withfsize
, off_t filesize
)
879 struct ubc_info
*uip
;
883 memory_object_control_t control
;
888 * If there is not already a ubc_info attached to the vnode, we
889 * attach one; otherwise, we will reuse the one that's there.
891 if (uip
== UBC_INFO_NULL
) {
892 uip
= (struct ubc_info
*) zalloc(ubc_info_zone
);
893 bzero((char *)uip
, sizeof(struct ubc_info
));
896 uip
->ui_flags
= UI_INITED
;
897 uip
->ui_ucred
= NOCRED
;
899 assert(uip
->ui_flags
!= UI_NONE
);
900 assert(uip
->ui_vnode
== vp
);
902 /* now set this ubc_info in the vnode */
906 * Allocate a pager object for this vnode
908 * XXX The value of the pager parameter is currently ignored.
909 * XXX Presumably, this API changed to avoid the race between
910 * XXX setting the pager and the UI_HASPAGER flag.
912 pager
= (void *)vnode_pager_setup(vp
, uip
->ui_pager
);
916 * Explicitly set the pager into the ubc_info, after setting the
919 SET(uip
->ui_flags
, UI_HASPAGER
);
920 uip
->ui_pager
= pager
;
923 * Note: We can not use VNOP_GETATTR() to get accurate
924 * value of ui_size because this may be an NFS vnode, and
925 * nfs_getattr() can call vinvalbuf(); if this happens,
926 * ubc_info is not set up to deal with that event.
931 * create a vnode - vm_object association
932 * memory_object_create_named() creates a "named" reference on the
933 * memory object we hold this reference as long as the vnode is
934 * "alive." Since memory_object_create_named() took its own reference
935 * on the vnode pager we passed it, we can drop the reference
936 * vnode_pager_setup() returned here.
938 kret
= memory_object_create_named(pager
,
939 (memory_object_size_t
)uip
->ui_size
, &control
);
940 vnode_pager_deallocate(pager
);
941 if (kret
!= KERN_SUCCESS
) {
942 panic("ubc_info_init: memory_object_create_named returned %d", kret
);
946 uip
->ui_control
= control
; /* cache the value of the mo control */
947 SET(uip
->ui_flags
, UI_HASOBJREF
); /* with a named reference */
949 if (withfsize
== 0) {
950 /* initialize the size */
951 error
= vnode_size(vp
, &uip
->ui_size
, vfs_context_current());
956 uip
->ui_size
= filesize
;
958 vp
->v_lflag
|= VNAMED_UBC
; /* vnode has a named ubc reference */
967 * Free a ubc_info structure
969 * Parameters: uip A pointer to the ubc_info to free
973 * Notes: If there is a credential that has subsequently been associated
974 * with the ubc_info via a call to ubc_setcred(), the reference
975 * to the credential is dropped.
977 * It's actually impossible for a ubc_info.ui_control to take the
978 * value MEMORY_OBJECT_CONTROL_NULL.
981 ubc_info_free(struct ubc_info
*uip
)
983 if (IS_VALID_CRED(uip
->ui_ucred
)) {
984 kauth_cred_unref(&uip
->ui_ucred
);
987 if (uip
->ui_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
988 memory_object_control_deallocate(uip
->ui_control
);
991 cluster_release(uip
);
994 zfree(ubc_info_zone
, uip
);
1000 ubc_info_deallocate(struct ubc_info
*uip
)
1006 mach_to_bsd_errno(kern_return_t mach_err
)
1012 case KERN_INVALID_ADDRESS
:
1013 case KERN_INVALID_ARGUMENT
:
1014 case KERN_NOT_IN_SET
:
1015 case KERN_INVALID_NAME
:
1016 case KERN_INVALID_TASK
:
1017 case KERN_INVALID_RIGHT
:
1018 case KERN_INVALID_VALUE
:
1019 case KERN_INVALID_CAPABILITY
:
1020 case KERN_INVALID_HOST
:
1021 case KERN_MEMORY_PRESENT
:
1022 case KERN_INVALID_PROCESSOR_SET
:
1023 case KERN_INVALID_POLICY
:
1024 case KERN_ALREADY_WAITING
:
1025 case KERN_DEFAULT_SET
:
1026 case KERN_EXCEPTION_PROTECTED
:
1027 case KERN_INVALID_LEDGER
:
1028 case KERN_INVALID_MEMORY_CONTROL
:
1029 case KERN_INVALID_SECURITY
:
1030 case KERN_NOT_DEPRESSED
:
1031 case KERN_LOCK_OWNED
:
1032 case KERN_LOCK_OWNED_SELF
:
1035 case KERN_PROTECTION_FAILURE
:
1036 case KERN_NOT_RECEIVER
:
1037 case KERN_NO_ACCESS
:
1038 case KERN_POLICY_STATIC
:
1042 case KERN_RESOURCE_SHORTAGE
:
1043 case KERN_UREFS_OVERFLOW
:
1044 case KERN_INVALID_OBJECT
:
1050 case KERN_MEMORY_FAILURE
:
1051 case KERN_POLICY_LIMIT
:
1052 case KERN_CODESIGN_ERROR
:
1055 case KERN_MEMORY_ERROR
:
1058 case KERN_ALREADY_IN_SET
:
1059 case KERN_NAME_EXISTS
:
1060 case KERN_RIGHT_EXISTS
:
1066 case KERN_TERMINATED
:
1067 case KERN_LOCK_SET_DESTROYED
:
1068 case KERN_LOCK_UNSTABLE
:
1069 case KERN_SEMAPHORE_DESTROYED
:
1072 case KERN_RPC_SERVER_TERMINATED
:
1075 case KERN_NOT_SUPPORTED
:
1078 case KERN_NODE_DOWN
:
1081 case KERN_NOT_WAITING
:
1084 case KERN_OPERATION_TIMED_OUT
:
 * Tell the VM that the size of the file represented by the vnode has
1098 * Parameters: vp The vp whose backing file size is
1100 * nsize The new size of the backing file
1103 * Returns: EINVAL for new size < 0
1104 * ENOENT if no UBC info exists
1105 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1106 * Other errors (mapped to errno_t) returned by VM functions
1108 * Notes: This function will indicate success if the new size is the
1109 * same or larger than the old size (in this case, the
1110 * remainder of the file will require modification or use of
1111 * an existing upl to access successfully).
1113 * This function will fail if the new file size is smaller,
1114 * and the memory region being invalidated was unable to
1115 * actually be invalidated and/or the last page could not be
1116 * flushed, if the new size is not aligned to a page
1117 * boundary. This is usually indicative of an I/O error.
1120 ubc_setsize_ex(struct vnode
*vp
, off_t nsize
, ubc_setsize_opts_t opts
)
1122 off_t osize
; /* ui_size before change */
1123 off_t lastpg
, olastpgend
, lastoff
;
1124 struct ubc_info
*uip
;
1125 memory_object_control_t control
;
1126 kern_return_t kret
= KERN_SUCCESS
;
1128 if (nsize
< (off_t
)0) {
1132 if (!UBCINFOEXISTS(vp
)) {
1136 uip
= vp
->v_ubcinfo
;
1137 osize
= uip
->ui_size
;
1139 if (ISSET(opts
, UBC_SETSIZE_NO_FS_REENTRY
) && nsize
< osize
) {
1144 * Update the size before flushing the VM
1146 uip
->ui_size
= nsize
;
1148 if (nsize
>= osize
) { /* Nothing more to do */
1149 if (nsize
> osize
) {
1150 lock_vnode_and_post(vp
, NOTE_EXTEND
);
1157 * When the file shrinks, invalidate the pages beyond the
1158 * new size. Also get rid of garbage beyond nsize on the
1159 * last page. The ui_size already has the nsize, so any
1160 * subsequent page-in will zero-fill the tail properly
1162 lastpg
= trunc_page_64(nsize
);
1163 olastpgend
= round_page_64(osize
);
1164 control
= uip
->ui_control
;
1166 lastoff
= (nsize
& PAGE_MASK_64
);
1170 upl_page_info_t
*pl
;
1173 * new EOF ends up in the middle of a page
1174 * zero the tail of this page if it's currently
1175 * present in the cache
1177 kret
= ubc_create_upl_kernel(vp
, lastpg
, PAGE_SIZE
, &upl
, &pl
, UPL_SET_LITE
| UPL_WILL_MODIFY
, VM_KERN_MEMORY_FILE
);
1179 if (kret
!= KERN_SUCCESS
) {
1180 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret
);
1183 if (upl_valid_page(pl
, 0)) {
1184 cluster_zero(upl
, (uint32_t)lastoff
, PAGE_SIZE
- (uint32_t)lastoff
, NULL
);
1187 ubc_upl_abort_range(upl
, 0, PAGE_SIZE
, UPL_ABORT_FREE_ON_EMPTY
);
1189 lastpg
+= PAGE_SIZE_64
;
1191 if (olastpgend
> lastpg
) {
1195 flags
= MEMORY_OBJECT_DATA_FLUSH_ALL
;
1197 flags
= MEMORY_OBJECT_DATA_FLUSH
;
1200 * invalidate the pages beyond the new EOF page
1203 kret
= memory_object_lock_request(control
,
1204 (memory_object_offset_t
)lastpg
,
1205 (memory_object_size_t
)(olastpgend
- lastpg
), NULL
, NULL
,
1206 MEMORY_OBJECT_RETURN_NONE
, flags
, VM_PROT_NO_CHANGE
);
1207 if (kret
!= KERN_SUCCESS
) {
1208 printf("ubc_setsize: invalidate failed (error = %d)\n", kret
);
1211 return mach_to_bsd_errno(kret
);
// Returns true for success
int
ubc_setsize(vnode_t vp, off_t nsize)
{
    return ubc_setsize_ex(vp, nsize, 0) == 0;
}
/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:  vp                      The vnode whose size is of interest
 *
 * Returns:     0                       There is no ubc_info associated with
 *                                      this vnode, or the size is zero
 *              !0                      The size of the file
 *
 * Notes:       Using this routine, it is not possible for a caller to
 *              successfully distinguish between a vnode associated with a zero
 *              length file and a vnode with no associated ubc_info.  The
 *              caller therefore needs to not care, or needs to ensure that
 *              they have previously successfully called ubc_info_init() or
 *              ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
    /* people depend on the side effect of this working this way
     * as they call this for directories
     */
    if (!UBCINFOEXISTS(vp)) {
        return (off_t)0;
    }
    return vp->v_ubcinfo->ui_size;
}
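/*
 * Illustrative sketch (not part of the original source): because ubc_getsize()
 * returns 0 both for an empty file and for a vnode without a ubc_info, callers
 * that need to tell the two cases apart check UBCINFOEXISTS() first.  The
 * helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_getsize_checked(vnode_t vp, off_t *sizep)
{
    if (!UBCINFOEXISTS(vp)) {
        return ENOENT;          /* no ubc_info at all */
    }
    *sizep = ubc_getsize(vp);   /* may legitimately be 0 */
    return 0;
}
#endif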
1255 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1258 * Parameters: mp The mount point
1260 * Returns: 0 Success
1262 * Notes: There is no failure indication for this function.
1264 * This function is used in the unmount path; since it may block
1265 * I/O indefinitely, it should not be used in the forced unmount
1266 * path, since a device unavailability could also block that
1269 * Because there is no device ejection interlock on USB, FireWire,
1270 * or similar devices, it's possible that an ejection that begins
1271 * subsequent to the vnode_iterate() completing, either on one of
1272 * those devices, or a network mount for which the server quits
1273 * responding, etc., may cause the caller to block indefinitely.
1275 __private_extern__
int
1276 ubc_umount(struct mount
*mp
)
1278 vnode_iterate(mp
, 0, ubc_umcallback
, 0);
1286 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1287 * and vnode_iterate() for details of implementation.
1290 ubc_umcallback(vnode_t vp
, __unused
void * args
)
1292 if (UBCINFOEXISTS(vp
)) {
1293 (void) ubc_msync(vp
, (off_t
)0, ubc_getsize(vp
), NULL
, UBC_PUSHALL
);
1295 return VNODE_RETURNED
;
1302 * Get the credentials currently active for the ubc_info associated with the
1305 * Parameters: vp The vnode whose ubc_info credentials
1306 * are to be retrieved
1308 * Returns: !NOCRED The credentials
1309 * NOCRED If there is no ubc_info for the vnode,
1310 * or if there is one, but it has not had
1311 * any credentials associated with it via
1312 * a call to ubc_setcred()
1315 ubc_getcred(struct vnode
*vp
)
1317 if (UBCINFOEXISTS(vp
)) {
1318 return vp
->v_ubcinfo
->ui_ucred
;
1328 * If they are not already set, set the credentials of the ubc_info structure
1329 * associated with the vnode to those of the supplied thread; otherwise leave
1332 * Parameters: vp The vnode whose ubc_info creds are to
1334 * p The process whose credentials are to
1335 * be used, if not running on an assumed
1337 * thread The thread whose credentials are to
1340 * Returns: 1 This vnode has no associated ubc_info
1343 * Notes: This function takes a proc parameter to account for bootstrap
1344 * issues where a task or thread may call this routine, either
1345 * before credentials have been initialized by bsd_init(), or if
 * there is no BSD info associated with a mach thread yet.  This
1347 * is known to happen in both the initial swap and memory mapping
1350 * This function is generally used only in the following cases:
1352 * o a memory mapped file via the mmap() system call
1353 * o a swap store backing file
1354 * o subsequent to a successful write via vn_write()
1356 * The information is then used by the NFS client in order to
1357 * cons up a wire message in either the page-in or page-out path.
1359 * There are two potential problems with the use of this API:
 * o Because the write path only sets it on a successful
1362 * write, there is a race window between setting the
1363 * credential and its use to evict the pages to the
1364 * remote file server
1366 * o Because a page-in may occur prior to a write, the
1367 * credential may not be set at this time, if the page-in
1368 * is not the result of a mapping established via mmap().
1370 * In both these cases, this will be triggered from the paging
1371 * path, which will instead use the credential of the current
1372 * process, which in this case is either the dynamic_pager or
1373 * the kernel task, both of which utilize "root" credentials.
1375 * This may potentially permit operations to occur which should
1376 * be denied, or it may cause to be denied operations which
1377 * should be permitted, depending on the configuration of the NFS
1381 ubc_setthreadcred(struct vnode
*vp
, proc_t p
, thread_t thread
)
1383 struct ubc_info
*uip
;
1385 struct uthread
*uthread
= get_bsdthread_info(thread
);
1387 if (!UBCINFOEXISTS(vp
)) {
1393 uip
= vp
->v_ubcinfo
;
1394 credp
= uip
->ui_ucred
;
1396 if (!IS_VALID_CRED(credp
)) {
1397 /* use per-thread cred, if assumed identity, else proc cred */
1398 if (uthread
== NULL
|| (uthread
->uu_flag
& UT_SETUID
) == 0) {
1399 uip
->ui_ucred
= kauth_cred_proc_ref(p
);
1401 uip
->ui_ucred
= uthread
->uu_ucred
;
1402 kauth_cred_ref(uip
->ui_ucred
);
1414 * If they are not already set, set the credentials of the ubc_info structure
1415 * associated with the vnode to those of the process; otherwise leave them
1418 * Parameters: vp The vnode whose ubc_info creds are to
1420 * p The process whose credentials are to
1423 * Returns: 0 This vnode has no associated ubc_info
1426 * Notes: The return values for this function are inverted from nearly
1427 * all other uses in the kernel.
1429 * See also ubc_setthreadcred(), above.
1431 * This function is considered deprecated, and generally should
1432 * not be used, as it is incompatible with per-thread credentials;
1433 * it exists for legacy KPI reasons.
1435 * DEPRECATION: ubc_setcred() is being deprecated. Please use
1436 * ubc_setthreadcred() instead.
1439 ubc_setcred(struct vnode
*vp
, proc_t p
)
1441 struct ubc_info
*uip
;
1444 /* If there is no ubc_info, deny the operation */
1445 if (!UBCINFOEXISTS(vp
)) {
1450 * Check to see if there is already a credential reference in the
1451 * ubc_info; if there is not, take one on the supplied credential.
1454 uip
= vp
->v_ubcinfo
;
1455 credp
= uip
->ui_ucred
;
1456 if (!IS_VALID_CRED(credp
)) {
1457 uip
->ui_ucred
= kauth_cred_proc_ref(p
);
1467 * Get the pager associated with the ubc_info associated with the vnode.
1469 * Parameters: vp The vnode to obtain the pager from
1471 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1472 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1474 * Notes: For each vnode that has a ubc_info associated with it, that
1475 * ubc_info SHALL have a pager associated with it, so in the
1476 * normal case, it's impossible to return VNODE_PAGER_NULL for
1477 * a vnode with an associated ubc_info.
1479 __private_extern__ memory_object_t
1480 ubc_getpager(struct vnode
*vp
)
1482 if (UBCINFOEXISTS(vp
)) {
1483 return vp
->v_ubcinfo
->ui_pager
;
1493 * Get the memory object control associated with the ubc_info associated with
1496 * Parameters: vp The vnode to obtain the memory object
1500 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1501 * MEMORY_OBJECT_CONTROL_NULL
1503 * Notes: Historically, if the flags were not "do not reactivate", this
1504 * function would look up the memory object using the pager if
1505 * it did not exist (this could be the case if the vnode had
1506 * been previously reactivated). The flags would also permit a
1507 * hold to be requested, which would have created an object
1508 * reference, if one had not already existed. This usage is
1509 * deprecated, as it would permit a race between finding and
1510 * taking the reference vs. a single reference being dropped in
1513 memory_object_control_t
1514 ubc_getobject(struct vnode
*vp
, __unused
int flags
)
1516 if (UBCINFOEXISTS(vp
)) {
1517 return vp
->v_ubcinfo
->ui_control
;
1520 return MEMORY_OBJECT_CONTROL_NULL
;
1526 * Convert a given block number to a memory backing object (file) offset for a
1529 * Parameters: vp The vnode in which the block is located
1530 * blkno The block number to convert
1532 * Returns: !-1 The offset into the backing object
1533 * -1 There is no ubc_info associated with
1535 * -1 An error occurred in the underlying VFS
1536 * while translating the block to an
1537 * offset; the most likely cause is that
1538 * the caller specified a block past the
1539 * end of the file, but this could also be
1540 * any other error from VNOP_BLKTOOFF().
1542 * Note: Representing the error in band loses some information, but does
1543 * not occlude a valid offset, since an off_t of -1 is normally
1544 * used to represent EOF. If we had a more reliable constant in
1545 * our header files for it (i.e. explicitly cast to an off_t), we
1546 * would use it here instead.
1549 ubc_blktooff(vnode_t vp
, daddr64_t blkno
)
1551 off_t file_offset
= -1;
1554 if (UBCINFOEXISTS(vp
)) {
1555 error
= VNOP_BLKTOOFF(vp
, blkno
, &file_offset
);
1568 * Convert a given offset in a memory backing object into a block number for a
1571 * Parameters: vp The vnode in which the offset is
1573 * offset The offset into the backing object
1575 * Returns: !-1 The returned block number
1576 * -1 There is no ubc_info associated with
1578 * -1 An error occurred in the underlying VFS
1579 * while translating the block to an
1580 * offset; the most likely cause is that
1581 * the caller specified a block past the
1582 * end of the file, but this could also be
1583 * any other error from VNOP_OFFTOBLK().
1585 * Note: Representing the error in band loses some information, but does
1586 * not occlude a valid block number, since block numbers exceed
1587 * the valid range for offsets, due to their relative sizes. If
1588 * we had a more reliable constant than -1 in our header files
1589 * for it (i.e. explicitly cast to an daddr64_t), we would use it
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
    daddr64_t blkno = -1;
    int error = 0;

    if (UBCINFOEXISTS(vp)) {
        error = VNOP_OFFTOBLK(vp, offset, &blkno);
        if (error) {
            blkno = -1;
        }
    }

    return blkno;
}
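/*
 * Illustrative sketch (not part of the original source): the error is reported
 * in band as -1, so callers test the returned block number before using it.
 * The helper name and the ERANGE choice are hypothetical.
 */
#if 0 /* example only */
static int
example_offset_to_block(vnode_t vp, off_t offset, daddr64_t *blknop)
{
    daddr64_t blkno = ubc_offtoblk(vp, offset);

    if (blkno == (daddr64_t)-1) {
        return ERANGE;          /* no ubc_info, or VNOP_OFFTOBLK() failed */
    }
    *blknop = blkno;
    return 0;
}
#endif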
1610 * ubc_pages_resident
1612 * Determine whether or not a given vnode has pages resident via the memory
1613 * object control associated with the ubc_info associated with the vnode
1615 * Parameters: vp The vnode we want to know about
1621 ubc_pages_resident(vnode_t vp
)
1624 boolean_t has_pages_resident
;
1626 if (!UBCINFOEXISTS(vp
)) {
1631 * The following call may fail if an invalid ui_control is specified,
1632 * or if there is no VM object associated with the control object. In
1633 * either case, reacting to it as if there were no pages resident will
1634 * result in correct behavior.
1636 kret
= memory_object_pages_resident(vp
->v_ubcinfo
->ui_control
, &has_pages_resident
);
1638 if (kret
!= KERN_SUCCESS
) {
1642 if (has_pages_resident
== TRUE
) {
1652 * Clean and/or invalidate a range in the memory object that backs this vnode
1654 * Parameters: vp The vnode whose associated ubc_info's
1655 * associated memory object is to have a
1656 * range invalidated within it
1657 * beg_off The start of the range, as an offset
1658 * end_off The end of the range, as an offset
1659 * resid_off The address of an off_t supplied by the
1660 * caller; may be set to NULL to ignore
1661 * flags See ubc_msync_internal()
1663 * Returns: 0 Success
1664 * !0 Failure; an errno is returned
1667 * *resid_off, modified If non-NULL, the contents are ALWAYS
1668 * modified; they are initialized to the
1669 * beg_off, and in case of an I/O error,
1670 * the difference between beg_off and the
1671 * current value will reflect what was
1672 * able to be written before the error
1673 * occurred. If no error is returned, the
1674 * value of the resid_off is undefined; do
1675 * NOT use it in place of end_off if you
1676 * intend to increment from the end of the
1677 * last call and call iteratively.
1679 * Notes: see ubc_msync_internal() for more detailed information.
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
    int retval;
    int io_errno = 0;

    if (resid_off) {
        *resid_off = beg_off;
    }

    retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

    if (retval == 0 && io_errno == 0) {
        return EINVAL;
    }
    return io_errno;
}
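/*
 * Illustrative sketch (not part of the original source): pushing every dirty
 * page of a file synchronously, the way a flush path might drive ubc_msync().
 * Compare ubc_umcallback(), which uses UBC_PUSHALL for unmount.  The helper
 * name is hypothetical.
 */
#if 0 /* example only */
static int
example_flush_whole_file(vnode_t vp)
{
    off_t resid = 0;

    /* write back dirty pages for the entire file and wait for the I/O */
    return ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
               UBC_PUSHDIRTY | UBC_SYNC);
}
#endif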
1702 * ubc_msync_internal
1704 * Clean and/or invalidate a range in the memory object that backs this vnode
1706 * Parameters: vp The vnode whose associated ubc_info's
1707 * associated memory object is to have a
1708 * range invalidated within it
1709 * beg_off The start of the range, as an offset
1710 * end_off The end of the range, as an offset
1711 * resid_off The address of an off_t supplied by the
1712 * caller; may be set to NULL to ignore
1713 * flags MUST contain at least one of the flags
1714 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1715 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1716 * UBC_SYNC may also be specified to cause
1717 * this function to block until the
1718 * operation is complete. The behavior
1719 * of UBC_SYNC is otherwise undefined.
1720 * io_errno The address of an int to contain the
1721 * errno from a failed I/O operation, if
1722 * one occurs; may be set to NULL to
1725 * Returns: 1 Success
1729 * *resid_off, modified The contents of this offset MAY be
1730 * modified; in case of an I/O error, the
1731 * difference between beg_off and the
1732 * current value will reflect what was
1733 * able to be written before the error
1735 * *io_errno, modified The contents of this offset are set to
1736 * an errno, if an error occurs; if the
1737 * caller supplies an io_errno parameter,
1738 * they should be careful to initialize it
1739 * to 0 before calling this function to
1740 * enable them to distinguish an error
1741 * with a valid *resid_off from an invalid
1742 * one, and to avoid potentially falsely
1743 * reporting an error, depending on use.
1745 * Notes: If there is no ubc_info associated with the vnode supplied,
1746 * this function immediately returns success.
1748 * If the value of end_off is less than or equal to beg_off, this
1749 * function immediately returns success; that is, end_off is NOT
1752 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1753 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1754 * attempt to block on in-progress I/O by calling this function
1755 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1756 * in order to block pending on the I/O already in progress.
1758 * The start offset is truncated to the page boundary and the
1759 * size is adjusted to include the last page in the range; that
1760 * is, end_off on exactly a page boundary will not change if it
1761 * is rounded, and the range of bytes written will be from the
 * truncated beg_off to the rounded (end_off - 1).
1765 ubc_msync_internal(vnode_t vp
, off_t beg_off
, off_t end_off
, off_t
*resid_off
, int flags
, int *io_errno
)
1767 memory_object_size_t tsize
;
1769 int request_flags
= 0;
1770 int flush_flags
= MEMORY_OBJECT_RETURN_NONE
;
1772 if (!UBCINFOEXISTS(vp
)) {
1775 if ((flags
& (UBC_INVALIDATE
| UBC_PUSHDIRTY
| UBC_PUSHALL
)) == 0) {
1778 if (end_off
<= beg_off
) {
1782 if (flags
& UBC_INVALIDATE
) {
1784 * discard the resident pages
1786 request_flags
= (MEMORY_OBJECT_DATA_FLUSH
| MEMORY_OBJECT_DATA_NO_CHANGE
);
1789 if (flags
& UBC_SYNC
) {
1791 * wait for all the I/O to complete before returning
1793 request_flags
|= MEMORY_OBJECT_IO_SYNC
;
1796 if (flags
& UBC_PUSHDIRTY
) {
1798 * we only return the dirty pages in the range
1800 flush_flags
= MEMORY_OBJECT_RETURN_DIRTY
;
1803 if (flags
& UBC_PUSHALL
) {
1805 * then return all the interesting pages in the range (both
1806 * dirty and precious) to the pager
1808 flush_flags
= MEMORY_OBJECT_RETURN_ALL
;
1811 beg_off
= trunc_page_64(beg_off
);
1812 end_off
= round_page_64(end_off
);
1813 tsize
= (memory_object_size_t
)end_off
- beg_off
;
1815 /* flush and/or invalidate pages in the range requested */
1816 kret
= memory_object_lock_request(vp
->v_ubcinfo
->ui_control
,
1818 (memory_object_offset_t
*)resid_off
,
1819 io_errno
, flush_flags
, request_flags
,
1822 return (kret
== KERN_SUCCESS
) ? 1 : 0;
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1830 * to it for the ubc system, if there isn't one already, so it will not be
1831 * recycled while it's in use, and set flags on the ubc_info to indicate that
1834 * Parameters: vp The vnode to map
1835 * flags The mapping flags for the vnode; this
1836 * will be a combination of one or more of
1837 * PROT_READ, PROT_WRITE, and PROT_EXEC
1839 * Returns: 0 Success
1840 * EPERM Permission was denied
1842 * Notes: An I/O reference on the vnode must already be held on entry
1844 * If there is no ubc_info associated with the vnode, this function
1845 * will return success.
1847 * If a permission error occurs, this function will return
1848 * failure; all other failures will cause this function to return
1851 * IMPORTANT: This is an internal use function, and its symbols
1852 * are not exported, hence its error checking is not very robust.
1853 * It is primarily used by:
1855 * o mmap(), when mapping a file
1856 * o When mapping a shared file (a shared library in the
1857 * shared segment region)
1858 * o When loading a program image during the exec process
1860 * ...all of these uses ignore the return code, and any fault that
1861 * results later because of a failure is handled in the fix-up path
1862 * of the fault handler. The interface exists primarily as a
1865 * Given that third party implementation of the type of interfaces
1866 * that would use this function, such as alternative executable
1867 * formats, etc., are unsupported, this function is not exported
1870 * The extra reference is held until the VM system unmaps the
1871 * vnode from its own context to maintain a vnode reference in
1872 * cases like open()/mmap()/close(), which leave the backing
1873 * object referenced by a mapped memory region in a process
1876 __private_extern__
int
1877 ubc_map(vnode_t vp
, int flags
)
1879 struct ubc_info
*uip
;
1882 int need_wakeup
= 0;
1884 if (UBCINFOEXISTS(vp
)) {
1886 uip
= vp
->v_ubcinfo
;
1888 while (ISSET(uip
->ui_flags
, UI_MAPBUSY
)) {
1889 SET(uip
->ui_flags
, UI_MAPWAITING
);
1890 (void) msleep(&uip
->ui_flags
, &vp
->v_lock
,
1891 PRIBIO
, "ubc_map", NULL
);
1893 SET(uip
->ui_flags
, UI_MAPBUSY
);
1896 error
= VNOP_MMAP(vp
, flags
, vfs_context_current());
1899 * rdar://problem/22587101 required that we stop propagating
1900 * EPERM up the stack. Otherwise, we would have to funnel up
1901 * the error at all the call sites for memory_object_map().
1902 * The risk is in having to undo the map/object/entry state at
1903 * all these call sites. It would also affect more than just mmap()
1906 * if (error != EPERM)
1912 vnode_lock_spin(vp
);
1915 if (!ISSET(uip
->ui_flags
, UI_ISMAPPED
)) {
1918 SET(uip
->ui_flags
, (UI_WASMAPPED
| UI_ISMAPPED
));
1919 if (flags
& PROT_WRITE
) {
1920 SET(uip
->ui_flags
, UI_MAPPEDWRITE
);
1923 CLR(uip
->ui_flags
, UI_MAPBUSY
);
1925 if (ISSET(uip
->ui_flags
, UI_MAPWAITING
)) {
1926 CLR(uip
->ui_flags
, UI_MAPWAITING
);
1932 wakeup(&uip
->ui_flags
);
1937 * Make sure we get a ref as we can't unwind from here
1939 if (vnode_ref_ext(vp
, 0, VNODE_REF_FORCE
)) {
1940 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__
);
1943 * Vnodes that are on "unreliable" media (like disk
1944 * images, network filesystems, 3rd-party filesystems,
1945 * and possibly external devices) could see their
1946 * contents be changed via the backing store without
1947 * triggering copy-on-write, so we can't fully rely
1948 * on copy-on-write and might have to resort to
1949 * copy-on-read to protect "privileged" processes and
1950 * prevent privilege escalation.
1952 * The root filesystem is considered "reliable" because
1953 * there's not much point in trying to protect
1954 * ourselves from such a vulnerability and the extra
1955 * cost of copy-on-read (CPU time and memory pressure)
1956 * could result in some serious regressions.
1958 if (vp
->v_mount
!= NULL
&&
1959 ((vp
->v_mount
->mnt_flag
& MNT_ROOTFS
) ||
1960 vnode_on_reliable_media(vp
))) {
1962 * This vnode is deemed "reliable" so mark
1963 * its VM object as "trusted".
1965 memory_object_mark_trusted(uip
->ui_control
);
1967 // printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
1978 * Destroy the named memory object associated with the ubc_info control object
1979 * associated with the designated vnode, if there is a ubc_info associated
1980 * with the vnode, and a control object is associated with it
1982 * Parameters: vp The designated vnode
1986 * Notes: This function is called on vnode termination for all vnodes,
1987 * and must therefore not assume that there is a ubc_info that is
1988 * associated with the vnode, nor that there is a control object
1989 * associated with the ubc_info.
1991 * If all the conditions necessary are present, this function
 * calls memory_object_destroy(), which will in turn end up
1993 * calling ubc_unmap() to release any vnode references that were
1994 * established via ubc_map().
1996 * IMPORTANT: This is an internal use function that is used
1997 * exclusively by the internal use function vclean().
1999 __private_extern__
void
2000 ubc_destroy_named(vnode_t vp
)
2002 memory_object_control_t control
;
2003 struct ubc_info
*uip
;
2006 if (UBCINFOEXISTS(vp
)) {
2007 uip
= vp
->v_ubcinfo
;
2009 /* Terminate the memory object */
2010 control
= ubc_getobject(vp
, UBC_HOLDOBJECT
);
2011 if (control
!= MEMORY_OBJECT_CONTROL_NULL
) {
2012 kret
= memory_object_destroy(control
, 0);
2013 if (kret
!= KERN_SUCCESS
) {
2014 panic("ubc_destroy_named: memory_object_destroy failed");
2024 * Determine whether or not a vnode is currently in use by ubc at a level in
2025 * excess of the requested busycount
2027 * Parameters: vp The vnode to check
2028 * busycount The threshold busy count, used to bias
2029 * the count usually already held by the
2030 * caller to avoid races
2032 * Returns: 1 The vnode is in use over the threshold
2033 * 0 The vnode is not in use over the
2036 * Notes: Because the vnode is only held locked while actually asking
2037 * the use count, this function only represents a snapshot of the
2038 * current state of the vnode. If more accurate information is
2039 * required, an additional busycount should be held by the caller
2040 * and a non-zero busycount used.
2042 * If there is no ubc_info associated with the vnode, this
2043 * function will report that the vnode is not in use by ubc.
2046 ubc_isinuse(struct vnode
*vp
, int busycount
)
2048 if (!UBCINFOEXISTS(vp
)) {
2051 return ubc_isinuse_locked(vp
, busycount
, 0);
2056 * ubc_isinuse_locked
2058 * Determine whether or not a vnode is currently in use by ubc at a level in
2059 * excess of the requested busycount
2061 * Parameters: vp The vnode to check
2062 * busycount The threshold busy count, used to bias
2063 * the count usually already held by the
2064 * caller to avoid races
2065 * locked True if the vnode is already locked by
2068 * Returns: 1 The vnode is in use over the threshold
2069 * 0 The vnode is not in use over the
2072 * Notes: If the vnode is not locked on entry, it is locked while
2073 * actually asking the use count. If this is the case, this
2074 * function only represents a snapshot of the current state of
2075 * the vnode. If more accurate information is required, the
2076 * vnode lock should be held by the caller, otherwise an
2077 * additional busycount should be held by the caller and a
2078 * non-zero busycount used.
2080 * If there is no ubc_info associated with the vnode, this
2081 * function will report that the vnode is not in use by ubc.
2084 ubc_isinuse_locked(struct vnode
*vp
, int busycount
, int locked
)
2090 vnode_lock_spin(vp
);
2093 if ((vp
->v_usecount
- vp
->v_kusecount
) > busycount
) {
2107 * Reverse the effects of a ubc_map() call for a given vnode
2109 * Parameters: vp vnode to unmap from ubc
2113 * Notes: This is an internal use function used by vnode_pager_unmap().
2114 * It will attempt to obtain a reference on the supplied vnode,
2115 * and if it can do so, and there is an associated ubc_info, and
2116 * the flags indicate that it was mapped via ubc_map(), then the
2117 * flag is cleared, the mapping removed, and the reference taken
2118 * by ubc_map() is released.
2120 * IMPORTANT: This MUST only be called by the VM
2121 * to prevent race conditions.
2123 __private_extern__
void
2124 ubc_unmap(struct vnode
*vp
)
2126 struct ubc_info
*uip
;
2128 int need_wakeup
= 0;
2130 if (vnode_getwithref(vp
)) {
2134 if (UBCINFOEXISTS(vp
)) {
2135 bool want_fsevent
= false;
2138 uip
= vp
->v_ubcinfo
;
2140 while (ISSET(uip
->ui_flags
, UI_MAPBUSY
)) {
2141 SET(uip
->ui_flags
, UI_MAPWAITING
);
2142 (void) msleep(&uip
->ui_flags
, &vp
->v_lock
,
2143 PRIBIO
, "ubc_unmap", NULL
);
2145 SET(uip
->ui_flags
, UI_MAPBUSY
);
2147 if (ISSET(uip
->ui_flags
, UI_ISMAPPED
)) {
2148 if (ISSET(uip
->ui_flags
, UI_MAPPEDWRITE
)) {
2149 want_fsevent
= true;
2155 * We want to clear the mapped flags after we've called
2156 * VNOP_MNOMAP to avoid certain races and allow
2157 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2163 vfs_context_t ctx
= vfs_context_current();
2165 (void)VNOP_MNOMAP(vp
, ctx
);
2169 * Why do we want an fsevent here? Normally the
2170 * content modified fsevent is posted when a file is
2171 * closed and only if it's written to via conventional
2172 * means. It's perfectly legal to close a file and
2173 * keep your mappings and we don't currently track
2174 * whether it was written to via a mapping.
2175 * Therefore, we need to post an fsevent here if the
2176 * file was mapped writable. This may result in false
2177 * events, i.e. we post a notification when nothing
2178 * has really changed.
2180 if (want_fsevent
&& need_fsevent(FSE_CONTENT_MODIFIED
, vp
)) {
2181 add_fsevent(FSE_CONTENT_MODIFIED
, ctx
,
2190 vnode_lock_spin(vp
);
2193 CLR(uip
->ui_flags
, UI_ISMAPPED
| UI_MAPPEDWRITE
);
2196 CLR(uip
->ui_flags
, UI_MAPBUSY
);
2198 if (ISSET(uip
->ui_flags
, UI_MAPWAITING
)) {
2199 CLR(uip
->ui_flags
, UI_MAPWAITING
);
2205 wakeup(&uip
->ui_flags
);
2209 * the drop of the vnode ref will cleanup
2218 * Manipulate individual page state for a vnode with an associated ubc_info
2219 * with an associated memory object control.
2221 * Parameters: vp The vnode backing the page
2222 * f_offset A file offset interior to the page
2223 * ops The operations to perform, as a bitmap
2224 * (see below for more information)
2225 * phys_entryp The address of a ppnum_t; may be NULL
2227 * flagsp A pointer to an int to contain flags;
2228 * may be NULL to ignore
2230 * Returns: KERN_SUCCESS Success
2231 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2233 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2234 * not physically contiguous
2235 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2236 * physically contiguous
2237 * KERN_FAILURE If the page cannot be looked up
2240 * *phys_entryp (modified) If phys_entryp is non-NULL and
2242 * *flagsp (modified) If flagsp is non-NULL and there was
2243 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2245 * Notes: For object boundaries, it is considerably more efficient to
2246 * ensure that f_offset is in fact on a page boundary, as this
2247 * will avoid internal use of the hash table to identify the
2248 * page, and would therefore skip a number of early optimizations.
2249 * Since this is a page operation anyway, the caller should try
2250 * to pass only a page aligned offset because of this.
2252 * *flagsp may be modified even if this function fails. If it is
2253 * modified, it will contain the condition of the page before the
2254 * requested operation was attempted; these will only include the
2255 * bitmap flags, and not the PL_POP_PHYSICAL, UPL_POP_DUMP,
2256 * UPL_POP_SET, or UPL_POP_CLR bits.
2258 * The flags field may contain a specific operation, such as
2259 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2261 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2262 * *phys_entryp and successful, set
2264 * o UPL_POP_DUMP Dump the specified page
2266 * Otherwise, it is treated as a bitmap of one or more page
2267 * operations to perform on the final memory object; allowable
2270 * o UPL_POP_DIRTY The page is dirty
2271 * o UPL_POP_PAGEOUT The page is paged out
2272 * o UPL_POP_PRECIOUS The page is precious
2273 * o UPL_POP_ABSENT The page is absent
2274 * o UPL_POP_BUSY The page is busy
2276 * If the page status is only being queried and not modified, then
 * no other bits should be specified.  However, if it is being
2278 * modified, exactly ONE of the following bits should be set:
2280 * o UPL_POP_SET Set the current bitmap bits
2281 * o UPL_POP_CLR Clear the current bitmap bits
 * Thus to effect a combination of setting and clearing, it may be
2284 * necessary to call this function twice. If this is done, the
2285 * set should be used before the clear, since clearing may trigger
2286 * a wakeup on the destination page, and if the page is backed by
2287 * an encrypted swap file, setting will trigger the decryption
2288 * needed before the wakeup occurs.
kern_return_t
ubc_page_op(
	struct vnode    *vp,
	off_t           f_offset,
	int             ops,
	ppnum_t         *phys_entryp,
	int             *flagsp)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return memory_object_page_op(control,
	           (memory_object_offset_t)f_offset,
	           ops, phys_entryp, flagsp);
}
/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset_beg		A file offset interior to the start page
 *		f_offset_end		A file offset interior to the end page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		range			The address of an int; may be NULL to
 *					ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *
 *		o UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *
 *		o UPL_ROP_DUMP		Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode    *vp,
	off_t           f_offset_beg,
	off_t           f_offset_end,
	int             ops,
	int             *range)
{
	memory_object_control_t control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return memory_object_range_op(control,
	           (memory_object_offset_t)f_offset_beg,
	           (memory_object_offset_t)f_offset_end,
	           ops,
	           range);
}
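
/*
 * Illustrative sketch (not part of this file): using ubc_range_op() with
 * UPL_ROP_PRESENT to measure how much of a page-aligned range is resident.
 * "vp" is assumed to be a referenced regular-file vnode; the 16-page span is
 * arbitrary for the example.
 *
 *	int resident_bytes = 0;
 *	kern_return_t kr;
 *
 *	kr = ubc_range_op(vp, 0, (off_t)16 * PAGE_SIZE,
 *	    UPL_ROP_PRESENT, &resident_bytes);
 *	if (kr == KERN_SUCCESS) {
 *		// resident_bytes describes only the first matching (resident)
 *		// region starting at offset 0, not any later regions.
 *	}
 */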
/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
 *
 * Parameters:	vp			The vnode from which to create the upl
 *		f_offset		The start offset into the backing store
 *					represented by the vnode
 *		bufsize			The size of the upl to create
 *		uplp			Pointer to the upl_t to receive the
 *					created upl; MUST NOT be NULL
 *		plp			Pointer to receive the internal page
 *					list for the created upl; MAY be NULL
 *					to ignore
 *
 * Returns:	KERN_SUCCESS		The requested upl has been created
 *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
 *					multiple of the page size
 *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
 *					the vnode, or there is no memory object
 *					control associated with the ubc_info
 *	memory_object_upl_request:KERN_INVALID_VALUE
 *					The supplied upl_flags argument is
 *					invalid
 *
 * Implicit Returns:
 *		*plp (modified)		If non-NULL, the value of *plp will be
 *					modified to point to the internal page
 *					list; this modification may occur even
 *					if this function is unsuccessful, in
 *					which case the contents may be invalid
 *
 * Note:	If successful, the returned *uplp MUST subsequently be freed
 *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
 *		ubc_upl_abort(), or ubc_upl_abort_range().
 */
kern_return_t
ubc_create_upl_external(
	struct vnode    *vp,
	off_t           f_offset,
	int             bufsize,
	upl_t           *uplp,
	upl_page_info_t **plp,
	int             uplflags)
{
	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
}
kern_return_t
ubc_create_upl_kernel(
	struct vnode    *vp,
	off_t           f_offset,
	int             bufsize,
	upl_t           *uplp,
	upl_page_info_t **plp,
	int             uplflags,
	vm_tag_t        tag)
{
	memory_object_control_t control;
	kern_return_t           kr;

	if (bufsize & 0xfff) {
		return KERN_INVALID_ARGUMENT;
	}

	if (bufsize > MAX_UPL_SIZE_BYTES) {
		return KERN_INVALID_ARGUMENT;
	}

	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;
		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY) {
				uplflags |= UPL_NOBLOCK;
			}

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			uplflags |= UPL_RET_ONLY_ABSENT |
			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE) {
				uplflags |= UPL_NOBLOCK;
			}
		}
	} else {
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
		} else {
			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		}
	}

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL) {
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	}
	return kr;
}
/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl( ) will take.
 *
 * Parameters:	none
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
 */
upl_size_t
ubc_upl_maxbufsize(
	void)
{
	return MAX_UPL_SIZE_BYTES;
}
/*
 * ubc_upl_map
 *
 * Map the page list associated with the supplied upl into the kernel virtual
 * address space at the virtual address indicated by the dst_addr argument;
 * the entire upl is mapped
 *
 * Parameters:	upl			The upl to map
 *		dst_addr		The address at which to map the upl
 *
 * Returns:	KERN_SUCCESS		The upl has been mapped
 *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
 *		KERN_FAILURE		The upl is already mapped
 *	vm_map_enter:KERN_INVALID_ARGUMENT
 *					A failure code from vm_map_enter() due
 *					to an invalid argument
 */
kern_return_t
ubc_upl_map(
	upl_t           upl,
	vm_offset_t     *dst_addr)
{
	return vm_upl_map(kernel_map, upl, dst_addr);
}
/*
 * ubc_upl_unmap
 *
 * Unmap the page list associated with the supplied upl from the kernel virtual
 * address space; the entire upl is unmapped.
 *
 * Parameters:	upl			The upl to unmap
 *
 * Returns:	KERN_SUCCESS		The upl has been unmapped
 *		KERN_FAILURE		The upl is not currently mapped
 *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
 */
kern_return_t
ubc_upl_unmap(
	upl_t   upl)
{
	return vm_upl_unmap(kernel_map, upl);
}
/*
 * ubc_upl_commit
 *
 * Commit the contents of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *
 * Returns:	KERN_SUCCESS		The upl has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	In practice, the only return value for this function should be
 *		KERN_SUCCESS, unless there has been data structure corruption;
 *		since the upl is deallocated regardless of success or failure,
 *		there's really nothing to do about this other than panic.
 *
 *		IMPORTANT: Use of this function should not be mixed with use of
 *		ubc_upl_commit_range(), due to the unconditional deallocation
 *		by this function.
 */
kern_return_t
ubc_upl_commit(
	upl_t           upl)
{
	upl_page_info_t *pl;
	kern_return_t   kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
	upl_deallocate(upl);
	return kr;
}
/*
 * ubc_upl_commit_range
 *
 * Commit the contents of the specified range of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *		offset			The offset into the upl
 *		size			The size of the region to be committed,
 *					starting at the specified offset
 *		flags			commit type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the commit is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *						both empty and has been
 *						successfully committed
 *		o UPL_COMMIT_CLEAR_DIRTY	Clear each page's dirty
 *						bit; will prevent a
 *						later pageout
 *		o UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *						bit; will cause a later
 *						pageout
 *		o UPL_COMMIT_INACTIVATE		Clear each page's
 *						reference bit; the page
 *						will not be accessed
 *		o UPL_COMMIT_ALLOW_ACCESS	Unbusy each page; pages
 *						become busy when an
 *						IOMemoryDescriptor is
 *						mapped or redirected,
 *						and we have to wait for
 *						the I/O to complete
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t           upl,
	upl_offset_t    offset,
	upl_size_t      size,
	int             flags)
{
	upl_page_info_t *pl;
	boolean_t       empty;
	kern_return_t   kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
		flags |= UPL_COMMIT_NOTIFY_EMPTY;
	}

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);

	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
		upl_deallocate(upl);
	}

	return kr;
}
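
/*
 * Illustrative sketch (not part of this file): the usual create/commit
 * pairing for a UPL obtained from ubc_create_upl(), as described above.
 * "vp" and "f_offset" are assumed to be a referenced vnode and a
 * page-aligned offset supplied by the caller; this is only a usage sketch.
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	kern_return_t kr;
 *
 *	kr = ubc_create_upl(vp, f_offset, PAGE_SIZE, &upl, &pl, UPL_UBC_MSYNC);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;
 *	}
 *	// ... operate on the pages described by "pl" ...
 *
 *	// committing the whole upl with FREE_ON_EMPTY releases it for us
 *	kr = ubc_upl_commit_range(upl, 0, PAGE_SIZE,
 *	    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY);
 */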
/*
 * ubc_upl_abort_range
 *
 * Abort the contents of the specified range of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		offset			The offset into the upl
 *		size			The size of the region to be aborted,
 *					starting at the specified offset
 *		abort_flags		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_flags argument is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o UPL_ABORT_RESTART		The operation must be restarted
 *		o UPL_ABORT_UNAVAILABLE		The pages are unavailable
 *		o UPL_ABORT_ERROR		An I/O error occurred
 *		o UPL_ABORT_DUMP_PAGES		Just free the pages
 *		o UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t           upl,
	upl_offset_t    offset,
	upl_size_t      size,
	int             abort_flags)
{
	kern_return_t   kr;
	boolean_t       empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
	}

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
		upl_deallocate(upl);
	}

	return kr;
}
/*
 * ubc_upl_abort
 *
 * Abort the contents of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		abort_type		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_type is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o UPL_ABORT_RESTART		The operation must be restarted
 *		o UPL_ABORT_UNAVAILABLE		The pages are unavailable
 *		o UPL_ABORT_ERROR		An I/O error occurred
 *		o UPL_ABORT_DUMP_PAGES		Just free the pages
 *		o UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort(
	upl_t           upl,
	int             abort_type)
{
	kern_return_t   kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * ubc_upl_pageinfo
 *
 * Retrieve the internal page list for the specified upl
 *
 * Parameters:	upl			The upl to obtain the page list from
 *
 * Returns:	!NULL			The (upl_page_info_t *) for the page
 *					list internal to the upl
 *		NULL			Error/no page list associated
 *
 * Notes:	IMPORTANT: The function is only valid on internal objects
 *		where the list request was made with the UPL_INTERNAL flag.
 *
 *		This function is a utility helper function, since some callers
 *		may not have direct access to the header defining the macro,
 *		due to abstraction layering constraints.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t           upl)
{
	return UPL_GET_INTERNAL_PAGE_LIST(upl);
}
boolean_t
UBCINFOEXISTS(const struct vnode * vp)
{
	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
}
void
ubc_upl_range_needed(
	upl_t           upl,
	int             index,
	int             count)
{
	upl_range_needed(upl, index, count);
}
boolean_t
ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
		return FALSE;
	}
	if (writable) {
		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
	}
	return TRUE;
}

boolean_t
ubc_is_mapped_writable(const struct vnode *vp)
{
	boolean_t writable;
	return ubc_is_mapped(vp, &writable) && writable;
}
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;

SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
/*
 * Function: csblob_parse_teamid
 *
 * Description: This function returns a pointer to the team id
 *		stored within the codedirectory of the csblob.
 *		If the codedirectory predates team-ids, it returns
 *		NULL.
 *		This does not copy the name but returns a pointer to
 *		it within the CD. Subsequently, the CD must be
 *		available when this is used.
 */
static const char *
csblob_parse_teamid(struct cs_blob *csblob)
{
	const CS_CodeDirectory *cd;

	cd = csblob->csb_cd;

	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
		return NULL;
	}

	if (cd->teamOffset == 0) {
		return NULL;
	}

	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
	if (cs_debug > 1) {
		printf("found team-id %s in cdblob\n", name);
	}

	return name;
}
kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t     *blob_addr_p,
	vm_size_t       *blob_size_p)
{
	kern_return_t   kr = KERN_FAILURE;

	*blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);

	if (*blob_addr_p == 0) {
		kr = KERN_NO_SPACE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}
void
ubc_cs_blob_deallocate(
	vm_offset_t     blob_addr,
	vm_size_t       blob_size)
{
	if (blob_size > pmap_cs_blob_limit) {
		kmem_free(kernel_map, blob_addr, blob_size);
	} else {
		kfree(blob_addr, blob_size);
	}
}
/*
 * Some codesigned files use a lowest common denominator page size of
 * 4KiB, but can be used on systems that have a runtime page size of
 * 16KiB. Since faults will only occur on 16KiB ranges in
 * cs_validate_range(), we can convert the original Code Directory to
 * a multi-level scheme where groups of 4 hashes are combined to form
 * a new hash, which represents 16KiB in the on-disk file. This can
 * reduce the wired memory requirement for the Code Directory by
 * 75%. Care must be taken for binaries that use the "fourk" VM pager
 * for unaligned access, which may still attempt to validate on
 * non-16KiB multiples for compatibility with 3rd party binaries.
 */
static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory *cd;

	/*
	 * Only applies to binaries that ship as part of the OS,
	 * primarily the shared cache.
	 */
	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
		return FALSE;
	}

	/*
	 * If the runtime page size matches the code signing page
	 * size, there is no work to do.
	 */
	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
		return FALSE;
	}

	cd = blob->csb_cd;

	/*
	 * There must be a valid integral multiple of hashes
	 */
	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
		return FALSE;
	}

	/*
	 * Scatter lists must also have ranges that have an integral number of hashes
	 */
	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter *)
		    ((const char*)cd + ntohl(cd->scatterOffset));
		/* iterate all scatter structs to make sure they are all aligned */
		do {
			uint32_t sbase = ntohl(scatter->base);
			uint32_t scount = ntohl(scatter->count);

			/* last scatter? */
			if (scount == 0) {
				break;
			}

			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			scatter++;
		} while (1);
	}

	/* Covered range must be a multiple of the new page size */
	if (ntohl(cd->codeLimit) & PAGE_MASK) {
		return FALSE;
	}

	/* All checks pass */
	return TRUE;
}
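
/*
 * Illustrative sketch (not part of this file): the arithmetic behind the
 * multilevel conversion gated by the checks above.  With a 4KiB signing page
 * size on a 16KiB runtime page size, PAGE_SHIFT - csb_hash_pageshift == 2,
 * so groups of 1 << 2 == 4 hashes collapse into one second-level hash.  The
 * slot counts below are made-up example values.
 *
 *	uint32_t shift     = PAGE_SHIFT - blob->csb_hash_pageshift; // 14 - 12 = 2
 *	uint32_t old_slots = ntohl(cd->nCodeSlots);                  // e.g. 40000 hashes
 *	uint32_t new_slots = old_slots >> shift;                     // 10000 hashes
 *	// the hash array shrinks to 1/4 of its size: the 75% reduction in
 *	// wired memory mentioned in the comment above.
 */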
/*
 * Given a cs_blob with an already chosen best code directory, this
 * function allocates memory and copies into it only the blobs that
 * will be needed by the kernel, namely the single chosen code
 * directory (and not any of its alternatives) and the entitlement
 * blob.
 *
 * This saves significant memory with agile signatures, and additional
 * memory for 3rd Party Code because we also omit the CMS blob.
 *
 * To support multilevel and other potential code directory rewriting,
 * the size of a new code directory can be specified. Since that code
 * directory will replace the existing code directory,
 * ubc_cs_reconstitute_code_signature does not copy the original code
 * directory when a size is given, and the caller must fill it in.
 */
static int
ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
    vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
    CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
{
	const CS_CodeDirectory  *old_cd, *cd;
	CS_CodeDirectory        *new_cd;
	const CS_GenericBlob    *entitlements;
	vm_offset_t             new_blob_addr;
	vm_size_t               new_blob_size;
	vm_size_t               new_cdsize;
	kern_return_t           kr;
	int                     error;

	old_cd = blob->csb_cd;

	new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);

	new_blob_size  = sizeof(CS_SuperBlob);
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_cdsize;

	if (blob->csb_entitlements_blob) {
		/* We need to add a slot for the entitlements */
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(blob->csb_entitlements_blob->length);
	}

	kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
	if (kr != KERN_SUCCESS) {
		printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
		    kr);
		return ENOMEM;
	}

	CS_SuperBlob *new_superblob;

	new_superblob = (CS_SuperBlob *)new_blob_addr;
	new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	new_superblob->length = htonl((uint32_t)new_blob_size);
	if (blob->csb_entitlements_blob) {
		vm_size_t ent_offset, cd_offset;

		cd_offset  = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
		ent_offset = cd_offset + new_cdsize;

		new_superblob->count = htonl(2);
		new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
		new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
		new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
		new_superblob->index[1].offset = htonl((uint32_t)ent_offset);

		memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));

		new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
	} else {
		// Blob is the code directory, directly.
		new_cd = (CS_CodeDirectory *)new_blob_addr;
	}

	if (optional_new_cd_size == 0) {
		// Copy code directory, and revalidate.
		memcpy(new_cd, old_cd, new_cdsize);

		vm_size_t length = new_blob_size;

		error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
		if (error) {
			printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
			    error);

			ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
			return error;
		}

		*new_entitlements_p = entitlements;
	} else {
		// Caller will fill out and validate code directory.
		memset(new_cd, 0, new_cdsize);
		*new_entitlements_p = NULL;
	}

	*new_blob_addr_p = new_blob_addr;
	*new_blob_size_p = new_blob_size;
	*new_cd_p = new_cd;

	return 0;
}
static kern_return_t
ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory  *old_cd, *cd;
	CS_CodeDirectory        *new_cd;
	const CS_GenericBlob    *entitlements;
	vm_offset_t             new_blob_addr;
	vm_size_t               new_blob_size;
	vm_size_t               new_cdsize;
	int                     error;

	uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);

	if (cs_debug > 1) {
		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
	}

	old_cd = blob->csb_cd;

	/* Up to the hashes, we can copy all data */
	new_cdsize  = ntohl(old_cd->hashOffset);
	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;

	error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
	    &new_blob_addr, &new_blob_size, &new_cd,
	    &entitlements);
	if (error != 0) {
		printf("CODE SIGNING: Failed to reconstitute code signature: %d\n", error);
		return error;
	}

	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));

	/* Update fields in the Code Directory structure */
	new_cd->length = htonl((uint32_t)new_cdsize);

	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
	nCodeSlots >>= hashes_per_new_hash_shift;
	new_cd->nCodeSlots = htonl(nCodeSlots);

	new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */

	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
		SC_Scatter *scatter = (SC_Scatter *)
		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
		/* iterate all scatter structs to scale their counts */
		do {
			uint32_t scount = ntohl(scatter->count);
			uint32_t sbase  = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				break;
			}

			scount >>= hashes_per_new_hash_shift;
			scatter->count = htonl(scount);

			sbase >>= hashes_per_new_hash_shift;
			scatter->base = htonl(sbase);

			scatter++;
		} while (1);
	}

	/* For each group of hashes, hash them together */
	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);

	uint32_t hash_index;
	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
		union cs_hash_union     mdctx;

		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
		const unsigned char *src = src_base + hash_index * source_hash_len;
		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
		blob->csb_hashtype->cs_final(dst, &mdctx);
	}

	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
	if (error != 0) {
		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
		    error);

		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
		return error;
	}

	/* New Code Directory is ready for use, swap it out in the blob structure */
	ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

	blob->csb_mem_size = new_blob_size;
	blob->csb_mem_kaddr = new_blob_addr;
	blob->csb_cd = cd;
	blob->csb_entitlements_blob = entitlements;

	/* The blob has some cached attributes of the Code Directory, so update those */

	blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */

	blob->csb_hash_pagesize = PAGE_SIZE;
	blob->csb_hash_pagemask = PAGE_MASK;
	blob->csb_hash_pageshift = PAGE_SHIFT;
	blob->csb_end_offset = ntohl(cd->codeLimit);
	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter *)
		    ((const char*)cd + ntohl(cd->scatterOffset));
		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
	} else {
		blob->csb_start_offset = 0;
	}

	return 0;
}
/*
 * Validate the code signature blob, create a struct cs_blob wrapper
 * and return it together with a pointer to the chosen code directory
 * and entitlements blob.
 *
 * Note that this takes ownership of the memory at addr, mainly because
 * this function can actually replace the passed in blob with another
 * one, e.g. when performing multilevel hashing optimization.
 */
static int
cs_blob_create_validated(
	vm_address_t * const            addr,
	vm_size_t                       size,
	struct cs_blob ** const         ret_blob,
	CS_CodeDirectory const ** const ret_cd)
{
	struct cs_blob          *blob;
	int                     error = EINVAL;
	const CS_CodeDirectory  *cd;
	const CS_GenericBlob    *entitlements;
	union cs_hash_union     mdctx;
	size_t                  length;

	if (ret_blob) {
		*ret_blob = NULL;
	}

	blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
	if (blob == NULL) {
		return ENOMEM;
	}

	/* fill in the new blob */
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = *addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
	blob->csb_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;
	blob->csb_reconstituted = false;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    length, &cd, &entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;
	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		size_t md_size;

		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
			panic("validated CodeDirectory but unsupported type");
		}

		blob->csb_hash_pageshift = cd->pageSize;
		blob->csb_hash_pagesize = (1U << cd->pageSize);
		blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
		blob->csb_hash_firstlevel_pagesize = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
			    ((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
		} else {
			blob->csb_start_offset = 0;
		}

		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
	}

	error = 0;

out:
	if (error != 0) {
		cs_blob_free(blob);
		blob = NULL;
		cd = NULL;
	}

	if (ret_blob != NULL) {
		*ret_blob = blob;
	}
	if (ret_cd != NULL) {
		*ret_cd = cd;
	}

	return error;
}
/*
 * Free a cs_blob previously created by cs_blob_create_validated.
 */
void
cs_blob_free(
	struct cs_blob * const blob)
{
	if (blob != NULL) {
		if (blob->csb_mem_kaddr) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_entitlements != NULL) {
			osobject_release(blob->csb_entitlements);
			blob->csb_entitlements = NULL;
		}
		(kfree)(blob, sizeof(*blob));
	}
}
int
ubc_cs_blob_add(
	struct vnode            *vp,
	cpu_type_t              cputype,
	off_t                   base_offset,
	vm_address_t            *addr,
	vm_size_t               size,
	struct image_params     *imgp,
	__unused int            flags,
	struct cs_blob          **ret_blob)
{
	kern_return_t           kr;
	struct ubc_info         *uip;
	struct cs_blob          *blob, *oblob;
	int                     error;
	CS_CodeDirectory const  *cd;
	off_t                   blob_start_offset, blob_end_offset;
	boolean_t               record_mtime;

	record_mtime = FALSE;

	if (ret_blob) {
		*ret_blob = NULL;
	}

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed in blob in the process. */
	error = cs_blob_create_validated(addr, size, &blob, &cd);

	if (error != 0) {
		printf("malformed code signature blob: %d\n", error);
		return error;
	}

	blob->csb_cpu_type = cputype;
	blob->csb_base_offset = base_offset;

	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */
#if CONFIG_MACF
	unsigned int cs_flags = blob->csb_flags;
	unsigned int signer_type = blob->csb_signer_type;
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;

	if (error) {
		if (cs_debug) {
			printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
		}
		goto out;
	}
	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
		if (cs_debug) {
			printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
		}
		error = EPERM;
		goto out;
	}
#endif

#if CONFIG_ENFORCE_SIGNED_CODE
	/*
	 * Reconstitute code signature
	 */
	{
		vm_address_t new_mem_kaddr = 0;
		vm_size_t new_mem_size = 0;

		CS_CodeDirectory *new_cd = NULL;
		CS_GenericBlob const *new_entitlements = NULL;

		error = ubc_cs_reconstitute_code_signature(blob, 0,
		    &new_mem_kaddr, &new_mem_size,
		    &new_cd, &new_entitlements);

		if (error != 0) {
			printf("failed code signature reconstitution: %d\n", error);
			goto out;
		}

		ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

		blob->csb_mem_kaddr = new_mem_kaddr;
		blob->csb_mem_size = new_mem_size;
		blob->csb_cd = new_cd;
		blob->csb_entitlements_blob = new_entitlements;
		blob->csb_reconstituted = true;
	}
#endif

	if (blob->csb_flags & CS_PLATFORM_BINARY) {
		if (cs_debug > 1) {
			printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
		}
		blob->csb_platform_binary = 1;
		blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
	} else {
		blob->csb_platform_binary = 0;
		blob->csb_platform_path = 0;
		blob->csb_teamid = csblob_parse_teamid(blob);
		if (cs_debug > 1) {
			if (blob->csb_teamid) {
				printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
			} else {
				printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
			}
		}
	}

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	if (ubc_cs_supports_multilevel_hash(blob)) {
		error = ubc_cs_convert_to_multilevel_hash(blob);
		if (error != 0) {
			printf("failed multilevel hash conversion: %d\n", error);
			goto out;
		}
		blob->csb_reconstituted = true;
	}

	vnode_lock(vp);
	if (!UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = uip->cs_blobs;
	    oblob != NULL;
	    oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
			vnode_unlock(vp);
			error = EALREADY;
			goto out;
		} else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else { // non teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
		    oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
		    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    blob->csb_mem_size == oblob->csb_mem_size &&
			    blob->csb_flags == oblob->csb_flags &&
			    (blob->csb_cpu_type == CPU_TYPE_ANY ||
			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
			    blob->csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(blob->csb_cdhash,
			    oblob->csb_cdhash,
			    CS_CDHASH_LEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					oblob->csb_cpu_type = cputype;
				}

				/* The signature is still accepted, so update the
				 * generation count. */
				uip->cs_add_gen = cs_blob_generation_count;

				vnode_unlock(vp);
				if (ret_blob) {
					*ret_blob = oblob;
				}
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	if (cs_blob_count > cs_blob_count_peak) {
		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
	}
	OSAddAtomic((SInt32) + blob->csb_mem_size, &cs_blob_size);
	if ((SInt32) cs_blob_size > cs_blob_size_peak) {
		cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
	}
	if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
		cs_blob_size_max = (UInt32) blob->csb_mem_size;
	}

	if (cs_debug > 1) {
		proc_t p = current_proc();
		const char *name = vnode_getname_printable(vp);

		printf("CODE SIGNING: proc %d(%s) "
		    "loaded %s signatures for file (%s) "
		    "range 0x%llx:0x%llx flags 0x%x\n",
		    p->p_pid, p->p_comm,
		    blob->csb_cpu_type == -1 ? "detached" : "embedded",
		    name,
		    blob->csb_base_offset + blob->csb_start_offset,
		    blob->csb_base_offset + blob->csb_end_offset,
		    blob->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	if (ret_blob) {
		*ret_blob = blob;
	}

	error = 0;      /* success ! */

out:
	if (error) {
		if (cs_debug) {
			printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
		}

		cs_blob_free(blob);
	}

	if (error == EAGAIN) {
		/*
		 * See above:  error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
	}

	return error;
}
void
csvnode_print_debug(struct vnode *vp)
{
	const char      *name = NULL;
	struct ubc_info *uip;
	struct cs_blob  *blob;

	name = vnode_getname_printable(vp);
	if (name) {
		printf("csvnode: name: %s\n", name);
		vnode_putname_printable(name);
	}

	vnode_lock_spin(vp);

	if (!UBCINFOEXISTS(vp)) {
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
		    (unsigned long)blob->csb_start_offset,
		    (unsigned long)blob->csb_end_offset,
		    blob->csb_flags,
		    blob->csb_platform_binary ? "yes" : "no",
		    blob->csb_platform_path ? "yes" : "no",
		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
	}

out:
	vnode_unlock(vp);
}
struct cs_blob *
ubc_cs_blob_get(
	struct vnode    *vp,
	cpu_type_t      cputype,
	off_t           offset)
{
	struct ubc_info *uip;
	struct cs_blob  *blob;
	off_t offset_in_blob;

	vnode_lock_spin(vp);

	if (!UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		if (cputype != -1 && blob->csb_cpu_type == cputype) {
			break;
		}
		if (offset != -1) {
			offset_in_blob = offset - blob->csb_base_offset;
			if (offset_in_blob >= blob->csb_start_offset &&
			    offset_in_blob < blob->csb_end_offset) {
				/* our offset is covered by this blob */
				break;
			}
		}
	}

out:
	vnode_unlock(vp);

	return blob;
}
static void
ubc_cs_free(
	struct ubc_info *uip)
{
	struct cs_blob  *blob, *next_blob;

	for (blob = uip->cs_blobs;
	    blob != NULL;
	    blob = next_blob) {
		next_blob = blob->csb_next;
		OSAddAtomic(-1, &cs_blob_count);
		OSAddAtomic((SInt32) - blob->csb_mem_size, &cs_blob_size);
		cs_blob_free(blob);
	}
#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
	uip->cs_blobs = NULL;
}
/* check cs blob generation on vnode
 * returns:
 *    0         : Success, the cs_blob attached is current
 *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
 */
int
ubc_cs_generation_check(
	struct vnode    *vp)
{
	int retval = ENEEDAUTH;

	vnode_lock_spin(vp);

	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
		retval = 0;
	}

	vnode_unlock(vp);
	return retval;
}
int
ubc_cs_blob_revalidate(
	struct vnode            *vp,
	struct cs_blob          *blob,
	struct image_params     *imgp,
	int                     flags)
{
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	size_t size;

	assert(blob != NULL);

	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
	    size, &cd, &entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	if (blob->csb_reconstituted) {
		/*
		 * Code signatures that have been modified after validation
		 * cannot be revalidated inline from their in-memory blob.
		 *
		 * That's okay, though, because the only path left that relies
		 * on revalidation of existing in-memory blobs is the legacy
		 * detached signature database path, which only exists on macOS,
		 * which does not do reconstitution of any kind.
		 */
		if (cs_debug) {
			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
		}

		/*
		 * EAGAIN tells the caller that they may reread the code
		 * signature and try attaching it again, which is the same
		 * thing they would do if there was no cs_blob yet in the
		 * first place.
		 *
		 * Conveniently, after ubc_cs_blob_add did a successful
		 * validation, it will detect that a matching cs_blob (cdhash,
		 * offset, arch etc.) already exists, and return success
		 * without re-adding a cs_blob to the vnode.
		 */
		error = EAGAIN;
		goto out;
	}

	/* callout to mac_vnode_check_signature */
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
	}

	/* update generation number if success */
	vnode_lock_spin(vp);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;
	if (UBCINFOEXISTS(vp)) {
		if (error == 0) {
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		} else {
			vp->v_ubcinfo->cs_add_gen = 0;
		}
	}

	vnode_unlock(vp);

out:
	return error;
}
void
cs_blob_reset_cache()
{
	/* incrementing odd no by 2 makes sure '0' is never reached. */
	OSAddAtomic(+2, &cs_blob_generation_count);
	printf("Resetting cs_blob cache from all vnodes.\n");
}
struct cs_blob *
ubc_get_cs_blobs(
	struct vnode    *vp)
{
	struct ubc_info *uip;
	struct cs_blob  *blobs;

	/*
	 * No need to take the vnode lock here.  The caller must be holding
	 * a reference on the vnode (via a VM mapping or open file descriptor),
	 * so the vnode will not go away.  The ubc_info stays until the vnode
	 * goes away.  And we only modify "blobs" by adding to the head of the
	 * list.
	 * The ubc_info could go away entirely if the vnode gets reclaimed as
	 * part of a forced unmount.  In the case of a code-signature validation
	 * during a page fault, the "paging_in_progress" reference on the VM
	 * object guarantees that the vnode pager (and the ubc_info) won't go
	 * away during the fault.
	 * Other callers need to protect against vnode reclaim by holding the
	 * vnode lock, for example.
	 */

	if (!UBCINFOEXISTS(vp)) {
		blobs = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	blobs = uip->cs_blobs;

out:
	return blobs;
}
void
ubc_get_cs_mtime(
	struct vnode    *vp,
	struct timespec *cs_mtime)
{
	struct ubc_info *uip;

	if (!UBCINFOEXISTS(vp)) {
		cs_mtime->tv_sec = 0;
		cs_mtime->tv_nsec = 0;
		return;
	}

	uip = vp->v_ubcinfo;
	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;

static boolean_t
cs_validate_hash(
	struct cs_blob          *blobs,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               *bytes_processed,
	unsigned                *tainted)
{
	union cs_hash_union     mdctx;
	struct cs_hash const    *hashtype = NULL;
	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
	boolean_t               found_hash;
	struct cs_blob          *blob;
	const CS_CodeDirectory  *cd;
	const unsigned char     *hash;
	boolean_t               validated;
	off_t                   offset; /* page offset in the file */
	size_t                  size;
	off_t                   codeLimit = 0;
	const char              *lower_bound, *upper_bound;
	vm_offset_t             kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CDs that have been injected are already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL) {
				panic("unknown hash type ?");
			}
			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
				panic("hash size too large");
			}
			if (offset & blob->csb_hash_pagemask) {
				panic("offset not aligned to cshash boundary");
			}

			codeLimit = ntohl(cd->codeLimit);

			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
			    hashtype->cs_size,
			    lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			    "mobj %p off 0x%llx: no hash to validate !?\n",
			    pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {
		*tainted = 0;

		size = blob->csb_hash_pagesize;
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & blob->csb_hash_pagemask);
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pagesize) {
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i = 0; i < size;) {
				union cs_hash_union     partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size - i, blob->csb_hash_firstlevel_pagesize);

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				    pager, page_offset, size,
				    asha1[0], asha1[1], asha1[2],
				    asha1[3], asha1[4],
				    esha1[0], esha1[1], esha1[2],
				    esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				    "mobj %p off 0x%llx size 0x%lx: "
				    "hash OK\n",
				    pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}
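
/*
 * Illustrative sketch (not part of this file): how the first-level loop
 * above recomputes a second-level hash when csb_hash_firstlevel_pagesize is
 * set (a Code Directory converted by ubc_cs_convert_to_multilevel_hash()).
 * For a 16KiB runtime page backed by 4KiB signing pages, with H() standing
 * for blob->csb_hashtype:
 *
 *	digest0 = H(page[    0 ..  4095]);
 *	digest1 = H(page[ 4096 ..  8191]);
 *	digest2 = H(page[ 8192 .. 12287]);
 *	digest3 = H(page[12288 .. 16383]);
 *	actual  = H(digest0 || digest1 || digest2 || digest3);
 *	// "actual" is then compared against the expected second-level hash
 *	// stored in the converted Code Directory.
 */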
boolean_t
cs_validate_range(
	struct vnode            *vp,
	memory_object_t         pager,
	memory_object_offset_t  page_offset,
	const void              *data,
	vm_size_t               dsize,
	unsigned                *tainted)
{
	vm_size_t offset_in_range;
	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */

	struct cs_blob *blobs = ubc_get_cs_blobs(vp);

	*tainted = 0;

	for (offset_in_range = 0;
	    offset_in_range < dsize;
	    /* offset_in_range updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;

		subrange_validated = cs_validate_hash(blobs,
		    pager,
		    page_offset + offset_in_range,
		    (const void *)((const char *)data + offset_in_range),
		    &bytes_processed,
		    &subrange_tainted);

		*tainted |= subrange_tainted;

		if (bytes_processed == 0) {
			/* Cannot make forward progress, so return an error */
			all_subranges_validated = FALSE;
			break;
		} else if (subrange_validated == FALSE) {
			all_subranges_validated = FALSE;
			/* Keep going to detect other types of failures in subranges */
		}

		offset_in_range += bytes_processed;
	}

	return all_subranges_validated;
}
int
ubc_cs_getcdhash(
	vnode_t         vp,
	off_t           offset,
	unsigned char   *cdhash)
{
	struct cs_blob  *blobs, *blob;
	off_t           rel_offset;
	int             ret;

	vnode_lock(vp);

	blobs = ubc_get_cs_blobs(vp);
	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		/* compute offset relative to this blob */
		rel_offset = offset - blob->csb_base_offset;
		if (rel_offset >= blob->csb_start_offset &&
		    rel_offset < blob->csb_end_offset) {
			/* this blob does cover our "offset" ! */
			break;
		}
	}

	if (blob == NULL) {
		/* we didn't find a blob covering "offset" */
		ret = EBADEXEC; /* XXX any better error ? */
	} else {
		/* get the SHA1 hash of that blob */
		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
		ret = 0;
	}

	vnode_unlock(vp);

	return ret;
}
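
/*
 * Illustrative sketch (not part of this file): fetching the cdhash that
 * covers a given file offset with ubc_cs_getcdhash().  "vp" and
 * "slice_offset" are assumed to come from the caller (e.g. the base offset
 * of a Mach-O slice); CS_CDHASH_LEN bytes are copied out on success.
 *
 *	unsigned char cdhash[CS_CDHASH_LEN];
 *
 *	if (ubc_cs_getcdhash(vp, slice_offset, cdhash) == 0) {
 *		// cdhash now identifies the code directory covering that slice
 *	} else {
 *		// EBADEXEC: no blob covers this offset
 *	}
 */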
boolean_t
ubc_cs_is_range_codesigned(
	vnode_t                 vp,
	mach_vm_offset_t        start,
	mach_vm_size_t          size)
{
	struct cs_blob          *csblob;
	mach_vm_offset_t        blob_start;
	mach_vm_offset_t        blob_end;

	if (vp == NULL) {
		/* no file: no code signature */
		return FALSE;
	}
	if (size == 0) {
		/* no range: no code signature */
		return FALSE;
	}
	if (start + size < start) {
		/* overflow */
		return FALSE;
	}

	csblob = ubc_cs_blob_get(vp, -1, start);
	if (csblob == NULL) {
		return FALSE;
	}

	/*
	 * We currently check if the range is covered by a single blob,
	 * which should always be the case for the dyld shared cache.
	 * If we ever want to make this routine handle other cases, we
	 * would have to iterate if the blob does not cover the full range.
	 */
	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
	    csblob->csb_start_offset);
	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
	    csblob->csb_end_offset);
	if (blob_start > start || blob_end < (start + size)) {
		/* range not fully covered by this code-signing blob */
		return FALSE;
	}

	return TRUE;
}
#if CHECK_CS_VALIDATION_BITMAP
#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
extern boolean_t        root_fs_upgrade_try;

/*
 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
 * Depends:
 * a) Is the target vnode on the root filesystem?
 * b) Has someone tried to mount the root filesystem read-write?
 * If answers are (a) yes AND (b) no, then we can use the bitmap.
 */
#define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
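
/*
 * Illustrative sketch (not part of this file): the sizing and indexing math
 * behind the validation bitmap.  One bit tracks one page of the file, so
 * stob() rounds the file size up to whole pages, adds 7 bits and shifts
 * right by 3 to get a byte count.  The 100000-byte file below is an
 * arbitrary example assuming a 4KiB page size.
 *
 *	// round_page_64(100000) = 102400  ->  atop_64() = 25 pages
 *	// stob(100000) = (25 + 07) >> 3   = 4 bytes of bitmap
 *
 *	uint64_t bit  = atop_64(offset);        // page index covering "offset"
 *	uint64_t byte = bit >> 3;               // byte that holds this page's bit
 *	char     mask = 1 << (bit & 07);        // bit within that byte
 */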
kern_return_t
ubc_cs_validation_bitmap_allocate(
	vnode_t         vp)
{
	kern_return_t   kr = KERN_SUCCESS;
	struct ubc_info *uip;
	char            *target_bitmap;
	vm_object_size_t        bitmap_size;

	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		uip = vp->v_ubcinfo;

		if (uip->cs_valid_bitmap == NULL) {
			bitmap_size = stob(uip->ui_size);
			target_bitmap = (char*) kalloc((vm_size_t)bitmap_size );
			if (target_bitmap == 0) {
				kr = KERN_NO_SPACE;
			} else {
				kr = KERN_SUCCESS;
			}
			if (kr == KERN_SUCCESS) {
				memset( target_bitmap, 0, (size_t)bitmap_size);
				uip->cs_valid_bitmap = (void*)target_bitmap;
				uip->cs_valid_bitmap_size = bitmap_size;
			}
		}
	}
	return kr;
}
kern_return_t
ubc_cs_check_validation_bitmap(
	vnode_t                 vp,
	memory_object_offset_t  offset,
	int                     optype)
{
	kern_return_t   kr = KERN_SUCCESS;

	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		struct ubc_info *uip = vp->v_ubcinfo;
		char            *target_bitmap = uip->cs_valid_bitmap;

		if (target_bitmap == NULL) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			uint64_t        bit, byte;
			bit = atop_64( offset );
			byte = bit >> 3;

			if (byte > uip->cs_valid_bitmap_size) {
				kr = KERN_INVALID_ARGUMENT;
			} else {
				if (optype == CS_BITMAP_SET) {
					target_bitmap[byte] |= (1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CLEAR) {
					target_bitmap[byte] &= ~(1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CHECK) {
					if (target_bitmap[byte] & (1 << (bit & 07))) {
						kr = KERN_SUCCESS;
					} else {
						kr = KERN_FAILURE;
					}
				}
			}
		}
	}
	return kr;
}
void
ubc_cs_validation_bitmap_deallocate(
	vnode_t         vp)
{
	struct ubc_info *uip;
	void            *target_bitmap;
	vm_object_size_t        bitmap_size;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if ((target_bitmap = uip->cs_valid_bitmap) != NULL) {
			bitmap_size = uip->cs_valid_bitmap_size;
			kfree( target_bitmap, (vm_size_t) bitmap_size );
			uip->cs_valid_bitmap = NULL;
		}
	}
}
#else
kern_return_t
ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
{
	return KERN_INVALID_ARGUMENT;
}

kern_return_t
ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype)
{
	return KERN_INVALID_ARGUMENT;
}

void
ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
{
	return;
}
#endif /* CHECK_CS_VALIDATION_BITMAP */
#if PMAP_CS
kern_return_t
cs_associate_blob_with_mapping(
	void                    *pmap,
	vm_map_offset_t         start,
	vm_map_size_t           size,
	vm_object_offset_t      offset,
	void                    *blobs_p)
{
	off_t                   blob_start_offset, blob_end_offset;
	kern_return_t           kr;
	struct cs_blob          *blobs, *blob;
	vm_offset_t             kaddr;
	struct pmap_cs_code_directory *cd_entry = NULL;

	if (!pmap_cs) {
		return KERN_NOT_SUPPORTED;
	}

	blobs = (struct cs_blob *)blobs_p;

	for (blob = blobs;
	    blob != NULL;
	    blob = blob->csb_next) {
		blob_start_offset = (blob->csb_base_offset +
		    blob->csb_start_offset);
		blob_end_offset = (blob->csb_base_offset +
		    blob->csb_end_offset);
		if ((off_t) offset < blob_start_offset ||
		    (off_t) offset >= blob_end_offset ||
		    (off_t) (offset + size) <= blob_start_offset ||
		    (off_t) (offset + size) > blob_end_offset) {
			continue;
		}
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			/* blob data has been released */
			continue;
		}
		cd_entry = blob->csb_pmap_cs_entry;
		if (cd_entry == NULL) {
			continue;
		}

		break;
	}

	if (cd_entry != NULL) {
		kr = pmap_cs_associate(pmap,
		    cd_entry,
		    start,
		    size);
	} else {
		kr = KERN_CODESIGN_ERROR;
	}
#if 00
	printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
	kr = KERN_SUCCESS;
#endif
	return kr;
}
#endif /* PMAP_CS */