/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#include <libkern/crypto/sha1.h>
#include <libkern/crypto/sha2.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
                                                  boolean_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
                                          boolean_t is_signed);
extern boolean_t memory_object_is_signed(memory_object_control_t);

/* XXX Same for those. */

extern void Debugger(const char *message);
/* XXX no one uses this interface! */
kern_return_t ubc_page_op_with_control(
        memory_object_control_t  control,
        off_t                    f_offset,
        int                      ops,
        ppnum_t                  *phys_entryp,
        int                      *flagsp);

#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);

struct zone     *ubc_info_zone;
static uint32_t cs_blob_generation_count = 1;
/*
 * Routines to navigate code signing data structures in the kernel...
 */

#define PAGE_SHIFT_4K           (12)

static boolean_t
cs_valid_range(
        const void *start,
        const void *end,
        const void *lower_bound,
        const void *upper_bound)
{
        if (upper_bound < lower_bound ||
            end < start) {
                return FALSE;
        }

        if (start < lower_bound ||
            end > upper_bound) {
                return FALSE;
        }

        return TRUE;
}
typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

struct cs_hash {
        uint8_t         cs_type;        /* type code as per code signing */
        size_t          cs_size;        /* size of effective hash (may be truncated) */
        size_t          cs_digest_size; /* size of native hash */
        cs_md_init      cs_init;
        cs_md_update    cs_update;
        cs_md_final     cs_final;
};
uint8_t
cs_hash_type(
        struct cs_hash const * const cs_hash)
{
        return cs_hash->cs_type;
}
static const struct cs_hash cs_hash_sha1 = {
        .cs_type = CS_HASHTYPE_SHA1,
        .cs_size = CS_SHA1_LEN,
        .cs_digest_size = SHA_DIGEST_LENGTH,
        .cs_init = (cs_md_init)SHA1Init,
        .cs_update = (cs_md_update)SHA1Update,
        .cs_final = (cs_md_final)SHA1Final,
};

static const struct cs_hash cs_hash_sha256 = {
        .cs_type = CS_HASHTYPE_SHA256,
        .cs_size = SHA256_DIGEST_LENGTH,
        .cs_digest_size = SHA256_DIGEST_LENGTH,
        .cs_init = (cs_md_init)SHA256_Init,
        .cs_update = (cs_md_update)SHA256_Update,
        .cs_final = (cs_md_final)SHA256_Final,
};

static const struct cs_hash cs_hash_sha256_truncate = {
        .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
        .cs_size = CS_SHA256_TRUNCATED_LEN,
        .cs_digest_size = SHA256_DIGEST_LENGTH,
        .cs_init = (cs_md_init)SHA256_Init,
        .cs_update = (cs_md_update)SHA256_Update,
        .cs_final = (cs_md_final)SHA256_Final,
};

static const struct cs_hash cs_hash_sha384 = {
        .cs_type = CS_HASHTYPE_SHA384,
        .cs_size = SHA384_DIGEST_LENGTH,
        .cs_digest_size = SHA384_DIGEST_LENGTH,
        .cs_init = (cs_md_init)SHA384_Init,
        .cs_update = (cs_md_update)SHA384_Update,
        .cs_final = (cs_md_final)SHA384_Final,
};
static struct cs_hash const *
cs_find_md(uint8_t type)
{
        if (type == CS_HASHTYPE_SHA1) {
                return &cs_hash_sha1;
        } else if (type == CS_HASHTYPE_SHA256) {
                return &cs_hash_sha256;
        } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
                return &cs_hash_sha256_truncate;
        } else if (type == CS_HASHTYPE_SHA384) {
                return &cs_hash_sha384;
        }
        return NULL;
}
union cs_hash_union {
        SHA1_CTX        sha1ctx;
        SHA256_CTX      sha256ctx;
        SHA384_CTX      sha384ctx;
};
/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 */
static const uint32_t hashPriorities[] = {
        CS_HASHTYPE_SHA1,
        CS_HASHTYPE_SHA256_TRUNCATED,
        CS_HASHTYPE_SHA256,
        CS_HASHTYPE_SHA384,
};

static unsigned int
hash_rank(const CS_CodeDirectory *cd)
{
        uint32_t type = cd->hashType;
        unsigned int n;

        for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n)
                if (hashPriorities[n] == type)
                        return n + 1;
        return 0;       /* not supported */
}
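/*
 * Example (sketch): computing a digest with the function table returned by
 * cs_find_md(), the same pattern the entitlements check below uses.  The
 * "data"/"len" names are illustrative only, so the block is compiled out.
 */
#if 0
static void
example_digest(const void *data, size_t len)
{
        union cs_hash_union ctx;
        uint8_t digest[CS_HASH_MAX_SIZE];
        struct cs_hash const *h = cs_find_md(CS_HASHTYPE_SHA256);

        if (h == NULL || h->cs_digest_size > sizeof(digest))
                return;

        h->cs_init(&ctx);
        h->cs_update(&ctx, data, len);
        h->cs_final(digest, &ctx);
        /* only the first h->cs_size bytes are the effective code signing
         * hash; for truncated SHA-256 that is CS_SHA256_TRUNCATED_LEN */
}
#endif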
/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
        const CS_CodeDirectory *cd,
        uint32_t page,
        size_t hash_len,
        const char *lower_bound,
        const char *upper_bound)
{
        const unsigned char *base, *top, *hash;
        uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

        assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

        if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
                /* Get first scatter struct */
                const SC_Scatter *scatter = (const SC_Scatter *)
                        ((const char *)cd + ntohl(cd->scatterOffset));
                uint32_t hashindex = 0, scount, sbase = 0;

                /* iterate all scatter structs */
                do {
                        if ((const char *)scatter > (const char *)cd + ntohl(cd->length)) {
                                printf("CODE SIGNING: Scatter extends past Code Directory\n");
                                return NULL;
                        }

                        scount = ntohl(scatter->count);
                        uint32_t new_base = ntohl(scatter->base);

                        /* last scatter? */
                        if (scount == 0) {
                                return NULL;
                        }

                        if ((hashindex > 0) && (new_base <= sbase)) {
                                printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
                                       sbase, new_base);
                                return NULL;    /* unordered scatter array */
                        }
                        sbase = new_base;

                        /* this scatter beyond page we're looking for? */
                        if (sbase > page) {
                                return NULL;
                        }

                        if (sbase + scount >= page) {
                                /* Found the scatter struct that is
                                 * referencing our page */

                                /* base = address of first hash covered by scatter */
                                base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
                                        hashindex * hash_len;
                                /* top = address of first hash after this scatter */
                                top = base + scount * hash_len;
                                if (!cs_valid_range(base, top, lower_bound,
                                                    upper_bound) ||
                                    hashindex > nCodeSlots) {
                                        return NULL;
                                }

                                break;
                        }

                        /* this scatter struct is before the page we're looking
                         * for. Iterate. */
                        hashindex += scount;
                        scatter++;
                } while (1);

                hash = base + (page - sbase) * hash_len;
        } else {
                base = (const unsigned char *)cd + ntohl(cd->hashOffset);
                top = base + nCodeSlots * hash_len;
                if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
                    page > nCodeSlots) {
                        return NULL;
                }
                assert(page < nCodeSlots);

                hash = base + page * hash_len;
        }

        if (!cs_valid_range(hash, hash + hash_len,
                            lower_bound, upper_bound)) {
                hash = NULL;
        }

        return hash;
}
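/*
 * Example (sketch): verifying one page of code against the hash slot that
 * hashes() locates for it.  "cd", "page_data", "pagenum" and the bounds are
 * illustrative names for an already-validated code directory and a mapped
 * page, so the block is compiled out.
 */
#if 0
static boolean_t
example_page_matches(const CS_CodeDirectory *cd, uint32_t pagenum,
                     const void *page_data,
                     const char *lower_bound, const char *upper_bound)
{
        struct cs_hash const *h = cs_find_md(cd->hashType);
        union cs_hash_union ctx;
        unsigned char actual[CS_HASH_MAX_SIZE];
        const unsigned char *expected;

        if (h == NULL)
                return FALSE;

        expected = hashes(cd, pagenum, h->cs_size, lower_bound, upper_bound);
        if (expected == NULL)
                return FALSE;

        h->cs_init(&ctx);
        h->cs_update(&ctx, page_data, (size_t)1 << cd->pageSize);
        h->cs_final(actual, &ctx);

        return memcmp(expected, actual, h->cs_size) == 0;
}
#endif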
/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:  cd                      Pointer to code directory buffer
 *              length                  Length of buffer
 *
 * Returns:     0                       Success
 *              EBADEXEC                Invalid code signature
 */
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
        struct cs_hash const *hashtype;

        if (length < sizeof(*cd))
                return EBADEXEC;
        if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
                return EBADEXEC;
        if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT)
                return EBADEXEC;
        hashtype = cs_find_md(cd->hashType);
        if (hashtype == NULL)
                return EBADEXEC;

        if (cd->hashSize != hashtype->cs_size)
                return EBADEXEC;

        if (length < ntohl(cd->hashOffset))
                return EBADEXEC;

        /* check that nSpecialSlots fits in the buffer in front of hashOffset */
        if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots))
                return EBADEXEC;

        /* check that codeslots fits in the buffer */
        if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots))
                return EBADEXEC;

        if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {

                if (length < ntohl(cd->scatterOffset))
                        return EBADEXEC;

                const SC_Scatter *scatter = (const SC_Scatter *)
                        (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
                uint32_t nPages = 0;

                /*
                 * Check each scatter buffer; since we don't know the
                 * length of the scatter buffer array, we have to
                 * check each entry.
                 */
                while (1) {
                        /* check that the end of each scatter buffer is within the length */
                        if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
                                return EBADEXEC;
                        uint32_t scount = ntohl(scatter->count);
                        if (scount == 0)
                                break;
                        if (nPages + scount < nPages)
                                return EBADEXEC;
                        nPages += scount;
                        scatter++;

                        /* XXX check that bases don't overlap */
                        /* XXX check that targetOffset doesn't overlap */
                }
#if 0 /* rdar://12579439 */
                if (nPages != ntohl(cd->nCodeSlots))
                        return EBADEXEC;
#endif
        }

        if (length < ntohl(cd->identOffset))
                return EBADEXEC;

        /* identifier is a NUL terminated string */
        if (cd->identOffset) {
                const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
                if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
                        return EBADEXEC;
        }

        /* team identifier is a NUL terminated string */
        if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
                if (length < ntohl(cd->teamOffset))
                        return EBADEXEC;

                const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
                if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
                        return EBADEXEC;
        }

        return 0;
}
static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
        if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
                return EBADEXEC;
        return 0;
}
/*
 * cs_validate_csblob
 *
 * Validate a superblob/embedded code directory, making sure that all
 * internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 * Parameters:  buffer                  Pointer to code signature
 *              length                  Length of buffer
 *              rcd                     returns pointer to code directory
 *
 * Returns:     0                       Success
 *              EBADEXEC                Invalid code signature
 */
static int
cs_validate_csblob(
        const uint8_t *addr,
        const size_t blob_size,
        const CS_CodeDirectory **rcd,
        const CS_GenericBlob **rentitlements)
{
        const CS_GenericBlob *blob;
        int error;
        size_t length;

        *rcd = NULL;
        *rentitlements = NULL;

        blob = (const CS_GenericBlob *)(const void *)addr;

        length = blob_size;
        error = cs_validate_blob(blob, length);
        if (error)
                return error;
        length = ntohl(blob->length);

        if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
                const CS_SuperBlob *sb;
                uint32_t n, count;
                const CS_CodeDirectory *best_cd = NULL;
                unsigned int best_rank = 0;
#if PLATFORM_WatchOS
                const CS_CodeDirectory *sha1_cd = NULL;
#endif

                if (length < sizeof(CS_SuperBlob))
                        return EBADEXEC;

                sb = (const CS_SuperBlob *)blob;
                count = ntohl(sb->count);

                /* check that the array of BlobIndex fits in the rest of the data */
                if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
                        return EBADEXEC;

                /* now check each BlobIndex */
                for (n = 0; n < count; n++) {
                        const CS_BlobIndex *blobIndex = &sb->index[n];
                        uint32_t type = ntohl(blobIndex->type);
                        uint32_t offset = ntohl(blobIndex->offset);
                        if (length < offset)
                                return EBADEXEC;

                        const CS_GenericBlob *subBlob =
                                (const CS_GenericBlob *)(const void *)(addr + offset);

                        size_t subLength = length - offset;

                        if ((error = cs_validate_blob(subBlob, subLength)) != 0)
                                return error;
                        subLength = ntohl(subBlob->length);

                        /* extra validation for CDs, that is also returned */
                        if (type == CSSLOT_CODEDIRECTORY ||
                            (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
                                const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
                                if ((error = cs_validate_codedirectory(candidate, subLength)) != 0)
                                        return error;

                                unsigned int rank = hash_rank(candidate);

                                printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n",
                                       candidate->hashType, (int)rank, (int)type, (int)n);

                                if (best_cd == NULL || rank > best_rank) {
                                        best_cd = candidate;
                                        best_rank = rank;

                                        printf("using CodeDirectory type %d (rank %d)\n",
                                               (int)best_cd->hashType, best_rank);
                                        *rcd = best_cd;
                                } else if (best_cd != NULL && rank == best_rank) {
                                        /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
                                        printf("multiple hash=%d CodeDirectories in signature; rejecting\n",
                                               best_cd->hashType);
                                        return EBADEXEC;
                                }
#if PLATFORM_WatchOS
                                if (candidate->hashType == CS_HASHTYPE_SHA1) {
                                        if (sha1_cd != NULL) {
                                                printf("multiple sha1 CodeDirectories in signature; rejecting\n");
                                                return EBADEXEC;
                                        }
                                        sha1_cd = candidate;
                                }
#endif
                        } else if (type == CSSLOT_ENTITLEMENTS) {
                                if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
                                        return EBADEXEC;
                                }
                                if (*rentitlements != NULL) {
                                        printf("multiple entitlements blobs\n");
                                        return EBADEXEC;
                                }
                                *rentitlements = subBlob;
                        }
                }

#if PLATFORM_WatchOS
                /* To keep watchOS fast enough, we have to resort to sha1 for
                 * some code.
                 *
                 * At the time of writing this comment, known sha1 attacks are
                 * collision attacks (not preimage or second preimage
                 * attacks), which do not apply to platform binaries since
                 * they have a fixed hash in the trust cache.  Given this
                 * property, we only prefer sha1 code directories for adhoc
                 * signatures, which always have to be in a trust cache to be
                 * valid (can-load-cdhash does not exist for watchOS). Those
                 * are, incidentally, also the platform binaries, for which we
                 * care about the performance hit that sha256 would bring us.
                 *
                 * Platform binaries may still contain a (not chosen) sha256
                 * code directory, which keeps software updates that switch to
                 * sha256-only small.
                 */

                if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
                        if (sha1_cd->flags != (*rcd)->flags) {
                                printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
                                       (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
                                *rcd = NULL;
                                return EBADEXEC;
                        }

                        *rcd = sha1_cd;
                }
#endif

        } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {

                if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0)
                        return error;
                *rcd = (const CS_CodeDirectory *)blob;
        } else {
                return EBADEXEC;
        }

        if (*rcd == NULL)
                return EBADEXEC;

        return 0;
}
/*
 * csblob_find_blob_bytes
 *
 * Find a blob in the superblob/code directory.  The blob must have
 * been validated by cs_validate_csblob() before calling this.
 * Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory if it's stored as well as
 * searching the superblob.
 *
 * Parameters:  buffer                  Pointer to code signature
 *              length                  Length of buffer
 *              type                    type of blob to find
 *              magic                   the magic number for that blob
 *
 * Returns:     pointer                 Success
 *              NULL                    Buffer not found
 */
const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
        const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

        if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
                const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
                size_t n, count = ntohl(sb->count);

                for (n = 0; n < count; n++) {
                        if (ntohl(sb->index[n].type) != type)
                                continue;
                        uint32_t offset = ntohl(sb->index[n].offset);
                        if (length - sizeof(const CS_GenericBlob) < offset)
                                return NULL;
                        blob = (const CS_GenericBlob *)(const void *)(addr + offset);
                        if (ntohl(blob->magic) != magic)
                                continue;
                        return blob;
                }
        } else if (type == CSSLOT_CODEDIRECTORY
                   && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
                   && magic == CSMAGIC_CODEDIRECTORY)
                return blob;
        return NULL;
}

const CS_GenericBlob *
csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
        if ((csblob->csb_flags & CS_VALID) == 0)
                return NULL;
        return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}
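/*
 * Example (sketch): looking up the entitlements blob in a cs_blob that has
 * already passed cs_validate_csblob().  "csblob" stands for any attached,
 * validated blob; the block is compiled out.
 */
#if 0
static const CS_GenericBlob *
example_find_entitlements(struct cs_blob *csblob)
{
        /* NULL means "no such blob" or a csblob that is not CS_VALID */
        return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
                                CSMAGIC_EMBEDDED_ENTITLEMENTS);
}
#endif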
static const uint8_t *
find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
{
        /* there is no zero special slot since that is the first code slot */
        if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
                return NULL;

        return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot));
}

static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
        uint8_t computed_hash[CS_HASH_MAX_SIZE];
        const CS_GenericBlob *entitlements;
        const CS_CodeDirectory *code_dir;
        const uint8_t *embedded_hash;
        union cs_hash_union context;

        *out_start = NULL;
        *out_length = 0;

        if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash))
                return EBADEXEC;

        code_dir = csblob->csb_cd;

        if ((csblob->csb_flags & CS_VALID) == 0) {
                entitlements = NULL;
        } else {
                entitlements = csblob->csb_entitlements_blob;
        }
        embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

        if (embedded_hash == NULL) {
                if (entitlements)
                        return EBADEXEC;
                return 0;
        } else if (entitlements == NULL) {
                if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
                        return EBADEXEC;
                } else {
                        return 0;
                }
        }

        csblob->csb_hashtype->cs_init(&context);
        csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
        csblob->csb_hashtype->cs_final(computed_hash, &context);

        if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0)
                return EBADEXEC;

        *out_start = __DECONST(void *, entitlements);
        *out_length = ntohl(entitlements->length);

        return 0;
}
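/*
 * Example (sketch): fetching entitlements from an attached cs_blob.  A zero
 * return with a NULL start simply means the binary has no entitlements;
 * non-zero means the embedded-hash check failed.  Compiled out.
 */
#if 0
static int
example_read_entitlements(struct cs_blob *csblob)
{
        void *ents_start = NULL;
        size_t ents_len = 0;
        int error;

        error = csblob_get_entitlements(csblob, &ents_start, &ents_len);
        if (error)
                return error;           /* EBADEXEC */
        if (ents_start != NULL) {
                /* ents_start/ents_len cover the entitlements blob */
        }
        return 0;
}
#endif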
/*
 * End of routines to navigate code signing data structures in the kernel.
 */

/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Parameters:  (void)
 *
 * Returns:     (void)
 *
 * Implicit returns:
 *              ubc_info_zone(global)   initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
        vm_size_t       i;

        i = (vm_size_t) sizeof(struct ubc_info);

        ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");

        zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:  vp                      Pointer to the vnode
 *
 * Returns:     0                       Success
 *      vnode_size:ENOMEM               Not enough space
 *      vnode_size:???                  Other error from vnode_getattr
 */
int
ubc_info_init(struct vnode *vp)
{
        return (ubc_info_init_internal(vp, 0, 0));
}


/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:  vp                      Pointer to the vnode
 *              filesize                The size of the file
 *
 * Returns:     0                       Success
 *      vnode_size:ENOMEM               Not enough space
 *      vnode_size:???                  Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
        return (ubc_info_init_internal(vp, 1, filesize));
}
/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:  vp                      Pointer to the vnode
 *              withfsize{0,1}          Zero if the size should be obtained
 *                                      from the vnode; otherwise, use filesize
 *              filesize                The size of the file, if withfsize == 1
 *
 * Returns:     0                       Success
 *      vnode_size:ENOMEM               Not enough space
 *      vnode_size:???                  Other error from vnode_getattr
 *
 * Notes:       We call a blocking zalloc(), and the zone was created as an
 *              expandable and collectable zone, so if no memory is available,
 *              it is possible for zalloc() to block indefinitely.  zalloc()
 *              may also panic if the zone of zones is exhausted, since it's
 *              NOT expandable.
 *
 *              We unconditionally call vnode_pager_setup(), even if this is
 *              a reuse of a ubc_info; in that case, we should probably assert
 *              that it does not already have a pager association, but do not.
 *
 *              Since memory_object_create_named() can only fail from receiving
 *              an invalid pager argument, the explicit check and panic is
 *              merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
        struct ubc_info *uip;
        void *pager;
        int error = 0;
        kern_return_t kret;
        memory_object_control_t control;

        uip = vp->v_ubcinfo;

        /*
         * If there is not already a ubc_info attached to the vnode, we
         * attach one; otherwise, we will reuse the one that's there.
         */
        if (uip == UBC_INFO_NULL) {

                uip = (struct ubc_info *) zalloc(ubc_info_zone);
                bzero((char *)uip, sizeof(struct ubc_info));

                uip->ui_vnode = vp;
                uip->ui_flags = UI_INITED;
                uip->ui_ucred = NOCRED;
        }
        assert(uip->ui_flags != UI_NONE);
        assert(uip->ui_vnode == vp);

        /* now set this ubc_info in the vnode */
        vp->v_ubcinfo = uip;

        /*
         * Allocate a pager object for this vnode
         *
         * XXX The value of the pager parameter is currently ignored.
         * XXX Presumably, this API changed to avoid the race between
         * XXX setting the pager and the UI_HASPAGER flag.
         */
        pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
        assert(pager);

        /*
         * Explicitly set the pager into the ubc_info, after setting the
         * UI_HASPAGER flag.
         */
        SET(uip->ui_flags, UI_HASPAGER);
        uip->ui_pager = pager;

        /*
         * Note: We can not use VNOP_GETATTR() to get an accurate
         * value of ui_size because this may be an NFS vnode, and
         * nfs_getattr() can call vinvalbuf(); if this happens,
         * ubc_info is not set up to deal with that event.
         */

        /*
         * create a vnode - vm_object association
         * memory_object_create_named() creates a "named" reference on the
         * memory object; we hold this reference as long as the vnode is
         * "alive."  Since memory_object_create_named() took its own reference
         * on the vnode pager we passed it, we can drop the reference
         * vnode_pager_setup() returned here.
         */
        kret = memory_object_create_named(pager,
                                          (memory_object_size_t)uip->ui_size, &control);
        vnode_pager_deallocate(pager);
        if (kret != KERN_SUCCESS)
                panic("ubc_info_init: memory_object_create_named returned %d", kret);

        assert(control);
        uip->ui_control = control;              /* cache the value of the mo control */
        SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */

        if (withfsize == 0) {
                /* initialize the size */
                error = vnode_size(vp, &uip->ui_size, vfs_context_current());
                if (error)
                        uip->ui_size = 0;
        } else {
                uip->ui_size = filesize;
        }
        vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */

        return (error);
}
/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:  uip                     A pointer to the ubc_info to free
 *
 * Returns:     (void)
 *
 * Notes:       If there is a credential that has subsequently been associated
 *              with the ubc_info via a call to ubc_setcred(), the reference
 *              to the credential is dropped.
 *
 *              It's actually impossible for a ubc_info.ui_control to take the
 *              value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
        if (IS_VALID_CRED(uip->ui_ucred)) {
                kauth_cred_unref(&uip->ui_ucred);
        }

        if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
                memory_object_control_deallocate(uip->ui_control);

        cluster_release(uip);
        ubc_cs_free(uip);

        zfree(ubc_info_zone, uip);
}


void
ubc_info_deallocate(struct ubc_info *uip)
{
        ubc_info_free(uip);
}
errno_t
mach_to_bsd_errno(kern_return_t mach_err)
{
        switch (mach_err) {
        case KERN_SUCCESS:
                return 0;

        case KERN_INVALID_ADDRESS:
        case KERN_INVALID_ARGUMENT:
        case KERN_NOT_IN_SET:
        case KERN_INVALID_NAME:
        case KERN_INVALID_TASK:
        case KERN_INVALID_RIGHT:
        case KERN_INVALID_VALUE:
        case KERN_INVALID_CAPABILITY:
        case KERN_INVALID_HOST:
        case KERN_MEMORY_PRESENT:
        case KERN_INVALID_PROCESSOR_SET:
        case KERN_INVALID_POLICY:
        case KERN_ALREADY_WAITING:
        case KERN_DEFAULT_SET:
        case KERN_EXCEPTION_PROTECTED:
        case KERN_INVALID_LEDGER:
        case KERN_INVALID_MEMORY_CONTROL:
        case KERN_INVALID_SECURITY:
        case KERN_NOT_DEPRESSED:
        case KERN_LOCK_OWNED:
        case KERN_LOCK_OWNED_SELF:
                return EINVAL;

        case KERN_PROTECTION_FAILURE:
        case KERN_NOT_RECEIVER:
        case KERN_NO_ACCESS:
        case KERN_POLICY_STATIC:
                return EACCES;

        case KERN_NO_SPACE:
        case KERN_RESOURCE_SHORTAGE:
        case KERN_UREFS_OVERFLOW:
        case KERN_INVALID_OBJECT:
                return ENOMEM;

        case KERN_FAILURE:
                return EIO;

        case KERN_MEMORY_FAILURE:
        case KERN_POLICY_LIMIT:
        case KERN_CODESIGN_ERROR:
                return EPERM;

        case KERN_MEMORY_ERROR:
                return EBUSY;

        case KERN_ALREADY_IN_SET:
        case KERN_NAME_EXISTS:
        case KERN_RIGHT_EXISTS:
                return EEXIST;

        case KERN_ABORTED:
                return EINTR;

        case KERN_TERMINATED:
        case KERN_LOCK_SET_DESTROYED:
        case KERN_LOCK_UNSTABLE:
        case KERN_SEMAPHORE_DESTROYED:
                return ENOENT;

        case KERN_RPC_SERVER_TERMINATED:
                return ECONNRESET;

        case KERN_NOT_SUPPORTED:
                return ENOTSUP;

        case KERN_NODE_DOWN:
                return ENETDOWN;

        case KERN_NOT_WAITING:
                return ENOENT;

        case KERN_OPERATION_TIMED_OUT:
                return ETIMEDOUT;

        default:
                return EIO;
        }
}
/*
 * ubc_setsize_ex
 *
 * Tell the VM that the size of the file represented by the vnode has
 * changed
 *
 * Parameters:  vp                      The vp whose backing file size is
 *                                      being changed
 *              nsize                   The new size of the backing file
 *              opts                    Options
 *
 * Returns:     EINVAL for new size < 0
 *              ENOENT if no UBC info exists
 *              EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
 *              Other errors (mapped to errno_t) returned by VM functions
 *
 * Notes:       This function will indicate success if the new size is the
 *              same or larger than the old size (in this case, the
 *              remainder of the file will require modification or use of
 *              an existing upl to access successfully).
 *
 *              This function will fail if the new file size is smaller,
 *              and the memory region being invalidated was unable to
 *              actually be invalidated and/or the last page could not be
 *              flushed, if the new size is not aligned to a page
 *              boundary.  This is usually indicative of an I/O error.
 */
errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
        off_t osize;    /* ui_size before change */
        off_t lastpg, olastpgend, lastoff;
        struct ubc_info *uip;
        memory_object_control_t control;
        kern_return_t kret = KERN_SUCCESS;

        if (nsize < (off_t)0)
                return EINVAL;

        if (!UBCINFOEXISTS(vp))
                return ENOENT;

        uip = vp->v_ubcinfo;
        osize = uip->ui_size;

        if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
                return EAGAIN;

        /*
         * Update the size before flushing the VM
         */
        uip->ui_size = nsize;

        if (nsize >= osize) {   /* Nothing more to do */
                if (nsize > osize) {
                        lock_vnode_and_post(vp, NOTE_EXTEND);
                }

                return 0;
        }

        /*
         * When the file shrinks, invalidate the pages beyond the
         * new size. Also get rid of garbage beyond nsize on the
         * last page. The ui_size already has the nsize, so any
         * subsequent page-in will zero-fill the tail properly
         */
        lastpg = trunc_page_64(nsize);
        olastpgend = round_page_64(osize);
        control = uip->ui_control;
        assert(control);
        lastoff = (nsize & PAGE_MASK_64);

        if (lastoff) {
                upl_t           upl;
                upl_page_info_t *pl;

                /*
                 * new EOF ends up in the middle of a page
                 * zero the tail of this page if it's currently
                 * present in the cache
                 */
                kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE, VM_KERN_MEMORY_FILE);

                if (kret != KERN_SUCCESS)
                        panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);

                if (upl_valid_page(pl, 0))
                        cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);

                ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

                lastpg += PAGE_SIZE_64;
        }
        if (olastpgend > lastpg) {
                int flags;

                if (lastpg == 0)
                        flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
                else
                        flags = MEMORY_OBJECT_DATA_FLUSH;
                /*
                 * invalidate the pages beyond the new EOF page
                 */
                kret = memory_object_lock_request(control,
                                                  (memory_object_offset_t)lastpg,
                                                  (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
                                                  MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
                if (kret != KERN_SUCCESS)
                        printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
        }
        return mach_to_bsd_errno(kret);
}

// Returns true for success
int ubc_setsize(vnode_t vp, off_t nsize)
{
        return ubc_setsize_ex(vp, nsize, 0) == 0;
}
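/*
 * Example (sketch): a hypothetical filesystem truncate path informing the
 * UBC of the new size before releasing on-disk blocks.  "my_fs_truncate"
 * is illustrative only, so the block is compiled out.
 */
#if 0
static int
my_fs_truncate(vnode_t vp, off_t new_size)
{
        /* shrink the cached object first so stale tail pages are gone */
        if (ubc_setsize(vp, new_size) == 0)     /* returns true for success */
                return EIO;

        /* ... now free the on-disk blocks beyond new_size ... */
        return 0;
}
#endif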
/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:  vp                      The vnode whose size is of interest
 *
 * Returns:     0                       There is no ubc_info associated with
 *                                      this vnode, or the size is zero
 *              !0                      The size of the file
 *
 * Notes:       Using this routine, it is not possible for a caller to
 *              successfully distinguish between a vnode associated with a
 *              zero length file and a vnode with no associated ubc_info.
 *              The caller therefore needs to not care, or needs to ensure
 *              that they have previously successfully called ubc_info_init()
 *              or ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
        /* people depend on the side effect of this working this way
         * as they call this for directories
         */
        if (!UBCINFOEXISTS(vp))
                return ((off_t)0);
        return (vp->v_ubcinfo->ui_size);
}
/*
 * ubc_umount
 *
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:  mp                      The mount point
 *
 * Returns:     0                       Success
 *
 * Notes:       There is no failure indication for this function.
 *
 *              This function is used in the unmount path; since it may block
 *              I/O indefinitely, it should not be used in the forced unmount
 *              path, since a device unavailability could also block that
 *              I/O indefinitely.
 *
 *              Because there is no device ejection interlock on USB, FireWire,
 *              or similar devices, it's possible that an ejection that begins
 *              subsequent to the vnode_iterate() completing, either on one of
 *              those devices, or a network mount for which the server quits
 *              responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
        vnode_iterate(mp, 0, ubc_umcallback, 0);
        return (0);
}


/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void *args)
{
        if (UBCINFOEXISTS(vp)) {

                (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
        }
        return (VNODE_RETURNED);
}
/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:  vp                      The vnode whose ubc_info credentials
 *                                      are to be retrieved
 *
 * Returns:     !NOCRED                 The credentials
 *              NOCRED                  If there is no ubc_info for the vnode,
 *                                      or if there is one, but it has not had
 *                                      any credentials associated with it via
 *                                      a call to ubc_setcred()
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
        if (UBCINFOEXISTS(vp))
                return (vp->v_ubcinfo->ui_ucred);

        return (NOCRED);
}
/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
 *
 * Parameters:  vp                      The vnode whose ubc_info creds are to
 *                                      be set
 *              p                       The process whose credentials are to
 *                                      be used, if not running on an assumed
 *                                      credential
 *              thread                  The thread whose credentials are to
 *                                      be used
 *
 * Returns:     1                       This vnode has no associated ubc_info
 *              0                       Success
 *
 * Notes:       This function takes a proc parameter to account for bootstrap
 *              issues where a task or thread may call this routine, either
 *              before credentials have been initialized by bsd_init(), or if
 *              there is no BSD info associated with a mach thread yet.  This
 *              is known to happen in both the initial swap and memory mapping
 *              paths.
 *
 *              This function is generally used only in the following cases:
 *
 *              o       a memory mapped file via the mmap() system call
 *              o       a swap store backing file
 *              o       subsequent to a successful write via vn_write()
 *
 *              The information is then used by the NFS client in order to
 *              cons up a wire message in either the page-in or page-out path.
 *
 *              There are two potential problems with the use of this API:
 *
 *              o       Because the write path only sets it on a successful
 *                      write, there is a race window between setting the
 *                      credential and its use to evict the pages to the
 *                      remote file server
 *
 *              o       Because a page-in may occur prior to a write, the
 *                      credential may not be set at this time, if the page-in
 *                      is not the result of a mapping established via mmap().
 *
 *              In both these cases, this will be triggered from the paging
 *              path, which will instead use the credential of the current
 *              process, which in this case is either the dynamic_pager or
 *              the kernel task, both of which utilize "root" credentials.
 *
 *              This may potentially permit operations to occur which should
 *              be denied, or it may cause to be denied operations which
 *              should be permitted, depending on the configuration of the NFS
 *              server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
        struct ubc_info *uip;
        kauth_cred_t credp;
        struct uthread *uthread = get_bsdthread_info(thread);

        if (!UBCINFOEXISTS(vp))
                return (1);

        vnode_lock(vp);

        uip = vp->v_ubcinfo;
        credp = uip->ui_ucred;

        if (!IS_VALID_CRED(credp)) {
                /* use per-thread cred, if assumed identity, else proc cred */
                if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
                        uip->ui_ucred = kauth_cred_proc_ref(p);
                } else {
                        uip->ui_ucred = uthread->uu_ucred;
                        kauth_cred_ref(uip->ui_ucred);
                }
        }
        vnode_unlock(vp);

        return (0);
}
/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the process; otherwise leave them
 * alone.
 *
 * Parameters:  vp                      The vnode whose ubc_info creds are to
 *                                      be set
 *              p                       The process whose credentials are to
 *                                      be used
 *
 * Returns:     0                       This vnode has no associated ubc_info
 *              1                       Success
 *
 * Notes:       The return values for this function are inverted from nearly
 *              all other uses in the kernel.
 *
 *              See also ubc_setthreadcred(), above.
 *
 *              This function is considered deprecated, and generally should
 *              not be used, as it is incompatible with per-thread credentials;
 *              it exists for legacy KPI reasons.
 *
 * DEPRECATION: ubc_setcred() is being deprecated. Please use
 *              ubc_setthreadcred() instead.
 */
int
ubc_setcred(struct vnode *vp, proc_t p)
{
        struct ubc_info *uip;
        kauth_cred_t credp;

        /* If there is no ubc_info, deny the operation */
        if ( !UBCINFOEXISTS(vp))
                return (0);

        /*
         * Check to see if there is already a credential reference in the
         * ubc_info; if there is not, take one on the supplied credential.
         */
        vnode_lock(vp);
        uip = vp->v_ubcinfo;
        credp = uip->ui_ucred;
        if (!IS_VALID_CRED(credp)) {
                uip->ui_ucred = kauth_cred_proc_ref(p);
        }
        vnode_unlock(vp);

        return (1);
}
/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:  vp                      The vnode to obtain the pager from
 *
 * Returns:     !VNODE_PAGER_NULL       The memory_object_t for the pager
 *              VNODE_PAGER_NULL        There is no ubc_info for this vnode
 *
 * Notes:       For each vnode that has a ubc_info associated with it, that
 *              ubc_info SHALL have a pager associated with it, so in the
 *              normal case, it's impossible to return VNODE_PAGER_NULL for
 *              a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
        if (UBCINFOEXISTS(vp))
                return (vp->v_ubcinfo->ui_pager);

        return (0);
}
/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:  vp                      The vnode to obtain the memory object
 *                                      from
 *              flags                   DEPRECATED
 *
 * Returns:     !MEMORY_OBJECT_CONTROL_NULL
 *              MEMORY_OBJECT_CONTROL_NULL
 *
 * Notes:       Historically, if the flags were not "do not reactivate", this
 *              function would look up the memory object using the pager if
 *              it did not exist (this could be the case if the vnode had
 *              been previously reactivated).  The flags would also permit a
 *              hold to be requested, which would have created an object
 *              reference, if one had not already existed.  This usage is
 *              deprecated, as it would permit a race between finding and
 *              taking the reference vs. a single reference being dropped in
 *              another thread.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
        if (UBCINFOEXISTS(vp))
                return ((vp->v_ubcinfo->ui_control));

        return (MEMORY_OBJECT_CONTROL_NULL);
}
/*
 * ubc_blktooff
 *
 * Convert a given block number to a memory backing object (file) offset for a
 * given vnode
 *
 * Parameters:  vp                      The vnode in which the block is located
 *              blkno                   The block number to convert
 *
 * Returns:     !-1                     The offset into the backing object
 *              -1                      There is no ubc_info associated with
 *                                      the vnode
 *              -1                      An error occurred in the underlying VFS
 *                                      while translating the block to an
 *                                      offset; the most likely cause is that
 *                                      the caller specified a block past the
 *                                      end of the file, but this could also be
 *                                      any other error from VNOP_BLKTOOFF().
 *
 * Note:        Representing the error in band loses some information, but does
 *              not occlude a valid offset, since an off_t of -1 is normally
 *              used to represent EOF.  If we had a more reliable constant in
 *              our header files for it (i.e. explicitly cast to an off_t), we
 *              would use it here instead.
 */
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
        off_t file_offset = -1;
        int error;

        if (UBCINFOEXISTS(vp)) {
                error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
                if (error)
                        file_offset = -1;
        }

        return (file_offset);
}
/*
 * ubc_offtoblk
 *
 * Convert a given offset in a memory backing object into a block number for a
 * given vnode
 *
 * Parameters:  vp                      The vnode in which the offset is
 *                                      located
 *              offset                  The offset into the backing object
 *
 * Returns:     !-1                     The returned block number
 *              -1                      There is no ubc_info associated with
 *                                      the vnode
 *              -1                      An error occurred in the underlying VFS
 *                                      while translating the offset to a
 *                                      block number; the most likely cause is
 *                                      that the caller specified an offset
 *                                      past the end of the file, but this
 *                                      could also be any other error from
 *                                      VNOP_OFFTOBLK().
 *
 * Note:        Representing the error in band loses some information, but does
 *              not occlude a valid block number, since block numbers exceed
 *              the valid range for offsets, due to their relative sizes.  If
 *              we had a more reliable constant than -1 in our header files
 *              for it (i.e. explicitly cast to a daddr64_t), we would use it
 *              here instead.
 */
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
        daddr64_t blkno = -1;
        int error = 0;

        if (UBCINFOEXISTS(vp)) {
                error = VNOP_OFFTOBLK(vp, offset, &blkno);
                if (error)
                        blkno = -1;
        }

        return (blkno);
}
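/*
 * Example (sketch): translating between logical blocks and file offsets;
 * both helpers signal failure in band with -1.  Compiled out.
 */
#if 0
static void
example_translate(vnode_t vp)
{
        off_t off = ubc_blktooff(vp, (daddr64_t)0);     /* offset of block 0 */

        if (off != -1) {
                daddr64_t blk = ubc_offtoblk(vp, off);  /* ... and back */
                (void)blk;
        }
}
#endif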
/*
 * ubc_pages_resident
 *
 * Determine whether or not a given vnode has pages resident via the memory
 * object control associated with the ubc_info associated with the vnode
 *
 * Parameters:  vp                      The vnode we want to know about
 *
 * Returns:     1                       Yes
 *              0                       No
 */
int
ubc_pages_resident(vnode_t vp)
{
        kern_return_t   kret;
        boolean_t       has_pages_resident;

        if (!UBCINFOEXISTS(vp))
                return (0);

        /*
         * The following call may fail if an invalid ui_control is specified,
         * or if there is no VM object associated with the control object.  In
         * either case, reacting to it as if there were no pages resident will
         * result in correct behavior.
         */
        kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

        if (kret != KERN_SUCCESS)
                return (0);

        if (has_pages_resident == TRUE)
                return (1);

        return (0);
}
/*
 * ubc_msync
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:  vp                      The vnode whose associated ubc_info's
 *                                      associated memory object is to have a
 *                                      range invalidated within it
 *              beg_off                 The start of the range, as an offset
 *              end_off                 The end of the range, as an offset
 *              resid_off               The address of an off_t supplied by the
 *                                      caller; may be set to NULL to ignore
 *              flags                   See ubc_msync_internal()
 *
 * Returns:     0                       Success
 *              !0                      Failure; an errno is returned
 *
 * Implicit Returns:
 *              *resid_off, modified    If non-NULL, the contents are ALWAYS
 *                                      modified; they are initialized to the
 *                                      beg_off, and in case of an I/O error,
 *                                      the difference between beg_off and the
 *                                      current value will reflect what was
 *                                      able to be written before the error
 *                                      occurred.  If no error is returned, the
 *                                      value of the resid_off is undefined; do
 *                                      NOT use it in place of end_off if you
 *                                      intend to increment from the end of the
 *                                      last call and call iteratively.
 *
 * Notes:       see ubc_msync_internal() for more detailed information.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
        int retval;
        int io_errno = 0;

        if (resid_off)
                *resid_off = beg_off;

        retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

        if (retval == 0 && io_errno == 0)
                return (EINVAL);
        return (io_errno);
}
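/*
 * Example (sketch): an fsync-style caller pushing every dirty page of a
 * file synchronously.  Compiled out; the error handling is illustrative.
 */
#if 0
static errno_t
example_flush(vnode_t vp)
{
        off_t resid = 0;

        /* on error, (resid - 0) is how much was cleaned before the failure */
        return ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
                         UBC_PUSHDIRTY | UBC_SYNC);
}
#endif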
/*
 * ubc_msync_internal
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:  vp                      The vnode whose associated ubc_info's
 *                                      associated memory object is to have a
 *                                      range invalidated within it
 *              beg_off                 The start of the range, as an offset
 *              end_off                 The end of the range, as an offset
 *              resid_off               The address of an off_t supplied by the
 *                                      caller; may be set to NULL to ignore
 *              flags                   MUST contain at least one of the flags
 *                                      UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *                                      UBC_PUSHALL; if UBC_PUSHDIRTY is used,
 *                                      UBC_SYNC may also be specified to cause
 *                                      this function to block until the
 *                                      operation is complete.  The behavior
 *                                      of UBC_SYNC is otherwise undefined.
 *              io_errno                The address of an int to contain the
 *                                      errno from a failed I/O operation, if
 *                                      one occurs; may be set to NULL to
 *                                      ignore
 *
 * Returns:     1                       Success
 *              0                       Failure
 *
 * Implicit Returns:
 *              *resid_off, modified    The contents of this offset MAY be
 *                                      modified; in case of an I/O error, the
 *                                      difference between beg_off and the
 *                                      current value will reflect what was
 *                                      able to be written before the error
 *                                      occurred.
 *              *io_errno, modified     The contents of this offset are set to
 *                                      an errno, if an error occurs; if the
 *                                      caller supplies an io_errno parameter,
 *                                      they should be careful to initialize it
 *                                      to 0 before calling this function to
 *                                      enable them to distinguish an error
 *                                      with a valid *resid_off from an invalid
 *                                      one, and to avoid potentially falsely
 *                                      reporting an error, depending on use.
 *
 * Notes:       If there is no ubc_info associated with the vnode supplied,
 *              this function immediately returns success.
 *
 *              If the value of end_off is less than or equal to beg_off, this
 *              function immediately returns success; that is, end_off is NOT
 *              inclusive.
 *
 *              IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *              UBC_PUSHALL MUST be specified; that is, it is NOT possible to
 *              attempt to block on in-progress I/O by calling this function
 *              with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
 *              in order to block pending on the I/O already in progress.
 *
 *              The start offset is truncated to the page boundary and the
 *              size is adjusted to include the last page in the range; that
 *              is, end_off on exactly a page boundary will not change if it
 *              is rounded, and the range of bytes written will be from the
 *              truncated beg_off to the rounded (end_off - 1).
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
        memory_object_size_t    tsize;
        kern_return_t           kret;
        int request_flags = 0;
        int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

        if ( !UBCINFOEXISTS(vp))
                return (0);
        if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
                return (0);
        if (end_off <= beg_off)
                return (1);

        if (flags & UBC_INVALIDATE)
                /*
                 * discard the resident pages
                 */
                request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

        if (flags & UBC_SYNC)
                /*
                 * wait for all the I/O to complete before returning
                 */
                request_flags |= MEMORY_OBJECT_IO_SYNC;

        if (flags & UBC_PUSHDIRTY)
                /*
                 * we only return the dirty pages in the range
                 */
                flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

        if (flags & UBC_PUSHALL)
                /*
                 * then return all the interesting pages in the range (both
                 * dirty and precious) to the pager
                 */
                flush_flags = MEMORY_OBJECT_RETURN_ALL;

        beg_off = trunc_page_64(beg_off);
        end_off = round_page_64(end_off);
        tsize   = (memory_object_size_t)end_off - beg_off;

        /* flush and/or invalidate pages in the range requested */
        kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
                                          beg_off, tsize,
                                          (memory_object_offset_t *)resid_off,
                                          io_errno, flush_flags, request_flags,
                                          VM_PROT_NO_CHANGE);

        return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * ubc_map
 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
 * to it for the ubc system, if there isn't one already, so it will not be
 * recycled while it's in use, and set flags on the ubc_info to indicate that
 * we have done this
 *
 * Parameters:  vp                      The vnode to map
 *              flags                   The mapping flags for the vnode; this
 *                                      will be a combination of one or more of
 *                                      PROT_READ, PROT_WRITE, and PROT_EXEC
 *
 * Returns:     0                       Success
 *              EPERM                   Permission was denied
 *
 * Notes:       An I/O reference on the vnode must already be held on entry
 *
 *              If there is no ubc_info associated with the vnode, this function
 *              will return success.
 *
 *              If a permission error occurs, this function will return
 *              failure; all other failures will cause this function to return
 *              success.
 *
 *              IMPORTANT: This is an internal use function, and its symbols
 *              are not exported, hence its error checking is not very robust.
 *              It is primarily used by:
 *
 *              o       mmap(), when mapping a file
 *              o       When mapping a shared file (a shared library in the
 *                      shared segment region)
 *              o       When loading a program image during the exec process
 *
 *              ...all of these uses ignore the return code, and any fault that
 *              results later because of a failure is handled in the fix-up path
 *              of the fault handler.  The interface exists primarily as a
 *              performance hint.
 *
 *              Given that third party implementation of the type of interfaces
 *              that would use this function, such as alternative executable
 *              formats, etc., are unsupported, this function is not exported
 *              for general use.
 *
 *              The extra reference is held until the VM system unmaps the
 *              vnode from its own context to maintain a vnode reference in
 *              cases like open()/mmap()/close(), which leave the backing
 *              object referenced by a mapped memory region in a process
 *              address space.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
        struct ubc_info *uip;
        int error = 0;
        int need_ref = 0;
        int need_wakeup = 0;

        if (UBCINFOEXISTS(vp)) {

                vnode_lock(vp);
                uip = vp->v_ubcinfo;

                while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
                        SET(uip->ui_flags, UI_MAPWAITING);
                        (void) msleep(&uip->ui_flags, &vp->v_lock,
                                      PRIBIO, "ubc_map", NULL);
                }
                SET(uip->ui_flags, UI_MAPBUSY);
                vnode_unlock(vp);

                error = VNOP_MMAP(vp, flags, vfs_context_current());

                /*
                 * rdar://problem/22587101 required that we stop propagating
                 * EPERM up the stack. Otherwise, we would have to funnel up
                 * the error at all the call sites for memory_object_map().
                 * The risk is in having to undo the map/object/entry state at
                 * all these call sites. It would also affect more than just mmap()
                 *
                 * if (error != EPERM)
                 *      error = 0;
                 */

                error = 0;

                vnode_lock_spin(vp);

                if (error == 0) {
                        if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
                                need_ref = 1;
                        SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
                        if (flags & PROT_WRITE) {
                                SET(uip->ui_flags, UI_MAPPEDWRITE);
                        }
                }
                CLR(uip->ui_flags, UI_MAPBUSY);

                if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
                        CLR(uip->ui_flags, UI_MAPWAITING);
                        need_wakeup = 1;
                }
                vnode_unlock(vp);

                if (need_wakeup)
                        wakeup(&uip->ui_flags);

                if (need_ref) {
                        /*
                         * Make sure we get a ref as we can't unwind from here
                         */
                        if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE))
                                panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
                }
        }
        return (error);
}
/*
 * ubc_destroy_named
 *
 * Destroy the named memory object associated with the ubc_info control object
 * associated with the designated vnode, if there is a ubc_info associated
 * with the vnode, and a control object is associated with it
 *
 * Parameters:  vp                      The designated vnode
 *
 * Returns:     (void)
 *
 * Notes:       This function is called on vnode termination for all vnodes,
 *              and must therefore not assume that there is a ubc_info that is
 *              associated with the vnode, nor that there is a control object
 *              associated with the ubc_info.
 *
 *              If all the conditions necessary are present, this function
 *              calls memory_object_destroy(), which will in turn end up
 *              calling ubc_unmap() to release any vnode references that were
 *              established via ubc_map().
 *
 *              IMPORTANT: This is an internal use function that is used
 *              exclusively by the internal use function vclean().
 */
__private_extern__ void
ubc_destroy_named(vnode_t vp)
{
        memory_object_control_t control;
        struct ubc_info *uip;
        kern_return_t kret;

        if (UBCINFOEXISTS(vp)) {
                uip = vp->v_ubcinfo;

                /* Terminate the memory object */
                control = ubc_getobject(vp, UBC_HOLDOBJECT);
                if (control != MEMORY_OBJECT_CONTROL_NULL) {
                        kret = memory_object_destroy(control, 0);
                        if (kret != KERN_SUCCESS)
                                panic("ubc_destroy_named: memory_object_destroy failed");
                }
        }
}
/*
 * ubc_isinuse
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:  vp                      The vnode to check
 *              busycount               The threshold busy count, used to bias
 *                                      the count usually already held by the
 *                                      caller to avoid races
 *
 * Returns:     1                       The vnode is in use over the threshold
 *              0                       The vnode is not in use over the
 *                                      threshold
 *
 * Notes:       Because the vnode is only held locked while actually asking
 *              the use count, this function only represents a snapshot of the
 *              current state of the vnode.  If more accurate information is
 *              required, an additional busycount should be held by the caller
 *              and a non-zero busycount used.
 *
 *              If there is no ubc_info associated with the vnode, this
 *              function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
        if ( !UBCINFOEXISTS(vp))
                return (0);
        return (ubc_isinuse_locked(vp, busycount, 0));
}
/*
 * ubc_isinuse_locked
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:  vp                      The vnode to check
 *              busycount               The threshold busy count, used to bias
 *                                      the count usually already held by the
 *                                      caller to avoid races
 *              locked                  True if the vnode is already locked by
 *                                      the caller
 *
 * Returns:     1                       The vnode is in use over the threshold
 *              0                       The vnode is not in use over the
 *                                      threshold
 *
 * Notes:       If the vnode is not locked on entry, it is locked while
 *              actually asking the use count.  If this is the case, this
 *              function only represents a snapshot of the current state of
 *              the vnode.  If more accurate information is required, the
 *              vnode lock should be held by the caller, otherwise an
 *              additional busycount should be held by the caller and a
 *              non-zero busycount used.
 *
 *              If there is no ubc_info associated with the vnode, this
 *              function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
        int retval = 0;

        if (!locked)
                vnode_lock_spin(vp);

        if ((vp->v_usecount - vp->v_kusecount) > busycount)
                retval = 1;

        if (!locked)
                vnode_unlock(vp);

        return (retval);
}
/*
 * ubc_unmap
 *
 * Reverse the effects of a ubc_map() call for a given vnode
 *
 * Parameters:  vp                      vnode to unmap from ubc
 *
 * Returns:     (void)
 *
 * Notes:       This is an internal use function used by vnode_pager_unmap().
 *              It will attempt to obtain a reference on the supplied vnode,
 *              and if it can do so, and there is an associated ubc_info, and
 *              the flags indicate that it was mapped via ubc_map(), then the
 *              flag is cleared, the mapping removed, and the reference taken
 *              by ubc_map() is released.
 *
 *              IMPORTANT: This MUST only be called by the VM
 *              to prevent race conditions.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
        struct ubc_info *uip;
        int need_rele = 0;
        int need_wakeup = 0;

        if (vnode_getwithref(vp))
                return;

        if (UBCINFOEXISTS(vp)) {
                bool want_fsevent = false;

                vnode_lock(vp);
                uip = vp->v_ubcinfo;

                while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
                        SET(uip->ui_flags, UI_MAPWAITING);
                        (void) msleep(&uip->ui_flags, &vp->v_lock,
                                      PRIBIO, "ubc_unmap", NULL);
                }
                SET(uip->ui_flags, UI_MAPBUSY);

                if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
                        if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
                                want_fsevent = true;

                        need_rele = 1;

                        /*
                         * We want to clear the mapped flags after we've called
                         * VNOP_MNOMAP to avoid certain races and allow
                         * VNOP_MNOMAP to call ubc_is_mapped_writable.
                         */
                }
                vnode_unlock(vp);

                if (need_rele) {
                        vfs_context_t ctx = vfs_context_current();

                        (void)VNOP_MNOMAP(vp, ctx);

#if CONFIG_FSE
                        /*
                         * Why do we want an fsevent here?  Normally the
                         * content modified fsevent is posted when a file is
                         * closed and only if it's written to via conventional
                         * means.  It's perfectly legal to close a file and
                         * keep your mappings and we don't currently track
                         * whether it was written to via a mapping.
                         * Therefore, we need to post an fsevent here if the
                         * file was mapped writable.  This may result in false
                         * events, i.e. we post a notification when nothing
                         * has really changed.
                         */
                        if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
                                add_fsevent(FSE_CONTENT_MODIFIED, ctx,
                                            FSE_ARG_VNODE, vp,
                                            FSE_ARG_DONE);
                        }
#endif

                        vnode_rele(vp);
                }

                vnode_lock_spin(vp);

                if (need_rele)
                        CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);

                CLR(uip->ui_flags, UI_MAPBUSY);

                if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
                        CLR(uip->ui_flags, UI_MAPWAITING);
                        need_wakeup = 1;
                }
                vnode_unlock(vp);

                if (need_wakeup)
                        wakeup(&uip->ui_flags);
        }
        /*
         * the drop of the vnode ref will cleanup
         */
        vnode_put(vp);
}
/*
 * ubc_page_op
 *
 * Manipulate individual page state for a vnode with an associated ubc_info
 * with an associated memory object control.
 *
 * Parameters:  vp                      The vnode backing the page
 *              f_offset                A file offset interior to the page
 *              ops                     The operations to perform, as a bitmap
 *                                      (see below for more information)
 *              phys_entryp             The address of a ppnum_t; may be NULL
 *                                      to ignore
 *              flagsp                  A pointer to an int to contain flags;
 *                                      may be NULL to ignore
 *
 * Returns:     KERN_SUCCESS            Success
 *              KERN_INVALID_ARGUMENT   If the memory object control has no VM
 *                                      object associated
 *              KERN_INVALID_OBJECT     If UPL_POP_PHYSICAL and the object is
 *                                      not physically contiguous
 *              KERN_INVALID_OBJECT     If !UPL_POP_PHYSICAL and the object is
 *                                      physically contiguous
 *              KERN_FAILURE            If the page cannot be looked up
 *
 * Implicit Returns:
 *              *phys_entryp (modified) If phys_entryp is non-NULL and
 *                                      UPL_POP_PHYSICAL
 *              *flagsp (modified)      If flagsp is non-NULL and there was
 *                                      !UPL_POP_PHYSICAL and a KERN_SUCCESS
 *
 * Notes:       For object boundaries, it is considerably more efficient to
 *              ensure that f_offset is in fact on a page boundary, as this
 *              will avoid internal use of the hash table to identify the
 *              page, and would therefore skip a number of early optimizations.
 *              Since this is a page operation anyway, the caller should try
 *              to pass only a page aligned offset because of this.
 *
 *              *flagsp may be modified even if this function fails.  If it is
 *              modified, it will contain the condition of the page before the
 *              requested operation was attempted; these will only include the
 *              bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
 *              UPL_POP_SET, or UPL_POP_CLR bits.
 *
 *              The flags field may contain a specific operation, such as
 *              UPL_POP_PHYSICAL or UPL_POP_DUMP:
 *
 *              o       UPL_POP_PHYSICAL        Fail if not contiguous; if
 *                                              *phys_entryp and successful, set
 *                                              *phys_entryp
 *              o       UPL_POP_DUMP            Dump the specified page
 *
 *              Otherwise, it is treated as a bitmap of one or more page
 *              operations to perform on the final memory object; allowable
 *              bit values are:
 *
 *              o       UPL_POP_DIRTY           The page is dirty
 *              o       UPL_POP_PAGEOUT         The page is paged out
 *              o       UPL_POP_PRECIOUS        The page is precious
 *              o       UPL_POP_ABSENT          The page is absent
 *              o       UPL_POP_BUSY            The page is busy
 *
 *              If the page status is only being queried and not modified, then
 *              no other bits should be specified.  However, if it is being
 *              modified, exactly ONE of the following bits should be set:
 *
 *              o       UPL_POP_SET             Set the current bitmap bits
 *              o       UPL_POP_CLR             Clear the current bitmap bits
 *
 *              Thus to effect a combination of setting and clearing, it may be
 *              necessary to call this function twice.  If this is done, the
 *              set should be used before the clear, since clearing may trigger
 *              a wakeup on the destination page, and if the page is backed by
 *              an encrypted swap file, setting will trigger the decryption
 *              needed before the wakeup occurs.
 */
kern_return_t
ubc_page_op(
        struct vnode    *vp,
        off_t           f_offset,
        int             ops,
        ppnum_t         *phys_entryp,
        int             *flagsp)
{
        memory_object_control_t control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_page_op(control,
                                      (memory_object_offset_t)f_offset,
                                      ops,
                                      phys_entryp,
                                      flagsp));
}
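/*
 * Example (sketch): query-then-modify use of ubc_page_op() following the
 * rules above: a pure query passes no UPL_POP_SET/UPL_POP_CLR bits, while a
 * modification passes exactly one of them.  Compiled out.
 */
#if 0
static void
example_mark_dirty_page_busy(vnode_t vp, off_t f_offset)
{
        int flags = 0;

        if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS &&
            (flags & UPL_POP_DIRTY)) {
                /* modifying: set before any later clear, per the note above */
                (void) ubc_page_op(vp, f_offset, UPL_POP_SET | UPL_POP_BUSY,
                                   NULL, NULL);
        }
}
#endif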

/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset_beg		A file offset interior to the start page
 *		f_offset_end		A file offset interior to the end page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		range			The address of an int; may be NULL to
 *					ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *
 *		o UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *
 *		o UPL_ROP_DUMP		Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}
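
/*
 * Editorial sketch, not part of the original source: probing how many bytes
 * starting at an offset are resident, using UPL_ROP_PRESENT.  The helper
 * name is an assumption for illustration only.
 */
#if 0 /* example only */
static int
example_resident_bytes(struct vnode *vp, off_t start, off_t end)
{
	int resident_bytes = 0;

	if (ubc_range_op(vp, start, end, UPL_ROP_PRESENT,
			 &resident_bytes) != KERN_SUCCESS)
		return 0;

	/* number of bytes of the first present region, starting at "start" */
	return resident_bytes;
}
#endif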

/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
 *
 * Parameters:	vp			The vnode from which to create the upl
 *		f_offset		The start offset into the backing store
 *					represented by the vnode
 *		bufsize			The size of the upl to create
 *		uplp			Pointer to the upl_t to receive the
 *					created upl; MUST NOT be NULL
 *		plp			Pointer to receive the internal page
 *					list for the created upl; MAY be NULL
 *					to ignore
 *
 * Returns:	KERN_SUCCESS		The requested upl has been created
 *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
 *					multiple of the page size
 *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
 *					the vnode, or there is no memory object
 *					control associated with the ubc_info
 *	memory_object_upl_request:KERN_INVALID_VALUE
 *					The supplied upl_flags argument is
 *					invalid
 *
 * Implicit Returns:
 *		*uplp (modified)
 *		*plp (modified)		If non-NULL, the value of *plp will be
 *					modified to point to the internal page
 *					list; this modification may occur even
 *					if this function is unsuccessful, in
 *					which case the contents may be invalid
 *
 * Note:	If successful, the returned *uplp MUST subsequently be freed
 *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
 *		ubc_upl_abort(), or ubc_upl_abort_range().
 */
kern_return_t
ubc_create_upl_external(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	return (ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt()));
}

kern_return_t
ubc_create_upl_kernel(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags,
	vm_tag_t	tag)
{
	memory_object_control_t	control;
	kern_return_t		kr;

	if (plp != NULL)
		*plp = NULL;
	*uplp = NULL;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (bufsize > MAX_UPL_SIZE_BYTES)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {

		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY)
				uplflags |= UPL_NOBLOCK;

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
				    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			uplflags |= UPL_RET_ONLY_ABSENT |
				    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE)
				uplflags |= UPL_NOBLOCK;
		}
	} else {
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
		} else
			uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
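
/*
 * Editorial sketch, not part of the original source: the typical lifecycle
 * of a upl created through this interface is create, optionally map/unmap,
 * then commit or abort exactly once.  The helper name and the choice of
 * VM_KERN_MEMORY_FILE as the tag are assumptions for illustration only.
 */
#if 0 /* example only */
static kern_return_t
example_upl_lifecycle(struct vnode *vp, off_t f_offset)
{
	upl_t		upl;
	upl_page_info_t	*pl;
	vm_offset_t	kva;
	kern_return_t	kr;

	kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
				   UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = ubc_upl_map(upl, &kva);
	if (kr == KERN_SUCCESS) {
		/* ... inspect the mapped pages at kva here ... */
		(void) ubc_upl_unmap(upl);
	}
	/* a upl must always be committed or aborted exactly once */
	return ubc_upl_abort(upl, UPL_ABORT_ERROR);
}
#endif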

/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl( ) will take.
 *
 * Parameters:	none
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
 */
upl_size_t
ubc_upl_maxbufsize(
	void)
{
	return(MAX_UPL_SIZE_BYTES);
}

/*
 * ubc_upl_map
 *
 * Map the page list associated with the supplied upl into the kernel virtual
 * address space at the virtual address indicated by the dst_addr argument;
 * the entire upl is mapped
 *
 * Parameters:	upl			The upl to map
 *		dst_addr		The address at which to map the upl
 *
 * Returns:	KERN_SUCCESS		The upl has been mapped
 *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
 *		KERN_FAILURE		The upl is already mapped
 *	vm_map_enter:KERN_INVALID_ARGUMENT
 *					A failure code from vm_map_enter() due
 *					to an invalid argument
 */
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}

/*
 * ubc_upl_unmap
 *
 * Unmap the page list associated with the supplied upl from the kernel virtual
 * address space; the entire upl is unmapped.
 *
 * Parameters:	upl			The upl to unmap
 *
 * Returns:	KERN_SUCCESS		The upl has been unmapped
 *		KERN_FAILURE		The upl is not currently mapped
 *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
 */
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}

/*
 * ubc_upl_commit
 *
 * Commit the contents of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *
 * Returns:	KERN_SUCCESS		The upl has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	In practice, the only return value for this function should be
 *		KERN_SUCCESS, unless there has been data structure corruption;
 *		since the upl is deallocated regardless of success or failure,
 *		there's really nothing to do about this other than panic.
 *
 *		IMPORTANT: Use of this function should not be mixed with use of
 *		ubc_upl_commit_range(), due to the unconditional deallocation
 *		by this function.
 */
kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
	upl_deallocate(upl);
	return kr;
}

/*
 * ubc_upl_commit_range
 *
 * Commit the contents of the specified range of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *		offset			The offset into the upl
 *		size			The size of the region to be committed,
 *					starting at the specified offset
 *		flags			commit type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the commit is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *						both empty and has been
 *						successfully committed
 *		o UPL_COMMIT_CLEAR_DIRTY	Clear each page's dirty
 *						bit; will prevent a
 *						later pageout
 *		o UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *						bit; will cause a later
 *						pageout
 *		o UPL_COMMIT_INACTIVATE		Clear each page's
 *						reference bit; the page
 *						will not be accessed
 *		o UPL_COMMIT_ALLOW_ACCESS	Unbusy each page; pages
 *						become busy when an
 *						IOMemoryDescriptor is
 *						mapped or redirected,
 *						and we have to wait for
 *						the I/O to complete before
 *						we can use the page
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
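
/*
 * Editorial sketch, not part of the original source: committing a whole upl
 * with UPL_COMMIT_FREE_ON_EMPTY, the common pattern when the offset is 0 and
 * the size covers the entire upl, so the upl is also freed on success.  The
 * helper name is an assumption for illustration only.
 */
#if 0 /* example only */
static void
example_commit_whole_upl(upl_t upl, upl_size_t upl_size)
{
	(void) ubc_upl_commit_range(upl, 0, upl_size,
				    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY);
}
#endif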

/*
 * ubc_upl_abort_range
 *
 * Abort the contents of the specified range of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		offset			The offset into the upl
 *		size			The size of the region to be aborted,
 *					starting at the specified offset
 *		abort_flags		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_flags argument is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o UPL_ABORT_RESTART		The operation must be restarted
 *		o UPL_ABORT_UNAVAILABLE		The pages are unavailable
 *		o UPL_ABORT_ERROR		An I/O error occurred
 *		o UPL_ABORT_DUMP_PAGES		Just free the pages
 *		o UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}

/*
 * ubc_upl_abort
 *
 * Abort the contents of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		abort_type		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The upl has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_type is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o UPL_ABORT_RESTART		The operation must be restarted
 *		o UPL_ABORT_UNAVAILABLE		The pages are unavailable
 *		o UPL_ABORT_ERROR		An I/O error occurred
 *		o UPL_ABORT_DUMP_PAGES		Just free the pages
 *		o UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}

/*
 * ubc_upl_pageinfo
 *
 * Retrieve the internal page list for the specified upl
 *
 * Parameters:	upl			The upl to obtain the page list from
 *
 * Returns:	!NULL			The (upl_page_info_t *) for the page
 *					list internal to the upl
 *		NULL			Error/no page list associated
 *
 * Notes:	IMPORTANT: The function is only valid on internal objects
 *		where the list request was made with the UPL_INTERNAL flag.
 *
 *		This function is a utility helper function, since some callers
 *		may not have direct access to the header defining the macro,
 *		due to abstraction layering constraints.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}

int
UBCINFOEXISTS(const struct vnode * vp)
{
	return((vp) &&
	       ((vp)->v_type == VREG) &&
	       ((vp)->v_ubcinfo != UBC_INFO_NULL));
}

void
ubc_upl_range_needed(
	upl_t	upl,
	int	index,
	int	count)
{
	upl_range_needed(upl, index, count);
}

boolean_t
ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
		return FALSE;
	if (writable)
		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
	return TRUE;
}

boolean_t
ubc_is_mapped_writable(const struct vnode *vp)
{
	boolean_t writable;
	return ubc_is_mapped(vp, &writable) && writable;
}
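
/*
 * Editorial sketch, not part of the original source: reporting whether a
 * vnode is currently mapped and, if so, whether any mapping is writable.
 * The helper name is an assumption for illustration only.
 */
#if 0 /* example only */
static void
example_report_mapping(struct vnode *vp)
{
	boolean_t writable = FALSE;

	if (ubc_is_mapped(vp, &writable))
		printf("vnode %p mapped (%s)\n", vp,
		       writable ? "writable" : "read-only");
}
#endif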

/*
 * CODE SIGNING
 */
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;

SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");

/*
 * Function: csblob_parse_teamid
 *
 * Description: This function returns a pointer to the team id
 *		stored within the codedirectory of the csblob.
 *		If the codedirectory predates team-ids, it returns
 *		NULL.
 *		This does not copy the name but returns a pointer to
 *		it within the CD. Subsequently, the CD must be
 *		available when this is used.
 */
static const char *
csblob_parse_teamid(struct cs_blob *csblob)
{
	const CS_CodeDirectory *cd;

	cd = csblob->csb_cd;

	if (ntohl(cd->version) < CS_SUPPORTSTEAMID)
		return NULL;

	if (cd->teamOffset == 0)
		return NULL;

	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
	if (cs_debug > 1)
		printf("found team-id %s in cdblob\n", name);

	return name;
}

kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t	*blob_addr_p,
	vm_size_t	*blob_size_p)
{
	kern_return_t	kr = KERN_FAILURE;

#if PMAP_CS
	if (*blob_size_p > pmap_cs_blob_limit) {
		kr = kmem_alloc_kobject(kernel_map, blob_addr_p, *blob_size_p, VM_KERN_MEMORY_SECURITY);
	} else
#endif
	{
		*blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
		kr = KERN_SUCCESS;
	}

	if (*blob_addr_p == 0) {
		kr = KERN_NO_SPACE;
	}

	return kr;
}

void
ubc_cs_blob_deallocate(
	vm_offset_t	blob_addr,
	vm_size_t	blob_size)
{
#if PMAP_CS
	if (blob_size > pmap_cs_blob_limit) {
		kmem_free(kernel_map, blob_addr, blob_size);
	} else
#endif
	{
		kfree((void *) blob_addr, blob_size);
	}
}

/*
 * Some codesigned files use a lowest common denominator page size of
 * 4KiB, but can be used on systems that have a runtime page size of
 * 16KiB. Since faults will only occur on 16KiB ranges in
 * cs_validate_range(), we can convert the original Code Directory to
 * a multi-level scheme where groups of 4 hashes are combined to form
 * a new hash, which represents 16KiB in the on-disk file. This can
 * reduce the wired memory requirement for the Code Directory by
 * 75%. Care must be taken for binaries that use the "fourk" VM pager
 * for unaligned access, which may still attempt to validate on
 * non-16KiB multiples for compatibility with 3rd party binaries.
 */
static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory *cd;

	/*
	 * Only applies to binaries that ship as part of the OS,
	 * primarily the shared cache.
	 */
	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
		return FALSE;
	}

	/*
	 * If the runtime page size matches the code signing page
	 * size, there is no work to do.
	 */
	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
		return FALSE;
	}

	cd = blob->csb_cd;

	/*
	 * There must be a valid integral multiple of hashes
	 */
	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
		return FALSE;
	}

	/*
	 * Scatter lists must also have ranges that have an integral number of hashes
	 */
	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {

		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		/* iterate all scatter structs to make sure they are all aligned */
		do {
			uint32_t sbase = ntohl(scatter->base);
			uint32_t scount = ntohl(scatter->count);

			/* last scatter? */
			if (scount == 0) {
				break;
			}

			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			scatter++;
		} while(1);
	}

	/* Covered range must be a multiple of the new page size */
	if (ntohl(cd->codeLimit) & PAGE_MASK) {
		return FALSE;
	}

	/* All checks pass */
	return TRUE;
}
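
/*
 * Worked example (editorial note, not in the original source): with a
 * code-signing page size of 4KiB (csb_hash_pageshift == 12) and a runtime
 * page size of 16KiB (PAGE_SHIFT == 14), PAGE_MASK >> csb_hash_pageshift
 * is 0x3, so nCodeSlots and every scatter base/count must be multiples of
 * 4.  Each group of 4 first-level hashes then collapses into a single
 * second-level hash, shrinking the hash array to a quarter of its size,
 * which is the 75% reduction in wired memory cited above.
 */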

/*
 * Given a cs_blob with an already chosen best code directory, this
 * function allocates memory and copies into it only the blobs that
 * will be needed by the kernel, namely the single chosen code
 * directory (and not any of its alternatives) and the entitlement
 * blob.
 *
 * This saves significant memory with agile signatures, and additional
 * memory for 3rd Party Code because we also omit the CMS blob.
 *
 * To support multilevel and other potential code directory rewriting,
 * the size of a new code directory can be specified. Since that code
 * directory will replace the existing code directory,
 * ubc_cs_reconstitute_code_signature does not copy the original code
 * directory when a size is given, and the caller must fill it in.
 */
static int
ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
				   vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
				   CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
{
	const CS_CodeDirectory	*old_cd, *cd;
	CS_CodeDirectory	*new_cd;
	const CS_GenericBlob	*entitlements;
	vm_offset_t		new_blob_addr;
	vm_size_t		new_blob_size;
	vm_size_t		new_cdsize;
	kern_return_t		kr;
	int			error;

	old_cd = blob->csb_cd;

	new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : htonl(old_cd->length);

	new_blob_size  = sizeof(CS_SuperBlob);
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_cdsize;

	if (blob->csb_entitlements_blob) {
		/* We need to add a slot for the entitlements */
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(blob->csb_entitlements_blob->length);
	}

	kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
	if (kr != KERN_SUCCESS) {
		if (cs_debug > 1) {
			printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
			       kr);
		}
		return ENOMEM;
	}

	CS_SuperBlob *new_superblob;

	new_superblob = (CS_SuperBlob *)new_blob_addr;
	new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	new_superblob->length = htonl((uint32_t)new_blob_size);

	if (blob->csb_entitlements_blob) {
		vm_size_t	ent_offset, cd_offset;

		cd_offset  = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
		ent_offset = cd_offset + new_cdsize;

		new_superblob->count = htonl(2);
		new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
		new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
		new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
		new_superblob->index[1].offset = htonl((uint32_t)ent_offset);

		memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));

		new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
	} else {
		// Blob is the code directory, directly.
		new_cd = (CS_CodeDirectory *)new_blob_addr;
	}

	if (optional_new_cd_size == 0) {
		// Copy code directory, and revalidate.
		memcpy(new_cd, old_cd, new_cdsize);

		vm_size_t length = new_blob_size;

		error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);

		if (error) {
			printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
			       error);

			ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
			return error;
		}
		*new_entitlements_p = entitlements;
	} else {
		// Caller will fill out and validate code directory.
		memset(new_cd, 0, new_cdsize);
		*new_entitlements_p = NULL;
	}

	*new_blob_addr_p = new_blob_addr;
	*new_blob_size_p = new_blob_size;
	*new_cd_p = new_cd;

	return 0;
}

static int
ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory	*old_cd, *cd;
	CS_CodeDirectory	*new_cd;
	const CS_GenericBlob	*entitlements;
	vm_offset_t		new_blob_addr;
	vm_size_t		new_blob_size;
	vm_size_t		new_cdsize;
	int			error;

	uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);

	if (cs_debug > 1) {
		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
		       (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
	}

	old_cd = blob->csb_cd;

	/* Up to the hashes, we can copy all data */
	new_cdsize  = ntohl(old_cd->hashOffset);
	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;

	error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
						   &new_blob_addr, &new_blob_size, &new_cd,
						   &entitlements);
	if (error != 0) {
		printf("CODE SIGNING: Failed to reconstitute code signature: %d\n", error);
		return error;
	}

	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));

	/* Update fields in the Code Directory structure */
	new_cd->length = htonl((uint32_t)new_cdsize);

	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
	nCodeSlots >>= hashes_per_new_hash_shift;
	new_cd->nCodeSlots = htonl(nCodeSlots);

	new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */

	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
		SC_Scatter *scatter = (SC_Scatter*)
			((char *)new_cd + ntohl(new_cd->scatterOffset));
		/* iterate all scatter structs to scale their counts */
		do {
			uint32_t scount = ntohl(scatter->count);
			uint32_t sbase = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				break;
			}

			scount >>= hashes_per_new_hash_shift;
			scatter->count = htonl(scount);

			sbase >>= hashes_per_new_hash_shift;
			scatter->base = htonl(sbase);

			scatter++;
		} while(1);
	}

	/* For each group of hashes, hash them together */
	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);

	uint32_t hash_index;
	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
		union cs_hash_union	mdctx;

		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
		const unsigned char *src = src_base + hash_index * source_hash_len;
		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
		blob->csb_hashtype->cs_final(dst, &mdctx);
	}

	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
	if (error != 0) {
		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
		       error);

		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
		return error;
	}

	/* New Code Directory is ready for use, swap it out in the blob structure */
	ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

	blob->csb_mem_size = new_blob_size;
	blob->csb_mem_kaddr = new_blob_addr;
	blob->csb_cd = cd;
	blob->csb_entitlements_blob = entitlements;

	/* The blob has some cached attributes of the Code Directory, so update those */

	blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */

	blob->csb_hash_pagesize = PAGE_SIZE;
	blob->csb_hash_pagemask = PAGE_MASK;
	blob->csb_hash_pageshift = PAGE_SHIFT;
	blob->csb_end_offset = ntohl(cd->codeLimit);
	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
	} else {
		blob->csb_start_offset = 0;
	}

	return 0;
}

/*
 * Validate the code signature blob, create a struct cs_blob wrapper
 * and return it together with a pointer to the chosen code directory
 * and entitlements blob.
 *
 * Note that this takes ownership of the memory at addr, mainly because
 * this function can actually replace the passed in blob with another
 * one, e.g. when performing multilevel hashing optimization.
 */
static int
cs_blob_create_validated(
	vm_address_t * const		addr,
	vm_size_t			size,
	struct cs_blob ** const		ret_blob,
	CS_CodeDirectory const ** const	ret_cd)
{
	struct cs_blob		*blob;
	int			error = EINVAL;
	const CS_CodeDirectory	*cd;
	const CS_GenericBlob	*entitlements;
	union cs_hash_union	mdctx;
	size_t			length;

	if (ret_blob)
		*ret_blob = NULL;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL) {
		return ENOMEM;
	}

	/* fill in the new blob */
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = *addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
	blob->csb_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;
	blob->csb_reconstituted = false;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
				   length, &cd, &entitlements);
	if (error) {
		if (cs_debug)
			printf("CODESIGNING: csblob invalid: %d\n", error);
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;
	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		int md_size;

		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash))
			panic("validated CodeDirectory but unsupported type");

		blob->csb_hash_pageshift = cd->pageSize;
		blob->csb_hash_pagesize = (1U << cd->pageSize);
		blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
		blob->csb_hash_firstlevel_pagesize = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
		if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
				((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
		} else {
			blob->csb_start_offset = 0;
		}

		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
	}

	error = 0;

out:
	if (error != 0) {
		cs_blob_free(blob);
		blob = NULL;
		cd = NULL;
	}

	if (ret_blob != NULL) {
		*ret_blob = blob;
	}
	if (ret_cd != NULL) {
		*ret_cd = cd;
	}

	return error;
}

/*
 * Free a cs_blob previously created by cs_blob_create_validated.
 */
void
cs_blob_free(
	struct cs_blob * const blob)
{
	if (blob != NULL) {
		if (blob->csb_mem_kaddr) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_entitlements != NULL) {
			osobject_release(blob->csb_entitlements);
			blob->csb_entitlements = NULL;
		}
		kfree(blob, sizeof (*blob));
	}
}

int
ubc_cs_blob_add(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		base_offset,
	vm_address_t	*addr,
	vm_size_t	size,
	struct image_params *imgp,
	__unused int	flags,
	struct cs_blob	**ret_blob)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob, *oblob;
	int			error;
	CS_CodeDirectory const	*cd;
	off_t			blob_start_offset, blob_end_offset;
	boolean_t		record_mtime;

	record_mtime = FALSE;
	if (ret_blob)
		*ret_blob = NULL;

	/* Create the struct cs_blob wrapper that will be attached to the vnode.
	 * Validates the passed in blob in the process. */
	error = cs_blob_create_validated(addr, size, &blob, &cd);

	if (error != 0) {
		printf("malformed code signature blob: %d\n", error);
		return error;
	}

	blob->csb_cpu_type = cputype;
	blob->csb_base_offset = base_offset;

	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */
#if CONFIG_MACF
	unsigned int cs_flags = blob->csb_flags;
	unsigned int signer_type = blob->csb_signer_type;
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;

	if (error) {
		if (cs_debug)
			printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
		goto out;
	}
	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
		if (cs_debug)
			printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
		error = EPERM;
		goto out;
	}
#endif

#if CONFIG_ENFORCE_SIGNED_CODE
	/*
	 * Reconstitute code signature
	 */
	{
		vm_address_t new_mem_kaddr = 0;
		vm_size_t new_mem_size = 0;

		CS_CodeDirectory *new_cd = NULL;
		CS_GenericBlob const *new_entitlements = NULL;

		error = ubc_cs_reconstitute_code_signature(blob, 0,
							   &new_mem_kaddr, &new_mem_size,
							   &new_cd, &new_entitlements);

		if (error != 0) {
			printf("failed code signature reconstitution: %d\n", error);
			goto out;
		}

		ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

		blob->csb_mem_kaddr = new_mem_kaddr;
		blob->csb_mem_size = new_mem_size;
		blob->csb_cd = new_cd;
		blob->csb_entitlements_blob = new_entitlements;
		blob->csb_reconstituted = true;
	}
#endif

	if (blob->csb_flags & CS_PLATFORM_BINARY) {
		if (cs_debug > 1)
			printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
		blob->csb_platform_binary = 1;
		blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
	} else {
		blob->csb_platform_binary = 0;
		blob->csb_platform_path = 0;
		blob->csb_teamid = csblob_parse_teamid(blob);
		if (cs_debug > 1) {
			if (blob->csb_teamid)
				printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
			else
				printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
		}
	}

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	if (ubc_cs_supports_multilevel_hash(blob)) {
		error = ubc_cs_convert_to_multilevel_hash(blob);
		if (error != 0) {
			printf("failed multilevel hash conversion: %d\n", error);
			goto out;
		}
		blob->csb_reconstituted = true;
	}

	vnode_lock(vp);
	if (! UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = uip->cs_blobs;
	     oblob != NULL;
	     oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
			vnode_unlock(vp);
			error = EALREADY;
			goto out;
		} else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else { // non teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
				      oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
				    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict ! */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    blob->csb_mem_size == oblob->csb_mem_size &&
			    blob->csb_flags == oblob->csb_flags &&
			    (blob->csb_cpu_type == CPU_TYPE_ANY ||
			     oblob->csb_cpu_type == CPU_TYPE_ANY ||
			     blob->csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(blob->csb_cdhash,
				  oblob->csb_cdhash,
				  CS_CDHASH_LEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					oblob->csb_cpu_type = cputype;
				}

				/* The signature is still accepted, so update the
				 * generation count. */
				uip->cs_add_gen = cs_blob_generation_count;

				vnode_unlock(vp);
				if (ret_blob)
					*ret_blob = oblob;
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	if (cs_blob_count > cs_blob_count_peak) {
		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
	}
	OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
	if ((SInt32) cs_blob_size > cs_blob_size_peak) {
		cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
	}
	if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
		cs_blob_size_max = (UInt32) blob->csb_mem_size;
	}

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		       "loaded %s signatures for file (%s) "
		       "range 0x%llx:0x%llx flags 0x%x\n",
		       p->p_pid, p->p_comm,
		       blob->csb_cpu_type == -1 ? "detached" : "embedded",
		       name,
		       blob->csb_base_offset + blob->csb_start_offset,
		       blob->csb_base_offset + blob->csb_end_offset,
		       blob->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	if (ret_blob)
		*ret_blob = blob;

	error = 0;	/* success ! */

out:
	if (error) {
		if (cs_debug)
			printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);

		cs_blob_free(blob);
	}

	if (error == EAGAIN) {
		/*
		 * See above:  error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
	}

	return error;
}

void
csvnode_print_debug(struct vnode *vp)
{
	const char	*name = NULL;
	struct ubc_info	*uip;
	struct cs_blob	*blob;

	name = vnode_getname_printable(vp);
	if (name) {
		printf("csvnode: name: %s\n", name);
		vnode_putname_printable(name);
	}

	vnode_lock_spin(vp);

	if (! UBCINFOEXISTS(vp)) {
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
		       (unsigned long)blob->csb_start_offset,
		       (unsigned long)blob->csb_end_offset,
		       blob->csb_flags,
		       blob->csb_platform_binary ? "yes" : "no",
		       blob->csb_platform_path ? "yes" : "no",
		       blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
	}

out:
	vnode_unlock(vp);
}

struct cs_blob *
ubc_cs_blob_get(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		offset)
{
	struct ubc_info	*uip;
	struct cs_blob	*blob;
	off_t		offset_in_blob;

	vnode_lock_spin(vp);

	if (! UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		if (cputype != -1 && blob->csb_cpu_type == cputype) {
			break;
		}
		if (offset != -1) {
			offset_in_blob = offset - blob->csb_base_offset;
			if (offset_in_blob >= blob->csb_start_offset &&
			    offset_in_blob < blob->csb_end_offset) {
				/* our offset is covered by this blob */
				break;
			}
		}
	}

out:
	vnode_unlock(vp);

	return blob;
}

static void
ubc_cs_free(
	struct ubc_info	*uip)
{
	struct cs_blob	*blob, *next_blob;

	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = next_blob) {
		next_blob = blob->csb_next;
		OSAddAtomic(-1, &cs_blob_count);
		OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
		cs_blob_free(blob);
	}
#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
	uip->cs_blobs = NULL;
}

/* check cs blob generation on vnode
 * returns:
 *    0         : Success, the cs_blob attached is current
 *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
 */
int
ubc_cs_generation_check(
	struct vnode	*vp)
{
	int retval = ENEEDAUTH;

	vnode_lock_spin(vp);

	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
		retval = 0;
	}

	vnode_unlock(vp);
	return retval;
}
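
/*
 * Editorial sketch, not part of the original source: a hypothetical caller
 * would combine ubc_cs_generation_check() with ubc_cs_blob_revalidate()
 * after the global generation count has been bumped by cs_blob_reset_cache().
 */
#if 0 /* example only */
static int
example_ensure_current(struct vnode *vp, struct cs_blob *blob,
		       struct image_params *imgp, int flags)
{
	if (ubc_cs_generation_check(vp) == 0)
		return 0;	/* attached blob is still current */

	/* generation mismatch: re-run the policy check and update the gen */
	return ubc_cs_blob_revalidate(vp, blob, imgp, flags);
}
#endif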

int
ubc_cs_blob_revalidate(
	struct vnode	*vp,
	struct cs_blob	*blob,
	struct image_params *imgp,
	int flags
	)
{
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	size_t size;
	assert(vp != NULL);
	assert(blob != NULL);

	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
				   size, &cd, &entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	if (blob->csb_reconstituted) {
		/*
		 * Code signatures that have been modified after validation
		 * cannot be revalidated inline from their in-memory blob.
		 *
		 * That's okay, though, because the only path left that relies
		 * on revalidation of existing in-memory blobs is the legacy
		 * detached signature database path, which only exists on macOS,
		 * which does not do reconstitution of any kind.
		 */
		if (cs_debug) {
			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
		}

		/*
		 * EAGAIN tells the caller that they may reread the code
		 * signature and try attaching it again, which is the same
		 * thing they would do if there was no cs_blob yet in the
		 * first place.
		 *
		 * Conveniently, after ubc_cs_blob_add did a successful
		 * validation, it will detect that a matching cs_blob (cdhash,
		 * offset, arch etc.) already exists, and return success
		 * without re-adding a cs_blob to the vnode.
		 */
		error = EAGAIN;
		goto out;
	}

	/* callout to mac_vnode_check_signature */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
	}
#else
	(void)flags;
	(void)signer_type;
#endif

	/* update generation number if success */
	vnode_lock_spin(vp);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;
	if (UBCINFOEXISTS(vp)) {
		if (error == 0)
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		else
			vp->v_ubcinfo->cs_add_gen = 0;
	}

	vnode_unlock(vp);

out:
	return error;
}

void
cs_blob_reset_cache()
{
	/* incrementing odd number by 2 makes sure '0' is never reached. */
	OSAddAtomic(+2, &cs_blob_generation_count);
	printf("Resetting cs_blob cache from all vnodes.\n");
}

struct cs_blob *
ubc_get_cs_blobs(
	struct vnode	*vp)
{
	struct ubc_info	*uip;
	struct cs_blob	*blobs;

	/*
	 * No need to take the vnode lock here.  The caller must be holding
	 * a reference on the vnode (via a VM mapping or open file descriptor),
	 * so the vnode will not go away.  The ubc_info stays until the vnode
	 * goes away.  And we only modify "blobs" by adding to the head of the
	 * list.
	 * The ubc_info could go away entirely if the vnode gets reclaimed as
	 * part of a forced unmount.  In the case of a code-signature validation
	 * during a page fault, the "paging_in_progress" reference on the VM
	 * object guarantees that the vnode pager (and the ubc_info) won't go
	 * away during the fault.
	 * Other callers need to protect against vnode reclaim by holding the
	 * vnode lock, for example.
	 */

	if (! UBCINFOEXISTS(vp)) {
		blobs = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	blobs = uip->cs_blobs;

out:
	return blobs;
}

void
ubc_get_cs_mtime(
	struct vnode	*vp,
	struct timespec	*cs_mtime)
{
	struct ubc_info	*uip;

	if (! UBCINFOEXISTS(vp)) {
		cs_mtime->tv_sec = 0;
		cs_mtime->tv_nsec = 0;
		return;
	}

	uip = vp->v_ubcinfo;
	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}

unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;

static boolean_t
cs_validate_hash(
	struct cs_blob		*blobs,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	vm_size_t		*bytes_processed,
	unsigned		*tainted)
{
	union cs_hash_union	mdctx;
	struct cs_hash const	*hashtype = NULL;
	unsigned char		actual_hash[CS_HASH_MAX_SIZE];
	unsigned char		expected_hash[CS_HASH_MAX_SIZE];
	boolean_t		found_hash;
	struct cs_blob		*blob;
	const CS_CodeDirectory	*cd;
	const unsigned char	*hash;
	boolean_t		validated;
	off_t			offset;	/* page offset in the file */
	size_t			size;
	off_t			codeLimit = 0;
	const char		*lower_bound, *upper_bound;
	vm_offset_t		kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CDs that have been injected are already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL)
				panic("unknown hash type ?");
			if (hashtype->cs_digest_size > sizeof(actual_hash))
				panic("hash size too large");
			if (offset & blob->csb_hash_pagemask)
				panic("offset not aligned to cshash boundary");

			codeLimit = ntohl(cd->codeLimit);

			hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift),
				      hashtype->cs_size,
				      lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			       "mobj %p off 0x%llx: no hash to validate !?\n",
			       pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {

		*tainted = 0;

		size = blob->csb_hash_pagesize;
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & blob->csb_hash_pagemask);
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pagesize) {
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i=0; i < size;) {
				union cs_hash_union	partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize);

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				       "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				       pager, page_offset, size,
				       asha1[0], asha1[1], asha1[2],
				       asha1[3], asha1[4],
				       esha1[0], esha1[1], esha1[2],
				       esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "SHA1 OK\n",
				       pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}

boolean_t
cs_validate_range(
	struct vnode		*vp,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	vm_size_t		dsize,
	unsigned		*tainted)
{
	vm_size_t offset_in_range;
	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */

	struct cs_blob *blobs = ubc_get_cs_blobs(vp);

	*tainted = 0;

	for (offset_in_range = 0;
	     offset_in_range < dsize;
	     /* offset_in_range updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;

		subrange_validated = cs_validate_hash(blobs,
						      pager,
						      page_offset + offset_in_range,
						      (const void *)((const char *)data + offset_in_range),
						      &bytes_processed,
						      &subrange_tainted);

		*tainted |= subrange_tainted;

		if (bytes_processed == 0) {
			/* Cannot make forward progress, so return an error */
			all_subranges_validated = FALSE;
			break;
		} else if (subrange_validated == FALSE) {
			all_subranges_validated = FALSE;
			/* Keep going to detect other types of failures in subranges */
		}

		offset_in_range += bytes_processed;
	}

	return all_subranges_validated;
}
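
/*
 * Editorial sketch, not part of the original source: how a consumer of this
 * interface might validate a freshly paged-in region and treat either a
 * validation failure or a tainted subrange as fatal.  The helper name is an
 * assumption for illustration only.
 */
#if 0 /* example only */
static boolean_t
example_validate_pagein(struct vnode *vp, memory_object_t pager,
			memory_object_offset_t offset, const void *data,
			vm_size_t size)
{
	unsigned	tainted = 0;
	boolean_t	valid;

	valid = cs_validate_range(vp, pager, offset, data, size, &tainted);
	if (!valid || (tainted & CS_VALIDATE_TAINTED))
		return FALSE;
	return TRUE;
}
#endif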

int
ubc_cs_getcdhash(
	vnode_t		vp,
	off_t		offset,
	unsigned char	*cdhash)
{
	struct cs_blob	*blobs, *blob;
	off_t		rel_offset;
	int		ret;

	vnode_lock(vp);

	blobs = ubc_get_cs_blobs(vp);
	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		/* compute offset relative to this blob */
		rel_offset = offset - blob->csb_base_offset;
		if (rel_offset >= blob->csb_start_offset &&
		    rel_offset < blob->csb_end_offset) {
			/* this blob does cover our "offset" ! */
			break;
		}
	}

	if (blob == NULL) {
		/* we didn't find a blob covering "offset" */
		ret = EBADEXEC; /* XXX any better error ? */
	} else {
		/* get the SHA1 hash of that blob */
		bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash));
		ret = 0;
	}

	vnode_unlock(vp);

	return ret;
}
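
/*
 * Editorial sketch, not part of the original source: fetching the cdhash
 * covering a given file offset, e.g. for logging or comparison.  The helper
 * name is an assumption for illustration only.
 */
#if 0 /* example only */
static void
example_log_cdhash(vnode_t vp, off_t offset)
{
	unsigned char cdhash[CS_CDHASH_LEN];

	if (ubc_cs_getcdhash(vp, offset, cdhash) == 0)
		printf("cdhash[0..3] = %02x%02x%02x%02x\n",
		       cdhash[0], cdhash[1], cdhash[2], cdhash[3]);
}
#endif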

boolean_t
ubc_cs_is_range_codesigned(
	vnode_t			vp,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	struct cs_blob		*csblob;
	mach_vm_offset_t	blob_start;
	mach_vm_offset_t	blob_end;

	if (vp == NULL) {
		/* no file: no code signature */
		return FALSE;
	}
	if (size == 0) {
		/* no range: no code signature */
		return FALSE;
	}
	if (start + size < start) {
		/* overflow */
		return FALSE;
	}

	csblob = ubc_cs_blob_get(vp, -1, start);
	if (csblob == NULL) {
		return FALSE;
	}

	/*
	 * We currently check if the range is covered by a single blob,
	 * which should always be the case for the dyld shared cache.
	 * If we ever want to make this routine handle other cases, we
	 * would have to iterate if the blob does not cover the full range.
	 */
	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
					 csblob->csb_start_offset);
	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
				       csblob->csb_end_offset);
	if (blob_start > start || blob_end < (start + size)) {
		/* range not fully covered by this code-signing blob */
		return FALSE;
	}

	return TRUE;
}

#if CHECK_CS_VALIDATION_BITMAP
#define stob(s)	(((atop_64(round_page_64(s))) + 07) >> 3)
extern	boolean_t	root_fs_upgrade_try;

/*
 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
 * Depends:
 * a) Is the target vnode on the root filesystem?
 * b) Has someone tried to mount the root filesystem read-write?
 * If answers are (a) yes AND (b) no, then we can use the bitmap.
 */
#define USE_CODE_SIGN_BITMAP(vp)	( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)

kern_return_t
ubc_cs_validation_bitmap_allocate(
	vnode_t		vp)
{
	kern_return_t		kr = KERN_SUCCESS;
	struct ubc_info		*uip;
	char			*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		uip = vp->v_ubcinfo;

		if ( uip->cs_valid_bitmap == NULL ) {
			bitmap_size = stob(uip->ui_size);
			target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
			if (target_bitmap == 0) {
				kr = KERN_NO_SPACE;
			} else {
				kr = KERN_SUCCESS;
			}
			if( kr == KERN_SUCCESS ) {
				memset( target_bitmap, 0, (size_t)bitmap_size);
				uip->cs_valid_bitmap = (void*)target_bitmap;
				uip->cs_valid_bitmap_size = bitmap_size;
			}
		}
	}
	return kr;
}

kern_return_t
ubc_cs_check_validation_bitmap (
	vnode_t			vp,
	memory_object_offset_t	offset,
	int			optype)
{
	kern_return_t	kr = KERN_SUCCESS;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		struct ubc_info *uip = vp->v_ubcinfo;
		char		*target_bitmap = uip->cs_valid_bitmap;

		if ( target_bitmap == NULL ) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			uint64_t	bit, byte;
			bit = atop_64( offset );
			byte = bit >> 3;

			if ( byte > uip->cs_valid_bitmap_size ) {
				kr = KERN_INVALID_ARGUMENT;
			} else {

				if (optype == CS_BITMAP_SET) {
					target_bitmap[byte] |= (1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CLEAR) {
					target_bitmap[byte] &= ~(1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CHECK) {
					if ( target_bitmap[byte] & (1 << (bit & 07))) {
						kr = KERN_SUCCESS;
					} else {
						kr = KERN_FAILURE;
					}
				}
			}
		}
	}
	return kr;
}

void
ubc_cs_validation_bitmap_deallocate(
	vnode_t		vp)
{
	struct ubc_info		*uip;
	void			*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
			bitmap_size = uip->cs_valid_bitmap_size;
			kfree( target_bitmap, (vm_size_t) bitmap_size );
			uip->cs_valid_bitmap = NULL;
		}
	}
}
#else
kern_return_t	ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
	return KERN_INVALID_ARGUMENT;
}

kern_return_t ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype){

	return KERN_INVALID_ARGUMENT;
}

void	ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
	return;
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

#if PMAP_CS
kern_return_t
cs_associate_blob_with_mapping(
	void			*pmap,
	vm_map_offset_t		start,
	vm_map_size_t		size,
	vm_object_offset_t	offset,
	void			*blobs_p)
{
	off_t			blob_start_offset, blob_end_offset;
	kern_return_t		kr;
	struct cs_blob		*blobs, *blob;
	vm_offset_t		kaddr;
	struct pmap_cs_code_directory *cd_entry = NULL;

	if (!pmap_cs) {
		return KERN_NOT_SUPPORTED;
	}

	blobs = (struct cs_blob *)blobs_p;

	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		blob_start_offset = (blob->csb_base_offset +
				     blob->csb_start_offset);
		blob_end_offset = (blob->csb_base_offset +
				   blob->csb_end_offset);
		if ((off_t) offset < blob_start_offset ||
		    (off_t) offset >= blob_end_offset ||
		    (off_t) (offset + size) <= blob_start_offset ||
		    (off_t) (offset + size) > blob_end_offset) {
			continue;
		}
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			/* blob data has been released */
			continue;
		}
		cd_entry = blob->csb_pmap_cs_entry;
		if (cd_entry == NULL) {
			continue;
		}

		break;
	}

	if (cd_entry != NULL) {
		kr = pmap_cs_associate(pmap,
				       cd_entry,
				       start,
				       size);
	} else {
		kr = KERN_CODESIGN_ERROR;
	}
#if 00
	printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
	kr = KERN_SUCCESS;
#endif
	return kr;
}
#endif /* PMAP_CS */