2 * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 * Author: Umesh Vaishampayan [umeshv@apple.com]
31 * 05-Aug-1999 umeshv Created.
33 * Functions related to Unified Buffer cache.
35 * Caller of UBC functions MUST have a valid reference on the vnode.
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
58 #include <mach/mach_types.h>
59 #include <mach/memory_object_types.h>
60 #include <mach/memory_object_control.h>
61 #include <mach/vm_map.h>
62 #include <mach/mach_vm.h>
65 #include <kern/kern_types.h>
66 #include <kern/kalloc.h>
67 #include <kern/zalloc.h>
68 #include <kern/thread.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_protos.h> /* last */
73 #include <libkern/crypto/sha1.h>
74 #include <libkern/crypto/sha2.h>
75 #include <libkern/libkern.h>
76 #include <libkern/ptrauth_utils.h>
78 #include <security/mac_framework.h>
80 #include <stdatomic.h>
82 /* XXX These should be in a BSD accessible Mach header, but aren't. */
83 extern kern_return_t
memory_object_pages_resident(memory_object_control_t
,
85 extern kern_return_t
memory_object_signed(memory_object_control_t control
,
87 extern boolean_t
memory_object_is_signed(memory_object_control_t
);
88 extern void memory_object_mark_trusted(
89 memory_object_control_t control
);
91 /* XXX Same for those. */
93 extern void Debugger(const char *message
);
96 /* XXX no one uses this interface! */
97 kern_return_t
ubc_page_op_with_control(
98 memory_object_control_t control
,
101 ppnum_t
*phys_entryp
,
109 #define assert(cond) \
110 ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
112 #include <kern/assert.h>
113 #endif /* DIAGNOSTIC */
115 static int ubc_info_init_internal(struct vnode
*vp
, int withfsize
, off_t filesize
);
116 static int ubc_umcallback(vnode_t
, void *);
117 static int ubc_msync_internal(vnode_t
, off_t
, off_t
, off_t
*, int, int *);
118 static void ubc_cs_free(struct ubc_info
*uip
);
120 static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob
*blob
);
121 static kern_return_t
ubc_cs_convert_to_multilevel_hash(struct cs_blob
*blob
);
123 ZONE_DECLARE(ubc_info_zone
, "ubc_info zone", sizeof(struct ubc_info
),
124 ZC_NOENCRYPT
| ZC_ZFREE_CLEARMEM
);
125 static uint32_t cs_blob_generation_count
= 1;
129 * Routines to navigate code signing data structures in the kernel...
134 #define PAGE_SHIFT_4K (12)
140 const void *lower_bound
,
141 const void *upper_bound
)
143 if (upper_bound
< lower_bound
||
148 if (start
< lower_bound
||
156 typedef void (*cs_md_init
)(void *ctx
);
157 typedef void (*cs_md_update
)(void *ctx
, const void *data
, size_t size
);
158 typedef void (*cs_md_final
)(void *hash
, void *ctx
);
161 uint8_t cs_type
; /* type code as per code signing */
162 size_t cs_size
; /* size of effective hash (may be truncated) */
163 size_t cs_digest_size
;/* size of native hash */
165 cs_md_update cs_update
;
166 cs_md_final cs_final
;
171 struct cs_hash
const * const cs_hash
)
173 return cs_hash
->cs_type
;
176 static const struct cs_hash cs_hash_sha1
= {
177 .cs_type
= CS_HASHTYPE_SHA1
,
178 .cs_size
= CS_SHA1_LEN
,
179 .cs_digest_size
= SHA_DIGEST_LENGTH
,
180 .cs_init
= (cs_md_init
)SHA1Init
,
181 .cs_update
= (cs_md_update
)SHA1Update
,
182 .cs_final
= (cs_md_final
)SHA1Final
,
185 static const struct cs_hash cs_hash_sha256
= {
186 .cs_type
= CS_HASHTYPE_SHA256
,
187 .cs_size
= SHA256_DIGEST_LENGTH
,
188 .cs_digest_size
= SHA256_DIGEST_LENGTH
,
189 .cs_init
= (cs_md_init
)SHA256_Init
,
190 .cs_update
= (cs_md_update
)SHA256_Update
,
191 .cs_final
= (cs_md_final
)SHA256_Final
,
193 static const struct cs_hash cs_hash_sha256_truncate
= {
194 .cs_type
= CS_HASHTYPE_SHA256_TRUNCATED
,
195 .cs_size
= CS_SHA256_TRUNCATED_LEN
,
196 .cs_digest_size
= SHA256_DIGEST_LENGTH
,
197 .cs_init
= (cs_md_init
)SHA256_Init
,
198 .cs_update
= (cs_md_update
)SHA256_Update
,
199 .cs_final
= (cs_md_final
)SHA256_Final
,
201 static const struct cs_hash cs_hash_sha384
= {
202 .cs_type
= CS_HASHTYPE_SHA384
,
203 .cs_size
= SHA384_DIGEST_LENGTH
,
204 .cs_digest_size
= SHA384_DIGEST_LENGTH
,
205 .cs_init
= (cs_md_init
)SHA384_Init
,
206 .cs_update
= (cs_md_update
)SHA384_Update
,
207 .cs_final
= (cs_md_final
)SHA384_Final
,
211 static struct cs_hash
const *
212 cs_find_md(uint8_t type
)
214 if (type
== CS_HASHTYPE_SHA1
) {
215 return &cs_hash_sha1
;
217 } else if (type
== CS_HASHTYPE_SHA256
) {
218 return &cs_hash_sha256
;
219 } else if (type
== CS_HASHTYPE_SHA256_TRUNCATED
) {
220 return &cs_hash_sha256_truncate
;
221 } else if (type
== CS_HASHTYPE_SHA384
) {
222 return &cs_hash_sha384
;
228 union cs_hash_union
{
230 SHA256_CTX sha256ctx
;
231 SHA384_CTX sha384ctx
;
236 * Choose among different hash algorithms.
237 * Higher is better, 0 => don't use at all.
239 static const uint32_t hashPriorities
[] = {
241 CS_HASHTYPE_SHA256_TRUNCATED
,
247 hash_rank(const CS_CodeDirectory
*cd
)
249 uint32_t type
= cd
->hashType
;
252 for (n
= 0; n
< sizeof(hashPriorities
) / sizeof(hashPriorities
[0]); ++n
) {
253 if (hashPriorities
[n
] == type
) {
257 return 0; /* not supported */
262 * Locating a page hash
264 static const unsigned char *
266 const CS_CodeDirectory
*cd
,
269 const char *lower_bound
,
270 const char *upper_bound
)
272 const unsigned char *base
, *top
, *hash
;
273 uint32_t nCodeSlots
= ntohl(cd
->nCodeSlots
);
275 assert(cs_valid_range(cd
, cd
+ 1, lower_bound
, upper_bound
));
277 if ((ntohl(cd
->version
) >= CS_SUPPORTSSCATTER
) && (ntohl(cd
->scatterOffset
))) {
278 /* Get first scatter struct */
279 const SC_Scatter
*scatter
= (const SC_Scatter
*)
280 ((const char*)cd
+ ntohl(cd
->scatterOffset
));
281 uint32_t hashindex
= 0, scount
, sbase
= 0;
282 /* iterate all scatter structs */
284 if ((const char*)scatter
> (const char*)cd
+ ntohl(cd
->length
)) {
286 printf("CODE SIGNING: Scatter extends past Code Directory\n");
291 scount
= ntohl(scatter
->count
);
292 uint32_t new_base
= ntohl(scatter
->base
);
299 if ((hashindex
> 0) && (new_base
<= sbase
)) {
301 printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
304 return NULL
; /* unordered scatter array */
308 /* this scatter beyond page we're looking for? */
313 if (sbase
+ scount
>= page
) {
314 /* Found the scatter struct that is
315 * referencing our page */
317 /* base = address of first hash covered by scatter */
318 base
= (const unsigned char *)cd
+ ntohl(cd
->hashOffset
) +
319 hashindex
* hash_len
;
320 /* top = address of first hash after this scatter */
321 top
= base
+ scount
* hash_len
;
322 if (!cs_valid_range(base
, top
, lower_bound
,
324 hashindex
> nCodeSlots
) {
331 /* this scatter struct is before the page we're looking
337 hash
= base
+ (page
- sbase
) * hash_len
;
339 base
= (const unsigned char *)cd
+ ntohl(cd
->hashOffset
);
340 top
= base
+ nCodeSlots
* hash_len
;
341 if (!cs_valid_range(base
, top
, lower_bound
, upper_bound
) ||
345 assert(page
< nCodeSlots
);
347 hash
= base
+ page
* hash_len
;
350 if (!cs_valid_range(hash
, hash
+ hash_len
,
351 lower_bound
, upper_bound
)) {
359 * cs_validate_codedirectory
361 * Validate that pointers inside the code directory to make sure that
362 * all offsets and lengths are constrained within the buffer.
364 * Parameters: cd Pointer to code directory buffer
365 * length Length of buffer
368 * EBADEXEC Invalid code signature
372 cs_validate_codedirectory(const CS_CodeDirectory
*cd
, size_t length
)
374 struct cs_hash
const *hashtype
;
376 if (length
< sizeof(*cd
)) {
379 if (ntohl(cd
->magic
) != CSMAGIC_CODEDIRECTORY
) {
382 if (cd
->pageSize
< PAGE_SHIFT_4K
|| cd
->pageSize
> PAGE_SHIFT
) {
385 hashtype
= cs_find_md(cd
->hashType
);
386 if (hashtype
== NULL
) {
390 if (cd
->hashSize
!= hashtype
->cs_size
) {
394 if (length
< ntohl(cd
->hashOffset
)) {
398 /* check that nSpecialSlots fits in the buffer in front of hashOffset */
399 if (ntohl(cd
->hashOffset
) / hashtype
->cs_size
< ntohl(cd
->nSpecialSlots
)) {
403 /* check that codeslots fits in the buffer */
404 if ((length
- ntohl(cd
->hashOffset
)) / hashtype
->cs_size
< ntohl(cd
->nCodeSlots
)) {
408 if (ntohl(cd
->version
) >= CS_SUPPORTSSCATTER
&& cd
->scatterOffset
) {
409 if (length
< ntohl(cd
->scatterOffset
)) {
413 const SC_Scatter
*scatter
= (const SC_Scatter
*)
414 (((const uint8_t *)cd
) + ntohl(cd
->scatterOffset
));
418 * Check each scatter buffer, since we don't know the
419 * length of the scatter buffer array, we have to
423 /* check that the end of each scatter buffer in within the length */
424 if (((const uint8_t *)scatter
) + sizeof(scatter
[0]) > (const uint8_t *)cd
+ length
) {
427 uint32_t scount
= ntohl(scatter
->count
);
431 if (nPages
+ scount
< nPages
) {
437 /* XXX check that basees doesn't overlap */
438 /* XXX check that targetOffset doesn't overlap */
440 #if 0 /* rdar://12579439 */
441 if (nPages
!= ntohl(cd
->nCodeSlots
)) {
447 if (length
< ntohl(cd
->identOffset
)) {
451 /* identifier is NUL terminated string */
452 if (cd
->identOffset
) {
453 const uint8_t *ptr
= (const uint8_t *)cd
+ ntohl(cd
->identOffset
);
454 if (memchr(ptr
, 0, length
- ntohl(cd
->identOffset
)) == NULL
) {
459 /* team identifier is NULL terminated string */
460 if (ntohl(cd
->version
) >= CS_SUPPORTSTEAMID
&& ntohl(cd
->teamOffset
)) {
461 if (length
< ntohl(cd
->teamOffset
)) {
465 const uint8_t *ptr
= (const uint8_t *)cd
+ ntohl(cd
->teamOffset
);
466 if (memchr(ptr
, 0, length
- ntohl(cd
->teamOffset
)) == NULL
) {
471 /* linkage is variable length binary data */
472 if (ntohl(cd
->version
) >= CS_SUPPORTSLINKAGE
&& cd
->linkageHashType
!= 0) {
473 const uintptr_t ptr
= (uintptr_t)cd
+ ntohl(cd
->linkageOffset
);
474 const uintptr_t ptr_end
= ptr
+ ntohl(cd
->linkageSize
);
476 if (ptr_end
< ptr
|| ptr
< (uintptr_t)cd
|| ptr_end
> (uintptr_t)cd
+ length
) {
490 cs_validate_blob(const CS_GenericBlob
*blob
, size_t length
)
492 if (length
< sizeof(CS_GenericBlob
) || length
< ntohl(blob
->length
)) {
501 * Validate that superblob/embedded code directory to make sure that
502 * all internal pointers are valid.
504 * Will validate both a superblob csblob and a "raw" code directory.
507 * Parameters: buffer Pointer to code signature
508 * length Length of buffer
509 * rcd returns pointer to code directory
512 * EBADEXEC Invalid code signature
518 const size_t blob_size
,
519 const CS_CodeDirectory
**rcd
,
520 const CS_GenericBlob
**rentitlements
)
522 const CS_GenericBlob
*blob
;
527 *rentitlements
= NULL
;
529 blob
= (const CS_GenericBlob
*)(const void *)addr
;
532 error
= cs_validate_blob(blob
, length
);
536 length
= ntohl(blob
->length
);
538 if (ntohl(blob
->magic
) == CSMAGIC_EMBEDDED_SIGNATURE
) {
539 const CS_SuperBlob
*sb
;
541 const CS_CodeDirectory
*best_cd
= NULL
;
542 unsigned int best_rank
= 0;
544 const CS_CodeDirectory
*sha1_cd
= NULL
;
547 if (length
< sizeof(CS_SuperBlob
)) {
551 sb
= (const CS_SuperBlob
*)blob
;
552 count
= ntohl(sb
->count
);
554 /* check that the array of BlobIndex fits in the rest of the data */
555 if ((length
- sizeof(CS_SuperBlob
)) / sizeof(CS_BlobIndex
) < count
) {
559 /* now check each BlobIndex */
560 for (n
= 0; n
< count
; n
++) {
561 const CS_BlobIndex
*blobIndex
= &sb
->index
[n
];
562 uint32_t type
= ntohl(blobIndex
->type
);
563 uint32_t offset
= ntohl(blobIndex
->offset
);
564 if (length
< offset
) {
568 const CS_GenericBlob
*subBlob
=
569 (const CS_GenericBlob
*)(const void *)(addr
+ offset
);
571 size_t subLength
= length
- offset
;
573 if ((error
= cs_validate_blob(subBlob
, subLength
)) != 0) {
576 subLength
= ntohl(subBlob
->length
);
578 /* extra validation for CDs, that is also returned */
579 if (type
== CSSLOT_CODEDIRECTORY
|| (type
>= CSSLOT_ALTERNATE_CODEDIRECTORIES
&& type
< CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT
)) {
580 const CS_CodeDirectory
*candidate
= (const CS_CodeDirectory
*)subBlob
;
581 if ((error
= cs_validate_codedirectory(candidate
, subLength
)) != 0) {
584 unsigned int rank
= hash_rank(candidate
);
586 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate
->hashType
, (int)rank
, (int)type
, (int)n
);
588 if (best_cd
== NULL
|| rank
> best_rank
) {
593 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd
->hashType
, best_rank
);
596 } else if (best_cd
!= NULL
&& rank
== best_rank
) {
597 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
598 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd
->hashType
);
602 if (candidate
->hashType
== CS_HASHTYPE_SHA1
) {
603 if (sha1_cd
!= NULL
) {
604 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
610 } else if (type
== CSSLOT_ENTITLEMENTS
) {
611 if (ntohl(subBlob
->magic
) != CSMAGIC_EMBEDDED_ENTITLEMENTS
) {
614 if (*rentitlements
!= NULL
) {
615 printf("multiple entitlements blobs\n");
618 *rentitlements
= subBlob
;
623 /* To keep watchOS fast enough, we have to resort to sha1 for
626 * At the time of writing this comment, known sha1 attacks are
627 * collision attacks (not preimage or second preimage
628 * attacks), which do not apply to platform binaries since
629 * they have a fixed hash in the trust cache. Given this
630 * property, we only prefer sha1 code directories for adhoc
631 * signatures, which always have to be in a trust cache to be
632 * valid (can-load-cdhash does not exist for watchOS). Those
633 * are, incidentally, also the platform binaries, for which we
634 * care about the performance hit that sha256 would bring us.
636 * Platform binaries may still contain a (not chosen) sha256
637 * code directory, which keeps software updates that switch to
641 if (*rcd
!= NULL
&& sha1_cd
!= NULL
&& (ntohl(sha1_cd
->flags
) & CS_ADHOC
)) {
642 if (sha1_cd
->flags
!= (*rcd
)->flags
) {
643 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
644 (int)(*rcd
)->hashType
, (*rcd
)->flags
, sha1_cd
->flags
);
652 } else if (ntohl(blob
->magic
) == CSMAGIC_CODEDIRECTORY
) {
653 if ((error
= cs_validate_codedirectory((const CS_CodeDirectory
*)(const void *)addr
, length
)) != 0) {
656 *rcd
= (const CS_CodeDirectory
*)blob
;
671 * Find an blob from the superblob/code directory. The blob must have
672 * been been validated by cs_validate_csblob() before calling
673 * this. Use csblob_find_blob() instead.
675 * Will also find a "raw" code directory if its stored as well as
676 * searching the superblob.
678 * Parameters: buffer Pointer to code signature
679 * length Length of buffer
680 * type type of blob to find
681 * magic the magic number for that blob
683 * Returns: pointer Success
684 * NULL Buffer not found
687 const CS_GenericBlob
*
688 csblob_find_blob_bytes(const uint8_t *addr
, size_t length
, uint32_t type
, uint32_t magic
)
690 const CS_GenericBlob
*blob
= (const CS_GenericBlob
*)(const void *)addr
;
692 if (ntohl(blob
->magic
) == CSMAGIC_EMBEDDED_SIGNATURE
) {
693 const CS_SuperBlob
*sb
= (const CS_SuperBlob
*)blob
;
694 size_t n
, count
= ntohl(sb
->count
);
696 for (n
= 0; n
< count
; n
++) {
697 if (ntohl(sb
->index
[n
].type
) != type
) {
700 uint32_t offset
= ntohl(sb
->index
[n
].offset
);
701 if (length
- sizeof(const CS_GenericBlob
) < offset
) {
704 blob
= (const CS_GenericBlob
*)(const void *)(addr
+ offset
);
705 if (ntohl(blob
->magic
) != magic
) {
710 } else if (type
== CSSLOT_CODEDIRECTORY
711 && ntohl(blob
->magic
) == CSMAGIC_CODEDIRECTORY
712 && magic
== CSMAGIC_CODEDIRECTORY
) {
719 const CS_GenericBlob
*
720 csblob_find_blob(struct cs_blob
*csblob
, uint32_t type
, uint32_t magic
)
722 if ((csblob
->csb_flags
& CS_VALID
) == 0) {
725 return csblob_find_blob_bytes((const uint8_t *)csblob
->csb_mem_kaddr
, csblob
->csb_mem_size
, type
, magic
);
728 static const uint8_t *
729 find_special_slot(const CS_CodeDirectory
*cd
, size_t slotsize
, uint32_t slot
)
731 /* there is no zero special slot since that is the first code slot */
732 if (ntohl(cd
->nSpecialSlots
) < slot
|| slot
== 0) {
736 return (const uint8_t *)cd
+ ntohl(cd
->hashOffset
) - (slotsize
* slot
);
739 static uint8_t cshash_zero
[CS_HASH_MAX_SIZE
] = { 0 };
742 csblob_get_entitlements(struct cs_blob
*csblob
, void **out_start
, size_t *out_length
)
744 uint8_t computed_hash
[CS_HASH_MAX_SIZE
];
745 const CS_GenericBlob
*entitlements
;
746 const CS_CodeDirectory
*code_dir
;
747 const uint8_t *embedded_hash
;
748 union cs_hash_union context
;
753 if (csblob
->csb_hashtype
== NULL
|| csblob
->csb_hashtype
->cs_digest_size
> sizeof(computed_hash
)) {
757 code_dir
= csblob
->csb_cd
;
759 if ((csblob
->csb_flags
& CS_VALID
) == 0) {
762 entitlements
= csblob
->csb_entitlements_blob
;
764 embedded_hash
= find_special_slot(code_dir
, csblob
->csb_hashtype
->cs_size
, CSSLOT_ENTITLEMENTS
);
766 if (embedded_hash
== NULL
) {
771 } else if (entitlements
== NULL
) {
772 if (memcmp(embedded_hash
, cshash_zero
, csblob
->csb_hashtype
->cs_size
) != 0) {
779 csblob
->csb_hashtype
->cs_init(&context
);
780 csblob
->csb_hashtype
->cs_update(&context
, entitlements
, ntohl(entitlements
->length
));
781 csblob
->csb_hashtype
->cs_final(computed_hash
, &context
);
783 if (memcmp(computed_hash
, embedded_hash
, csblob
->csb_hashtype
->cs_size
) != 0) {
787 *out_start
= __DECONST(void *, entitlements
);
788 *out_length
= ntohl(entitlements
->length
);
795 * End of routines to navigate code signing data structures in the kernel.
803 * Allocate and attach an empty ubc_info structure to a vnode
805 * Parameters: vp Pointer to the vnode
808 * vnode_size:ENOMEM Not enough space
809 * vnode_size:??? Other error from vnode_getattr
813 ubc_info_init(struct vnode
*vp
)
815 return ubc_info_init_internal(vp
, 0, 0);
820 * ubc_info_init_withsize
822 * Allocate and attach a sized ubc_info structure to a vnode
824 * Parameters: vp Pointer to the vnode
825 * filesize The size of the file
828 * vnode_size:ENOMEM Not enough space
829 * vnode_size:??? Other error from vnode_getattr
832 ubc_info_init_withsize(struct vnode
*vp
, off_t filesize
)
834 return ubc_info_init_internal(vp
, 1, filesize
);
839 * ubc_info_init_internal
841 * Allocate and attach a ubc_info structure to a vnode
843 * Parameters: vp Pointer to the vnode
844 * withfsize{0,1} Zero if the size should be obtained
845 * from the vnode; otherwise, use filesize
846 * filesize The size of the file, if withfsize == 1
849 * vnode_size:ENOMEM Not enough space
850 * vnode_size:??? Other error from vnode_getattr
852 * Notes: We call a blocking zalloc(), and the zone was created as an
853 * expandable and collectable zone, so if no memory is available,
854 * it is possible for zalloc() to block indefinitely. zalloc()
855 * may also panic if the zone of zones is exhausted, since it's
858 * We unconditionally call vnode_pager_setup(), even if this is
859 * a reuse of a ubc_info; in that case, we should probably assert
860 * that it does not already have a pager association, but do not.
862 * Since memory_object_create_named() can only fail from receiving
863 * an invalid pager argument, the explicit check and panic is
864 * merely precautionary.
867 ubc_info_init_internal(vnode_t vp
, int withfsize
, off_t filesize
)
869 struct ubc_info
*uip
;
873 memory_object_control_t control
;
878 * If there is not already a ubc_info attached to the vnode, we
879 * attach one; otherwise, we will reuse the one that's there.
881 if (uip
== UBC_INFO_NULL
) {
882 uip
= (struct ubc_info
*) zalloc(ubc_info_zone
);
883 bzero((char *)uip
, sizeof(struct ubc_info
));
886 uip
->ui_flags
= UI_INITED
;
887 uip
->ui_ucred
= NOCRED
;
889 assert(uip
->ui_flags
!= UI_NONE
);
890 assert(uip
->ui_vnode
== vp
);
892 /* now set this ubc_info in the vnode */
896 * Allocate a pager object for this vnode
898 * XXX The value of the pager parameter is currently ignored.
899 * XXX Presumably, this API changed to avoid the race between
900 * XXX setting the pager and the UI_HASPAGER flag.
902 pager
= (void *)vnode_pager_setup(vp
, uip
->ui_pager
);
906 * Explicitly set the pager into the ubc_info, after setting the
909 SET(uip
->ui_flags
, UI_HASPAGER
);
910 uip
->ui_pager
= pager
;
913 * Note: We can not use VNOP_GETATTR() to get accurate
914 * value of ui_size because this may be an NFS vnode, and
915 * nfs_getattr() can call vinvalbuf(); if this happens,
916 * ubc_info is not set up to deal with that event.
921 * create a vnode - vm_object association
922 * memory_object_create_named() creates a "named" reference on the
923 * memory object we hold this reference as long as the vnode is
924 * "alive." Since memory_object_create_named() took its own reference
925 * on the vnode pager we passed it, we can drop the reference
926 * vnode_pager_setup() returned here.
928 kret
= memory_object_create_named(pager
,
929 (memory_object_size_t
)uip
->ui_size
, &control
);
930 vnode_pager_deallocate(pager
);
931 if (kret
!= KERN_SUCCESS
) {
932 panic("ubc_info_init: memory_object_create_named returned %d", kret
);
936 uip
->ui_control
= control
; /* cache the value of the mo control */
937 SET(uip
->ui_flags
, UI_HASOBJREF
); /* with a named reference */
939 if (withfsize
== 0) {
940 /* initialize the size */
941 error
= vnode_size(vp
, &uip
->ui_size
, vfs_context_current());
946 uip
->ui_size
= filesize
;
948 vp
->v_lflag
|= VNAMED_UBC
; /* vnode has a named ubc reference */
957 * Free a ubc_info structure
959 * Parameters: uip A pointer to the ubc_info to free
963 * Notes: If there is a credential that has subsequently been associated
964 * with the ubc_info via a call to ubc_setcred(), the reference
965 * to the credential is dropped.
967 * It's actually impossible for a ubc_info.ui_control to take the
968 * value MEMORY_OBJECT_CONTROL_NULL.
971 ubc_info_free(struct ubc_info
*uip
)
973 if (IS_VALID_CRED(uip
->ui_ucred
)) {
974 kauth_cred_unref(&uip
->ui_ucred
);
977 if (uip
->ui_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
978 memory_object_control_deallocate(uip
->ui_control
);
981 cluster_release(uip
);
984 zfree(ubc_info_zone
, uip
);
990 ubc_info_deallocate(struct ubc_info
*uip
)
996 mach_to_bsd_errno(kern_return_t mach_err
)
1002 case KERN_INVALID_ADDRESS
:
1003 case KERN_INVALID_ARGUMENT
:
1004 case KERN_NOT_IN_SET
:
1005 case KERN_INVALID_NAME
:
1006 case KERN_INVALID_TASK
:
1007 case KERN_INVALID_RIGHT
:
1008 case KERN_INVALID_VALUE
:
1009 case KERN_INVALID_CAPABILITY
:
1010 case KERN_INVALID_HOST
:
1011 case KERN_MEMORY_PRESENT
:
1012 case KERN_INVALID_PROCESSOR_SET
:
1013 case KERN_INVALID_POLICY
:
1014 case KERN_ALREADY_WAITING
:
1015 case KERN_DEFAULT_SET
:
1016 case KERN_EXCEPTION_PROTECTED
:
1017 case KERN_INVALID_LEDGER
:
1018 case KERN_INVALID_MEMORY_CONTROL
:
1019 case KERN_INVALID_SECURITY
:
1020 case KERN_NOT_DEPRESSED
:
1021 case KERN_LOCK_OWNED
:
1022 case KERN_LOCK_OWNED_SELF
:
1025 case KERN_PROTECTION_FAILURE
:
1026 case KERN_NOT_RECEIVER
:
1027 case KERN_NO_ACCESS
:
1028 case KERN_POLICY_STATIC
:
1032 case KERN_RESOURCE_SHORTAGE
:
1033 case KERN_UREFS_OVERFLOW
:
1034 case KERN_INVALID_OBJECT
:
1040 case KERN_MEMORY_FAILURE
:
1041 case KERN_POLICY_LIMIT
:
1042 case KERN_CODESIGN_ERROR
:
1045 case KERN_MEMORY_ERROR
:
1048 case KERN_ALREADY_IN_SET
:
1049 case KERN_NAME_EXISTS
:
1050 case KERN_RIGHT_EXISTS
:
1056 case KERN_TERMINATED
:
1057 case KERN_LOCK_SET_DESTROYED
:
1058 case KERN_LOCK_UNSTABLE
:
1059 case KERN_SEMAPHORE_DESTROYED
:
1062 case KERN_RPC_SERVER_TERMINATED
:
1065 case KERN_NOT_SUPPORTED
:
1068 case KERN_NODE_DOWN
:
1071 case KERN_NOT_WAITING
:
1074 case KERN_OPERATION_TIMED_OUT
:
1085 * Tell the VM that the the size of the file represented by the vnode has
1088 * Parameters: vp The vp whose backing file size is
1090 * nsize The new size of the backing file
1093 * Returns: EINVAL for new size < 0
1094 * ENOENT if no UBC info exists
1095 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1096 * Other errors (mapped to errno_t) returned by VM functions
1098 * Notes: This function will indicate success if the new size is the
1099 * same or larger than the old size (in this case, the
1100 * remainder of the file will require modification or use of
1101 * an existing upl to access successfully).
1103 * This function will fail if the new file size is smaller,
1104 * and the memory region being invalidated was unable to
1105 * actually be invalidated and/or the last page could not be
1106 * flushed, if the new size is not aligned to a page
1107 * boundary. This is usually indicative of an I/O error.
1110 ubc_setsize_ex(struct vnode
*vp
, off_t nsize
, ubc_setsize_opts_t opts
)
1112 off_t osize
; /* ui_size before change */
1113 off_t lastpg
, olastpgend
, lastoff
;
1114 struct ubc_info
*uip
;
1115 memory_object_control_t control
;
1116 kern_return_t kret
= KERN_SUCCESS
;
1118 if (nsize
< (off_t
)0) {
1122 if (!UBCINFOEXISTS(vp
)) {
1126 uip
= vp
->v_ubcinfo
;
1127 osize
= uip
->ui_size
;
1129 if (ISSET(opts
, UBC_SETSIZE_NO_FS_REENTRY
) && nsize
< osize
) {
1134 * Update the size before flushing the VM
1136 uip
->ui_size
= nsize
;
1138 if (nsize
>= osize
) { /* Nothing more to do */
1139 if (nsize
> osize
) {
1140 lock_vnode_and_post(vp
, NOTE_EXTEND
);
1147 * When the file shrinks, invalidate the pages beyond the
1148 * new size. Also get rid of garbage beyond nsize on the
1149 * last page. The ui_size already has the nsize, so any
1150 * subsequent page-in will zero-fill the tail properly
1152 lastpg
= trunc_page_64(nsize
);
1153 olastpgend
= round_page_64(osize
);
1154 control
= uip
->ui_control
;
1156 lastoff
= (nsize
& PAGE_MASK_64
);
1160 upl_page_info_t
*pl
;
1163 * new EOF ends up in the middle of a page
1164 * zero the tail of this page if it's currently
1165 * present in the cache
1167 kret
= ubc_create_upl_kernel(vp
, lastpg
, PAGE_SIZE
, &upl
, &pl
, UPL_SET_LITE
| UPL_WILL_MODIFY
, VM_KERN_MEMORY_FILE
);
1169 if (kret
!= KERN_SUCCESS
) {
1170 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret
);
1173 if (upl_valid_page(pl
, 0)) {
1174 cluster_zero(upl
, (uint32_t)lastoff
, PAGE_SIZE
- (uint32_t)lastoff
, NULL
);
1177 ubc_upl_abort_range(upl
, 0, PAGE_SIZE
, UPL_ABORT_FREE_ON_EMPTY
);
1179 lastpg
+= PAGE_SIZE_64
;
1181 if (olastpgend
> lastpg
) {
1185 flags
= MEMORY_OBJECT_DATA_FLUSH_ALL
;
1187 flags
= MEMORY_OBJECT_DATA_FLUSH
;
1190 * invalidate the pages beyond the new EOF page
1193 kret
= memory_object_lock_request(control
,
1194 (memory_object_offset_t
)lastpg
,
1195 (memory_object_size_t
)(olastpgend
- lastpg
), NULL
, NULL
,
1196 MEMORY_OBJECT_RETURN_NONE
, flags
, VM_PROT_NO_CHANGE
);
1197 if (kret
!= KERN_SUCCESS
) {
1198 printf("ubc_setsize: invalidate failed (error = %d)\n", kret
);
1201 return mach_to_bsd_errno(kret
);
1204 // Returns true for success
1206 ubc_setsize(vnode_t vp
, off_t nsize
)
1208 return ubc_setsize_ex(vp
, nsize
, 0) == 0;
1214 * Get the size of the file assocated with the specified vnode
1216 * Parameters: vp The vnode whose size is of interest
1218 * Returns: 0 There is no ubc_info associated with
1219 * this vnode, or the size is zero
1220 * !0 The size of the file
1222 * Notes: Using this routine, it is not possible for a caller to
1223 * successfully distinguish between a vnode associate with a zero
1224 * length file, and a vnode with no associated ubc_info. The
1225 * caller therefore needs to not care, or needs to ensure that
1226 * they have previously successfully called ubc_info_init() or
1227 * ubc_info_init_withsize().
1230 ubc_getsize(struct vnode
*vp
)
1232 /* people depend on the side effect of this working this way
1233 * as they call this for directory
1235 if (!UBCINFOEXISTS(vp
)) {
1238 return vp
->v_ubcinfo
->ui_size
;
1245 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1248 * Parameters: mp The mount point
1250 * Returns: 0 Success
1252 * Notes: There is no failure indication for this function.
1254 * This function is used in the unmount path; since it may block
1255 * I/O indefinitely, it should not be used in the forced unmount
1256 * path, since a device unavailability could also block that
1259 * Because there is no device ejection interlock on USB, FireWire,
1260 * or similar devices, it's possible that an ejection that begins
1261 * subsequent to the vnode_iterate() completing, either on one of
1262 * those devices, or a network mount for which the server quits
1263 * responding, etc., may cause the caller to block indefinitely.
1265 __private_extern__
int
1266 ubc_umount(struct mount
*mp
)
1268 vnode_iterate(mp
, 0, ubc_umcallback
, 0);
1276 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1277 * and vnode_iterate() for details of implementation.
1280 ubc_umcallback(vnode_t vp
, __unused
void * args
)
1282 if (UBCINFOEXISTS(vp
)) {
1283 (void) ubc_msync(vp
, (off_t
)0, ubc_getsize(vp
), NULL
, UBC_PUSHALL
);
1285 return VNODE_RETURNED
;
1292 * Get the credentials currently active for the ubc_info associated with the
1295 * Parameters: vp The vnode whose ubc_info credentials
1296 * are to be retrieved
1298 * Returns: !NOCRED The credentials
1299 * NOCRED If there is no ubc_info for the vnode,
1300 * or if there is one, but it has not had
1301 * any credentials associated with it via
1302 * a call to ubc_setcred()
1305 ubc_getcred(struct vnode
*vp
)
1307 if (UBCINFOEXISTS(vp
)) {
1308 return vp
->v_ubcinfo
->ui_ucred
;
1318 * If they are not already set, set the credentials of the ubc_info structure
1319 * associated with the vnode to those of the supplied thread; otherwise leave
1322 * Parameters: vp The vnode whose ubc_info creds are to
1324 * p The process whose credentials are to
1325 * be used, if not running on an assumed
1327 * thread The thread whose credentials are to
1330 * Returns: 1 This vnode has no associated ubc_info
1333 * Notes: This function takes a proc parameter to account for bootstrap
1334 * issues where a task or thread may call this routine, either
1335 * before credentials have been initialized by bsd_init(), or if
1336 * there is no BSD info asscoiate with a mach thread yet. This
1337 * is known to happen in both the initial swap and memory mapping
1340 * This function is generally used only in the following cases:
1342 * o a memory mapped file via the mmap() system call
1343 * o a swap store backing file
1344 * o subsequent to a successful write via vn_write()
1346 * The information is then used by the NFS client in order to
1347 * cons up a wire message in either the page-in or page-out path.
1349 * There are two potential problems with the use of this API:
1351 * o Because the write path only set it on a successful
1352 * write, there is a race window between setting the
1353 * credential and its use to evict the pages to the
1354 * remote file server
1356 * o Because a page-in may occur prior to a write, the
1357 * credential may not be set at this time, if the page-in
1358 * is not the result of a mapping established via mmap().
1360 * In both these cases, this will be triggered from the paging
1361 * path, which will instead use the credential of the current
1362 * process, which in this case is either the dynamic_pager or
1363 * the kernel task, both of which utilize "root" credentials.
1365 * This may potentially permit operations to occur which should
1366 * be denied, or it may cause to be denied operations which
1367 * should be permitted, depending on the configuration of the NFS
1371 ubc_setthreadcred(struct vnode
*vp
, proc_t p
, thread_t thread
)
1373 struct ubc_info
*uip
;
1375 struct uthread
*uthread
= get_bsdthread_info(thread
);
1377 if (!UBCINFOEXISTS(vp
)) {
1383 uip
= vp
->v_ubcinfo
;
1384 credp
= uip
->ui_ucred
;
1386 if (!IS_VALID_CRED(credp
)) {
1387 /* use per-thread cred, if assumed identity, else proc cred */
1388 if (uthread
== NULL
|| (uthread
->uu_flag
& UT_SETUID
) == 0) {
1389 uip
->ui_ucred
= kauth_cred_proc_ref(p
);
1391 uip
->ui_ucred
= uthread
->uu_ucred
;
1392 kauth_cred_ref(uip
->ui_ucred
);
1404 * If they are not already set, set the credentials of the ubc_info structure
1405 * associated with the vnode to those of the process; otherwise leave them
1408 * Parameters: vp The vnode whose ubc_info creds are to
1410 * p The process whose credentials are to
1413 * Returns: 0 This vnode has no associated ubc_info
1416 * Notes: The return values for this function are inverted from nearly
1417 * all other uses in the kernel.
1419 * See also ubc_setthreadcred(), above.
1421 * This function is considered deprecated, and generally should
1422 * not be used, as it is incompatible with per-thread credentials;
1423 * it exists for legacy KPI reasons.
1425 * DEPRECATION: ubc_setcred() is being deprecated. Please use
1426 * ubc_setthreadcred() instead.
1429 ubc_setcred(struct vnode
*vp
, proc_t p
)
1431 struct ubc_info
*uip
;
1434 /* If there is no ubc_info, deny the operation */
1435 if (!UBCINFOEXISTS(vp
)) {
1440 * Check to see if there is already a credential reference in the
1441 * ubc_info; if there is not, take one on the supplied credential.
1444 uip
= vp
->v_ubcinfo
;
1445 credp
= uip
->ui_ucred
;
1446 if (!IS_VALID_CRED(credp
)) {
1447 uip
->ui_ucred
= kauth_cred_proc_ref(p
);
1457 * Get the pager associated with the ubc_info associated with the vnode.
1459 * Parameters: vp The vnode to obtain the pager from
1461 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1462 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1464 * Notes: For each vnode that has a ubc_info associated with it, that
1465 * ubc_info SHALL have a pager associated with it, so in the
1466 * normal case, it's impossible to return VNODE_PAGER_NULL for
1467 * a vnode with an associated ubc_info.
1469 __private_extern__ memory_object_t
1470 ubc_getpager(struct vnode
*vp
)
1472 if (UBCINFOEXISTS(vp
)) {
1473 return vp
->v_ubcinfo
->ui_pager
;
1483 * Get the memory object control associated with the ubc_info associated with
1486 * Parameters: vp The vnode to obtain the memory object
1490 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1491 * MEMORY_OBJECT_CONTROL_NULL
1493 * Notes: Historically, if the flags were not "do not reactivate", this
1494 * function would look up the memory object using the pager if
1495 * it did not exist (this could be the case if the vnode had
1496 * been previously reactivated). The flags would also permit a
1497 * hold to be requested, which would have created an object
1498 * reference, if one had not already existed. This usage is
1499 * deprecated, as it would permit a race between finding and
1500 * taking the reference vs. a single reference being dropped in
1503 memory_object_control_t
1504 ubc_getobject(struct vnode
*vp
, __unused
int flags
)
1506 if (UBCINFOEXISTS(vp
)) {
1507 return vp
->v_ubcinfo
->ui_control
;
1510 return MEMORY_OBJECT_CONTROL_NULL
;
1516 * Convert a given block number to a memory backing object (file) offset for a
1519 * Parameters: vp The vnode in which the block is located
1520 * blkno The block number to convert
1522 * Returns: !-1 The offset into the backing object
1523 * -1 There is no ubc_info associated with
1525 * -1 An error occurred in the underlying VFS
1526 * while translating the block to an
1527 * offset; the most likely cause is that
1528 * the caller specified a block past the
1529 * end of the file, but this could also be
1530 * any other error from VNOP_BLKTOOFF().
1532 * Note: Representing the error in band loses some information, but does
1533 * not occlude a valid offset, since an off_t of -1 is normally
1534 * used to represent EOF. If we had a more reliable constant in
1535 * our header files for it (i.e. explicitly cast to an off_t), we
1536 * would use it here instead.
1539 ubc_blktooff(vnode_t vp
, daddr64_t blkno
)
1541 off_t file_offset
= -1;
1544 if (UBCINFOEXISTS(vp
)) {
1545 error
= VNOP_BLKTOOFF(vp
, blkno
, &file_offset
);
1558 * Convert a given offset in a memory backing object into a block number for a
1561 * Parameters: vp The vnode in which the offset is
1563 * offset The offset into the backing object
1565 * Returns: !-1 The returned block number
1566 * -1 There is no ubc_info associated with
1568 * -1 An error occurred in the underlying VFS
1569 * while translating the block to an
1570 * offset; the most likely cause is that
1571 * the caller specified a block past the
1572 * end of the file, but this could also be
1573 * any other error from VNOP_OFFTOBLK().
1575 * Note: Representing the error in band loses some information, but does
1576 * not occlude a valid block number, since block numbers exceed
1577 * the valid range for offsets, due to their relative sizes. If
1578 * we had a more reliable constant than -1 in our header files
1579 * for it (i.e. explicitly cast to an daddr64_t), we would use it
1583 ubc_offtoblk(vnode_t vp
, off_t offset
)
1585 daddr64_t blkno
= -1;
1588 if (UBCINFOEXISTS(vp
)) {
1589 error
= VNOP_OFFTOBLK(vp
, offset
, &blkno
);
1600 * ubc_pages_resident
1602 * Determine whether or not a given vnode has pages resident via the memory
1603 * object control associated with the ubc_info associated with the vnode
1605 * Parameters: vp The vnode we want to know about
1611 ubc_pages_resident(vnode_t vp
)
1614 boolean_t has_pages_resident
;
1616 if (!UBCINFOEXISTS(vp
)) {
1621 * The following call may fail if an invalid ui_control is specified,
1622 * or if there is no VM object associated with the control object. In
1623 * either case, reacting to it as if there were no pages resident will
1624 * result in correct behavior.
1626 kret
= memory_object_pages_resident(vp
->v_ubcinfo
->ui_control
, &has_pages_resident
);
1628 if (kret
!= KERN_SUCCESS
) {
1632 if (has_pages_resident
== TRUE
) {
1642 * Clean and/or invalidate a range in the memory object that backs this vnode
1644 * Parameters: vp The vnode whose associated ubc_info's
1645 * associated memory object is to have a
1646 * range invalidated within it
1647 * beg_off The start of the range, as an offset
1648 * end_off The end of the range, as an offset
1649 * resid_off The address of an off_t supplied by the
1650 * caller; may be set to NULL to ignore
1651 * flags See ubc_msync_internal()
1653 * Returns: 0 Success
1654 * !0 Failure; an errno is returned
1657 * *resid_off, modified If non-NULL, the contents are ALWAYS
1658 * modified; they are initialized to the
1659 * beg_off, and in case of an I/O error,
1660 * the difference between beg_off and the
1661 * current value will reflect what was
1662 * able to be written before the error
1663 * occurred. If no error is returned, the
1664 * value of the resid_off is undefined; do
1665 * NOT use it in place of end_off if you
1666 * intend to increment from the end of the
1667 * last call and call iteratively.
1669 * Notes: see ubc_msync_internal() for more detailed information.
1673 ubc_msync(vnode_t vp
, off_t beg_off
, off_t end_off
, off_t
*resid_off
, int flags
)
1679 *resid_off
= beg_off
;
1682 retval
= ubc_msync_internal(vp
, beg_off
, end_off
, resid_off
, flags
, &io_errno
);
1684 if (retval
== 0 && io_errno
== 0) {
1692 * ubc_msync_internal
1694 * Clean and/or invalidate a range in the memory object that backs this vnode
1696 * Parameters: vp The vnode whose associated ubc_info's
1697 * associated memory object is to have a
1698 * range invalidated within it
1699 * beg_off The start of the range, as an offset
1700 * end_off The end of the range, as an offset
1701 * resid_off The address of an off_t supplied by the
1702 * caller; may be set to NULL to ignore
1703 * flags MUST contain at least one of the flags
1704 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1705 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1706 * UBC_SYNC may also be specified to cause
1707 * this function to block until the
1708 * operation is complete. The behavior
1709 * of UBC_SYNC is otherwise undefined.
1710 * io_errno The address of an int to contain the
1711 * errno from a failed I/O operation, if
1712 * one occurs; may be set to NULL to
1715 * Returns: 1 Success
1719 * *resid_off, modified The contents of this offset MAY be
1720 * modified; in case of an I/O error, the
1721 * difference between beg_off and the
1722 * current value will reflect what was
1723 * able to be written before the error
1725 * *io_errno, modified The contents of this offset are set to
1726 * an errno, if an error occurs; if the
1727 * caller supplies an io_errno parameter,
1728 * they should be careful to initialize it
1729 * to 0 before calling this function to
1730 * enable them to distinguish an error
1731 * with a valid *resid_off from an invalid
1732 * one, and to avoid potentially falsely
1733 * reporting an error, depending on use.
1735 * Notes: If there is no ubc_info associated with the vnode supplied,
1736 * this function immediately returns success.
1738 * If the value of end_off is less than or equal to beg_off, this
1739 * function immediately returns success; that is, end_off is NOT
1742 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1743 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1744 * attempt to block on in-progress I/O by calling this function
1745 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1746 * in order to block pending on the I/O already in progress.
1748 * The start offset is truncated to the page boundary and the
1749 * size is adjusted to include the last page in the range; that
1750 * is, end_off on exactly a page boundary will not change if it
1751 * is rounded, and the range of bytes written will be from the
1752 * truncate beg_off to the rounded (end_off - 1).
1755 ubc_msync_internal(vnode_t vp
, off_t beg_off
, off_t end_off
, off_t
*resid_off
, int flags
, int *io_errno
)
1757 memory_object_size_t tsize
;
1759 int request_flags
= 0;
1760 int flush_flags
= MEMORY_OBJECT_RETURN_NONE
;
1762 if (!UBCINFOEXISTS(vp
)) {
1765 if ((flags
& (UBC_INVALIDATE
| UBC_PUSHDIRTY
| UBC_PUSHALL
)) == 0) {
1768 if (end_off
<= beg_off
) {
1772 if (flags
& UBC_INVALIDATE
) {
1774 * discard the resident pages
1776 request_flags
= (MEMORY_OBJECT_DATA_FLUSH
| MEMORY_OBJECT_DATA_NO_CHANGE
);
1779 if (flags
& UBC_SYNC
) {
1781 * wait for all the I/O to complete before returning
1783 request_flags
|= MEMORY_OBJECT_IO_SYNC
;
1786 if (flags
& UBC_PUSHDIRTY
) {
1788 * we only return the dirty pages in the range
1790 flush_flags
= MEMORY_OBJECT_RETURN_DIRTY
;
1793 if (flags
& UBC_PUSHALL
) {
1795 * then return all the interesting pages in the range (both
1796 * dirty and precious) to the pager
1798 flush_flags
= MEMORY_OBJECT_RETURN_ALL
;
1801 beg_off
= trunc_page_64(beg_off
);
1802 end_off
= round_page_64(end_off
);
1803 tsize
= (memory_object_size_t
)end_off
- beg_off
;
1805 /* flush and/or invalidate pages in the range requested */
1806 kret
= memory_object_lock_request(vp
->v_ubcinfo
->ui_control
,
1808 (memory_object_offset_t
*)resid_off
,
1809 io_errno
, flush_flags
, request_flags
,
1812 return (kret
== KERN_SUCCESS
) ? 1 : 0;
1819 * Explicitly map a vnode that has an associate ubc_info, and add a reference
1820 * to it for the ubc system, if there isn't one already, so it will not be
1821 * recycled while it's in use, and set flags on the ubc_info to indicate that
1824 * Parameters: vp The vnode to map
1825 * flags The mapping flags for the vnode; this
1826 * will be a combination of one or more of
1827 * PROT_READ, PROT_WRITE, and PROT_EXEC
1829 * Returns: 0 Success
1830 * EPERM Permission was denied
1832 * Notes: An I/O reference on the vnode must already be held on entry
1834 * If there is no ubc_info associated with the vnode, this function
1835 * will return success.
1837 * If a permission error occurs, this function will return
1838 * failure; all other failures will cause this function to return
1841 * IMPORTANT: This is an internal use function, and its symbols
1842 * are not exported, hence its error checking is not very robust.
1843 * It is primarily used by:
1845 * o mmap(), when mapping a file
1846 * o When mapping a shared file (a shared library in the
1847 * shared segment region)
1848 * o When loading a program image during the exec process
1850 * ...all of these uses ignore the return code, and any fault that
1851 * results later because of a failure is handled in the fix-up path
1852 * of the fault handler. The interface exists primarily as a
1855 * Given that third party implementation of the type of interfaces
1856 * that would use this function, such as alternative executable
1857 * formats, etc., are unsupported, this function is not exported
1860 * The extra reference is held until the VM system unmaps the
1861 * vnode from its own context to maintain a vnode reference in
1862 * cases like open()/mmap()/close(), which leave the backing
1863 * object referenced by a mapped memory region in a process
1866 __private_extern__
int
1867 ubc_map(vnode_t vp
, int flags
)
1869 struct ubc_info
*uip
;
1872 int need_wakeup
= 0;
1874 if (UBCINFOEXISTS(vp
)) {
1876 uip
= vp
->v_ubcinfo
;
1878 while (ISSET(uip
->ui_flags
, UI_MAPBUSY
)) {
1879 SET(uip
->ui_flags
, UI_MAPWAITING
);
1880 (void) msleep(&uip
->ui_flags
, &vp
->v_lock
,
1881 PRIBIO
, "ubc_map", NULL
);
1883 SET(uip
->ui_flags
, UI_MAPBUSY
);
1886 error
= VNOP_MMAP(vp
, flags
, vfs_context_current());
1889 * rdar://problem/22587101 required that we stop propagating
1890 * EPERM up the stack. Otherwise, we would have to funnel up
1891 * the error at all the call sites for memory_object_map().
1892 * The risk is in having to undo the map/object/entry state at
1893 * all these call sites. It would also affect more than just mmap()
1896 * if (error != EPERM)
1902 vnode_lock_spin(vp
);
1905 if (!ISSET(uip
->ui_flags
, UI_ISMAPPED
)) {
1908 SET(uip
->ui_flags
, (UI_WASMAPPED
| UI_ISMAPPED
));
1909 if (flags
& PROT_WRITE
) {
1910 SET(uip
->ui_flags
, UI_MAPPEDWRITE
);
1913 CLR(uip
->ui_flags
, UI_MAPBUSY
);
1915 if (ISSET(uip
->ui_flags
, UI_MAPWAITING
)) {
1916 CLR(uip
->ui_flags
, UI_MAPWAITING
);
1922 wakeup(&uip
->ui_flags
);
1927 * Make sure we get a ref as we can't unwind from here
1929 if (vnode_ref_ext(vp
, 0, VNODE_REF_FORCE
)) {
1930 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__
);
1933 * Vnodes that are on "unreliable" media (like disk
1934 * images, network filesystems, 3rd-party filesystems,
1935 * and possibly external devices) could see their
1936 * contents be changed via the backing store without
1937 * triggering copy-on-write, so we can't fully rely
1938 * on copy-on-write and might have to resort to
1939 * copy-on-read to protect "privileged" processes and
1940 * prevent privilege escalation.
1942 * The root filesystem is considered "reliable" because
1943 * there's not much point in trying to protect
1944 * ourselves from such a vulnerability and the extra
1945 * cost of copy-on-read (CPU time and memory pressure)
1946 * could result in some serious regressions.
1948 if (vp
->v_mount
!= NULL
&&
1949 ((vp
->v_mount
->mnt_flag
& MNT_ROOTFS
) ||
1950 vnode_on_reliable_media(vp
))) {
1952 * This vnode is deemed "reliable" so mark
1953 * its VM object as "trusted".
1955 memory_object_mark_trusted(uip
->ui_control
);
1957 // printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
1968 * Destroy the named memory object associated with the ubc_info control object
1969 * associated with the designated vnode, if there is a ubc_info associated
1970 * with the vnode, and a control object is associated with it
1972 * Parameters: vp The designated vnode
1976 * Notes: This function is called on vnode termination for all vnodes,
1977 * and must therefore not assume that there is a ubc_info that is
1978 * associated with the vnode, nor that there is a control object
1979 * associated with the ubc_info.
1981 * If all the conditions necessary are present, this function
1982 * calls memory_object_destory(), which will in turn end up
1983 * calling ubc_unmap() to release any vnode references that were
1984 * established via ubc_map().
1986 * IMPORTANT: This is an internal use function that is used
1987 * exclusively by the internal use function vclean().
1989 __private_extern__
void
1990 ubc_destroy_named(vnode_t vp
)
1992 memory_object_control_t control
;
1993 struct ubc_info
*uip
;
1996 if (UBCINFOEXISTS(vp
)) {
1997 uip
= vp
->v_ubcinfo
;
1999 /* Terminate the memory object */
2000 control
= ubc_getobject(vp
, UBC_HOLDOBJECT
);
2001 if (control
!= MEMORY_OBJECT_CONTROL_NULL
) {
2002 kret
= memory_object_destroy(control
, 0);
2003 if (kret
!= KERN_SUCCESS
) {
2004 panic("ubc_destroy_named: memory_object_destroy failed");
2014 * Determine whether or not a vnode is currently in use by ubc at a level in
2015 * excess of the requested busycount
2017 * Parameters: vp The vnode to check
2018 * busycount The threshold busy count, used to bias
2019 * the count usually already held by the
2020 * caller to avoid races
2022 * Returns: 1 The vnode is in use over the threshold
2023 * 0 The vnode is not in use over the
2026 * Notes: Because the vnode is only held locked while actually asking
2027 * the use count, this function only represents a snapshot of the
2028 * current state of the vnode. If more accurate information is
2029 * required, an additional busycount should be held by the caller
2030 * and a non-zero busycount used.
2032 * If there is no ubc_info associated with the vnode, this
2033 * function will report that the vnode is not in use by ubc.
2036 ubc_isinuse(struct vnode
*vp
, int busycount
)
2038 if (!UBCINFOEXISTS(vp
)) {
2041 return ubc_isinuse_locked(vp
, busycount
, 0);
2046 * ubc_isinuse_locked
2048 * Determine whether or not a vnode is currently in use by ubc at a level in
2049 * excess of the requested busycount
2051 * Parameters: vp The vnode to check
2052 * busycount The threshold busy count, used to bias
2053 * the count usually already held by the
2054 * caller to avoid races
2055 * locked True if the vnode is already locked by
2058 * Returns: 1 The vnode is in use over the threshold
2059 * 0 The vnode is not in use over the
2062 * Notes: If the vnode is not locked on entry, it is locked while
2063 * actually asking the use count. If this is the case, this
2064 * function only represents a snapshot of the current state of
2065 * the vnode. If more accurate information is required, the
2066 * vnode lock should be held by the caller, otherwise an
2067 * additional busycount should be held by the caller and a
2068 * non-zero busycount used.
2070 * If there is no ubc_info associated with the vnode, this
2071 * function will report that the vnode is not in use by ubc.
2074 ubc_isinuse_locked(struct vnode
*vp
, int busycount
, int locked
)
2080 vnode_lock_spin(vp
);
2083 if ((vp
->v_usecount
- vp
->v_kusecount
) > busycount
) {
2097 * Reverse the effects of a ubc_map() call for a given vnode
2099 * Parameters: vp vnode to unmap from ubc
2103 * Notes: This is an internal use function used by vnode_pager_unmap().
2104 * It will attempt to obtain a reference on the supplied vnode,
2105 * and if it can do so, and there is an associated ubc_info, and
2106 * the flags indicate that it was mapped via ubc_map(), then the
2107 * flag is cleared, the mapping removed, and the reference taken
2108 * by ubc_map() is released.
2110 * IMPORTANT: This MUST only be called by the VM
2111 * to prevent race conditions.
2113 __private_extern__
void
2114 ubc_unmap(struct vnode
*vp
)
2116 struct ubc_info
*uip
;
2118 int need_wakeup
= 0;
2120 if (vnode_getwithref(vp
)) {
2124 if (UBCINFOEXISTS(vp
)) {
2125 bool want_fsevent
= false;
2128 uip
= vp
->v_ubcinfo
;
2130 while (ISSET(uip
->ui_flags
, UI_MAPBUSY
)) {
2131 SET(uip
->ui_flags
, UI_MAPWAITING
);
2132 (void) msleep(&uip
->ui_flags
, &vp
->v_lock
,
2133 PRIBIO
, "ubc_unmap", NULL
);
2135 SET(uip
->ui_flags
, UI_MAPBUSY
);
2137 if (ISSET(uip
->ui_flags
, UI_ISMAPPED
)) {
2138 if (ISSET(uip
->ui_flags
, UI_MAPPEDWRITE
)) {
2139 want_fsevent
= true;
2145 * We want to clear the mapped flags after we've called
2146 * VNOP_MNOMAP to avoid certain races and allow
2147 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2153 vfs_context_t ctx
= vfs_context_current();
2155 (void)VNOP_MNOMAP(vp
, ctx
);
2159 * Why do we want an fsevent here? Normally the
2160 * content modified fsevent is posted when a file is
2161 * closed and only if it's written to via conventional
2162 * means. It's perfectly legal to close a file and
2163 * keep your mappings and we don't currently track
2164 * whether it was written to via a mapping.
2165 * Therefore, we need to post an fsevent here if the
2166 * file was mapped writable. This may result in false
2167 * events, i.e. we post a notification when nothing
2168 * has really changed.
2170 if (want_fsevent
&& need_fsevent(FSE_CONTENT_MODIFIED
, vp
)) {
2171 add_fsevent(FSE_CONTENT_MODIFIED
, ctx
,
2180 vnode_lock_spin(vp
);
2183 CLR(uip
->ui_flags
, UI_ISMAPPED
| UI_MAPPEDWRITE
);
2186 CLR(uip
->ui_flags
, UI_MAPBUSY
);
2188 if (ISSET(uip
->ui_flags
, UI_MAPWAITING
)) {
2189 CLR(uip
->ui_flags
, UI_MAPWAITING
);
2195 wakeup(&uip
->ui_flags
);
2199 * the drop of the vnode ref will cleanup
2208 * Manipulate individual page state for a vnode with an associated ubc_info
2209 * with an associated memory object control.
2211 * Parameters: vp The vnode backing the page
2212 * f_offset A file offset interior to the page
2213 * ops The operations to perform, as a bitmap
2214 * (see below for more information)
2215 * phys_entryp The address of a ppnum_t; may be NULL
2217 * flagsp A pointer to an int to contain flags;
2218 * may be NULL to ignore
2220 * Returns: KERN_SUCCESS Success
2221 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2223 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2224 * not physically contiguous
2225 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2226 * physically contiguous
2227 * KERN_FAILURE If the page cannot be looked up
2230 * *phys_entryp (modified) If phys_entryp is non-NULL and
2232 * *flagsp (modified) If flagsp is non-NULL and there was
2233 * !UPL_POP_PHYSICAL and a KERN_SUCCESS
2235 * Notes: For object boundaries, it is considerably more efficient to
2236 * ensure that f_offset is in fact on a page boundary, as this
2237 * will avoid internal use of the hash table to identify the
2238 * page, and would therefore skip a number of early optimizations.
2239 * Since this is a page operation anyway, the caller should try
2240 * to pass only a page aligned offset because of this.
2242 * *flagsp may be modified even if this function fails. If it is
2243 * modified, it will contain the condition of the page before the
2244 * requested operation was attempted; these will only include the
2245 * bitmap flags, and not the PL_POP_PHYSICAL, UPL_POP_DUMP,
2246 * UPL_POP_SET, or UPL_POP_CLR bits.
2248 * The flags field may contain a specific operation, such as
2249 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2251 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2252 * *phys_entryp and successful, set
2254 * o UPL_POP_DUMP Dump the specified page
2256 * Otherwise, it is treated as a bitmap of one or more page
2257 * operations to perform on the final memory object; allowable
2260 * o UPL_POP_DIRTY The page is dirty
2261 * o UPL_POP_PAGEOUT The page is paged out
2262 * o UPL_POP_PRECIOUS The page is precious
2263 * o UPL_POP_ABSENT The page is absent
2264 * o UPL_POP_BUSY The page is busy
2266 * If the page status is only being queried and not modified, then
2267 * not other bits should be specified. However, if it is being
2268 * modified, exactly ONE of the following bits should be set:
2270 * o UPL_POP_SET Set the current bitmap bits
2271 * o UPL_POP_CLR Clear the current bitmap bits
2273 * Thus to effect a combination of setting an clearing, it may be
2274 * necessary to call this function twice. If this is done, the
2275 * set should be used before the clear, since clearing may trigger
2276 * a wakeup on the destination page, and if the page is backed by
2277 * an encrypted swap file, setting will trigger the decryption
2278 * needed before the wakeup occurs.
2285 ppnum_t
*phys_entryp
,
2288 memory_object_control_t control
;
2290 control
= ubc_getobject(vp
, UBC_FLAGS_NONE
);
2291 if (control
== MEMORY_OBJECT_CONTROL_NULL
) {
2292 return KERN_INVALID_ARGUMENT
;
2295 return memory_object_page_op(control
,
2296 (memory_object_offset_t
)f_offset
,
2306 * Manipulate page state for a range of memory for a vnode with an associated
2307 * ubc_info with an associated memory object control, when page level state is
2308 * not required to be returned from the call (i.e. there are no phys_entryp or
2309 * flagsp parameters to this call, and it takes a range which may contain
2310 * multiple pages, rather than an offset interior to a single page).
2312 * Parameters: vp The vnode backing the page
2313 * f_offset_beg A file offset interior to the start page
2314 * f_offset_end A file offset interior to the end page
2315 * ops The operations to perform, as a bitmap
2316 * (see below for more information)
2317 * range The address of an int; may be NULL to
2320 * Returns: KERN_SUCCESS Success
2321 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2323 * KERN_INVALID_OBJECT If the object is physically contiguous
2326 * *range (modified) If range is non-NULL, its contents will
2327 * be modified to contain the number of
2328 * bytes successfully operated upon.
2330 * Notes: IMPORTANT: This function cannot be used on a range that
2331 * consists of physically contiguous pages.
2333 * For object boundaries, it is considerably more efficient to
2334 * ensure that f_offset_beg and f_offset_end are in fact on page
2335 * boundaries, as this will avoid internal use of the hash table
2336 * to identify the page, and would therefore skip a number of
2337 * early optimizations. Since this is an operation on a set of
2338 * pages anyway, the caller should try to pass only a page aligned
2339 * offsets because of this.
2341 * *range will be modified only if this function succeeds.
2343 * The flags field MUST contain a specific operation; allowable
2346 * o UPL_ROP_ABSENT Returns the extent of the range
2347 * presented which is absent, starting
2348 * with the start address presented
2350 * o UPL_ROP_PRESENT Returns the extent of the range
2351 * presented which is present (resident),
2352 * starting with the start address
2354 * o UPL_ROP_DUMP Dump the pages which are found in the
2355 * target object for the target range.
2357 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2358 * multiple regions in the range, only the first matching region
2369 memory_object_control_t control
;
2371 control
= ubc_getobject(vp
, UBC_FLAGS_NONE
);
2372 if (control
== MEMORY_OBJECT_CONTROL_NULL
) {
2373 return KERN_INVALID_ARGUMENT
;
2376 return memory_object_range_op(control
,
2377 (memory_object_offset_t
)f_offset_beg
,
2378 (memory_object_offset_t
)f_offset_end
,
2387 * Given a vnode, cause the population of a portion of the vm_object; based on
2388 * the nature of the request, the pages returned may contain valid data, or
2389 * they may be uninitialized.
2391 * Parameters: vp The vnode from which to create the upl
2392 * f_offset The start offset into the backing store
2393 * represented by the vnode
2394 * bufsize The size of the upl to create
2395 * uplp Pointer to the upl_t to receive the
2396 * created upl; MUST NOT be NULL
2397 * plp Pointer to receive the internal page
2398 * list for the created upl; MAY be NULL
2401 * Returns: KERN_SUCCESS The requested upl has been created
2402 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2403 * multiple of the page size
2404 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2405 * the vnode, or there is no memory object
2406 * control associated with the ubc_info
2407 * memory_object_upl_request:KERN_INVALID_VALUE
2408 * The supplied upl_flags argument is
2412 * *plp (modified) If non-NULL, the value of *plp will be
2413 * modified to point to the internal page
2414 * list; this modification may occur even
2415 * if this function is unsuccessful, in
2416 * which case the contents may be invalid
2418 * Note: If successful, the returned *uplp MUST subsequently be freed
2419 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2420 * ubc_upl_abort(), or ubc_upl_abort_range().
2423 ubc_create_upl_external(
2428 upl_page_info_t
**plp
,
2431 return ubc_create_upl_kernel(vp
, f_offset
, bufsize
, uplp
, plp
, uplflags
, vm_tag_bt());
2435 ubc_create_upl_kernel(
2440 upl_page_info_t
**plp
,
2444 memory_object_control_t control
;
2452 if (bufsize
& 0xfff) {
2453 return KERN_INVALID_ARGUMENT
;
2456 if (bufsize
> MAX_UPL_SIZE_BYTES
) {
2457 return KERN_INVALID_ARGUMENT
;
2460 if (uplflags
& (UPL_UBC_MSYNC
| UPL_UBC_PAGEOUT
| UPL_UBC_PAGEIN
)) {
2461 if (uplflags
& UPL_UBC_MSYNC
) {
2462 uplflags
&= UPL_RET_ONLY_DIRTY
;
2464 uplflags
|= UPL_COPYOUT_FROM
| UPL_CLEAN_IN_PLACE
|
2465 UPL_SET_INTERNAL
| UPL_SET_LITE
;
2466 } else if (uplflags
& UPL_UBC_PAGEOUT
) {
2467 uplflags
&= UPL_RET_ONLY_DIRTY
;
2469 if (uplflags
& UPL_RET_ONLY_DIRTY
) {
2470 uplflags
|= UPL_NOBLOCK
;
2473 uplflags
|= UPL_FOR_PAGEOUT
| UPL_CLEAN_IN_PLACE
|
2474 UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
| UPL_SET_LITE
;
2476 uplflags
|= UPL_RET_ONLY_ABSENT
|
2477 UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
|
2478 UPL_SET_INTERNAL
| UPL_SET_LITE
;
2481 * if the requested size == PAGE_SIZE, we don't want to set
2482 * the UPL_NOBLOCK since we may be trying to recover from a
2483 * previous partial pagein I/O that occurred because we were low
2484 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2485 * since we're only asking for a single page, we can block w/o fear
2486 * of tying up pages while waiting for more to become available
2488 if (bufsize
> PAGE_SIZE
) {
2489 uplflags
|= UPL_NOBLOCK
;
2493 uplflags
&= ~UPL_FOR_PAGEOUT
;
2495 if (uplflags
& UPL_WILL_BE_DUMPED
) {
2496 uplflags
&= ~UPL_WILL_BE_DUMPED
;
2497 uplflags
|= (UPL_NO_SYNC
| UPL_SET_INTERNAL
);
2499 uplflags
|= (UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
| UPL_SET_INTERNAL
);
2502 control
= ubc_getobject(vp
, UBC_FLAGS_NONE
);
2503 if (control
== MEMORY_OBJECT_CONTROL_NULL
) {
2504 return KERN_INVALID_ARGUMENT
;
2507 kr
= memory_object_upl_request(control
, f_offset
, bufsize
, uplp
, NULL
, NULL
, uplflags
, tag
);
2508 if (kr
== KERN_SUCCESS
&& plp
!= NULL
) {
2509 *plp
= UPL_GET_INTERNAL_PAGE_LIST(*uplp
);
2516 * ubc_upl_maxbufsize
2518 * Return the maximum bufsize ubc_create_upl( ) will take.
2522 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2528 return MAX_UPL_SIZE_BYTES
;
2534 * Map the page list assocated with the supplied upl into the kernel virtual
2535 * address space at the virtual address indicated by the dst_addr argument;
2536 * the entire upl is mapped
2538 * Parameters: upl The upl to map
2539 * dst_addr The address at which to map the upl
2541 * Returns: KERN_SUCCESS The upl has been mapped
2542 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2543 * KERN_FAILURE The upl is already mapped
2544 * vm_map_enter:KERN_INVALID_ARGUMENT
2545 * A failure code from vm_map_enter() due
2546 * to an invalid argument
2551 vm_offset_t
*dst_addr
)
2553 return vm_upl_map(kernel_map
, upl
, dst_addr
);
2560 * Unmap the page list assocated with the supplied upl from the kernel virtual
2561 * address space; the entire upl is unmapped.
2563 * Parameters: upl The upl to unmap
2565 * Returns: KERN_SUCCESS The upl has been unmapped
2566 * KERN_FAILURE The upl is not currently mapped
2567 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2573 return vm_upl_unmap(kernel_map
, upl
);
2580 * Commit the contents of the upl to the backing store
2582 * Parameters: upl The upl to commit
2584 * Returns: KERN_SUCCESS The upl has been committed
2585 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2586 * KERN_FAILURE The supplied upl does not represent
2587 * device memory, and the offset plus the
2588 * size would exceed the actual size of
2591 * Notes: In practice, the only return value for this function should be
2592 * KERN_SUCCESS, unless there has been data structure corruption;
2593 * since the upl is deallocated regardless of success or failure,
2594 * there's really nothing to do about this other than panic.
2596 * IMPORTANT: Use of this function should not be mixed with use of
2597 * ubc_upl_commit_range(), due to the unconditional deallocation
2604 upl_page_info_t
*pl
;
2607 pl
= UPL_GET_INTERNAL_PAGE_LIST(upl
);
2608 kr
= upl_commit(upl
, pl
, MAX_UPL_SIZE_BYTES
>> PAGE_SHIFT
);
2609 upl_deallocate(upl
);
2617 * Commit the contents of the specified range of the upl to the backing store
2619 * Parameters: upl The upl to commit
2620 * offset The offset into the upl
2621 * size The size of the region to be committed,
2622 * starting at the specified offset
2623 * flags commit type (see below)
2625 * Returns: KERN_SUCCESS The range has been committed
2626 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2627 * KERN_FAILURE The supplied upl does not represent
2628 * device memory, and the offset plus the
2629 * size would exceed the actual size of
2632 * Notes: IMPORTANT: If the commit is successful, and the object is now
2633 * empty, the upl will be deallocated. Since the caller cannot
2634 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2635 * should generally only be used when the offset is 0 and the size
2636 * is equal to the upl size.
2638 * The flags argument is a bitmap of flags on the rage of pages in
2639 * the upl to be committed; allowable flags are:
2641 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2642 * both empty and has been
2643 * successfully committed
2644 * o UPL_COMMIT_CLEAR_DIRTY Clear each pages dirty
2645 * bit; will prevent a
2647 * o UPL_COMMIT_SET_DIRTY Set each pages dirty
2648 * bit; will cause a later
2650 * o UPL_COMMIT_INACTIVATE Clear each pages
2651 * reference bit; the page
2652 * will not be accessed
2653 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2654 * become busy when an
2655 * IOMemoryDescriptor is
2656 * mapped or redirected,
2657 * and we have to wait for
2660 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2661 * not be specified by the caller.
2663 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2664 * mutually exclusive, and should not be combined.
2667 ubc_upl_commit_range(
2669 upl_offset_t offset
,
2673 upl_page_info_t
*pl
;
2677 if (flags
& UPL_COMMIT_FREE_ON_EMPTY
) {
2678 flags
|= UPL_COMMIT_NOTIFY_EMPTY
;
2681 if (flags
& UPL_COMMIT_KERNEL_ONLY_FLAGS
) {
2682 return KERN_INVALID_ARGUMENT
;
2685 pl
= UPL_GET_INTERNAL_PAGE_LIST(upl
);
2687 kr
= upl_commit_range(upl
, offset
, size
, flags
,
2688 pl
, MAX_UPL_SIZE_BYTES
>> PAGE_SHIFT
, &empty
);
2690 if ((flags
& UPL_COMMIT_FREE_ON_EMPTY
) && empty
) {
2691 upl_deallocate(upl
);
2699 * ubc_upl_abort_range
2701 * Abort the contents of the specified range of the specified upl
2703 * Parameters: upl The upl to abort
2704 * offset The offset into the upl
2705 * size The size of the region to be aborted,
2706 * starting at the specified offset
2707 * abort_flags abort type (see below)
2709 * Returns: KERN_SUCCESS The range has been aborted
2710 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2711 * KERN_FAILURE The supplied upl does not represent
2712 * device memory, and the offset plus the
2713 * size would exceed the actual size of
2716 * Notes: IMPORTANT: If the abort is successful, and the object is now
2717 * empty, the upl will be deallocated. Since the caller cannot
2718 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2719 * should generally only be used when the offset is 0 and the size
2720 * is equal to the upl size.
2722 * The abort_flags argument is a bitmap of flags on the range of
2723 * pages in the upl to be aborted; allowable flags are:
2725 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2726 * empty and has been successfully
2728 * o UPL_ABORT_RESTART The operation must be restarted
2729 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2730 * o UPL_ABORT_ERROR An I/O error occurred
2731 * o UPL_ABORT_DUMP_PAGES Just free the pages
2732 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2733 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2735 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2736 * not be specified by the caller. It is intended to fulfill the
2737 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2738 * ubc_upl_commit_range(), but is never referenced internally.
2740 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2741 * referenced; do not use it.
2744 ubc_upl_abort_range(
2746 upl_offset_t offset
,
2751 boolean_t empty
= FALSE
;
2753 if (abort_flags
& UPL_ABORT_FREE_ON_EMPTY
) {
2754 abort_flags
|= UPL_ABORT_NOTIFY_EMPTY
;
2757 kr
= upl_abort_range(upl
, offset
, size
, abort_flags
, &empty
);
2759 if ((abort_flags
& UPL_ABORT_FREE_ON_EMPTY
) && empty
) {
2760 upl_deallocate(upl
);
2770 * Abort the contents of the specified upl
2772 * Parameters: upl The upl to abort
2773 * abort_type abort type (see below)
2775 * Returns: KERN_SUCCESS The range has been aborted
2776 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2777 * KERN_FAILURE The supplied upl does not represent
2778 * device memory, and the offset plus the
2779 * size would exceed the actual size of
2782 * Notes: IMPORTANT: If the abort is successful, and the object is now
2783 * empty, the upl will be deallocated. Since the caller cannot
2784 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2785 * should generally only be used when the offset is 0 and the size
2786 * is equal to the upl size.
2788 * The abort_type is a bitmap of flags on the range of
2789 * pages in the upl to be aborted; allowable flags are:
2791 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2792 * empty and has been successfully
2794 * o UPL_ABORT_RESTART The operation must be restarted
2795 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2796 * o UPL_ABORT_ERROR An I/O error occurred
2797 * o UPL_ABORT_DUMP_PAGES Just free the pages
2798 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2799 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2801 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2802 * not be specified by the caller. It is intended to fulfill the
2803 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2804 * ubc_upl_commit_range(), but is never referenced internally.
2806 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2807 * referenced; do not use it.
2816 kr
= upl_abort(upl
, abort_type
);
2817 upl_deallocate(upl
);
2825 * Retrieve the internal page list for the specified upl
2827 * Parameters: upl The upl to obtain the page list from
2829 * Returns: !NULL The (upl_page_info_t *) for the page
2830 * list internal to the upl
2831 * NULL Error/no page list associated
2833 * Notes: IMPORTANT: The function is only valid on internal objects
2834 * where the list request was made with the UPL_INTERNAL flag.
2836 * This function is a utility helper function, since some callers
2837 * may not have direct access to the header defining the macro,
2838 * due to abstraction layering constraints.
2844 return UPL_GET_INTERNAL_PAGE_LIST(upl
);
2849 UBCINFOEXISTS(const struct vnode
* vp
)
2851 return (vp
) && ((vp
)->v_type
== VREG
) && ((vp
)->v_ubcinfo
!= UBC_INFO_NULL
);
2856 ubc_upl_range_needed(
2861 upl_range_needed(upl
, index
, count
);
2865 ubc_is_mapped(const struct vnode
*vp
, boolean_t
*writable
)
2867 if (!UBCINFOEXISTS(vp
) || !ISSET(vp
->v_ubcinfo
->ui_flags
, UI_ISMAPPED
)) {
2871 *writable
= ISSET(vp
->v_ubcinfo
->ui_flags
, UI_MAPPEDWRITE
);
2877 ubc_is_mapped_writable(const struct vnode
*vp
)
2880 return ubc_is_mapped(vp
, &writable
) && writable
;
2887 static atomic_size_t cs_blob_size
= 0;
2888 static atomic_uint_fast32_t cs_blob_count
= 0;
2889 static atomic_size_t cs_blob_size_peak
= 0;
2890 static atomic_size_t cs_blob_size_max
= 0;
2891 static atomic_uint_fast32_t cs_blob_count_peak
= 0;
2893 SYSCTL_UINT(_vm
, OID_AUTO
, cs_blob_count
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &cs_blob_count
, 0, "Current number of code signature blobs");
2894 SYSCTL_ULONG(_vm
, OID_AUTO
, cs_blob_size
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &cs_blob_size
, "Current size of all code signature blobs");
2895 SYSCTL_UINT(_vm
, OID_AUTO
, cs_blob_count_peak
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &cs_blob_count_peak
, 0, "Peak number of code signature blobs");
2896 SYSCTL_ULONG(_vm
, OID_AUTO
, cs_blob_size_peak
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &cs_blob_size_peak
, "Peak size of code signature blobs");
2897 SYSCTL_ULONG(_vm
, OID_AUTO
, cs_blob_size_max
, CTLFLAG_RD
| CTLFLAG_LOCKED
, &cs_blob_size_max
, "Size of biggest code signature blob");
2900 * Function: csblob_parse_teamid
2902 * Description: This function returns a pointer to the team id
2903 * stored within the codedirectory of the csblob.
2904 * If the codedirectory predates team-ids, it returns
2906 * This does not copy the name but returns a pointer to
2907 * it within the CD. Subsequently, the CD must be
2908 * available when this is used.
2912 csblob_parse_teamid(struct cs_blob
*csblob
)
2914 const CS_CodeDirectory
*cd
;
2916 cd
= csblob
->csb_cd
;
2918 if (ntohl(cd
->version
) < CS_SUPPORTSTEAMID
) {
2922 if (cd
->teamOffset
== 0) {
2926 const char *name
= ((const char *)cd
) + ntohl(cd
->teamOffset
);
2928 printf("found team-id %s in cdblob\n", name
);
2936 ubc_cs_blob_allocate(
2937 vm_offset_t
*blob_addr_p
,
2938 vm_size_t
*blob_size_p
)
2940 kern_return_t kr
= KERN_FAILURE
;
2943 *blob_addr_p
= (vm_offset_t
) kalloc_tag(*blob_size_p
, VM_KERN_MEMORY_SECURITY
);
2945 if (*blob_addr_p
== 0) {
2956 ubc_cs_blob_deallocate(
2957 vm_offset_t blob_addr
,
2958 vm_size_t blob_size
)
2961 if (blob_size
> pmap_cs_blob_limit
) {
2962 kmem_free(kernel_map
, blob_addr
, blob_size
);
2966 kfree(blob_addr
, blob_size
);
2971 * Some codesigned files use a lowest common denominator page size of
2972 * 4KiB, but can be used on systems that have a runtime page size of
2973 * 16KiB. Since faults will only occur on 16KiB ranges in
2974 * cs_validate_range(), we can convert the original Code Directory to
2975 * a multi-level scheme where groups of 4 hashes are combined to form
2976 * a new hash, which represents 16KiB in the on-disk file. This can
2977 * reduce the wired memory requirement for the Code Directory by
2978 * 75%. Care must be taken for binaries that use the "fourk" VM pager
2979 * for unaligned access, which may still attempt to validate on
2980 * non-16KiB multiples for compatibility with 3rd party binaries.
2983 ubc_cs_supports_multilevel_hash(struct cs_blob
*blob __unused
)
2985 const CS_CodeDirectory
*cd
;
2989 * Only applies to binaries that ship as part of the OS,
2990 * primarily the shared cache.
2992 if (!blob
->csb_platform_binary
|| blob
->csb_teamid
!= NULL
) {
2997 * If the runtime page size matches the code signing page
2998 * size, there is no work to do.
3000 if (PAGE_SHIFT
<= blob
->csb_hash_pageshift
) {
3007 * There must be a valid integral multiple of hashes
3009 if (ntohl(cd
->nCodeSlots
) & (PAGE_MASK
>> blob
->csb_hash_pageshift
)) {
3014 * Scatter lists must also have ranges that have an integral number of hashes
3016 if ((ntohl(cd
->version
) >= CS_SUPPORTSSCATTER
) && (ntohl(cd
->scatterOffset
))) {
3017 const SC_Scatter
*scatter
= (const SC_Scatter
*)
3018 ((const char*)cd
+ ntohl(cd
->scatterOffset
));
3019 /* iterate all scatter structs to make sure they are all aligned */
3021 uint32_t sbase
= ntohl(scatter
->base
);
3022 uint32_t scount
= ntohl(scatter
->count
);
3029 if (sbase
& (PAGE_MASK
>> blob
->csb_hash_pageshift
)) {
3033 if (scount
& (PAGE_MASK
>> blob
->csb_hash_pageshift
)) {
3041 /* Covered range must be a multiple of the new page size */
3042 if (ntohl(cd
->codeLimit
) & PAGE_MASK
) {
3046 /* All checks pass */
3051 * Given a cs_blob with an already chosen best code directory, this
3052 * function allocates memory and copies into it only the blobs that
3053 * will be needed by the kernel, namely the single chosen code
3054 * directory (and not any of its alternatives) and the entitlement
3057 * This saves significant memory with agile signatures, and additional
3058 * memory for 3rd Party Code because we also omit the CMS blob.
3060 * To support multilevel and other potential code directory rewriting,
3061 * the size of a new code directory can be specified. Since that code
3062 * directory will replace the existing code directory,
3063 * ubc_cs_reconstitute_code_signature does not copy the original code
3064 * directory when a size is given, and the caller must fill it in.
3067 ubc_cs_reconstitute_code_signature(struct cs_blob
const *blob
, vm_size_t optional_new_cd_size
,
3068 vm_address_t
*new_blob_addr_p
, vm_size_t
*new_blob_size_p
,
3069 CS_CodeDirectory
**new_cd_p
, CS_GenericBlob
const **new_entitlements_p
)
3071 const CS_CodeDirectory
*old_cd
, *cd
;
3072 CS_CodeDirectory
*new_cd
;
3073 const CS_GenericBlob
*entitlements
;
3074 vm_offset_t new_blob_addr
;
3075 vm_size_t new_blob_size
;
3076 vm_size_t new_cdsize
;
3080 old_cd
= blob
->csb_cd
;
3082 new_cdsize
= optional_new_cd_size
!= 0 ? optional_new_cd_size
: htonl(old_cd
->length
);
3084 new_blob_size
= sizeof(CS_SuperBlob
);
3085 new_blob_size
+= sizeof(CS_BlobIndex
);
3086 new_blob_size
+= new_cdsize
;
3088 if (blob
->csb_entitlements_blob
) {
3089 /* We need to add a slot for the entitlements */
3090 new_blob_size
+= sizeof(CS_BlobIndex
);
3091 new_blob_size
+= ntohl(blob
->csb_entitlements_blob
->length
);
3094 kr
= ubc_cs_blob_allocate(&new_blob_addr
, &new_blob_size
);
3095 if (kr
!= KERN_SUCCESS
) {
3097 printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
3103 CS_SuperBlob
*new_superblob
;
3105 new_superblob
= (CS_SuperBlob
*)new_blob_addr
;
3106 new_superblob
->magic
= htonl(CSMAGIC_EMBEDDED_SIGNATURE
);
3107 new_superblob
->length
= htonl((uint32_t)new_blob_size
);
3108 if (blob
->csb_entitlements_blob
) {
3109 vm_size_t ent_offset
, cd_offset
;
3111 cd_offset
= sizeof(CS_SuperBlob
) + 2 * sizeof(CS_BlobIndex
);
3112 ent_offset
= cd_offset
+ new_cdsize
;
3114 new_superblob
->count
= htonl(2);
3115 new_superblob
->index
[0].type
= htonl(CSSLOT_CODEDIRECTORY
);
3116 new_superblob
->index
[0].offset
= htonl((uint32_t)cd_offset
);
3117 new_superblob
->index
[1].type
= htonl(CSSLOT_ENTITLEMENTS
);
3118 new_superblob
->index
[1].offset
= htonl((uint32_t)ent_offset
);
3120 memcpy((void *)(new_blob_addr
+ ent_offset
), blob
->csb_entitlements_blob
, ntohl(blob
->csb_entitlements_blob
->length
));
3122 new_cd
= (CS_CodeDirectory
*)(new_blob_addr
+ cd_offset
);
3124 // Blob is the code directory, directly.
3125 new_cd
= (CS_CodeDirectory
*)new_blob_addr
;
3128 if (optional_new_cd_size
== 0) {
3129 // Copy code directory, and revalidate.
3130 memcpy(new_cd
, old_cd
, new_cdsize
);
3132 vm_size_t length
= new_blob_size
;
3134 error
= cs_validate_csblob((const uint8_t *)new_blob_addr
, length
, &cd
, &entitlements
);
3137 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3140 ubc_cs_blob_deallocate(new_blob_addr
, new_blob_size
);
3143 *new_entitlements_p
= entitlements
;
3145 // Caller will fill out and validate code directory.
3146 memset(new_cd
, 0, new_cdsize
);
3147 *new_entitlements_p
= NULL
;
3150 *new_blob_addr_p
= new_blob_addr
;
3151 *new_blob_size_p
= new_blob_size
;
3158 ubc_cs_convert_to_multilevel_hash(struct cs_blob
*blob
)
3160 const CS_CodeDirectory
*old_cd
, *cd
;
3161 CS_CodeDirectory
*new_cd
;
3162 const CS_GenericBlob
*entitlements
;
3163 vm_offset_t new_blob_addr
;
3164 vm_size_t new_blob_size
;
3165 vm_size_t new_cdsize
;
3168 uint32_t hashes_per_new_hash_shift
= (uint32_t)(PAGE_SHIFT
- blob
->csb_hash_pageshift
);
3171 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3172 (unsigned long)blob
->csb_hash_pageshift
, (unsigned long)PAGE_SHIFT
);
3175 old_cd
= blob
->csb_cd
;
3177 /* Up to the hashes, we can copy all data */
3178 new_cdsize
= ntohl(old_cd
->hashOffset
);
3179 new_cdsize
+= (ntohl(old_cd
->nCodeSlots
) >> hashes_per_new_hash_shift
) * old_cd
->hashSize
;
3181 error
= ubc_cs_reconstitute_code_signature(blob
, new_cdsize
,
3182 &new_blob_addr
, &new_blob_size
, &new_cd
,
3185 printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error
);
3189 memcpy(new_cd
, old_cd
, ntohl(old_cd
->hashOffset
));
3191 /* Update fields in the Code Directory structure */
3192 new_cd
->length
= htonl((uint32_t)new_cdsize
);
3194 uint32_t nCodeSlots
= ntohl(new_cd
->nCodeSlots
);
3195 nCodeSlots
>>= hashes_per_new_hash_shift
;
3196 new_cd
->nCodeSlots
= htonl(nCodeSlots
);
3198 new_cd
->pageSize
= (uint8_t)PAGE_SHIFT
; /* Not byte-swapped */
3200 if ((ntohl(new_cd
->version
) >= CS_SUPPORTSSCATTER
) && (ntohl(new_cd
->scatterOffset
))) {
3201 SC_Scatter
*scatter
= (SC_Scatter
*)
3202 ((char *)new_cd
+ ntohl(new_cd
->scatterOffset
));
3203 /* iterate all scatter structs to scale their counts */
3205 uint32_t scount
= ntohl(scatter
->count
);
3206 uint32_t sbase
= ntohl(scatter
->base
);
3213 scount
>>= hashes_per_new_hash_shift
;
3214 scatter
->count
= htonl(scount
);
3216 sbase
>>= hashes_per_new_hash_shift
;
3217 scatter
->base
= htonl(sbase
);
3223 /* For each group of hashes, hash them together */
3224 const unsigned char *src_base
= (const unsigned char *)old_cd
+ ntohl(old_cd
->hashOffset
);
3225 unsigned char *dst_base
= (unsigned char *)new_cd
+ ntohl(new_cd
->hashOffset
);
3227 uint32_t hash_index
;
3228 for (hash_index
= 0; hash_index
< nCodeSlots
; hash_index
++) {
3229 union cs_hash_union mdctx
;
3231 uint32_t source_hash_len
= old_cd
->hashSize
<< hashes_per_new_hash_shift
;
3232 const unsigned char *src
= src_base
+ hash_index
* source_hash_len
;
3233 unsigned char *dst
= dst_base
+ hash_index
* new_cd
->hashSize
;
3235 blob
->csb_hashtype
->cs_init(&mdctx
);
3236 blob
->csb_hashtype
->cs_update(&mdctx
, src
, source_hash_len
);
3237 blob
->csb_hashtype
->cs_final(dst
, &mdctx
);
3240 error
= cs_validate_csblob((const uint8_t *)new_blob_addr
, new_blob_size
, &cd
, &entitlements
);
3242 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3245 ubc_cs_blob_deallocate(new_blob_addr
, new_blob_size
);
3249 /* New Code Directory is ready for use, swap it out in the blob structure */
3250 ubc_cs_blob_deallocate(blob
->csb_mem_kaddr
, blob
->csb_mem_size
);
3252 blob
->csb_mem_size
= new_blob_size
;
3253 blob
->csb_mem_kaddr
= new_blob_addr
;
3255 blob
->csb_entitlements_blob
= entitlements
;
3257 /* The blob has some cached attributes of the Code Directory, so update those */
3259 blob
->csb_hash_firstlevel_pageshift
= blob
->csb_hash_pageshift
; /* Save the original page size */
3261 blob
->csb_hash_pageshift
= PAGE_SHIFT
;
3262 blob
->csb_end_offset
= ntohl(cd
->codeLimit
);
3263 if ((ntohl(cd
->version
) >= CS_SUPPORTSSCATTER
) && (ntohl(cd
->scatterOffset
))) {
3264 const SC_Scatter
*scatter
= (const SC_Scatter
*)
3265 ((const char*)cd
+ ntohl(cd
->scatterOffset
));
3266 blob
->csb_start_offset
= ((off_t
)ntohl(scatter
->base
)) * PAGE_SIZE
;
3268 blob
->csb_start_offset
= 0;
3275 * Validate the code signature blob, create a struct cs_blob wrapper
3276 * and return it together with a pointer to the chosen code directory
3277 * and entitlements blob.
3279 * Note that this takes ownership of the memory as addr, mainly because
3280 * this function can actually replace the passed in blob with another
3281 * one, e.g. when performing multilevel hashing optimization.
3284 cs_blob_create_validated(
3285 vm_address_t
* const addr
,
3287 struct cs_blob
** const ret_blob
,
3288 CS_CodeDirectory
const ** const ret_cd
)
3290 struct cs_blob
*blob
;
3292 const CS_CodeDirectory
*cd
;
3293 const CS_GenericBlob
*entitlements
;
3294 union cs_hash_union mdctx
;
3301 blob
= (struct cs_blob
*) kalloc(sizeof(struct cs_blob
));
3306 /* fill in the new blob */
3307 blob
->csb_mem_size
= size
;
3308 blob
->csb_mem_offset
= 0;
3309 blob
->csb_mem_kaddr
= *addr
;
3310 blob
->csb_flags
= 0;
3311 blob
->csb_signer_type
= CS_SIGNER_TYPE_UNKNOWN
;
3312 blob
->csb_platform_binary
= 0;
3313 blob
->csb_platform_path
= 0;
3314 blob
->csb_teamid
= NULL
;
3315 #if CONFIG_SUPPLEMENTAL_SIGNATURES
3316 blob
->csb_supplement_teamid
= NULL
;
3318 blob
->csb_entitlements_blob
= NULL
;
3319 blob
->csb_entitlements
= NULL
;
3320 blob
->csb_reconstituted
= false;
3322 /* Transfer ownership. Even on error, this function will deallocate */
3326 * Validate the blob's contents
3328 length
= (size_t) size
;
3329 error
= cs_validate_csblob((const uint8_t *)blob
->csb_mem_kaddr
,
3330 length
, &cd
, &entitlements
);
3333 printf("CODESIGNING: csblob invalid: %d\n", error
);
3336 * The vnode checker can't make the rest of this function
3337 * succeed if csblob validation failed, so bail */
3340 const unsigned char *md_base
;
3341 uint8_t hash
[CS_HASH_MAX_SIZE
];
3343 vm_offset_t hash_pagemask
;
3346 blob
->csb_entitlements_blob
= entitlements
; /* may be NULL, not yet validated */
3347 blob
->csb_hashtype
= cs_find_md(cd
->hashType
);
3348 if (blob
->csb_hashtype
== NULL
|| blob
->csb_hashtype
->cs_digest_size
> sizeof(hash
)) {
3349 panic("validated CodeDirectory but unsupported type");
3352 blob
->csb_hash_pageshift
= cd
->pageSize
;
3353 hash_pagemask
= (1U << cd
->pageSize
) - 1;
3354 blob
->csb_hash_firstlevel_pageshift
= 0;
3355 blob
->csb_flags
= (ntohl(cd
->flags
) & CS_ALLOWED_MACHO
) | CS_VALID
;
3356 blob
->csb_end_offset
= (((vm_offset_t
)ntohl(cd
->codeLimit
) + hash_pagemask
) & ~hash_pagemask
);
3357 if ((ntohl(cd
->version
) >= CS_SUPPORTSSCATTER
) && (ntohl(cd
->scatterOffset
))) {
3358 const SC_Scatter
*scatter
= (const SC_Scatter
*)
3359 ((const char*)cd
+ ntohl(cd
->scatterOffset
));
3360 blob
->csb_start_offset
= ((off_t
)ntohl(scatter
->base
)) * (1U << blob
->csb_hash_pageshift
);
3362 blob
->csb_start_offset
= 0;
3364 /* compute the blob's cdhash */
3365 md_base
= (const unsigned char *) cd
;
3366 md_size
= ntohl(cd
->length
);
3368 blob
->csb_hashtype
->cs_init(&mdctx
);
3369 blob
->csb_hashtype
->cs_update(&mdctx
, md_base
, md_size
);
3370 blob
->csb_hashtype
->cs_final(hash
, &mdctx
);
3372 memcpy(blob
->csb_cdhash
, hash
, CS_CDHASH_LEN
);
3373 blob
->csb_cdhash_signature
= ptrauth_utils_sign_blob_generic(blob
->csb_cdhash
,
3374 sizeof(blob
->csb_cdhash
),
3375 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
3376 PTRAUTH_ADDR_DIVERSIFY
);
3378 #if CONFIG_SUPPLEMENTAL_SIGNATURES
3379 blob
->csb_linkage_hashtype
= NULL
;
3380 if (ntohl(cd
->version
) >= CS_SUPPORTSLINKAGE
&& cd
->linkageHashType
!= 0 &&
3381 ntohl(cd
->linkageSize
) >= CS_CDHASH_LEN
) {
3382 blob
->csb_linkage_hashtype
= cs_find_md(cd
->linkageHashType
);
3384 if (blob
->csb_linkage_hashtype
!= NULL
) {
3385 memcpy(blob
->csb_linkage
, (uint8_t const*)cd
+ ntohl(cd
->linkageOffset
),
3401 if (ret_blob
!= NULL
) {
3404 if (ret_cd
!= NULL
) {
3412 * Free a cs_blob previously created by cs_blob_create_validated.
3416 struct cs_blob
* const blob
)
3419 if (blob
->csb_mem_kaddr
) {
3420 ubc_cs_blob_deallocate(blob
->csb_mem_kaddr
, blob
->csb_mem_size
);
3421 blob
->csb_mem_kaddr
= 0;
3423 if (blob
->csb_entitlements
!= NULL
) {
3424 osobject_release(blob
->csb_entitlements
);
3425 blob
->csb_entitlements
= NULL
;
3427 (kfree
)(blob
, sizeof(*blob
));
3430 #if CONFIG_SUPPLEMENTAL_SIGNATURES
3432 cs_blob_supplement_free(struct cs_blob
* const blob
)
3435 if (blob
->csb_supplement_teamid
!= NULL
) {
3436 vm_size_t teamid_size
= strlen(blob
->csb_supplement_teamid
) + 1;
3437 kfree(blob
->csb_supplement_teamid
, teamid_size
);
3438 blob
->csb_supplement_teamid
= NULL
;
3446 ubc_cs_blob_adjust_statistics(struct cs_blob
const *blob
)
3448 /* Note that the atomic ops are not enough to guarantee
3449 * correctness: If a blob with an intermediate size is inserted
3450 * concurrently, we can lose a peak value assignment. But these
3451 * statistics are only advisory anyway, so we're not going to
3452 * employ full locking here. (Consequently, we are also okay with
3453 * relaxed ordering of those accesses.)
3456 unsigned int new_cs_blob_count
= os_atomic_add(&cs_blob_count
, 1, relaxed
);
3457 if (new_cs_blob_count
> os_atomic_load(&cs_blob_count_peak
, relaxed
)) {
3458 os_atomic_store(&cs_blob_count_peak
, new_cs_blob_count
, relaxed
);
3461 size_t new_cs_blob_size
= os_atomic_add(&cs_blob_size
, blob
->csb_mem_size
, relaxed
);
3463 if (new_cs_blob_size
> os_atomic_load(&cs_blob_size_peak
, relaxed
)) {
3464 os_atomic_store(&cs_blob_size_peak
, new_cs_blob_size
, relaxed
);
3466 if (blob
->csb_mem_size
> os_atomic_load(&cs_blob_size_max
, relaxed
)) {
3467 os_atomic_store(&cs_blob_size_max
, blob
->csb_mem_size
, relaxed
);
3476 cpu_subtype_t cpusubtype
,
3480 struct image_params
*imgp
,
3482 struct cs_blob
**ret_blob
)
3485 struct ubc_info
*uip
;
3486 struct cs_blob
*blob
= NULL
, *oblob
= NULL
;
3488 CS_CodeDirectory
const *cd
;
3489 off_t blob_start_offset
, blob_end_offset
;
3490 boolean_t record_mtime
;
3492 record_mtime
= FALSE
;
3497 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3498 * Validates the passed in blob in the process. */
3499 error
= cs_blob_create_validated(addr
, size
, &blob
, &cd
);
3502 printf("malform code signature blob: %d\n", error
);
3506 blob
->csb_cpu_type
= cputype
;
3507 blob
->csb_cpu_subtype
= cpusubtype
& ~CPU_SUBTYPE_MASK
;
3508 blob
->csb_base_offset
= base_offset
;
3511 * Let policy module check whether the blob's signature is accepted.
3514 unsigned int cs_flags
= blob
->csb_flags
;
3515 unsigned int signer_type
= blob
->csb_signer_type
;
3516 error
= mac_vnode_check_signature(vp
, blob
, imgp
, &cs_flags
, &signer_type
, flags
, platform
);
3517 blob
->csb_flags
= cs_flags
;
3518 blob
->csb_signer_type
= signer_type
;
3522 printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid
, error
);
3526 if ((flags
& MAC_VNODE_CHECK_DYLD_SIM
) && !(blob
->csb_flags
& CS_PLATFORM_BINARY
)) {
3528 printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid
);
3535 #if CONFIG_ENFORCE_SIGNED_CODE
3537 * Reconstitute code signature
3540 vm_address_t new_mem_kaddr
= 0;
3541 vm_size_t new_mem_size
= 0;
3543 CS_CodeDirectory
*new_cd
= NULL
;
3544 CS_GenericBlob
const *new_entitlements
= NULL
;
3546 error
= ubc_cs_reconstitute_code_signature(blob
, 0,
3547 &new_mem_kaddr
, &new_mem_size
,
3548 &new_cd
, &new_entitlements
);
3551 printf("failed code signature reconstitution: %d\n", error
);
3555 ubc_cs_blob_deallocate(blob
->csb_mem_kaddr
, blob
->csb_mem_size
);
3557 blob
->csb_mem_kaddr
= new_mem_kaddr
;
3558 blob
->csb_mem_size
= new_mem_size
;
3559 blob
->csb_cd
= new_cd
;
3560 blob
->csb_entitlements_blob
= new_entitlements
;
3561 blob
->csb_reconstituted
= true;
3565 * When pmap_cs is enabled, there's an expectation that large blobs are
3566 * relocated to their own page. Above, this happens under
3567 * ubc_cs_reconstitute_code_signature() but that discards parts of the
3568 * signatures that are necessary on some platforms (eg, requirements).
3569 * So in this case, just copy everything.
3571 if (pmap_cs
&& (blob
->csb_mem_size
> pmap_cs_blob_limit
)) {
3572 vm_offset_t cd_offset
, ent_offset
;
3573 vm_size_t new_mem_size
= round_page(blob
->csb_mem_size
);
3574 vm_address_t new_mem_kaddr
= 0;
3576 kr
= kmem_alloc_kobject(kernel_map
, &new_mem_kaddr
, new_mem_size
, VM_KERN_MEMORY_SECURITY
);
3577 if (kr
!= KERN_SUCCESS
) {
3578 printf("failed to allocate %lu bytes to relocate blob: %d\n", new_mem_size
, kr
);
3583 cd_offset
= (vm_address_t
) blob
->csb_cd
- blob
->csb_mem_kaddr
;
3584 ent_offset
= (vm_address_t
) blob
->csb_entitlements_blob
- blob
->csb_mem_kaddr
;
3586 memcpy((void *) new_mem_kaddr
, (const void *) blob
->csb_mem_kaddr
, blob
->csb_mem_size
);
3587 ubc_cs_blob_deallocate(blob
->csb_mem_kaddr
, blob
->csb_mem_size
);
3588 blob
->csb_cd
= (const CS_CodeDirectory
*) (new_mem_kaddr
+ cd_offset
);
3589 /* Only update the entitlements blob pointer if it is non-NULL. If it is NULL, then
3590 * the blob has no entitlements and ent_offset is garbage. */
3591 if (blob
->csb_entitlements_blob
!= NULL
) {
3592 blob
->csb_entitlements_blob
= (const CS_GenericBlob
*) (new_mem_kaddr
+ ent_offset
);
3594 blob
->csb_mem_kaddr
= new_mem_kaddr
;
3595 blob
->csb_mem_size
= new_mem_size
;
3600 if (blob
->csb_flags
& CS_PLATFORM_BINARY
) {
3602 printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid
);
3604 blob
->csb_platform_binary
= 1;
3605 blob
->csb_platform_path
= !!(blob
->csb_flags
& CS_PLATFORM_PATH
);
3607 blob
->csb_platform_binary
= 0;
3608 blob
->csb_platform_path
= 0;
3609 blob
->csb_teamid
= csblob_parse_teamid(blob
);
3611 if (blob
->csb_teamid
) {
3612 printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid
, blob
->csb_teamid
);
3614 printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid
);
3620 * Validate the blob's coverage
3622 blob_start_offset
= blob
->csb_base_offset
+ blob
->csb_start_offset
;
3623 blob_end_offset
= blob
->csb_base_offset
+ blob
->csb_end_offset
;
3625 if (blob_start_offset
>= blob_end_offset
||
3626 blob_start_offset
< 0 ||
3627 blob_end_offset
<= 0) {
3628 /* reject empty or backwards blob */
3633 if (ubc_cs_supports_multilevel_hash(blob
)) {
3634 error
= ubc_cs_convert_to_multilevel_hash(blob
);
3636 printf("failed multilevel hash conversion: %d\n", error
);
3639 blob
->csb_reconstituted
= true;
3643 if (!UBCINFOEXISTS(vp
)) {
3648 uip
= vp
->v_ubcinfo
;
3650 /* check if this new blob overlaps with an existing blob */
3651 for (oblob
= uip
->cs_blobs
;
3653 oblob
= oblob
->csb_next
) {
3654 off_t oblob_start_offset
, oblob_end_offset
;
3656 if (blob
->csb_signer_type
!= oblob
->csb_signer_type
) { // signer type needs to be the same for slices
3660 } else if (blob
->csb_platform_binary
) { //platform binary needs to be the same for app slices
3661 if (!oblob
->csb_platform_binary
) {
3666 } else if (blob
->csb_teamid
) { //teamid binary needs to be the same for app slices
3667 if (oblob
->csb_platform_binary
||
3668 oblob
->csb_teamid
== NULL
||
3669 strcmp(oblob
->csb_teamid
, blob
->csb_teamid
) != 0) {
3674 } else { // non teamid binary needs to be the same for app slices
3675 if (oblob
->csb_platform_binary
||
3676 oblob
->csb_teamid
!= NULL
) {
3683 oblob_start_offset
= (oblob
->csb_base_offset
+
3684 oblob
->csb_start_offset
);
3685 oblob_end_offset
= (oblob
->csb_base_offset
+
3686 oblob
->csb_end_offset
);
3687 if (blob_start_offset
>= oblob_end_offset
||
3688 blob_end_offset
<= oblob_start_offset
) {
3689 /* no conflict with this existing blob */
3692 if (blob_start_offset
== oblob_start_offset
&&
3693 blob_end_offset
== oblob_end_offset
&&
3694 blob
->csb_mem_size
== oblob
->csb_mem_size
&&
3695 blob
->csb_flags
== oblob
->csb_flags
&&
3696 (blob
->csb_cpu_type
== CPU_TYPE_ANY
||
3697 oblob
->csb_cpu_type
== CPU_TYPE_ANY
||
3698 blob
->csb_cpu_type
== oblob
->csb_cpu_type
) &&
3699 !bcmp(blob
->csb_cdhash
,
3703 * We already have this blob:
3704 * we'll return success but
3705 * throw away the new blob.
3707 if (oblob
->csb_cpu_type
== CPU_TYPE_ANY
) {
3709 * The old blob matches this one
3710 * but doesn't have any CPU type.
3711 * Update it with whatever the caller
3712 * provided this time.
3714 oblob
->csb_cpu_type
= cputype
;
3717 /* The signature is still accepted, so update the
3718 * generation count. */
3719 uip
->cs_add_gen
= cs_blob_generation_count
;
3728 /* different blob: reject the new one */
3737 /* mark this vnode's VM object as having "signed pages" */
3738 kr
= memory_object_signed(uip
->ui_control
, TRUE
);
3739 if (kr
!= KERN_SUCCESS
) {
3745 if (uip
->cs_blobs
== NULL
) {
3746 /* loading 1st blob: record the file's current "modify time" */
3747 record_mtime
= TRUE
;
3750 /* set the generation count for cs_blobs */
3751 uip
->cs_add_gen
= cs_blob_generation_count
;
3754 * Add this blob to the list of blobs for this vnode.
3755 * We always add at the front of the list and we never remove a
3756 * blob from the list, so ubc_cs_get_blobs() can return whatever
3757 * the top of the list was and that list will remain valid
3758 * while we validate a page, even after we release the vnode's lock.
3760 blob
->csb_next
= uip
->cs_blobs
;
3761 uip
->cs_blobs
= blob
;
3763 ubc_cs_blob_adjust_statistics(blob
);
3767 const char *name
= vnode_getname_printable(vp
);
3769 printf("CODE SIGNING: proc %d(%s) "
3770 "loaded %s signatures for file (%s) "
3771 "range 0x%llx:0x%llx flags 0x%x\n",
3772 p
->p_pid
, p
->p_comm
,
3773 blob
->csb_cpu_type
== -1 ? "detached" : "embedded",
3775 blob
->csb_base_offset
+ blob
->csb_start_offset
,
3776 blob
->csb_base_offset
+ blob
->csb_end_offset
,
3778 vnode_putname_printable(name
);
3784 vnode_mtime(vp
, &uip
->cs_mtime
, vfs_context_current());
3791 error
= 0; /* success ! */
3796 printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid
, error
);
3802 if (error
== EAGAIN
) {
3804 * See above: error is EAGAIN if we were asked
3805 * to add an existing blob again. We cleaned the new
3806 * blob and we want to return success.
3814 #if CONFIG_SUPPLEMENTAL_SIGNATURES
3816 ubc_cs_blob_add_supplement(
3818 struct vnode
*orig_vp
,
3822 struct cs_blob
**ret_blob
)
3825 struct ubc_info
*uip
, *orig_uip
;
3827 struct cs_blob
*blob
, *orig_blob
;
3828 CS_CodeDirectory
const *cd
;
3829 off_t blob_start_offset
, blob_end_offset
;
3835 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3836 * Validates the passed in blob in the process. */
3837 error
= cs_blob_create_validated(addr
, size
, &blob
, &cd
);
3840 printf("malformed code signature supplement blob: %d\n", error
);
3844 blob
->csb_cpu_type
= -1;
3845 blob
->csb_base_offset
= base_offset
;
3847 blob
->csb_reconstituted
= false;
3849 vnode_lock(orig_vp
);
3850 if (!UBCINFOEXISTS(orig_vp
)) {
3851 vnode_unlock(orig_vp
);
3856 orig_uip
= orig_vp
->v_ubcinfo
;
3858 /* check that the supplement's linked cdhash matches a cdhash of
3862 if (blob
->csb_linkage_hashtype
== NULL
) {
3864 const char *iname
= vnode_getname_printable(vp
);
3867 printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
3868 "is not a supplemental.\n",
3869 p
->p_pid
, p
->p_comm
, iname
);
3873 vnode_putname_printable(iname
);
3874 vnode_unlock(orig_vp
);
3878 for (orig_blob
= orig_uip
->cs_blobs
; orig_blob
!= NULL
;
3879 orig_blob
= orig_blob
->csb_next
) {
3880 ptrauth_utils_auth_blob_generic(orig_blob
->csb_cdhash
,
3881 sizeof(orig_blob
->csb_cdhash
),
3882 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
3883 PTRAUTH_ADDR_DIVERSIFY
,
3884 orig_blob
->csb_cdhash_signature
);
3885 if (orig_blob
->csb_hashtype
== blob
->csb_linkage_hashtype
&&
3886 memcmp(orig_blob
->csb_cdhash
, blob
->csb_linkage
, CS_CDHASH_LEN
) == 0) {
3892 if (orig_blob
== NULL
) {
3896 const char *iname
= vnode_getname_printable(vp
);
3899 printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
3900 "does not match any attached cdhash.\n",
3901 p
->p_pid
, p
->p_comm
, iname
);
3905 vnode_putname_printable(iname
);
3906 vnode_unlock(orig_vp
);
3910 vnode_unlock(orig_vp
);
3912 // validate the signature against policy!
3914 unsigned int signer_type
= blob
->csb_signer_type
;
3915 error
= mac_vnode_check_supplemental_signature(vp
, blob
, orig_vp
, orig_blob
, &signer_type
);
3916 blob
->csb_signer_type
= signer_type
;
3921 printf("check_supplemental_signature[pid: %d], error = %d\n", current_proc()->p_pid
, error
);
3927 // We allowed the supplemental signature blob so
3928 // copy the platform bit or team-id from the linked signature and whether or not the original is developer code
3929 blob
->csb_platform_binary
= 0;
3930 blob
->csb_platform_path
= 0;
3931 if (orig_blob
->csb_platform_binary
== 1) {
3932 blob
->csb_platform_binary
= orig_blob
->csb_platform_binary
;
3933 blob
->csb_platform_path
= orig_blob
->csb_platform_path
;
3934 } else if (orig_blob
->csb_teamid
!= NULL
) {
3935 vm_size_t teamid_size
= strlen(orig_blob
->csb_teamid
) + 1;
3936 blob
->csb_supplement_teamid
= kalloc(teamid_size
);
3937 if (blob
->csb_supplement_teamid
== NULL
) {
3941 strlcpy(blob
->csb_supplement_teamid
, orig_blob
->csb_teamid
, teamid_size
);
3943 blob
->csb_flags
= (orig_blob
->csb_flags
& CS_DEV_CODE
);
3945 // Validate the blob's coverage
3946 blob_start_offset
= blob
->csb_base_offset
+ blob
->csb_start_offset
;
3947 blob_end_offset
= blob
->csb_base_offset
+ blob
->csb_end_offset
;
3949 if (blob_start_offset
>= blob_end_offset
|| blob_start_offset
< 0 || blob_end_offset
<= 0) {
3950 /* reject empty or backwards blob */
3956 if (!UBCINFOEXISTS(vp
)) {
3961 uip
= vp
->v_ubcinfo
;
3963 struct cs_blob
*existing
= uip
->cs_blob_supplement
;
3964 if (existing
!= NULL
) {
3965 if (blob
->csb_hashtype
== existing
->csb_hashtype
&&
3966 memcmp(blob
->csb_cdhash
, existing
->csb_cdhash
, CS_CDHASH_LEN
) == 0) {
3967 error
= EAGAIN
; // non-fatal
3969 error
= EALREADY
; // fatal
3976 /* Unlike regular cs_blobs, we only ever support one supplement. */
3977 blob
->csb_next
= NULL
;
3978 uip
->cs_blob_supplement
= blob
;
3980 /* mark this vnode's VM object as having "signed pages" */
3981 kr
= memory_object_signed(uip
->ui_control
, TRUE
);
3982 if (kr
!= KERN_SUCCESS
) {
3990 /* We still adjust statistics even for supplemental blobs, as they
3991 * consume memory just the same. */
3992 ubc_cs_blob_adjust_statistics(blob
);
3996 const char *name
= vnode_getname_printable(vp
);
3998 printf("CODE SIGNING: proc %d(%s) "
3999 "loaded supplemental signature for file (%s) "
4000 "range 0x%llx:0x%llx\n",
4001 p
->p_pid
, p
->p_comm
,
4003 blob
->csb_base_offset
+ blob
->csb_start_offset
,
4004 blob
->csb_base_offset
+ blob
->csb_end_offset
);
4005 vnode_putname_printable(name
);
4012 error
= 0; // Success!
4016 printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", current_proc()->p_pid
, error
);
4019 cs_blob_supplement_free(blob
);
4022 if (error
== EAGAIN
) {
4023 /* We were asked to add an existing blob.
4024 * We cleaned up and ignore the attempt. */
4035 csvnode_print_debug(struct vnode
*vp
)
4037 const char *name
= NULL
;
4038 struct ubc_info
*uip
;
4039 struct cs_blob
*blob
;
4041 name
= vnode_getname_printable(vp
);
4043 printf("csvnode: name: %s\n", name
);
4044 vnode_putname_printable(name
);
4047 vnode_lock_spin(vp
);
4049 if (!UBCINFOEXISTS(vp
)) {
4054 uip
= vp
->v_ubcinfo
;
4055 for (blob
= uip
->cs_blobs
; blob
!= NULL
; blob
= blob
->csb_next
) {
4056 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
4057 (unsigned long)blob
->csb_start_offset
,
4058 (unsigned long)blob
->csb_end_offset
,
4060 blob
->csb_platform_binary
? "yes" : "no",
4061 blob
->csb_platform_path
? "yes" : "no",
4062 blob
->csb_teamid
? blob
->csb_teamid
: "<NO-TEAM>");
4069 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4071 ubc_cs_blob_get_supplement(
4075 struct cs_blob
*blob
;
4076 off_t offset_in_blob
;
4078 vnode_lock_spin(vp
);
4080 if (!UBCINFOEXISTS(vp
)) {
4085 blob
= vp
->v_ubcinfo
->cs_blob_supplement
;
4088 // no supplemental blob
4094 offset_in_blob
= offset
- blob
->csb_base_offset
;
4095 if (offset_in_blob
< blob
->csb_start_offset
|| offset_in_blob
>= blob
->csb_end_offset
) {
4096 // not actually covered by this blob
4112 cpu_subtype_t cpusubtype
,
4115 struct ubc_info
*uip
;
4116 struct cs_blob
*blob
;
4117 off_t offset_in_blob
;
4119 vnode_lock_spin(vp
);
4121 if (!UBCINFOEXISTS(vp
)) {
4126 uip
= vp
->v_ubcinfo
;
4127 for (blob
= uip
->cs_blobs
;
4129 blob
= blob
->csb_next
) {
4130 if (cputype
!= -1 && blob
->csb_cpu_type
== cputype
&& (cpusubtype
== -1 || blob
->csb_cpu_subtype
== (cpusubtype
& ~CPU_SUBTYPE_MASK
))) {
4134 offset_in_blob
= offset
- blob
->csb_base_offset
;
4135 if (offset_in_blob
>= blob
->csb_start_offset
&&
4136 offset_in_blob
< blob
->csb_end_offset
) {
4137 /* our offset is covered by this blob */
4151 struct ubc_info
*uip
)
4153 struct cs_blob
*blob
, *next_blob
;
4155 for (blob
= uip
->cs_blobs
;
4158 next_blob
= blob
->csb_next
;
4159 os_atomic_add(&cs_blob_count
, -1, relaxed
);
4160 os_atomic_add(&cs_blob_size
, -blob
->csb_mem_size
, relaxed
);
4163 #if CHECK_CS_VALIDATION_BITMAP
4164 ubc_cs_validation_bitmap_deallocate( uip
->ui_vnode
);
4166 uip
->cs_blobs
= NULL
;
4167 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4168 if (uip
->cs_blob_supplement
!= NULL
) {
4169 blob
= uip
->cs_blob_supplement
;
4170 os_atomic_add(&cs_blob_count
, -1, relaxed
);
4171 os_atomic_add(&cs_blob_size
, -blob
->csb_mem_size
, relaxed
);
4172 cs_blob_supplement_free(uip
->cs_blob_supplement
);
4173 uip
->cs_blob_supplement
= NULL
;
4178 /* check cs blob generation on vnode
4180 * 0 : Success, the cs_blob attached is current
4181 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
4184 ubc_cs_generation_check(
4187 int retval
= ENEEDAUTH
;
4189 vnode_lock_spin(vp
);
4191 if (UBCINFOEXISTS(vp
) && vp
->v_ubcinfo
->cs_add_gen
== cs_blob_generation_count
) {
4200 ubc_cs_blob_revalidate(
4202 struct cs_blob
*blob
,
4203 struct image_params
*imgp
,
4209 const CS_CodeDirectory
*cd
= NULL
;
4210 const CS_GenericBlob
*entitlements
= NULL
;
4213 assert(blob
!= NULL
);
4215 size
= blob
->csb_mem_size
;
4216 error
= cs_validate_csblob((const uint8_t *)blob
->csb_mem_kaddr
,
4217 size
, &cd
, &entitlements
);
4220 printf("CODESIGNING: csblob invalid: %d\n", error
);
4225 unsigned int cs_flags
= (ntohl(cd
->flags
) & CS_ALLOWED_MACHO
) | CS_VALID
;
4226 unsigned int signer_type
= CS_SIGNER_TYPE_UNKNOWN
;
4228 if (blob
->csb_reconstituted
) {
4230 * Code signatures that have been modified after validation
4231 * cannot be revalidated inline from their in-memory blob.
4233 * That's okay, though, because the only path left that relies
4234 * on revalidation of existing in-memory blobs is the legacy
4235 * detached signature database path, which only exists on macOS,
4236 * which does not do reconstitution of any kind.
4239 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
4243 * EAGAIN tells the caller that they may reread the code
4244 * signature and try attaching it again, which is the same
4245 * thing they would do if there was no cs_blob yet in the
4248 * Conveniently, after ubc_cs_blob_add did a successful
4249 * validation, it will detect that a matching cs_blob (cdhash,
4250 * offset, arch etc.) already exists, and return success
4251 * without re-adding a cs_blob to the vnode.
4256 /* callout to mac_vnode_check_signature */
4258 error
= mac_vnode_check_signature(vp
, blob
, imgp
, &cs_flags
, &signer_type
, flags
, platform
);
4259 if (cs_debug
&& error
) {
4260 printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid
, error
);
4267 /* update generation number if success */
4268 vnode_lock_spin(vp
);
4269 blob
->csb_flags
= cs_flags
;
4270 blob
->csb_signer_type
= signer_type
;
4271 if (UBCINFOEXISTS(vp
)) {
4273 vp
->v_ubcinfo
->cs_add_gen
= cs_blob_generation_count
;
4275 vp
->v_ubcinfo
->cs_add_gen
= 0;
4286 cs_blob_reset_cache()
4288 /* incrementing odd no by 2 makes sure '0' is never reached. */
4289 OSAddAtomic(+2, &cs_blob_generation_count
);
4290 printf("Reseting cs_blob cache from all vnodes. \n");
4297 struct ubc_info
*uip
;
4298 struct cs_blob
*blobs
;
4301 * No need to take the vnode lock here. The caller must be holding
4302 * a reference on the vnode (via a VM mapping or open file descriptor),
4303 * so the vnode will not go away. The ubc_info stays until the vnode
4304 * goes away. And we only modify "blobs" by adding to the head of the
4306 * The ubc_info could go away entirely if the vnode gets reclaimed as
4307 * part of a forced unmount. In the case of a code-signature validation
4308 * during a page fault, the "paging_in_progress" reference on the VM
4309 * object guarantess that the vnode pager (and the ubc_info) won't go
4310 * away during the fault.
4311 * Other callers need to protect against vnode reclaim by holding the
4312 * vnode lock, for example.
4315 if (!UBCINFOEXISTS(vp
)) {
4320 uip
= vp
->v_ubcinfo
;
4321 blobs
= uip
->cs_blobs
;
4327 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4329 ubc_get_cs_supplement(
4332 struct ubc_info
*uip
;
4333 struct cs_blob
*blob
;
4336 * No need to take the vnode lock here. The caller must be holding
4337 * a reference on the vnode (via a VM mapping or open file descriptor),
4338 * so the vnode will not go away. The ubc_info stays until the vnode
4340 * The ubc_info could go away entirely if the vnode gets reclaimed as
4341 * part of a forced unmount. In the case of a code-signature validation
4342 * during a page fault, the "paging_in_progress" reference on the VM
4343 * object guarantess that the vnode pager (and the ubc_info) won't go
4344 * away during the fault.
4345 * Other callers need to protect against vnode reclaim by holding the
4346 * vnode lock, for example.
4349 if (!UBCINFOEXISTS(vp
)) {
4354 uip
= vp
->v_ubcinfo
;
4355 blob
= uip
->cs_blob_supplement
;
4366 struct timespec
*cs_mtime
)
4368 struct ubc_info
*uip
;
4370 if (!UBCINFOEXISTS(vp
)) {
4371 cs_mtime
->tv_sec
= 0;
4372 cs_mtime
->tv_nsec
= 0;
4376 uip
= vp
->v_ubcinfo
;
4377 cs_mtime
->tv_sec
= uip
->cs_mtime
.tv_sec
;
4378 cs_mtime
->tv_nsec
= uip
->cs_mtime
.tv_nsec
;
4381 unsigned long cs_validate_page_no_hash
= 0;
4382 unsigned long cs_validate_page_bad_hash
= 0;
4385 struct cs_blob
*blobs
,
4386 memory_object_t pager
,
4387 memory_object_offset_t page_offset
,
4389 vm_size_t
*bytes_processed
,
4392 union cs_hash_union mdctx
;
4393 struct cs_hash
const *hashtype
= NULL
;
4394 unsigned char actual_hash
[CS_HASH_MAX_SIZE
];
4395 unsigned char expected_hash
[CS_HASH_MAX_SIZE
];
4396 boolean_t found_hash
;
4397 struct cs_blob
*blob
;
4398 const CS_CodeDirectory
*cd
;
4399 const unsigned char *hash
;
4400 boolean_t validated
;
4401 off_t offset
; /* page offset in the file */
4403 off_t codeLimit
= 0;
4404 const char *lower_bound
, *upper_bound
;
4405 vm_offset_t kaddr
, blob_addr
;
4407 /* retrieve the expected hash */
4412 blob
= blob
->csb_next
) {
4413 offset
= page_offset
- blob
->csb_base_offset
;
4414 if (offset
< blob
->csb_start_offset
||
4415 offset
>= blob
->csb_end_offset
) {
4416 /* our page is not covered by this blob */
4420 /* blob data has been released */
4421 kaddr
= blob
->csb_mem_kaddr
;
4426 blob_addr
= kaddr
+ blob
->csb_mem_offset
;
4427 lower_bound
= CAST_DOWN(char *, blob_addr
);
4428 upper_bound
= lower_bound
+ blob
->csb_mem_size
;
4432 /* all CD's that have been injected is already validated */
4434 hashtype
= blob
->csb_hashtype
;
4435 if (hashtype
== NULL
) {
4436 panic("unknown hash type ?");
4438 if (hashtype
->cs_digest_size
> sizeof(actual_hash
)) {
4439 panic("hash size too large");
4441 if (offset
& ((1U << blob
->csb_hash_pageshift
) - 1)) {
4442 panic("offset not aligned to cshash boundary");
4445 codeLimit
= ntohl(cd
->codeLimit
);
4447 hash
= hashes(cd
, (uint32_t)(offset
>> blob
->csb_hash_pageshift
),
4449 lower_bound
, upper_bound
);
4451 bcopy(hash
, expected_hash
, hashtype
->cs_size
);
4459 if (found_hash
== FALSE
) {
4461 * We can't verify this page because there is no signature
4462 * for it (yet). It's possible that this part of the object
4463 * is not signed, or that signatures for that part have not
4465 * Report that the page has not been validated and let the
4466 * caller decide if it wants to accept it or not.
4468 cs_validate_page_no_hash
++;
4470 printf("CODE SIGNING: cs_validate_page: "
4471 "mobj %p off 0x%llx: no hash to validate !?\n",
4472 pager
, page_offset
);
4479 size
= (1U << blob
->csb_hash_pageshift
);
4480 *bytes_processed
= size
;
4482 const uint32_t *asha1
, *esha1
;
4483 if ((off_t
)(offset
+ size
) > codeLimit
) {
4484 /* partial page at end of segment */
4485 assert(offset
< codeLimit
);
4486 size
= (size_t) (codeLimit
& (size
- 1));
4487 *tainted
|= CS_VALIDATE_NX
;
4490 hashtype
->cs_init(&mdctx
);
4492 if (blob
->csb_hash_firstlevel_pageshift
) {
4493 const unsigned char *partial_data
= (const unsigned char *)data
;
4495 for (i
= 0; i
< size
;) {
4496 union cs_hash_union partialctx
;
4497 unsigned char partial_digest
[CS_HASH_MAX_SIZE
];
4498 size_t partial_size
= MIN(size
- i
, (1U << blob
->csb_hash_firstlevel_pageshift
));
4500 hashtype
->cs_init(&partialctx
);
4501 hashtype
->cs_update(&partialctx
, partial_data
, partial_size
);
4502 hashtype
->cs_final(partial_digest
, &partialctx
);
4504 /* Update cumulative multi-level hash */
4505 hashtype
->cs_update(&mdctx
, partial_digest
, hashtype
->cs_size
);
4506 partial_data
= partial_data
+ partial_size
;
4510 hashtype
->cs_update(&mdctx
, data
, size
);
4512 hashtype
->cs_final(actual_hash
, &mdctx
);
4514 asha1
= (const uint32_t *) actual_hash
;
4515 esha1
= (const uint32_t *) expected_hash
;
4517 if (bcmp(expected_hash
, actual_hash
, hashtype
->cs_size
) != 0) {
4519 printf("CODE SIGNING: cs_validate_page: "
4520 "mobj %p off 0x%llx size 0x%lx: "
4521 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
4522 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
4523 pager
, page_offset
, size
,
4524 asha1
[0], asha1
[1], asha1
[2],
4526 esha1
[0], esha1
[1], esha1
[2],
4527 esha1
[3], esha1
[4]);
4529 cs_validate_page_bad_hash
++;
4530 *tainted
|= CS_VALIDATE_TAINTED
;
4532 if (cs_debug
> 10) {
4533 printf("CODE SIGNING: cs_validate_page: "
4534 "mobj %p off 0x%llx size 0x%lx: "
4536 pager
, page_offset
, size
);
4548 memory_object_t pager
,
4549 memory_object_offset_t page_offset
,
4554 vm_size_t offset_in_range
;
4555 boolean_t all_subranges_validated
= TRUE
; /* turn false if any subrange fails */
4557 struct cs_blob
*blobs
= ubc_get_cs_blobs(vp
);
4559 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4560 if (blobs
== NULL
&& proc_is_translated(current_proc())) {
4561 struct cs_blob
*supp
= ubc_get_cs_supplement(vp
);
4575 for (offset_in_range
= 0;
4576 offset_in_range
< dsize
;
4577 /* offset_in_range updated based on bytes processed */) {
4578 unsigned subrange_tainted
= 0;
4579 boolean_t subrange_validated
;
4580 vm_size_t bytes_processed
= 0;
4582 subrange_validated
= cs_validate_hash(blobs
,
4584 page_offset
+ offset_in_range
,
4585 (const void *)((const char *)data
+ offset_in_range
),
4589 *tainted
|= subrange_tainted
;
4591 if (bytes_processed
== 0) {
4592 /* Cannote make forward progress, so return an error */
4593 all_subranges_validated
= FALSE
;
4595 } else if (subrange_validated
== FALSE
) {
4596 all_subranges_validated
= FALSE
;
4597 /* Keep going to detect other types of failures in subranges */
4600 offset_in_range
+= bytes_processed
;
4603 return all_subranges_validated
;
4609 memory_object_t pager
,
4610 memory_object_offset_t page_offset
,
4616 vm_size_t offset_in_page
;
4617 struct cs_blob
*blobs
;
4619 blobs
= ubc_get_cs_blobs(vp
);
4621 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4622 if (blobs
== NULL
&& proc_is_translated(current_proc())) {
4623 struct cs_blob
*supp
= ubc_get_cs_supplement(vp
);
4631 *validated_p
= VMP_CS_ALL_FALSE
;
4632 *tainted_p
= VMP_CS_ALL_FALSE
;
4633 *nx_p
= VMP_CS_ALL_FALSE
;
4635 for (offset_in_page
= 0;
4636 offset_in_page
< PAGE_SIZE
;
4637 /* offset_in_page updated based on bytes processed */) {
4638 unsigned subrange_tainted
= 0;
4639 boolean_t subrange_validated
;
4640 vm_size_t bytes_processed
= 0;
4643 subrange_validated
= cs_validate_hash(blobs
,
4645 page_offset
+ offset_in_page
,
4646 (const void *)((const char *)data
+ offset_in_page
),
4650 if (bytes_processed
== 0) {
4651 /* 4k chunk not code-signed: try next one */
4652 offset_in_page
+= FOURK_PAGE_SIZE
;
4655 if (offset_in_page
== 0 &&
4656 bytes_processed
> PAGE_SIZE
- FOURK_PAGE_SIZE
) {
4657 /* all processed: no 4k granularity */
4658 if (subrange_validated
) {
4659 *validated_p
= VMP_CS_ALL_TRUE
;
4661 if (subrange_tainted
& CS_VALIDATE_TAINTED
) {
4662 *tainted_p
= VMP_CS_ALL_TRUE
;
4664 if (subrange_tainted
& CS_VALIDATE_NX
) {
4665 *nx_p
= VMP_CS_ALL_TRUE
;
4669 /* we only handle 4k or 16k code-signing granularity... */
4670 assertf(bytes_processed
<= FOURK_PAGE_SIZE
,
4671 "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
4672 vp
, blobs
, (uint64_t)page_offset
,
4673 (uint64_t)offset_in_page
, (uint64_t)bytes_processed
);
4674 sub_bit
= 1 << (offset_in_page
>> FOURK_PAGE_SHIFT
);
4675 if (subrange_validated
) {
4676 *validated_p
|= sub_bit
;
4678 if (subrange_tainted
& CS_VALIDATE_TAINTED
) {
4679 *tainted_p
|= sub_bit
;
4681 if (subrange_tainted
& CS_VALIDATE_NX
) {
4684 /* go to next 4k chunk */
4685 offset_in_page
+= FOURK_PAGE_SIZE
;
4695 unsigned char *cdhash
)
4697 struct cs_blob
*blobs
, *blob
;
4703 blobs
= ubc_get_cs_blobs(vp
);
4706 blob
= blob
->csb_next
) {
4707 /* compute offset relative to this blob */
4708 rel_offset
= offset
- blob
->csb_base_offset
;
4709 if (rel_offset
>= blob
->csb_start_offset
&&
4710 rel_offset
< blob
->csb_end_offset
) {
4711 /* this blob does cover our "offset" ! */
4717 /* we didn't find a blob covering "offset" */
4718 ret
= EBADEXEC
; /* XXX any better error ? */
4720 /* get the SHA1 hash of that blob */
4721 ptrauth_utils_auth_blob_generic(blob
->csb_cdhash
,
4722 sizeof(blob
->csb_cdhash
),
4723 OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"),
4724 PTRAUTH_ADDR_DIVERSIFY
,
4725 blob
->csb_cdhash_signature
);
4726 bcopy(blob
->csb_cdhash
, cdhash
, sizeof(blob
->csb_cdhash
));
4736 ubc_cs_is_range_codesigned(
4738 mach_vm_offset_t start
,
4739 mach_vm_size_t size
)
4741 struct cs_blob
*csblob
;
4742 mach_vm_offset_t blob_start
;
4743 mach_vm_offset_t blob_end
;
4746 /* no file: no code signature */
4750 /* no range: no code signature */
4753 if (start
+ size
< start
) {
4758 csblob
= ubc_cs_blob_get(vp
, -1, -1, start
);
4759 if (csblob
== NULL
) {
4764 * We currently check if the range is covered by a single blob,
4765 * which should always be the case for the dyld shared cache.
4766 * If we ever want to make this routine handle other cases, we
4767 * would have to iterate if the blob does not cover the full range.
4769 blob_start
= (mach_vm_offset_t
) (csblob
->csb_base_offset
+
4770 csblob
->csb_start_offset
);
4771 blob_end
= (mach_vm_offset_t
) (csblob
->csb_base_offset
+
4772 csblob
->csb_end_offset
);
4773 if (blob_start
> start
|| blob_end
< (start
+ size
)) {
4774 /* range not fully covered by this code-signing blob */
4781 #if CHECK_CS_VALIDATION_BITMAP
4782 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
4783 extern boolean_t root_fs_upgrade_try
;
4786 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
4788 * a) Is the target vnode on the root filesystem?
4789 * b) Has someone tried to mount the root filesystem read-write?
4790 * If answers are (a) yes AND (b) no, then we can use the bitmap.
4792 #define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
4794 ubc_cs_validation_bitmap_allocate(
4797 kern_return_t kr
= KERN_SUCCESS
;
4798 struct ubc_info
*uip
;
4799 char *target_bitmap
;
4800 vm_object_size_t bitmap_size
;
4802 if (!USE_CODE_SIGN_BITMAP(vp
) || (!UBCINFOEXISTS(vp
))) {
4803 kr
= KERN_INVALID_ARGUMENT
;
4805 uip
= vp
->v_ubcinfo
;
4807 if (uip
->cs_valid_bitmap
== NULL
) {
4808 bitmap_size
= stob(uip
->ui_size
);
4809 target_bitmap
= (char*) kalloc((vm_size_t
)bitmap_size
);
4810 if (target_bitmap
== 0) {
4815 if (kr
== KERN_SUCCESS
) {
4816 memset( target_bitmap
, 0, (size_t)bitmap_size
);
4817 uip
->cs_valid_bitmap
= (void*)target_bitmap
;
4818 uip
->cs_valid_bitmap_size
= bitmap_size
;
4826 ubc_cs_check_validation_bitmap(
4828 memory_object_offset_t offset
,
4831 kern_return_t kr
= KERN_SUCCESS
;
4833 if (!USE_CODE_SIGN_BITMAP(vp
) || !UBCINFOEXISTS(vp
)) {
4834 kr
= KERN_INVALID_ARGUMENT
;
4836 struct ubc_info
*uip
= vp
->v_ubcinfo
;
4837 char *target_bitmap
= uip
->cs_valid_bitmap
;
4839 if (target_bitmap
== NULL
) {
4840 kr
= KERN_INVALID_ARGUMENT
;
4843 bit
= atop_64( offset
);
4846 if (byte
> uip
->cs_valid_bitmap_size
) {
4847 kr
= KERN_INVALID_ARGUMENT
;
4849 if (optype
== CS_BITMAP_SET
) {
4850 target_bitmap
[byte
] |= (1 << (bit
& 07));
4852 } else if (optype
== CS_BITMAP_CLEAR
) {
4853 target_bitmap
[byte
] &= ~(1 << (bit
& 07));
4855 } else if (optype
== CS_BITMAP_CHECK
) {
4856 if (target_bitmap
[byte
] & (1 << (bit
& 07))) {
4869 ubc_cs_validation_bitmap_deallocate(
4872 struct ubc_info
*uip
;
4873 void *target_bitmap
;
4874 vm_object_size_t bitmap_size
;
4876 if (UBCINFOEXISTS(vp
)) {
4877 uip
= vp
->v_ubcinfo
;
4879 if ((target_bitmap
= uip
->cs_valid_bitmap
) != NULL
) {
4880 bitmap_size
= uip
->cs_valid_bitmap_size
;
4881 kfree( target_bitmap
, (vm_size_t
) bitmap_size
);
4882 uip
->cs_valid_bitmap
= NULL
;
4888 ubc_cs_validation_bitmap_allocate(__unused vnode_t vp
)
4890 return KERN_INVALID_ARGUMENT
;
4894 ubc_cs_check_validation_bitmap(
4895 __unused
struct vnode
*vp
,
4896 __unused memory_object_offset_t offset
,
4897 __unused
int optype
)
4899 return KERN_INVALID_ARGUMENT
;
4903 ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp
)
4907 #endif /* CHECK_CS_VALIDATION_BITMAP */
4911 cs_associate_blob_with_mapping(
4913 vm_map_offset_t start
,
4915 vm_object_offset_t offset
,
4918 off_t blob_start_offset
, blob_end_offset
;
4920 struct cs_blob
*blobs
, *blob
;
4922 struct pmap_cs_code_directory
*cd_entry
= NULL
;
4925 return KERN_NOT_SUPPORTED
;
4928 blobs
= (struct cs_blob
*)blobs_p
;
4932 blob
= blob
->csb_next
) {
4933 blob_start_offset
= (blob
->csb_base_offset
+
4934 blob
->csb_start_offset
);
4935 blob_end_offset
= (blob
->csb_base_offset
+
4936 blob
->csb_end_offset
);
4937 if ((off_t
) offset
< blob_start_offset
||
4938 (off_t
) offset
>= blob_end_offset
||
4939 (off_t
) (offset
+ size
) <= blob_start_offset
||
4940 (off_t
) (offset
+ size
) > blob_end_offset
) {
4943 kaddr
= blob
->csb_mem_kaddr
;
4945 /* blob data has been released */
4948 cd_entry
= blob
->csb_pmap_cs_entry
;
4949 if (cd_entry
== NULL
) {
4956 if (cd_entry
!= NULL
) {
4957 kr
= pmap_cs_associate(pmap
,
4961 offset
- blob_start_offset
);
4963 kr
= KERN_CODESIGN_ERROR
;
4966 printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm
[0]), pmap
, cd_entry
, (uint64_t)start
, (uint64_t)size
, kr
);
4971 #endif /* PMAP_CS */