/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>

#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>

#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#include <libkern/crypto/sha1.h>
#include <libkern/crypto/sha2.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
						  boolean_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
					   boolean_t is_signed);
extern boolean_t memory_object_is_slid(memory_object_control_t control);
extern boolean_t memory_object_is_signed(memory_object_control_t);

extern void Debugger(const char *message);
/* XXX no one uses this interface! */
kern_return_t ubc_page_op_with_control(
	memory_object_control_t	 control,
	off_t			 f_offset,
	int			 ops,
	ppnum_t			*phys_entryp,
	int			*flagsp);

#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
static void ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);

struct zone	*ubc_info_zone;
static uint32_t	cs_blob_generation_count = 1;
/*
 * Routines to navigate code signing data structures in the kernel...
 */

#define PAGE_SHIFT_4K		(12)

static boolean_t
cs_valid_range(
	const void *start,
	const void *end,
	const void *lower_bound,
	const void *upper_bound)
{
	if (upper_bound < lower_bound ||
	    end < start) {
		return FALSE;
	}

	if (start < lower_bound ||
	    end > upper_bound) {
		return FALSE;
	}

	return TRUE;
}
typedef void (*cs_md_init)(void *ctx);
typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
typedef void (*cs_md_final)(void *hash, void *ctx);

struct cs_hash {
	uint8_t		cs_type;	/* type code as per code signing */
	size_t		cs_size;	/* size of effective hash (may be truncated) */
	size_t		cs_digest_size;	/* size of native hash */
	cs_md_init	cs_init;
	cs_md_update	cs_update;
	cs_md_final	cs_final;
};

uint8_t cs_hash_type(
	struct cs_hash const * const cs_hash)
{
	return cs_hash->cs_type;
}
static const struct cs_hash cs_hash_sha1 = {
	.cs_type = CS_HASHTYPE_SHA1,
	.cs_size = CS_SHA1_LEN,
	.cs_digest_size = SHA_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA1Init,
	.cs_update = (cs_md_update)SHA1Update,
	.cs_final = (cs_md_final)SHA1Final,
};

static const struct cs_hash cs_hash_sha256 = {
	.cs_type = CS_HASHTYPE_SHA256,
	.cs_size = SHA256_DIGEST_LENGTH,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};

static const struct cs_hash cs_hash_sha256_truncate = {
	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
	.cs_size = CS_SHA256_TRUNCATED_LEN,
	.cs_digest_size = SHA256_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA256_Init,
	.cs_update = (cs_md_update)SHA256_Update,
	.cs_final = (cs_md_final)SHA256_Final,
};

static const struct cs_hash cs_hash_sha384 = {
	.cs_type = CS_HASHTYPE_SHA384,
	.cs_size = SHA384_DIGEST_LENGTH,
	.cs_digest_size = SHA384_DIGEST_LENGTH,
	.cs_init = (cs_md_init)SHA384_Init,
	.cs_update = (cs_md_update)SHA384_Update,
	.cs_final = (cs_md_final)SHA384_Final,
};
static struct cs_hash const *
cs_find_md(uint8_t type)
{
	if (type == CS_HASHTYPE_SHA1) {
		return &cs_hash_sha1;
	} else if (type == CS_HASHTYPE_SHA256) {
		return &cs_hash_sha256;
	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
		return &cs_hash_sha256_truncate;
	} else if (type == CS_HASHTYPE_SHA384) {
		return &cs_hash_sha384;
	}
	return NULL;
}
union cs_hash_union {
	SHA1_CTX	sha1ctx;
	SHA256_CTX	sha256ctx;
	SHA384_CTX	sha384ctx;
};
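
/*
 * Example (sketch): how a caller might use cs_find_md() together with the
 * cs_md_* function pointers above to digest a buffer with whichever hash a
 * code directory declares.  The helper name example_hash_page() and its
 * arguments are illustrative only and are not part of the code-signing KPI.
 */
static int
example_hash_page(uint8_t hash_type, const void *data, size_t size,
	unsigned char *out /* at least cs_digest_size bytes */)
{
	union cs_hash_union ctx;
	struct cs_hash const *hashtype = cs_find_md(hash_type);

	if (hashtype == NULL)
		return EBADEXEC;	/* unsupported hash type */

	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, data, size);
	hashtype->cs_final(out, &ctx);
	return 0;
}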
/*
 * Choose among different hash algorithms.
 * Higher is better, 0 => don't use at all.
 */
static const uint32_t hashPriorities[] = {
	CS_HASHTYPE_SHA1,
	CS_HASHTYPE_SHA256_TRUNCATED,
	CS_HASHTYPE_SHA256,
	CS_HASHTYPE_SHA384,
};

static unsigned int
hash_rank(const CS_CodeDirectory *cd)
{
	uint32_t type = cd->hashType;
	unsigned int n;

	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n)
		if (hashPriorities[n] == type)
			return n + 1;
	return 0;	/* not supported */
}
/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	uint32_t page,
	size_t hash_len,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter *)
			((const char *)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex = 0, scount, sbase = 0;
		/* iterate all scatter structs */
		do {
			if ((const char *)scatter > (const char *)cd + ntohl(cd->length)) {
				printf("CODE SIGNING: Scatter extends past Code Directory\n");
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				return NULL;
			}

			if ((hashindex > 0) && (new_base <= sbase)) {
				printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
				    sbase, new_base);
				return NULL;	/* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase + scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
					hashindex * hash_len;
				/* top = address of first hash after this scatter */
				top = base + scount * hash_len;
				if (!cs_valid_range(base, top, lower_bound,
				        upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}
				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while (1);

		hash = base + (page - sbase) * hash_len;
	} else {
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * hash_len;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * hash_len;
	}

	if (!cs_valid_range(hash, hash + hash_len,
	        lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}
/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
	struct cs_hash const *hashtype;

	if (length < sizeof(*cd))
		return EBADEXEC;
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
		return EBADEXEC;
	if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT)
		return EBADEXEC;
	hashtype = cs_find_md(cd->hashType);
	if (hashtype == NULL)
		return EBADEXEC;

	if (cd->hashSize != hashtype->cs_size)
		return EBADEXEC;

	if (length < ntohl(cd->hashOffset))
		return EBADEXEC;

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots))
		return EBADEXEC;

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots))
		return EBADEXEC;

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {

		if (length < ntohl(cd->scatterOffset))
			return EBADEXEC;

		const SC_Scatter *scatter = (const SC_Scatter *)
			(((const uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer, since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while (1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
				return EBADEXEC;
			uint32_t scount = ntohl(scatter->count);
			if (scount == 0)
				break;
			if (nPages + scount < nPages)
				return EBADEXEC;
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots))
			return EBADEXEC;
#endif
	}

	if (length < ntohl(cd->identOffset))
		return EBADEXEC;

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
			return EBADEXEC;
	}

	/* team identifier is NUL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset))
			return EBADEXEC;

		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
			return EBADEXEC;
	}

	return 0;
}
static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
		return EBADEXEC;

	return 0;
}
/*
 * cs_validate_csblob
 *
 * Validate the superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		rcd			returns pointer to code directory
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */
static int
cs_validate_csblob(
	const uint8_t *addr,
	size_t *blob_size_p,
	const CS_CodeDirectory **rcd,
	const CS_GenericBlob **rentitlements)
{
	const CS_GenericBlob *blob;
	int error;
	size_t length, blob_size;

	*rcd = NULL;
	*rentitlements = NULL;

	blob = (const CS_GenericBlob *)(const void *)addr;
	blob_size = *blob_size_p;

	length = blob_size;
	error = cs_validate_blob(blob, length);
	if (error)
		return error;
	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb;
		uint32_t n, count;
		const CS_CodeDirectory *best_cd = NULL;
		unsigned int best_rank = 0;
		const CS_CodeDirectory *sha1_cd = NULL;

		if (length < sizeof(CS_SuperBlob))
			return EBADEXEC;

		sb = (const CS_SuperBlob *)blob;
		count = ntohl(sb->count);

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
			return EBADEXEC;

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			uint32_t type = ntohl(blobIndex->type);
			uint32_t offset = ntohl(blobIndex->offset);
			if (length < offset)
				return EBADEXEC;

			const CS_GenericBlob *subBlob =
				(const CS_GenericBlob *)(const void *)(addr + offset);

			size_t subLength = length - offset;

			if ((error = cs_validate_blob(subBlob, subLength)) != 0)
				return error;
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0)
					return error;

				unsigned int rank = hash_rank(candidate);
				printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
				if (best_cd == NULL || rank > best_rank) {
					best_cd = candidate;
					best_rank = rank;

					printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
					*rcd = best_cd;
				} else if (best_cd != NULL && rank == best_rank) {
					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
					return EBADEXEC;
				}

				if (candidate->hashType == CS_HASHTYPE_SHA1) {
					if (sha1_cd != NULL) {
						printf("multiple sha1 CodeDirectories in signature; rejecting\n");
						return EBADEXEC;
					}
					sha1_cd = candidate;
				}
			} else if (type == CSSLOT_ENTITLEMENTS) {
				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
					return EBADEXEC;
				}
				if (*rentitlements != NULL) {
					printf("multiple entitlements blobs\n");
					return EBADEXEC;
				}
				*rentitlements = subBlob;
			}
		}

		/* To keep watchOS fast enough, we have to resort to sha1 for
		 * its code signing validation.
		 *
		 * At the time of writing this comment, known sha1 attacks are
		 * collision attacks (not preimage or second preimage
		 * attacks), which do not apply to platform binaries since
		 * they have a fixed hash in the trust cache.  Given this
		 * property, we only prefer sha1 code directories for adhoc
		 * signatures, which always have to be in a trust cache to be
		 * valid (can-load-cdhash does not exist for watchOS). Those
		 * are, incidentally, also the platform binaries, for which we
		 * care about the performance hit that sha256 would bring us.
		 *
		 * Platform binaries may still contain a (not chosen) sha256
		 * code directory, which keeps software updates that switch to
		 * sha256-only small.
		 */
		if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
			if (sha1_cd->flags != (*rcd)->flags) {
				printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
				    (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
				*rcd = NULL;
				return EBADEXEC;
			}

			*rcd = sha1_cd;
		}
	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0)
			return error;
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	if (*rcd == NULL)
		return EBADEXEC;

	*blob_size_p = blob_size;

	return 0;
}
/*
 * csblob_find_blob_bytes
 *
 * Find a blob in the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling this.
 * Use csblob_find_blob() instead.
 *
 * Will also find a "raw" code directory if it's stored as well as
 * searching the superblob.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		type			type of blob to find
 *		magic			the magic number for that blob
 *
 * Returns:	pointer			Success
 *		NULL			Buffer not found
 */
const CS_GenericBlob *
csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type)
				continue;
			uint32_t offset = ntohl(sb->index[n].offset);
			if (length - sizeof(const CS_GenericBlob) < offset)
				return NULL;
			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
			if (ntohl(blob->magic) != magic)
				continue;
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY
		   && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
		   && magic == CSMAGIC_CODEDIRECTORY)
		return blob;

	return NULL;
}


const CS_GenericBlob *
csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
	if ((csblob->csb_flags & CS_VALID) == 0)
		return NULL;

	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}
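
/*
 * Example (sketch): looking up the entitlements blob of a validated
 * signature by slot type and magic.  The helper name is illustrative only;
 * csblob_get_entitlements() below is the hash-checked path callers should
 * actually use.
 */
static const CS_GenericBlob *
example_lookup_entitlements(struct cs_blob *csblob)
{
	/* returns NULL if the blob is not CS_VALID or the slot is absent */
	return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
	    CSMAGIC_EMBEDDED_ENTITLEMENTS);
}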
static const uint8_t *
find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
{
	/* there is no zero special slot since that is the first code slot */
	if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
		return NULL;

	return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot));
}

static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
int
csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[CS_HASH_MAX_SIZE];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	const uint8_t *embedded_hash;
	union cs_hash_union context;

	*out_start = NULL;
	*out_length = 0;

	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash))
		return EBADEXEC;

	code_dir = csblob->csb_cd;

	if ((csblob->csb_flags & CS_VALID) == 0) {
		entitlements = NULL;
	} else {
		entitlements = csblob->csb_entitlements_blob;
	}
	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		if (entitlements)
			return EBADEXEC;
		return 0;
	} else if (entitlements == NULL) {
		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
			return EBADEXEC;
		} else {
			return 0;
		}
	}

	csblob->csb_hashtype->cs_init(&context);
	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
	csblob->csb_hashtype->cs_final(computed_hash, &context);

	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0)
		return EBADEXEC;

	*out_start = __DECONST(void *, entitlements);
	*out_length = ntohl(entitlements->length);

	return 0;
}
/*
 * End of routines to navigate code signing data structures in the kernel.
 */


/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Implicit returns:
 *		ubc_info_zone(global)	initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct ubc_info);

	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");

	zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}
/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		filesize		The size of the file
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}
/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		withfsize{0,1}		Zero if the size should be obtained
 *					from the vnode; otherwise, use filesize
 *		filesize		The size of the file, if withfsize == 1
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 *
 * Notes:	We call a blocking zalloc(), and the zone was created as an
 *		expandable and collectable zone, so if no memory is available,
 *		it is possible for zalloc() to block indefinitely.  zalloc()
 *		may also panic if the zone of zones is exhausted, since it's
 *		NOT expandable.
 *
 *		We unconditionally call vnode_pager_setup(), even if this is
 *		a reuse of a ubc_info; in that case, we should probably assert
 *		that it does not already have a pager association, but do not.
 *
 *		Since memory_object_create_named() can only fail from receiving
 *		an invalid pager argument, the explicit check and panic is
 *		merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	struct ubc_info	*uip;
	void *		pager;
	int		error = 0;
	kern_return_t	kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag, as it is used by some functions to determine
	 * if a pager exists.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use bogus size.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;	/* vnode has a named ubc reference */

	return (error);
}
/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:	uip			A pointer to the ubc_info to free
 *
 * Returns:	(void)
 *
 * Notes:	If there is a credential that has subsequently been associated
 *		with the ubc_info via a call to ubc_setcred(), the reference
 *		to the credential is dropped.
 *
 *		It's actually impossible for a ubc_info.ui_control to take the
 *		value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);
	ubc_cs_free(uip);

	zfree(ubc_info_zone, uip);
	return;
}


void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
errno_t mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_PROTECTION_FAILURE:
	case KERN_NOT_RECEIVER:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_MEMORY_FAILURE:
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
		return EPERM;

	case KERN_MEMORY_ERROR:
		return EBUSY;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO;
	}
}
/*
 * ubc_setsize_ex
 *
 * Tell the VM that the size of the file represented by the vnode has
 * changed
 *
 * Parameters:	vp			The vp whose backing file size is
 *					being changed
 *		nsize			The new size of the backing file
 *		opts			Options
 *
 * Returns:	EINVAL for new size < 0
 *		ENOENT if no UBC info exists
 *		EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
 *		Other errors (mapped to errno_t) returned by VM functions
 *
 * Notes:	This function will indicate success if the new size is the
 *		same or larger than the old size (in this case, the
 *		remainder of the file will require modification or use of
 *		an existing upl to access successfully).
 *
 *		This function will fail if the new file size is smaller,
 *		and the memory region being invalidated was unable to
 *		actually be invalidated and/or the last page could not be
 *		flushed, if the new size is not aligned to a page
 *		boundary.  This is usually indicative of an I/O error.
 */
errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0)
		return EINVAL;

	if (!UBCINFOEXISTS(vp))
		return ENOENT;

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
		return EAGAIN;

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {	/* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t		upl;
		upl_page_info_t	*pl;

		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE, VM_KERN_MEMORY_FILE);

		if (kret != KERN_SUCCESS)
			panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);

		if (upl_valid_page(pl, 0))
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int	flags;

		if (lastpg == 0)
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		else
			flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * invalidate the pages beyond the new EOF page
		 */
		kret = memory_object_lock_request(control,
			(memory_object_offset_t)lastpg,
			(memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
			MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	}
	return mach_to_bsd_errno(kret);
}

// Returns true for success
int ubc_setsize(vnode_t vp, off_t nsize)
{
	return ubc_setsize_ex(vp, nsize, 0) == 0;
}
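
/*
 * Example (sketch): a filesystem that cannot tolerate being re-entered
 * (e.g. while holding its own locks) can ask ubc_setsize_ex() not to call
 * back into it; a shrink is then refused with EAGAIN and must be retried
 * from a safer context.  The helper name example_update_ubc_size() and its
 * deferral policy are illustrative only.
 */
static int
example_update_ubc_size(vnode_t vp, off_t new_length, bool fs_locks_held)
{
	if (!fs_locks_held)
		return ubc_setsize_ex(vp, new_length, 0);

	/* growing does not flush; shrinking might re-enter, so it is refused here */
	return ubc_setsize_ex(vp, new_length, UBC_SETSIZE_NO_FS_REENTRY);
}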
/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:	vp			The vnode whose size is of interest
 *
 * Returns:	0			There is no ubc_info associated with
 *					this vnode, or the size is zero
 *		!0			The size of the file
 *
 * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a zero
 *		length file, and a vnode with no associated ubc_info.  The
 *		caller therefore needs to not care, or needs to ensure that
 *		they have previously successfully called ubc_info_init() or
 *		ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directory
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}
/*
 * ubc_umount
 *
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:	mp			The mount point
 *
 * Returns:	0			Success
 *
 * Notes:	There is no failure indication for this function.
 *
 *		This function is used in the unmount path; since it may block
 *		I/O indefinitely, it should not be used in the forced unmount
 *		path, since a device unavailability could also block that
 *		unmount indefinitely.
 *
 *		Because there is no device ejection interlock on USB, FireWire,
 *		or similar devices, it's possible that an ejection that begins
 *		subsequent to the vnode_iterate() completing, either on one of
 *		those devices, or a network mount for which the server quits
 *		responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}
/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}
/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:	vp			The vnode whose ubc_info credentials
 *					are to be retrieved
 *
 * Returns:	!NOCRED			The credentials
 *		NOCRED			If there is no ubc_info for the vnode,
 *					or if there is one, but it has not had
 *					any credentials associated with it via
 *					a call to ubc_setcred()
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}
/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used, if not running on an assumed
 *					credential
 *		thread			The thread whose credentials are to
 *					be used
 *
 * Returns:	1			This vnode has no associated ubc_info
 *		0			Success
 *
 * Notes:	This function takes a proc parameter to account for bootstrap
 *		issues where a task or thread may call this routine, either
 *		before credentials have been initialized by bsd_init(), or if
 *		there is no BSD info associated with a mach thread yet.  This
 *		is known to happen in both the initial swap and memory mapping
 *		paths.
 *
 *		This function is generally used only in the following cases:
 *
 *		o	a memory mapped file via the mmap() system call
 *		o	a swap store backing file
 *		o	subsequent to a successful write via vn_write()
 *
 *		The information is then used by the NFS client in order to
 *		cons up a wire message in either the page-in or page-out path.
 *
 *		There are two potential problems with the use of this API:
 *
 *		o	Because the write path only sets it on a successful
 *			write, there is a race window between setting the
 *			credential and its use to evict the pages to the
 *			remote file server
 *
 *		o	Because a page-in may occur prior to a write, the
 *			credential may not be set at this time, if the page-in
 *			is not the result of a mapping established via mmap().
 *
 *		In both these cases, this will be triggered from the paging
 *		path, which will instead use the credential of the current
 *		process, which in this case is either the dynamic_pager or
 *		the kernel task, both of which utilize "root" credentials.
 *
 *		This may potentially permit operations to occur which should
 *		be denied, or it may cause to be denied operations which
 *		should be permitted, depending on the configuration of the NFS
 *		server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}
/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the process; otherwise leave them
 * alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used
 *
 * Returns:	0			This vnode has no associated ubc_info
 *		1			Success
 *
 * Notes:	The return values for this function are inverted from nearly
 *		all other uses in the kernel.
 *
 *		See also ubc_setthreadcred(), above.
 *
 *		This function is considered deprecated, and generally should
 *		not be used, as it is incompatible with per-thread credentials;
 *		it exists for legacy KPI reasons.
 *
 * DEPRECATION:	ubc_setcred() is being deprecated. Please use
 *		ubc_setthreadcred() instead.
 */
int
ubc_setcred(struct vnode *vp, proc_t p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	/* If there is no ubc_info, deny the operation */
	if ( !UBCINFOEXISTS(vp))
		return (0);

	/*
	 * Check to see if there is already a credential reference in the
	 * ubc_info; if there is not, take one on the supplied credential.
	 */
	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;
	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}
/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:	vp			The vnode to obtain the pager from
 *
 * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
 *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
 *
 * Notes:	For each vnode that has a ubc_info associated with it, that
 *		ubc_info SHALL have a pager associated with it, so in the
 *		normal case, it's impossible to return VNODE_PAGER_NULL for
 *		a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (VNODE_PAGER_NULL);
}
/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:	vp			The vnode to obtain the memory object
 *					from
 *
 * Returns:	!MEMORY_OBJECT_CONTROL_NULL	Success
 *		MEMORY_OBJECT_CONTROL_NULL	There is no ubc_info
 *
 * Notes:	Historically, if the flags were not "do not reactivate", this
 *		function would look up the memory object using the pager if
 *		it did not exist (this could be the case if the vnode had
 *		been previously reactivated).  The flags would also permit a
 *		hold to be requested, which would have created an object
 *		reference, if one had not already existed.  This usage is
 *		deprecated, as it would permit a race between finding and
 *		taking the reference vs. a single reference being dropped in
 *		another thread.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (MEMORY_OBJECT_CONTROL_NULL);
}
boolean_t
ubc_strict_uncached_IO(struct vnode *vp)
{
	boolean_t result = FALSE;

	if (UBCINFOEXISTS(vp)) {
		result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
	}
	return result;
}
/*
 * ubc_blktooff
 *
 * Convert a given block number to a memory backing object (file) offset for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the block is located
 *		blkno			The block number to convert
 *
 * Returns:	!-1			The offset into the backing object
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_BLKTOOFF().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid offset, since an off_t of -1 is normally
 *		used to represent EOF.  If we had a more reliable constant in
 *		our header files for it (i.e. explicitly cast to an off_t), we
 *		would use it here instead.
 */
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset = -1;
	int error;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
		if (error)
			file_offset = -1;
	}

	return (file_offset);
}
/*
 * ubc_offtoblk
 *
 * Convert a given offset in a memory backing object into a block number for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the offset is
 *					located
 *		offset			The offset into the backing object
 *
 * Returns:	!-1			The returned block number
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_OFFTOBLK().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid block number, since block numbers exceed
 *		the valid range for offsets, due to their relative sizes.  If
 *		we had a more reliable constant than -1 in our header files
 *		for it (i.e. explicitly cast to a daddr64_t), we would use it
 *		here instead.
 */
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno = -1;
	int error = 0;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_OFFTOBLK(vp, offset, &blkno);
		if (error)
			blkno = -1;
	}

	return (blkno);
}
/*
 * ubc_pages_resident
 *
 * Determine whether or not a given vnode has pages resident via the memory
 * object control associated with the ubc_info associated with the vnode
 *
 * Parameters:	vp			The vnode we want to know about
 *
 * Returns:	1			Yes
 *		0			No
 */
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t	kret;
	boolean_t	has_pages_resident;

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * The following call may fail if an invalid ui_control is specified,
	 * or if there is no VM object associated with the control object.  In
	 * either case, reacting to it as if there were no pages resident will
	 * result in correct behavior.
	 */
	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}
/*
 * ubc_msync
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			See ubc_msync_internal()
 *
 * Returns:	0			Success
 *		!0			Failure; an errno is returned
 *
 * Implicit Returns:
 *		*resid_off, modified	If non-NULL, the contents are ALWAYS
 *					modified; they are initialized to the
 *					beg_off, and in case of an I/O error,
 *					the difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.  If no error is returned, the
 *					value of the resid_off is undefined; do
 *					NOT use it in place of end_off if you
 *					intend to increment from the end of the
 *					last call and call iteratively.
 *
 * Notes:	see ubc_msync_internal() for more detailed information.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
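
/*
 * Example (sketch): pushing every dirty page of a whole file to its pager
 * and waiting for the I/O to complete, as a filesystem might do before
 * taking a metadata snapshot.  The helper name example_flush_file() is
 * illustrative only.
 */
static int
example_flush_file(vnode_t vp)
{
	off_t resid = 0;

	/* UBC_PUSHDIRTY writes dirty pages; UBC_SYNC blocks until the I/O is done */
	return ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
	    UBC_PUSHDIRTY | UBC_SYNC);
}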
/*
 * ubc_msync_internal
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			MUST contain at least one of the flags
 *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
 *					UBC_SYNC may also be specified to cause
 *					this function to block until the
 *					operation is complete.  The behavior
 *					of UBC_SYNC is otherwise undefined.
 *		io_errno		The address of an int to contain the
 *					errno from a failed I/O operation, if
 *					one occurs; may be set to NULL to
 *					ignore
 *
 * Returns:	1			Success
 *		0			Failure
 *
 * Implicit Returns:
 *		*resid_off, modified	The contents of this offset MAY be
 *					modified; in case of an I/O error, the
 *					difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.
 *		*io_errno, modified	The contents of this offset are set to
 *					an errno, if an error occurs; if the
 *					caller supplies an io_errno parameter,
 *					they should be careful to initialize it
 *					to 0 before calling this function to
 *					enable them to distinguish an error
 *					with a valid *resid_off from an invalid
 *					one, and to avoid potentially falsely
 *					reporting an error, depending on use.
 *
 * Notes:	If there is no ubc_info associated with the vnode supplied,
 *		this function immediately returns success.
 *
 *		If the value of end_off is less than or equal to beg_off, this
 *		function immediately returns success; that is, end_off is NOT
 *		inclusive.
 *
 *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
 *		attempt to block on in-progress I/O by calling this function
 *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
 *		in order to block pending on the I/O already in progress.
 *
 *		The start offset is truncated to the page boundary and the
 *		size is adjusted to include the last page in the range; that
 *		is, end_off on exactly a page boundary will not change if it
 *		is rounded, and the range of bytes written will be from the
 *		truncated beg_off to the rounded (end_off - 1).
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);
	if (end_off <= beg_off)
		return (1);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize,
					  (memory_object_offset_t *)resid_off,
					  io_errno, flush_flags, request_flags,
					  VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * ubc_map
 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
 * to it for the ubc system, if there isn't one already, so it will not be
 * recycled while it's in use, and set flags on the ubc_info to indicate that
 * we have done this.
 *
 * Parameters:	vp			The vnode to map
 *		flags			The mapping flags for the vnode; this
 *					will be a combination of one or more of
 *					PROT_READ, PROT_WRITE, and PROT_EXEC
 *
 * Returns:	0			Success
 *		EPERM			Permission was denied
 *
 * Notes:	An I/O reference on the vnode must already be held on entry
 *
 *		If there is no ubc_info associated with the vnode, this function
 *		will return success.
 *
 *		If a permission error occurs, this function will return
 *		failure; all other failures will cause this function to return
 *		success.
 *
 *		IMPORTANT: This is an internal use function, and its symbols
 *		are not exported, hence its error checking is not very robust.
 *		It is primarily used by:
 *
 *		o	mmap(), when mapping a file
 *		o	When mapping a shared file (a shared library in the
 *			shared segment region)
 *		o	When loading a program image during the exec process
 *
 *		...all of these uses ignore the return code, and any fault that
 *		results later because of a failure is handled in the fix-up path
 *		of the fault handler.  The interface exists primarily as a
 *		performance hint.
 *
 *		Given that third party implementations of the types of interfaces
 *		that would use this function, such as alternative executable
 *		formats, etc., are unsupported, this function is not exported
 *		for general use.
 *
 *		The extra reference is held until the VM system unmaps the
 *		vnode from its own context to maintain a vnode reference in
 *		cases like open()/mmap()/close(), which leave the backing
 *		object referenced by a mapped memory region in a process
 *		address space.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	int need_wakeup = 0;

	if (UBCINFOEXISTS(vp)) {

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		error = VNOP_MMAP(vp, flags, vfs_context_current());

		/*
		 * rdar://problem/22587101 required that we stop propagating
		 * EPERM up the stack. Otherwise, we would have to funnel up
		 * the error at all the call sites for memory_object_map().
		 * The risk is in having to undo the map/object/entry state at
		 * all these call sites. It would also affect more than just mmap()
		 * e.g. vm_remap().
		 *
		 *	if (error != EPERM)
		 *		error = 0;
		 */

		error = 0;

		vnode_lock_spin(vp);

		if (error == 0) {
			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, UI_MAPPEDWRITE);
			}
		}
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

		if (need_ref) {
			/*
			 * Make sure we get a ref as we can't unwind from here
			 */
			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE))
				panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
		}
	}
	return (error);
}
/*
 * ubc_destroy_named
 *
 * Destroy the named memory object associated with the ubc_info control object
 * associated with the designated vnode, if there is a ubc_info associated
 * with the vnode, and a control object is associated with it
 *
 * Parameters:	vp			The designated vnode
 *
 * Returns:	(void)
 *
 * Notes:	This function is called on vnode termination for all vnodes,
 *		and must therefore not assume that there is a ubc_info that is
 *		associated with the vnode, nor that there is a control object
 *		associated with the ubc_info.
 *
 *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
 *		calling ubc_unmap() to release any vnode references that were
 *		established via ubc_map().
 *
 *		IMPORTANT: This is an internal use function that is used
 *		exclusively by the internal use function vclean().
 */
__private_extern__ void
ubc_destroy_named(vnode_t vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		/* Terminate the memory object */
		control = ubc_getobject(vp, UBC_HOLDOBJECT);
		if (control != MEMORY_OBJECT_CONTROL_NULL) {
			kret = memory_object_destroy(control, 0);
			if (kret != KERN_SUCCESS)
				panic("ubc_destroy_named: memory_object_destroy failed");
		}
	}
}
/*
 * ubc_isinuse
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	Because the vnode is only held locked while actually asking
 *		the use count, this function only represents a snapshot of the
 *		current state of the vnode.  If more accurate information is
 *		required, an additional busycount should be held by the caller
 *		and a non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}
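
/*
 * Example (sketch): a filesystem refusing an operation while a file is
 * still in use by UBC beyond the caller's own reference.  The helper name
 * example_reject_if_busy() and the EBUSY policy are illustrative only.
 */
static int
example_reject_if_busy(vnode_t vp)
{
	/* bias by 1 for the reference the caller itself holds */
	if (ubc_isinuse(vp, 1))
		return EBUSY;
	return 0;
}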
/*
 * ubc_isinuse_locked
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *		locked			True if the vnode is already locked by
 *					the caller
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	If the vnode is not locked on entry, it is locked while
 *		actually asking the use count.  If this is the case, this
 *		function only represents a snapshot of the current state of
 *		the vnode.  If more accurate information is required, the
 *		vnode lock should be held by the caller, otherwise an
 *		additional busycount should be held by the caller and a
 *		non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock_spin(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);

	return (retval);
}
/*
 * ubc_unmap
 *
 * Reverse the effects of a ubc_map() call for a given vnode
 *
 * Parameters:	vp			vnode to unmap from ubc
 *
 * Returns:	(void)
 *
 * Notes:	This is an internal use function used by vnode_pager_unmap().
 *		It will attempt to obtain a reference on the supplied vnode,
 *		and if it can do so, and there is an associated ubc_info, and
 *		the flags indicate that it was mapped via ubc_map(), then the
 *		flag is cleared, the mapping removed, and the reference taken
 *		by ubc_map() is released.
 *
 *		IMPORTANT: This MUST only be called by the VM
 *		to prevent race conditions.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int	need_rele = 0;
	int	need_wakeup = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		bool want_fsevent = false;

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
				want_fsevent = true;

			need_rele = 1;

			/*
			 * We want to clear the mapped flags after we've called
			 * VNOP_MNOMAP to avoid certain races and allow
			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
			 */
		}
		vnode_unlock(vp);

		if (need_rele) {
			vfs_context_t ctx = vfs_context_current();

			(void)VNOP_MNOMAP(vp, ctx);

			/*
			 * Why do we want an fsevent here?  Normally the
			 * content modified fsevent is posted when a file is
			 * closed and only if it's written to via conventional
			 * means.  It's perfectly legal to close a file and
			 * keep your mappings and we don't currently track
			 * whether it was written to via a mapping.
			 * Therefore, we need to post an fsevent here if the
			 * file was mapped writable.  This may result in false
			 * events, i.e. we post a notification when nothing
			 * has really changed.
			 */
			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
				add_fsevent(FSE_CONTENT_MODIFIED, ctx,
					    FSE_ARG_VNODE, vp,
					    FSE_ARG_DONE);
			}

			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		if (need_rele)
			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);

		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);
	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}
/*
 * ubc_page_op
 *
 * Manipulate individual page state for a vnode with an associated ubc_info
 * with an associated memory object control.
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset		A file offset interior to the page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		phys_entryp		The address of a ppnum_t; may be NULL
 *					to ignore
 *		flagsp			A pointer to an int to contain flags;
 *					may be NULL to ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
 *					not physically contiguous
 *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
 *					physically contiguous
 *		KERN_FAILURE		If the page cannot be looked up
 *
 * Implicit Returns:
 *		*phys_entryp (modified)	If phys_entryp is non-NULL and
 *					UPL_POP_PHYSICAL
 *		*flagsp (modified)	If flagsp is non-NULL and there was
 *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
 *
 * Notes:	For object boundaries, it is considerably more efficient to
 *		ensure that f_offset is in fact on a page boundary, as this
 *		will avoid internal use of the hash table to identify the
 *		page, and would therefore skip a number of early optimizations.
 *		Since this is a page operation anyway, the caller should try
 *		to pass only a page aligned offset because of this.
 *
 *		*flagsp may be modified even if this function fails.  If it is
 *		modified, it will contain the condition of the page before the
 *		requested operation was attempted; these will only include the
 *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
 *		UPL_POP_SET, or UPL_POP_CLR bits.
 *
 *		The flags field may contain a specific operation, such as
 *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
 *
 *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
 *						*phys_entryp and successful, set
 *						*phys_entryp
 *		o	UPL_POP_DUMP		Dump the specified page
 *
 *		Otherwise, it is treated as a bitmap of one or more page
 *		operations to perform on the final memory object; allowable
 *		bit values are:
 *
 *		o	UPL_POP_DIRTY		The page is dirty
 *		o	UPL_POP_PAGEOUT		The page is paged out
 *		o	UPL_POP_PRECIOUS	The page is precious
 *		o	UPL_POP_ABSENT		The page is absent
 *		o	UPL_POP_BUSY		The page is busy
 *
 *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
 *		modified, exactly ONE of the following bits should be set:
 *
 *		o	UPL_POP_SET		Set the current bitmap bits
 *		o	UPL_POP_CLR		Clear the current bitmap bits
 *
 *		Thus to effect a combination of setting and clearing, it may be
 *		necessary to call this function twice.  If this is done, the
 *		set should be used before the clear, since clearing may trigger
 *		a wakeup on the destination page, and if the page is backed by
 *		an encrypted swap file, setting will trigger the decryption
 *		needed before the wakeup occurs.
 */
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
		      (memory_object_offset_t)f_offset,
		      ops,
		      phys_entryp,
		      flagsp));
}
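
/*
 * Example (sketch): querying whether the cached page at a file offset is
 * dirty without modifying its state (no UPL_POP_SET/UPL_POP_CLR bits, and
 * an ops bitmap of 0 is assumed here to mean a pure query).  The helper
 * name example_page_is_dirty() is illustrative only.
 */
static boolean_t
example_page_is_dirty(vnode_t vp, off_t f_offset)
{
	int flags = 0;

	if (ubc_page_op(vp, f_offset, 0, NULL, &flags) != KERN_SUCCESS)
		return FALSE;		/* page absent or not mappable */

	return (flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}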
/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset_beg		A file offset interior to the start page
 *		f_offset_end		A file offset interior to the end page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		range			The address of an int; may be NULL to
 *					ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated with it
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *		o UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *		o UPL_ROP_DUMP		Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT, if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
		(memory_object_offset_t)f_offset_beg,
		(memory_object_offset_t)f_offset_end,
		ops, range));
}
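/*
 * Illustrative sketch (not code used by this file): asking how much of a
 * page-aligned range starting at "start" is resident.  "vp", "start" and
 * "len" are assumed to be supplied by the caller.
 *
 *	int resident_bytes = 0;
 *
 *	if (ubc_range_op(vp, start, start + len, UPL_ROP_PRESENT,
 *	    &resident_bytes) == KERN_SUCCESS) {
 *		// resident_bytes covers only the first present region
 *	}
 */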
/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
 *
 * Parameters:	vp			The vnode from which to create the upl
 *		f_offset		The start offset into the backing store
 *					represented by the vnode
 *		bufsize			The size of the upl to create
 *		uplp			Pointer to the upl_t to receive the
 *					created upl; MUST NOT be NULL
 *		plp			Pointer to receive the internal page
 *					list for the created upl; MAY be NULL
 *
 * Returns:	KERN_SUCCESS		The requested upl has been created
 *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
 *					multiple of the page size
 *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
 *					the vnode, or there is no memory object
 *					control associated with the ubc_info
 *		memory_object_upl_request:KERN_INVALID_VALUE
 *					The supplied upl_flags argument is
 *					invalid
 *
 * Implicit Returns:
 *		*plp (modified)		If non-NULL, the value of *plp will be
 *					modified to point to the internal page
 *					list; this modification may occur even
 *					if this function is unsuccessful, in
 *					which case the contents may be invalid
 *
 * Note:	If successful, the returned *uplp MUST subsequently be freed
 *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
 *		ubc_upl_abort(), or ubc_upl_abort_range().
 */
kern_return_t
ubc_create_upl_external(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	return (ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp,
		uplflags, vm_tag_bt()));
}

kern_return_t
ubc_create_upl_kernel(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags,
	vm_tag_t	tag)
{
	memory_object_control_t	control;
	kern_return_t		kr;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (bufsize > MAX_UPL_SIZE_BYTES)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {

		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY)
				uplflags |= UPL_NOBLOCK;

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
				    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			uplflags |= UPL_RET_ONLY_ABSENT |
				    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE)
				uplflags |= UPL_NOBLOCK;
		}
	} else {
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
		} else
			uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
	if (kr == KERN_SUCCESS && plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
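/*
 * Illustrative sketch (not code used by this file): the expected lifecycle of
 * a upl obtained via ubc_create_upl().  The vnode "vp", "f_offset", "bufsize"
 * and the "io_error" condition are assumed to come from the surrounding I/O
 * path; every upl that is created must end in a commit or an abort.
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	kern_return_t kr;
 *
 *	kr = ubc_create_upl(vp, f_offset, bufsize, &upl, &pl, UPL_UBC_PAGEIN);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	// ... operate on the pages described by "pl" ...
 *	if (io_error)
 *		ubc_upl_abort_range(upl, 0, bufsize,
 *		    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
 *	else
 *		ubc_upl_commit_range(upl, 0, bufsize, UPL_COMMIT_FREE_ON_EMPTY);
 */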
/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl() will take.
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl() will take.
 */
upl_size_t
ubc_upl_maxbufsize(
	void)
{
	return(MAX_UPL_SIZE_BYTES);
}
/*
 * ubc_upl_map
 *
 * Map the page list associated with the supplied upl into the kernel virtual
 * address space at the virtual address indicated by the dst_addr argument;
 * the entire upl is mapped
 *
 * Parameters:	upl			The upl to map
 *		dst_addr		The address at which to map the upl
 *
 * Returns:	KERN_SUCCESS		The upl has been mapped
 *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
 *		KERN_FAILURE		The upl is already mapped
 *		vm_map_enter:KERN_INVALID_ARGUMENT
 *					A failure code from vm_map_enter() due
 *					to an invalid argument
 */
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}
/*
 * ubc_upl_unmap
 *
 * Unmap the page list associated with the supplied upl from the kernel virtual
 * address space; the entire upl is unmapped.
 *
 * Parameters:	upl			The upl to unmap
 *
 * Returns:	KERN_SUCCESS		The upl has been unmapped
 *		KERN_FAILURE		The upl is not currently mapped
 *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
 */
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}
/*
 * ubc_upl_commit
 *
 * Commit the contents of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *
 * Returns:	KERN_SUCCESS		The upl has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	In practice, the only return value for this function should be
 *		KERN_SUCCESS, unless there has been data structure corruption;
 *		since the upl is deallocated regardless of success or failure,
 *		there's really nothing to do about this other than panic.
 *
 *		IMPORTANT: Use of this function should not be mixed with use of
 *		ubc_upl_commit_range(), due to the unconditional deallocation
 *		by this function.
 */
kern_return_t
ubc_upl_commit(
	upl_t	upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
	upl_deallocate(upl);
	return kr;
}
/*
 * ubc_upl_commit_range
 *
 * Commit the contents of the specified range of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *		offset			The offset into the upl
 *		size			The size of the region to be committed,
 *					starting at the specified offset
 *		flags			commit type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the commit is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *						both empty and has been
 *						successfully committed
 *		o UPL_COMMIT_CLEAR_DIRTY	Clear each page's dirty
 *						bit; will prevent a
 *						later pageout
 *		o UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *						bit; will cause a later
 *						pageout
 *		o UPL_COMMIT_INACTIVATE		Clear each page's
 *						reference bit; the page
 *						will not be accessed
 *		o UPL_COMMIT_ALLOW_ACCESS	Unbusy each page; pages
 *						become busy when an
 *						IOMemoryDescriptor is
 *						mapped or redirected,
 *						and we have to wait for
 *						the owner to make them
 *						available
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
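/*
 * Illustrative sketch (not code used by this file): after successfully
 * writing a upl's pages to backing store, a pager-style caller would
 * typically commit the full range and clear the dirty bits so the pages are
 * not paged out again.  "upl" and "io_size" are assumed to come from the
 * surrounding I/O path, with io_size equal to the upl size per the note
 * above about UPL_COMMIT_FREE_ON_EMPTY.
 *
 *	(void) ubc_upl_commit_range(upl, 0, io_size,
 *	    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
 */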
/*
 * ubc_upl_abort_range
 *
 * Abort the contents of the specified range of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		offset			The offset into the upl
 *		size			The size of the region to be aborted,
 *					starting at the specified offset
 *		abort_flags		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_flags argument is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o UPL_ABORT_RESTART		The operation must be restarted
 *		o UPL_ABORT_UNAVAILABLE		The pages are unavailable
 *		o UPL_ABORT_ERROR		An I/O error occurred
 *		o UPL_ABORT_DUMP_PAGES		Just free the pages
 *		o UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY flag is an internal use flag and
 *		should not be specified by the caller.  It is intended to
 *		fulfill the same role as UPL_COMMIT_NOTIFY_EMPTY does in the
 *		function ubc_upl_commit_range(), but is never referenced
 *		internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS flag is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
/*
 * ubc_upl_abort
 *
 * Abort the contents of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		abort_type		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The upl has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_type is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o UPL_ABORT_RESTART		The operation must be restarted
 *		o UPL_ABORT_UNAVAILABLE		The pages are unavailable
 *		o UPL_ABORT_ERROR		An I/O error occurred
 *		o UPL_ABORT_DUMP_PAGES		Just free the pages
 *		o UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY flag is an internal use flag and
 *		should not be specified by the caller.  It is intended to
 *		fulfill the same role as UPL_COMMIT_NOTIFY_EMPTY does in the
 *		function ubc_upl_commit_range(), but is never referenced
 *		internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS flag is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * ubc_upl_pageinfo
 *
 * Retrieve the internal page list for the specified upl
 *
 * Parameters:	upl			The upl to obtain the page list from
 *
 * Returns:	!NULL			The (upl_page_info_t *) for the page
 *					list internal to the upl
 *		NULL			Error/no page list associated
 *
 * Notes:	IMPORTANT: The function is only valid on internal objects
 *		where the list request was made with the UPL_INTERNAL flag.
 *
 *		This function is a utility helper function, since some callers
 *		may not have direct access to the header defining the macro,
 *		due to abstraction layering constraints.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
boolean_t
UBCINFOEXISTS(const struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
void
ubc_upl_range_needed(
	upl_t	upl,
	int	index,
	int	count)
{
	upl_range_needed(upl, index, count);
}
boolean_t
ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
		return FALSE;
	if (writable)
		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
	return TRUE;
}

boolean_t
ubc_is_mapped_writable(const struct vnode *vp)
{
	boolean_t writable;
	return ubc_is_mapped(vp, &writable) && writable;
}
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;

SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
/*
 * Function: csblob_parse_teamid
 *
 * Description: This function returns a pointer to the team id
 *		stored within the codedirectory of the csblob.
 *		If the codedirectory predates team-ids, it returns
 *		NULL.
 *		This does not copy the name but returns a pointer to
 *		it within the CD.  Subsequently, the CD must be
 *		available when this is used.
 */
static const char *
csblob_parse_teamid(struct cs_blob *csblob)
{
	const CS_CodeDirectory *cd;

	cd = csblob->csb_cd;

	if (ntohl(cd->version) < CS_SUPPORTSTEAMID)
		return NULL;

	if (cd->teamOffset == 0)
		return NULL;

	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
	printf("found team-id %s in cdblob\n", name);

	return name;
}
kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t	*blob_addr_p,
	vm_size_t	*blob_size_p)
{
	kern_return_t	kr;

	*blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
	if (*blob_addr_p == 0) {
		kr = KERN_NO_SPACE;
	} else {
		kr = KERN_SUCCESS;
	}
	return kr;
}

void
ubc_cs_blob_deallocate(
	vm_offset_t	blob_addr,
	vm_size_t	blob_size)
{
	kfree((void *) blob_addr, blob_size);
}
/*
 * Some codesigned files use a lowest common denominator page size of
 * 4KiB, but can be used on systems that have a runtime page size of
 * 16KiB.  Since faults will only occur on 16KiB ranges in
 * cs_validate_range(), we can convert the original Code Directory to
 * a multi-level scheme where groups of 4 hashes are combined to form
 * a new hash, which represents 16KiB in the on-disk file.  This can
 * reduce the wired memory requirement for the Code Directory by
 * 75%.  Care must be taken for binaries that use the "fourk" VM pager
 * for unaligned access, which may still attempt to validate on
 * non-16KiB multiples for compatibility with 3rd party binaries.
 */
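/*
 * Illustrative sketch of the conversion arithmetic described above
 * (assumptions: a 4KiB code-signing page size, a 16KiB runtime PAGE_SIZE,
 * and "hash_fn" as stand-in pseudocode for the blob's hash routine, not a
 * real kernel interface):
 *
 *	uint32_t shift = PAGE_SHIFT - csb_hash_pageshift;  // 14 - 12 = 2
 *	uint32_t group = 1 << shift;                        // 4 hashes per new hash
 *
 *	for (i = 0; i < (nCodeSlots >> shift); i++) {
 *		// hash the concatenation of "group" consecutive 4KiB hashes
 *		hash_fn(&old_hashes[i * group * hashSize],
 *		    group * hashSize, &new_hashes[i * hashSize]);
 *	}
 *
 * Every new slot then covers 16KiB of the on-disk file, so only a quarter
 * of the original hash slots need to stay wired.
 */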
static boolean_t
ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory *cd;

	cd = blob->csb_cd;

	/*
	 * Only applies to binaries that ship as part of the OS,
	 * primarily the shared cache.
	 */
	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
		return FALSE;
	}

	/*
	 * If the runtime page size matches the code signing page
	 * size, there is no work to do.
	 */
	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
		return FALSE;
	}

	/*
	 * There must be a valid integral multiple of hashes
	 */
	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
		return FALSE;
	}

	/*
	 * Scatter lists must also have ranges that have an integral number of hashes
	 */
	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {

		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		/* iterate all scatter structs to make sure they are all aligned */
		while (1) {
			uint32_t sbase = ntohl(scatter->base);
			uint32_t scount = ntohl(scatter->count);

			/* last scatter struct is sentinel with count 0 */
			if (scount == 0) {
				break;
			}

			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
				return FALSE;
			}

			scatter++;
		}
	}

	/* Covered range must be a multiple of the new page size */
	if (ntohl(cd->codeLimit) & PAGE_MASK) {
		return FALSE;
	}

	/* All checks pass */
	return TRUE;
}
/*
 * All state and preconditions were checked before, so this
 * function cannot fail.
 */
static void
ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
{
	const CS_CodeDirectory	*old_cd, *cd;
	CS_CodeDirectory	*new_cd;
	const CS_GenericBlob	*entitlements;
	vm_offset_t		new_blob_addr;
	vm_size_t		new_blob_size;
	vm_size_t		new_cdsize;
	size_t			length;
	kern_return_t		kr;
	int			error;

	uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);

	printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
	    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);

	old_cd = blob->csb_cd;

	/* Up to the hashes, we can copy all data */
	new_cdsize = ntohl(old_cd->hashOffset);
	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;

	new_blob_size = sizeof(CS_SuperBlob);
	new_blob_size += sizeof(CS_BlobIndex);
	new_blob_size += new_cdsize;

	if (blob->csb_entitlements_blob) {
		/* We need to add a slot for the entitlements */
		new_blob_size += sizeof(CS_BlobIndex);
		new_blob_size += ntohl(blob->csb_entitlements_blob->length);
	}

	kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
	if (kr != KERN_SUCCESS) {
		printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
		    kr);
		return;
	}

	CS_SuperBlob		*new_superblob;

	new_superblob = (CS_SuperBlob *)new_blob_addr;
	new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
	new_superblob->length = htonl((uint32_t)new_blob_size);
	if (blob->csb_entitlements_blob) {
		vm_size_t	ent_offset, cd_offset;

		cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
		ent_offset = cd_offset + new_cdsize;

		new_superblob->count = htonl(2);
		new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
		new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
		new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
		new_superblob->index[1].offset = htonl((uint32_t)ent_offset);

		memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));

		new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
	} else {
		vm_size_t	cd_offset;

		cd_offset = sizeof(CS_SuperBlob) + 1 * sizeof(CS_BlobIndex);

		new_superblob->count = htonl(1);
		new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
		new_superblob->index[0].offset = htonl((uint32_t)cd_offset);

		new_cd = (CS_CodeDirectory *)new_blob_addr;
	}

	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));

	/* Update fields in the Code Directory structure */
	new_cd->length = htonl((uint32_t)new_cdsize);

	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
	nCodeSlots >>= hashes_per_new_hash_shift;
	new_cd->nCodeSlots = htonl(nCodeSlots);

	new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */

	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
		SC_Scatter *scatter = (SC_Scatter*)
			((char *)new_cd + ntohl(new_cd->scatterOffset));
		/* iterate all scatter structs to scale their counts */
		while (1) {
			uint32_t scount = ntohl(scatter->count);
			uint32_t sbase = ntohl(scatter->base);

			/* last scatter struct is sentinel with count 0 */
			if (scount == 0) {
				break;
			}

			scount >>= hashes_per_new_hash_shift;
			scatter->count = htonl(scount);

			sbase >>= hashes_per_new_hash_shift;
			scatter->base = htonl(sbase);

			scatter++;
		}
	}

	/* For each group of hashes, hash them together */
	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);

	uint32_t hash_index;
	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
		union cs_hash_union	mdctx;

		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
		const unsigned char *src = src_base + hash_index * source_hash_len;
		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
		blob->csb_hashtype->cs_final(dst, &mdctx);
	}

	length = new_blob_size;
	error = cs_validate_csblob((const uint8_t *)new_blob_addr, &length, &cd, &entitlements);
	assert(length == new_blob_size);
	if (error) {
		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
		    error);

		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
		return;
	}

	/* New Code Directory is ready for use, swap it out in the blob structure */
	ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);

	blob->csb_mem_size = new_blob_size;
	blob->csb_mem_kaddr = new_blob_addr;
	blob->csb_cd = cd;
	blob->csb_entitlements_blob = entitlements;

	/* The blob has some cached attributes of the Code Directory, so update those */

	blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */

	blob->csb_hash_pagesize = PAGE_SIZE;
	blob->csb_hash_pagemask = PAGE_MASK;
	blob->csb_hash_pageshift = PAGE_SHIFT;
	blob->csb_end_offset = ntohl(cd->codeLimit);
	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
	} else {
		blob->csb_start_offset = 0;
	}
}
int
ubc_cs_blob_add(
	struct vnode		*vp,
	cpu_type_t		cputype,
	off_t			base_offset,
	vm_address_t		*addr,
	vm_size_t		size,
	struct image_params	*imgp,
	int			flags,
	struct cs_blob		**ret_blob)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob, *oblob;
	int			error;
	const CS_CodeDirectory	*cd;
	const CS_GenericBlob	*entitlements;
	off_t			blob_start_offset, blob_end_offset;
	union cs_hash_union	mdctx;
	boolean_t		record_mtime;
	size_t			length;

	record_mtime = FALSE;
	if (ret_blob)
		*ret_blob = NULL;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL) {
		return ENOMEM;
	}

	/* fill in the new blob */
	blob->csb_cpu_type = cputype;
	blob->csb_base_offset = base_offset;
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_kaddr = *addr;
	blob->csb_flags = 0;
	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
	blob->csb_platform_binary = 0;
	blob->csb_platform_path = 0;
	blob->csb_teamid = NULL;
	blob->csb_entitlements_blob = NULL;
	blob->csb_entitlements = NULL;

	/* Transfer ownership. Even on error, this function will deallocate */
	*addr = 0;

	/*
	 * Validate the blob's contents
	 */
	length = (size_t) size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
				   &length, &cd, &entitlements);
	if (error) {
		if (cs_debug)
			printf("CODESIGNING: csblob invalid: %d\n", error);
		/*
		 * The vnode checker can't make the rest of this function
		 * succeed if csblob validation failed, so bail */
		goto out;
	} else {
		const unsigned char *md_base;
		uint8_t hash[CS_HASH_MAX_SIZE];
		int md_size;

		size = (vm_size_t) length;
		assert(size <= blob->csb_mem_size);
		if (size < blob->csb_mem_size) {
			vm_address_t new_blob_addr;
			const CS_CodeDirectory *new_cd;
			const CS_GenericBlob *new_entitlements;

			kr = ubc_cs_blob_allocate(&new_blob_addr, &size);
			if (kr != KERN_SUCCESS) {
				if (cs_debug > 1)
					printf("CODE SIGNING: failed to "
					       "re-allocate blob (size "
					       "0x%llx->0x%llx) error 0x%x\n",
					       (uint64_t)blob->csb_mem_size,
					       (uint64_t)size, kr);
			} else {
				memcpy((void *)new_blob_addr, (void *)blob->csb_mem_kaddr, size);
				if (cd == NULL) {
					new_cd = NULL;
				} else {
					new_cd = (void *)(((uintptr_t)cd
							   - (uintptr_t)blob->csb_mem_kaddr
							   + (uintptr_t)new_blob_addr));
				}
				if (entitlements == NULL) {
					new_entitlements = NULL;
				} else {
					new_entitlements = (void *)(((uintptr_t)entitlements
								     - (uintptr_t)blob->csb_mem_kaddr
								     + (uintptr_t)new_blob_addr));
				}
//				printf("CODE SIGNING: %s:%d kaddr 0x%llx cd %p ents %p -> blob 0x%llx cd %p ents %p\n", __FUNCTION__, __LINE__, (uint64_t)blob->csb_mem_kaddr, cd, entitlements, (uint64_t)new_blob_addr, new_cd, new_entitlements);
				ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
						       blob->csb_mem_size);
				blob->csb_mem_kaddr = new_blob_addr;
				blob->csb_mem_size = size;
				cd = new_cd;
				entitlements = new_entitlements;
			}
		}

		blob->csb_cd = cd;
		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
		blob->csb_hashtype = cs_find_md(cd->hashType);
		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash))
			panic("validated CodeDirectory but unsupported type");

		blob->csb_hash_pageshift = cd->pageSize;
		blob->csb_hash_pagesize = (1U << cd->pageSize);
		blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
		blob->csb_hash_firstlevel_pagesize = 0;
		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
		if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
				((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
		} else {
			blob->csb_start_offset = 0;
		}

		/* compute the blob's cdhash */
		md_base = (const unsigned char *) cd;
		md_size = ntohl(cd->length);

		blob->csb_hashtype->cs_init(&mdctx);
		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
		blob->csb_hashtype->cs_final(hash, &mdctx);

		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
	}

	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */
	unsigned int cs_flags = blob->csb_flags;
	unsigned int signer_type = blob->csb_signer_type;
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;

	if (error) {
		if (cs_debug)
			printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
		goto out;
	}
	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
		if (cs_debug)
			printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
		error = EPERM;
		goto out;
	}

	if (blob->csb_flags & CS_PLATFORM_BINARY) {
		if (cs_debug > 1)
			printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
		blob->csb_platform_binary = 1;
		blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
	} else {
		blob->csb_platform_binary = 0;
		blob->csb_platform_path = 0;
		blob->csb_teamid = csblob_parse_teamid(blob);
		if (cs_debug > 1) {
			if (blob->csb_teamid)
				printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
			else
				printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
		}
	}

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	if (ubc_cs_supports_multilevel_hash(blob)) {
		ubc_cs_convert_to_multilevel_hash(blob);
	}

	vnode_lock(vp);
	if (! UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = uip->cs_blobs;
	     oblob != NULL;
	     oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
			vnode_unlock(vp);
			error = EALREADY;
			goto out;
		} else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else { // non teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
				      oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
				    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict ! */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    blob->csb_mem_size == oblob->csb_mem_size &&
			    blob->csb_flags == oblob->csb_flags &&
			    (blob->csb_cpu_type == CPU_TYPE_ANY ||
			     oblob->csb_cpu_type == CPU_TYPE_ANY ||
			     blob->csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(blob->csb_cdhash,
				  oblob->csb_cdhash,
				  CS_CDHASH_LEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					oblob->csb_cpu_type = cputype;
				}
				vnode_unlock(vp);
				if (ret_blob)
					*ret_blob = oblob;
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	if (cs_blob_count > cs_blob_count_peak) {
		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
	}
	OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
	if ((SInt32) cs_blob_size > cs_blob_size_peak) {
		cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
	}
	if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
		cs_blob_size_max = (UInt32) blob->csb_mem_size;
	}

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		       "loaded %s signatures for file (%s) "
		       "range 0x%llx:0x%llx flags 0x%x\n",
		       p->p_pid, p->p_comm,
		       blob->csb_cpu_type == -1 ? "detached" : "embedded",
		       name,
		       blob->csb_base_offset + blob->csb_start_offset,
		       blob->csb_base_offset + blob->csb_end_offset,
		       blob->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	if (ret_blob)
		*ret_blob = blob;

	error = 0;	/* success ! */

out:
	if (error) {
		if (cs_debug)
			printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);

		/* we failed; release what we allocated */
		if (blob) {
			if (blob->csb_mem_kaddr) {
				ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
				blob->csb_mem_kaddr = 0;
			}
			if (blob->csb_entitlements != NULL) {
				osobject_release(blob->csb_entitlements);
				blob->csb_entitlements = NULL;
			}
			kfree(blob, sizeof (*blob));
			blob = NULL;
		}
	}

	if (error == EAGAIN) {
		/*
		 * See above: error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
	}

	return error;
}
void
csvnode_print_debug(struct vnode *vp)
{
	const char	*name = NULL;
	struct ubc_info	*uip;
	struct cs_blob	*blob;

	name = vnode_getname_printable(vp);
	if (name) {
		printf("csvnode: name: %s\n", name);
		vnode_putname_printable(name);
	}

	vnode_lock_spin(vp);

	if (! UBCINFOEXISTS(vp)) {
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
		       (unsigned long)blob->csb_start_offset,
		       (unsigned long)blob->csb_end_offset,
		       blob->csb_flags,
		       blob->csb_platform_binary ? "yes" : "no",
		       blob->csb_platform_path ? "yes" : "no",
		       blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
	}

out:
	vnode_unlock(vp);
}
struct cs_blob *
ubc_cs_blob_get(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		offset)
{
	struct ubc_info	*uip;
	struct cs_blob	*blob;
	off_t offset_in_blob;

	vnode_lock_spin(vp);

	if (! UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		if (cputype != -1 && blob->csb_cpu_type == cputype) {
			break;
		}
		offset_in_blob = offset - blob->csb_base_offset;
		if (offset_in_blob >= blob->csb_start_offset &&
		    offset_in_blob < blob->csb_end_offset) {
			/* our offset is covered by this blob */
			break;
		}
	}

out:
	vnode_unlock(vp);

	return blob;
}
static void
ubc_cs_free(
	struct ubc_info	*uip)
{
	struct cs_blob	*blob, *next_blob;

	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = next_blob) {
		next_blob = blob->csb_next;
		if (blob->csb_mem_kaddr != 0) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
					       blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_entitlements != NULL) {
			osobject_release(blob->csb_entitlements);
			blob->csb_entitlements = NULL;
		}
		OSAddAtomic(-1, &cs_blob_count);
		OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
		kfree(blob, sizeof (*blob));
	}
#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
	uip->cs_blobs = NULL;
}
/*
 * check cs blob generation on vnode
 * returns:
 *    0         : Success, the cs_blob attached is current
 *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
 */
int
ubc_cs_generation_check(
	struct vnode	*vp)
{
	int retval = ENEEDAUTH;

	vnode_lock_spin(vp);

	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
		retval = 0;
	}

	vnode_unlock(vp);
	return retval;
}
int
ubc_cs_blob_revalidate(
	struct vnode	*vp,
	struct cs_blob	*blob,
	struct image_params	*imgp,
	int		flags)
{
	int error = 0;
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	size_t size;

	assert(blob != NULL);

	size = blob->csb_mem_size;
	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
				   &size, &cd, &entitlements);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}
	assert(size == blob->csb_mem_size);

	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;

	/* callout to mac_vnode_check_signature */
	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
	}

	/* update generation number if success */
	vnode_lock_spin(vp);
	blob->csb_flags = cs_flags;
	blob->csb_signer_type = signer_type;
	if (UBCINFOEXISTS(vp)) {
		if (error == 0)
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		else
			vp->v_ubcinfo->cs_add_gen = 0;
	}

	vnode_unlock(vp);

out:
	return error;
}
void
cs_blob_reset_cache()
{
	/* incrementing an odd number by 2 makes sure '0' is never reached. */
	OSAddAtomic(+2, &cs_blob_generation_count);
	printf("Resetting cs_blob cache from all vnodes.\n");
}
struct cs_blob *
ubc_get_cs_blobs(
	struct vnode	*vp)
{
	struct ubc_info	*uip;
	struct cs_blob	*blobs;

	/*
	 * No need to take the vnode lock here.  The caller must be holding
	 * a reference on the vnode (via a VM mapping or open file descriptor),
	 * so the vnode will not go away.  The ubc_info stays until the vnode
	 * goes away.  And we only modify "blobs" by adding to the head of the
	 * list.
	 * The ubc_info could go away entirely if the vnode gets reclaimed as
	 * part of a forced unmount.  In the case of a code-signature validation
	 * during a page fault, the "paging_in_progress" reference on the VM
	 * object guarantees that the vnode pager (and the ubc_info) won't go
	 * away during the fault.
	 * Other callers need to protect against vnode reclaim by holding the
	 * vnode lock, for example.
	 */

	if (! UBCINFOEXISTS(vp)) {
		blobs = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	blobs = uip->cs_blobs;

out:
	return blobs;
}
void
ubc_get_cs_mtime(
	struct vnode	*vp,
	struct timespec	*cs_mtime)
{
	struct ubc_info	*uip;

	if (! UBCINFOEXISTS(vp)) {
		cs_mtime->tv_sec = 0;
		cs_mtime->tv_nsec = 0;
		return;
	}

	uip = vp->v_ubcinfo;
	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
static boolean_t
cs_validate_hash(
	struct cs_blob		*blobs,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	vm_size_t		*bytes_processed,
	unsigned		*tainted)
{
	union cs_hash_union	mdctx;
	struct cs_hash const	*hashtype = NULL;
	unsigned char		actual_hash[CS_HASH_MAX_SIZE];
	unsigned char		expected_hash[CS_HASH_MAX_SIZE];
	boolean_t		found_hash;
	struct cs_blob		*blob;
	const CS_CodeDirectory	*cd;
	const unsigned char	*hash;
	boolean_t		validated;
	off_t			offset;	/* page offset in the file */
	size_t			size;
	off_t			codeLimit = 0;
	const char		*lower_bound, *upper_bound;
	vm_offset_t		kaddr, blob_addr;

	/* retrieve the expected hash */
	found_hash = FALSE;

	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* blob data has been released */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			continue;
		}

		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		cd = blob->csb_cd;
		if (cd != NULL) {
			/* all CD's that have been injected are already validated */

			hashtype = blob->csb_hashtype;
			if (hashtype == NULL)
				panic("unknown hash type ?");
			if (hashtype->cs_digest_size > sizeof(actual_hash))
				panic("hash size too large");
			if (offset & blob->csb_hash_pagemask)
				panic("offset not aligned to cshash boundary");

			codeLimit = ntohl(cd->codeLimit);

			hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift),
				      hashtype->cs_size,
				      lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash, hashtype->cs_size);
				found_hash = TRUE;
			}

			break;
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			       "mobj %p off 0x%llx: no hash to validate !?\n",
			       pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {

		*tainted = 0;

		size = blob->csb_hash_pagesize;
		*bytes_processed = size;

		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & blob->csb_hash_pagemask);
			*tainted |= CS_VALIDATE_NX;
		}

		hashtype->cs_init(&mdctx);

		if (blob->csb_hash_firstlevel_pagesize) {
			const unsigned char *partial_data = (const unsigned char *)data;
			size_t i;
			for (i = 0; i < size;) {
				union cs_hash_union	partialctx;
				unsigned char partial_digest[CS_HASH_MAX_SIZE];
				size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize);

				hashtype->cs_init(&partialctx);
				hashtype->cs_update(&partialctx, partial_data, partial_size);
				hashtype->cs_final(partial_digest, &partialctx);

				/* Update cumulative multi-level hash */
				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
				partial_data = partial_data + partial_size;
				i += partial_size;
			}
		} else {
			hashtype->cs_update(&mdctx, data, size);
		}
		hashtype->cs_final(actual_hash, &mdctx);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				       "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				       pager, page_offset, size,
				       asha1[0], asha1[1], asha1[2],
				       asha1[3], asha1[4],
				       esha1[0], esha1[1], esha1[2],
				       esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "SHA1 OK\n",
				       pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}
boolean_t
cs_validate_range(
	struct vnode		*vp,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	vm_size_t		dsize,
	unsigned		*tainted)
{
	vm_size_t offset_in_range;
	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */

	struct cs_blob *blobs = ubc_get_cs_blobs(vp);

	*tainted = 0;

	for (offset_in_range = 0;
	     offset_in_range < dsize;
	     /* offset_in_range updated based on bytes processed */) {
		unsigned subrange_tainted = 0;
		boolean_t subrange_validated;
		vm_size_t bytes_processed = 0;

		subrange_validated = cs_validate_hash(blobs,
						      pager,
						      page_offset + offset_in_range,
						      (const void *)((const char *)data + offset_in_range),
						      &bytes_processed,
						      &subrange_tainted);

		*tainted |= subrange_tainted;

		if (bytes_processed == 0) {
			/* Cannot make forward progress, so return an error */
			all_subranges_validated = FALSE;
			break;
		} else if (subrange_validated == FALSE) {
			all_subranges_validated = FALSE;
			/* Keep going to detect other types of failures in subranges */
		}

		offset_in_range += bytes_processed;
	}

	return all_subranges_validated;
}
int
ubc_cs_getcdhash(
	vnode_t		vp,
	off_t		offset,
	unsigned char	*cdhash)
{
	struct cs_blob	*blobs, *blob;
	off_t		rel_offset;
	int		ret;

	vnode_lock(vp);

	blobs = ubc_get_cs_blobs(vp);
	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		/* compute offset relative to this blob */
		rel_offset = offset - blob->csb_base_offset;
		if (rel_offset >= blob->csb_start_offset &&
		    rel_offset < blob->csb_end_offset) {
			/* this blob does cover our "offset" ! */
			break;
		}
	}

	if (blob == NULL) {
		/* we didn't find a blob covering "offset" */
		ret = EBADEXEC; /* XXX any better error ? */
	} else {
		/* get the SHA1 hash of that blob */
		bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash));
		ret = 0;
	}

	vnode_unlock(vp);

	return ret;
}
boolean_t
ubc_cs_is_range_codesigned(
	vnode_t			vp,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	struct cs_blob		*csblob;
	mach_vm_offset_t	blob_start;
	mach_vm_offset_t	blob_end;

	if (vp == NULL) {
		/* no file: no code signature */
		return FALSE;
	}
	if (size == 0) {
		/* no range: no code signature */
		return FALSE;
	}
	if (start + size < start) {
		/* overflow */
		return FALSE;
	}

	csblob = ubc_cs_blob_get(vp, -1, start);
	if (csblob == NULL) {
		return FALSE;
	}

	/*
	 * We currently check if the range is covered by a single blob,
	 * which should always be the case for the dyld shared cache.
	 * If we ever want to make this routine handle other cases, we
	 * would have to iterate if the blob does not cover the full range.
	 */
	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
					 csblob->csb_start_offset);
	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
				       csblob->csb_end_offset);
	if (blob_start > start || blob_end < (start + size)) {
		/* range not fully covered by this code-signing blob */
		return FALSE;
	}

	return TRUE;
}
#if CHECK_CS_VALIDATION_BITMAP
#define stob(s)	(((atop_64(round_page_64(s))) + 07) >> 3)
extern	boolean_t	root_fs_upgrade_try;

/*
 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
 * a) Is the target vnode on the root filesystem?
 * b) Has someone tried to mount the root filesystem read-write?
 * If answers are (a) yes AND (b) no, then we can use the bitmap.
 */
#define USE_CODE_SIGN_BITMAP(vp)	( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
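/*
 * Illustrative sketch of the sizing and indexing arithmetic used by the
 * validation bitmap below: one bit per page of the file, packed into bytes.
 * The names "nbytes", "page" and "mask" are local stand-ins, not kernel
 * interfaces.
 *
 *	// stob(): bytes needed for one bit per page, rounded up
 *	vm_object_size_t nbytes = stob(uip->ui_size);
 *
 *	// a page's bit lives at byte (page >> 3), position (page & 07)
 *	uint64_t page = atop_64(offset);
 *	char mask = 1 << (page & 07);
 *	bitmap[page >> 3] |=  mask;          // CS_BITMAP_SET
 *	bitmap[page >> 3] &= ~mask;          // CS_BITMAP_CLEAR
 *	valid = bitmap[page >> 3] & mask;    // CS_BITMAP_CHECK
 */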
kern_return_t
ubc_cs_validation_bitmap_allocate(
	vnode_t		vp)
{
	kern_return_t	kr = KERN_SUCCESS;
	struct ubc_info	*uip;
	char		*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		uip = vp->v_ubcinfo;

		if ( uip->cs_valid_bitmap == NULL ) {
			bitmap_size = stob(uip->ui_size);
			target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
			if (target_bitmap == 0) {
				kr = KERN_NO_SPACE;
			} else {
				kr = KERN_SUCCESS;
			}
			if( kr == KERN_SUCCESS ) {
				memset( target_bitmap, 0, (size_t)bitmap_size);
				uip->cs_valid_bitmap = (void*)target_bitmap;
				uip->cs_valid_bitmap_size = bitmap_size;
			}
		}
	}
	return kr;
}
kern_return_t
ubc_cs_check_validation_bitmap (
	vnode_t			vp,
	memory_object_offset_t	offset,
	int			optype)
{
	kern_return_t	kr = KERN_SUCCESS;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		struct ubc_info *uip = vp->v_ubcinfo;
		char		*target_bitmap = uip->cs_valid_bitmap;

		if ( target_bitmap == NULL ) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			uint64_t	bit, byte;
			bit = atop_64( offset );
			byte = bit >> 3;

			if ( byte > uip->cs_valid_bitmap_size ) {
				kr = KERN_INVALID_ARGUMENT;
			} else {

				if (optype == CS_BITMAP_SET) {
					target_bitmap[byte] |= (1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CLEAR) {
					target_bitmap[byte] &= ~(1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CHECK) {
					if ( target_bitmap[byte] & (1 << (bit & 07))) {
						kr = KERN_SUCCESS;
					} else {
						kr = KERN_FAILURE;
					}
				}
			}
		}
	}
	return kr;
}
void
ubc_cs_validation_bitmap_deallocate(
	vnode_t		vp)
{
	struct ubc_info	*uip;
	void		*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
			bitmap_size = uip->cs_valid_bitmap_size;
			kfree( target_bitmap, (vm_size_t) bitmap_size );
			uip->cs_valid_bitmap = NULL;
		}
	}
}
#else /* CHECK_CS_VALIDATION_BITMAP */

kern_return_t	ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
	return KERN_INVALID_ARGUMENT;
}

kern_return_t ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype){

	return KERN_INVALID_ARGUMENT;
}

void	ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
	return;
}
#endif /* CHECK_CS_VALIDATION_BITMAP */