/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Author: Umesh Vaishampayan [umeshv@apple.com]
 *	05-Aug-1999	umeshv	Created.
 *
 * Functions related to Unified Buffer cache.
 *
 * Caller of UBC functions MUST have a valid reference on the vnode.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>
#include <sys/fcntl.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern kern_return_t memory_object_pages_resident(memory_object_control_t,
						  boolean_t *);
extern kern_return_t	memory_object_signed(memory_object_control_t control,
					     boolean_t is_signed);
extern boolean_t	memory_object_is_slid(memory_object_control_t control);
extern boolean_t	memory_object_is_signed(memory_object_control_t);

extern void Debugger(const char *message);
/* XXX no one uses this interface! */
kern_return_t ubc_page_op_with_control(
	memory_object_control_t	 control,
	off_t		         f_offset,
	int		         ops,
	ppnum_t	                 *phys_entryp,
	int		         *flagsp);
#if DIAGNOSTIC
#if defined(assert)
#undef assert
#endif
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

struct zone	*ubc_info_zone;
static uint32_t	cs_blob_generation_count = 1;
/*
 * Routines to navigate code signing data structures in the kernel...
 */

#define	PAGE_SHIFT_4K		(12)
#define	PAGE_SIZE_4K		((1<<PAGE_SHIFT_4K))
#define	PAGE_MASK_4K		((PAGE_SIZE_4K-1))
#define round_page_4K(x)	(((vm_offset_t)(x) + PAGE_MASK_4K) & ~((vm_offset_t)PAGE_MASK_4K))
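/*
 * Worked example: with PAGE_SIZE_4K == 0x1000 and PAGE_MASK_4K == 0xfff,
 * round_page_4K() rounds an arbitrary offset up to the next 4K boundary,
 * independent of the machine page size:
 *
 *	round_page_4K(0x0000) == 0x0000
 *	round_page_4K(0x0001) == 0x1000
 *	round_page_4K(0x1fff) == 0x2000
 *	round_page_4K(0x2000) == 0x2000
 */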
static boolean_t
cs_valid_range(
	const void *start,
	const void *end,
	const void *lower_bound,
	const void *upper_bound)
{
	if (upper_bound < lower_bound ||
	    end < start) {
		return FALSE;
	}

	if (start < lower_bound ||
	    end > upper_bound) {
		return FALSE;
	}

	return TRUE;
}
/*
 * Locate the CodeDirectory from an embedded signature blob
 */
static const CS_CodeDirectory *
findCodeDirectory(
	const CS_SuperBlob *embedded,
	const char *lower_bound,
	const char *upper_bound)
{
	const CS_CodeDirectory *cd = NULL;

	if (embedded &&
	    cs_valid_range(embedded, embedded + 1, lower_bound, upper_bound) &&
	    ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_BlobIndex *limit;
		const CS_BlobIndex *p;

		limit = &embedded->index[ntohl(embedded->count)];
		if (!cs_valid_range(&embedded->index[0], limit,
				    lower_bound, upper_bound)) {
			return NULL;
		}
		for (p = embedded->index; p < limit; ++p) {
			if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) {
				const unsigned char *base;

				base = (const unsigned char *)embedded;
				cd = (const CS_CodeDirectory *)(base + ntohl(p->offset));
				break;
			}
		}
	} else {
		/*
		 * Detached signatures come as a bare CS_CodeDirectory,
		 * without a blob.
		 */
		cd = (const CS_CodeDirectory *) embedded;
	}

	if (cd &&
	    cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
			   lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd + ntohl(cd->hashOffset),
			   lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd +
			   ntohl(cd->hashOffset) +
			   (ntohl(cd->nCodeSlots) * SHA1_RESULTLEN),
			   lower_bound, upper_bound) &&
	    ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
		return cd;
	}

	// not found or not a valid code directory
	return NULL;
}
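/*
 * Usage sketch (illustrative only): a caller validating an embedded
 * signature occupying `blob_size` bytes at `blob_addr` would bound the
 * walk with the blob itself:
 *
 *	const char *lower = (const char *)blob_addr;
 *	const char *upper = lower + blob_size;
 *	const CS_CodeDirectory *cd =
 *		findCodeDirectory((const CS_SuperBlob *)blob_addr, lower, upper);
 *	if (cd == NULL)
 *		return EBADEXEC;	// no valid code directory in the blob
 *
 * blob_addr/blob_size are placeholders for whatever buffer the caller
 * mapped the signature into.
 */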
/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	unsigned page,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex=0, scount, sbase=0;
		/* iterate all scatter structs */
		do {
			if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if(cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				return NULL;
			}

			if((hashindex > 0) && (new_base <= sbase)) {
				if(cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					       sbase, new_base);
				}
				return NULL;	/* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase+scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
					hashindex * SHA1_RESULTLEN;
				/* top = address of first hash after this scatter */
				top = base + scount * SHA1_RESULTLEN;
				if (!cs_valid_range(base, top, lower_bound,
						    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while(1);

		hash = base + (page - sbase) * SHA1_RESULTLEN;
	} else {
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * SHA1_RESULTLEN;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * SHA1_RESULTLEN;
	}

	if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
			    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}
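/*
 * Illustrative sketch: the page index handed to hashes() is derived from a
 * file offset using the fixed 4K code-signing page size, regardless of the
 * machine page size (f_offset, lower, and upper are the caller's):
 *
 *	unsigned page = (unsigned)(f_offset >> PAGE_SHIFT_4K);
 *	const unsigned char *h = hashes(cd, page, lower, upper);
 *	if (h == NULL)
 *		return EBADEXEC;	// no hash for this page within bounds
 */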
/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{

	if (length < sizeof(*cd))
		return EBADEXEC;
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
		return EBADEXEC;
	if (cd->hashSize != SHA1_RESULTLEN)
		return EBADEXEC;
	if (cd->pageSize != PAGE_SHIFT_4K)
		return EBADEXEC;
	if (cd->hashType != CS_HASHTYPE_SHA1)
		return EBADEXEC;

	if (length < ntohl(cd->hashOffset))
		return EBADEXEC;

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / SHA1_RESULTLEN < ntohl(cd->nSpecialSlots))
		return EBADEXEC;

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / SHA1_RESULTLEN < ntohl(cd->nCodeSlots))
		return EBADEXEC;

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {

		if (length < ntohl(cd->scatterOffset))
			return EBADEXEC;

		SC_Scatter *scatter = (SC_Scatter *)
			(((uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer; since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while(1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
				return EBADEXEC;
			uint32_t scount = ntohl(scatter->count);
			if (scount == 0)
				break;
			if (nPages + scount < nPages)
				return EBADEXEC;
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots))
			return EBADEXEC;
#endif
	}

	if (length < ntohl(cd->identOffset))
		return EBADEXEC;

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		uint8_t *ptr = (uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
			return EBADEXEC;
	}

	/* team identifier is NUL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset))
			return EBADEXEC;

		uint8_t *ptr = (uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
			return EBADEXEC;
	}

	return 0;
}
static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
		return EBADEXEC;
	return 0;
}
/*
 * cs_validate_csblob
 *
 * Validate a superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		rcd			returns pointer to code directory
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */
static int
cs_validate_csblob(const uint8_t *addr, size_t length,
		   const CS_CodeDirectory **rcd)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr;
	int error;

	*rcd = NULL;

	error = cs_validate_blob(blob, length);
	if (error)
		return error;

	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		uint32_t n, count = ntohl(sb->count);

		if (length < sizeof(CS_SuperBlob))
			return EBADEXEC;

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
			return EBADEXEC;

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			if (length < ntohl(blobIndex->offset))
				return EBADEXEC;

			const CS_GenericBlob *subBlob =
				(const CS_GenericBlob *)(void *)(addr + ntohl(blobIndex->offset));

			size_t subLength = length - ntohl(blobIndex->offset);

			if ((error = cs_validate_blob(subBlob, subLength)) != 0)
				return error;
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (ntohl(blobIndex->type) == CSSLOT_CODEDIRECTORY) {
				const CS_CodeDirectory *cd = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(cd, subLength)) != 0)
					return error;
				*rcd = cd;
			}
		}

	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {

		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(void *)addr, length)) != 0)
			return error;
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	if (*rcd == NULL)
		return EBADEXEC;

	return 0;
}
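/*
 * Usage sketch (illustrative): validating an in-memory signature and then
 * using the returned code directory. addr/length are placeholders for the
 * caller's copy of the signature:
 *
 *	const CS_CodeDirectory *rcd = NULL;
 *	int error = cs_validate_csblob(addr, length, &rcd);
 *	if (error)
 *		return error;		// EBADEXEC on malformed data
 *	// rcd now points at a fully bounds-checked code directory
 */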
/*
 * cs_find_blob_bytes
 *
 * Find a blob from the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use cs_find_blob() instead.
 *
 * Will also find a "raw" code directory if it is stored as well as
 * searching the superblob.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		type			type of blob to find
 *		magic			the magic number for that blob
 *
 * Returns:	pointer			Success
 *		NULL			Buffer not found
 */
static const CS_GenericBlob *
cs_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr;

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type)
				continue;
			uint32_t offset = ntohl(sb->index[n].offset);
			if (length - sizeof(const CS_GenericBlob) < offset)
				return NULL;
			blob = (const CS_GenericBlob *)(void *)(addr + offset);
			if (ntohl(blob->magic) != magic)
				continue;
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY
		   && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
		   && magic == CSMAGIC_CODEDIRECTORY)
		return blob;
	return NULL;
}


const CS_GenericBlob *
cs_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
	if ((csblob->csb_flags & CS_VALID) == 0)
		return NULL;
	return cs_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}
static const uint8_t *
cs_find_special_slot(const CS_CodeDirectory *cd, uint32_t slot)
{
	/* there is no zero special slot since that is the first code slot */
	if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
		return NULL;

	return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (SHA1_RESULTLEN * slot));
}
/*
 * End of routines to navigate code signing data structures in the kernel.
 */

/*
 * Routines to navigate entitlements in the kernel.
 */
/* Retrieve the entitlements blob for a process.
 * Returns:
 *   EINVAL	no text vnode associated with the process
 *   EBADEXEC	invalid code signing data
 *   0		no error occurred
 *
 * On success, out_start and out_length will point to the
 * entitlements blob if found; or will be set to NULL/zero
 * if there were no entitlements.
 */

static uint8_t sha1_zero[SHA1_RESULTLEN] = { 0 };

int
cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[SHA1_RESULTLEN];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	struct cs_blob *csblob;
	const uint8_t *embedded_hash;
	SHA1_CTX context;

	*out_start = NULL;
	*out_length = 0;

	if (NULL == p->p_textvp)
		return EINVAL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return 0;

	if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL)
		return 0;

	entitlements = cs_find_blob(csblob, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
	embedded_hash = cs_find_special_slot(code_dir, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		if (entitlements)
			return EBADEXEC;
		return 0;
	} else if (entitlements == NULL) {
		if (memcmp(embedded_hash, sha1_zero, SHA1_RESULTLEN) != 0)
			return EBADEXEC;
		return 0;
	}

	SHA1Init(&context);
	SHA1Update(&context, entitlements, ntohl(entitlements->length));
	SHA1Final(computed_hash, &context);
	if (memcmp(computed_hash, embedded_hash, SHA1_RESULTLEN) != 0)
		return EBADEXEC;

	*out_start = (void *)entitlements;
	*out_length = ntohl(entitlements->length);

	return 0;
}
/* Retrieve the codesign identity for a process.
 * Returns:
 *   NULL	an error occurred
 *   string	the cs_identity
 */

const char *
cs_identity_get(proc_t p)
{
	const CS_CodeDirectory *code_dir;
	struct cs_blob *csblob;

	if (NULL == p->p_textvp)
		return NULL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return NULL;

	if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL)
		return NULL;

	if (code_dir->identOffset == 0)
		return NULL;

	return ((const char *)code_dir) + ntohl(code_dir->identOffset);
}
/* Retrieve the codesign blob for a process.
 * Returns:
 *   EINVAL	no text vnode associated with the process
 *   0		no error occurred
 *
 * On success, out_start and out_length will point to the
 * cms blob if found; or will be set to NULL/zero
 * if there was no blob.
 */

int
cs_blob_get(proc_t p, void **out_start, size_t *out_length)
{
	struct cs_blob *csblob;

	*out_start = NULL;
	*out_length = 0;

	if (NULL == p->p_textvp)
		return EINVAL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return 0;

	*out_start = (void *)csblob->csb_mem_kaddr;
	*out_length = csblob->csb_mem_size;

	return 0;
}
unsigned char *
cs_get_cdhash(struct proc *p)
{
	struct cs_blob *csblob;

	if (NULL == p->p_textvp)
		return NULL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return NULL;

	return csblob->csb_sha1;
}
/*
 * End of routines to navigate entitlements in the kernel.
 */
/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Parameters:	(void)
 *
 * Returns:	(void)
 *
 * Implicit returns:
 *		ubc_info_zone(global)	initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
	vm_size_t	i;

	i = (vm_size_t) sizeof (struct ubc_info);

	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");

	zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}
/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		filesize		The size of the file
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}
/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		withfsize{0,1}		Zero if the size should be obtained
 *					from the vnode; otherwise, use filesize
 *		filesize		The size of the file, if withfsize == 1
 *
 * Returns:	0			Success
 *		vnode_size:ENOMEM	Not enough space
 *		vnode_size:???		Other error from vnode_getattr
 *
 * Notes:	We call a blocking zalloc(), and the zone was created as an
 *		expandable and collectable zone, so if no memory is available,
 *		it is possible for zalloc() to block indefinitely.  zalloc()
 *		may also panic if the zone of zones is exhausted, since it's
 *		NOT expandable.
 *
 *		We unconditionally call vnode_pager_setup(), even if this is
 *		a reuse of a ubc_info; in that case, we should probably assert
 *		that it does not already have a pager association, but do not.
 *
 *		Since memory_object_create_named() can only fail from receiving
 *		an invalid pager argument, the explicit check and panic is
 *		merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 * So use a bogus size.
	 */

	/*
	 * create a vnode - vm_object association.
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;	/* vnode has a named ubc reference */

	return (error);
}
/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:	uip			A pointer to the ubc_info to free
 *
 * Returns:	(void)
 *
 * Notes:	If there is a credential that has subsequently been associated
 *		with the ubc_info via a call to ubc_setcred(), the reference
 *		to the credential is dropped.
 *
 *		It's actually impossible for a ubc_info.ui_control to take the
 *		value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);
	ubc_cs_free(uip);

	zfree(ubc_info_zone, uip);
	return;
}
void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
/*
 * This should be public but currently it is only used below so we
 * defer making that change.
 */
static errno_t mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_PROTECTION_FAILURE:
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_FAILURE:
		return EIO;

	case KERN_MEMORY_FAILURE:
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
		return EPERM;

	case KERN_MEMORY_ERROR:
		return EBUSY;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO;
	}
}
/*
 * ubc_setsize_ex
 *
 * Tell the VM that the size of the file represented by the vnode has
 * changed
 *
 * Parameters:	vp	The vp whose backing file size is
 *			being changed
 *		nsize	The new size of the backing file
 *		opts	Options
 *
 * Returns:	EINVAL for new size < 0
 *		ENOENT if no UBC info exists
 *		EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
 *		Other errors (mapped to errno_t) returned by VM functions
 *
 * Notes:	This function will indicate success if the new size is the
 *		same or larger than the old size (in this case, the
 *		remainder of the file will require modification or use of
 *		an existing upl to access successfully).
 *
 *		This function will fail if the new file size is smaller,
 *		and the memory region being invalidated was unable to
 *		actually be invalidated and/or the last page could not be
 *		flushed, if the new size is not aligned to a page
 *		boundary.  This is usually indicative of an I/O error.
 */
errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0)
		return EINVAL;

	if (!UBCINFOEXISTS(vp))
		return ENOENT;

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
		return EAGAIN;

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {	/* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t		upl;
		upl_page_info_t	*pl;

		/*
		 * new EOF ends up in the middle of a page;
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE);

		if (kret != KERN_SUCCESS)
			panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);

		if (upl_valid_page(pl, 0))
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int	flags;

		if (lastpg == 0)
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		else
			flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * invalidate the pages beyond the new EOF page
		 */
		kret = memory_object_lock_request(control,
						  (memory_object_offset_t)lastpg,
						  (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
						  MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	}
	return mach_to_bsd_errno(kret);
}
// Returns true for success
int ubc_setsize(vnode_t vp, off_t nsize)
{
	return ubc_setsize_ex(vp, nsize, 0) == 0;
}
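/*
 * Usage sketch: a filesystem truncate path would typically update the UBC
 * size after shrinking its on-disk metadata, checking the boolean result
 * (vp and new_size are the caller's):
 *
 *	if (!ubc_setsize(vp, new_size))
 *		error = EIO;	// invalidation/flush of the old tail failed
 */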
/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:	vp			The vnode whose size is of interest
 *
 * Returns:	0			There is no ubc_info associated with
 *					this vnode, or the size is zero
 *		!0			The size of the file
 *
 * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a zero
 *		length file, and a vnode with no associated ubc_info.  The
 *		caller therefore needs to not care, or needs to ensure that
 *		they have previously successfully called ubc_info_init() or
 *		ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directory
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}
/*
 * ubc_umount
 *
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:	mp			The mount point
 *
 * Returns:	0			Success
 *
 * Notes:	There is no failure indication for this function.
 *
 *		This function is used in the unmount path; since it may block
 *		I/O indefinitely, it should not be used in the forced unmount
 *		path, since a device unavailability could also block that
 *		I/O indefinitely.
 *
 *		Because there is no device ejection interlock on USB, FireWire,
 *		or similar devices, it's possible that an ejection that begins
 *		subsequent to the vnode_iterate() completing, either on one of
 *		those devices, or a network mount for which the server quits
 *		responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}
/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}
/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:	vp			The vnode whose ubc_info credentials
 *					are to be retrieved
 *
 * Returns:	!NOCRED			The credentials
 *		NOCRED			If there is no ubc_info for the vnode,
 *					or if there is one, but it has not had
 *					any credentials associated with it via
 *					a call to ubc_setcred()
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}
/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used, if not running on an assumed
 *					credential
 *		thread			The thread whose credentials are to
 *					be used
 *
 * Returns:	1			This vnode has no associated ubc_info
 *		0			Success
 *
 * Notes:	This function takes a proc parameter to account for bootstrap
 *		issues where a task or thread may call this routine, either
 *		before credentials have been initialized by bsd_init(), or if
 *		there is no BSD info associated with a mach thread yet.  This
 *		is known to happen in both the initial swap and memory mapping
 *		paths.
 *
 *		This function is generally used only in the following cases:
 *
 *		o	a memory mapped file via the mmap() system call
 *		o	a swap store backing file
 *		o	subsequent to a successful write via vn_write()
 *
 *		The information is then used by the NFS client in order to
 *		cons up a wire message in either the page-in or page-out path.
 *
 *		There are two potential problems with the use of this API:
 *
 *		o	Because the write path only sets it on a successful
 *			write, there is a race window between setting the
 *			credential and its use to evict the pages to the
 *			remote file server
 *
 *		o	Because a page-in may occur prior to a write, the
 *			credential may not be set at this time, if the page-in
 *			is not the result of a mapping established via mmap().
 *
 *		In both these cases, this will be triggered from the paging
 *		path, which will instead use the credential of the current
 *		process, which in this case is either the dynamic_pager or
 *		the kernel task, both of which utilize "root" credentials.
 *
 *		This may potentially permit operations to occur which should
 *		be denied, or it may cause to be denied operations which
 *		should be permitted, depending on the configuration of the NFS
 *		server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread  *uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}
/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the process; otherwise leave them
 * alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used
 *
 * Returns:	0			This vnode has no associated ubc_info
 *		1			Success
 *
 * Notes:	The return values for this function are inverted from nearly
 *		all other uses in the kernel.
 *
 *		See also ubc_setthreadcred(), above.
 *
 *		This function is considered deprecated, and generally should
 *		not be used, as it is incompatible with per-thread credentials;
 *		it exists for legacy KPI reasons.
 *
 * DEPRECATION:	ubc_setcred() is being deprecated. Please use
 *		ubc_setthreadcred() instead.
 */
int
ubc_setcred(struct vnode *vp, proc_t p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	/* If there is no ubc_info, deny the operation */
	if ( !UBCINFOEXISTS(vp))
		return (0);

	/*
	 * Check to see if there is already a credential reference in the
	 * ubc_info; if there is not, take one on the supplied credential.
	 */
	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;
	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}
/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:	vp			The vnode to obtain the pager from
 *
 * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
 *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
 *
 * Notes:	For each vnode that has a ubc_info associated with it, that
 *		ubc_info SHALL have a pager associated with it, so in the
 *		normal case, it's impossible to return VNODE_PAGER_NULL for
 *		a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}
/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:	vp			The vnode to obtain the memory object
 *					from
 *
 * Returns:	!MEMORY_OBJECT_CONTROL_NULL
 *		MEMORY_OBJECT_CONTROL_NULL
 *
 * Notes:	Historically, if the flags were not "do not reactivate", this
 *		function would look up the memory object using the pager if
 *		it did not exist (this could be the case if the vnode had
 *		been previously reactivated).  The flags would also permit a
 *		hold to be requested, which would have created an object
 *		reference, if one had not already existed.  This usage is
 *		deprecated, as it would permit a race between finding and
 *		taking the reference vs. a single reference being dropped in
 *		another thread.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (MEMORY_OBJECT_CONTROL_NULL);
}
boolean_t
ubc_strict_uncached_IO(struct vnode *vp)
{
	boolean_t result = FALSE;

	if (UBCINFOEXISTS(vp)) {
		result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
	}
	return result;
}
/*
 * ubc_blktooff
 *
 * Convert a given block number to a memory backing object (file) offset for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the block is located
 *		blkno			The block number to convert
 *
 * Returns:	!-1			The offset into the backing object
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_BLKTOOFF().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid offset, since an off_t of -1 is normally
 *		used to represent EOF.  If we had a more reliable constant in
 *		our header files for it (i.e. explicitly cast to an off_t), we
 *		would use it here instead.
 */
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset = -1;
	int error;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
		if (error)
			file_offset = -1;
	}
	return (file_offset);
}
/*
 * ubc_offtoblk
 *
 * Convert a given offset in a memory backing object into a block number for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the offset is
 *					located
 *		offset			The offset into the backing object
 *
 * Returns:	!-1			The returned block number
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_OFFTOBLK().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid block number, since block numbers exceed
 *		the valid range for offsets, due to their relative sizes.  If
 *		we had a more reliable constant than -1 in our header files
 *		for it (i.e. explicitly cast to a daddr64_t), we would use it
 *		here instead.
 */
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno = -1;
	int error = 0;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_OFFTOBLK(vp, offset, &blkno);
		if (error)
			blkno = -1;
	}
	return (blkno);
}
/*
 * ubc_pages_resident
 *
 * Determine whether or not a given vnode has pages resident via the memory
 * object control associated with the ubc_info associated with the vnode
 *
 * Parameters:	vp			The vnode we want to know about
 *
 * Returns:	1			Yes
 *		0			No
 */
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t	kret;
	boolean_t	has_pages_resident;

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * The following call may fail if an invalid ui_control is specified,
	 * or if there is no VM object associated with the control object.  In
	 * either case, reacting to it as if there were no pages resident will
	 * result in correct behavior.
	 */
	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}
/*
 * ubc_msync
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			See ubc_msync_internal()
 *
 * Returns:	0			Success
 *		!0			Failure; an errno is returned
 *
 * Implicit Returns:
 *		*resid_off, modified	If non-NULL, the contents are ALWAYS
 *					modified; they are initialized to the
 *					beg_off, and in case of an I/O error,
 *					the difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.  If no error is returned, the
 *					value of the resid_off is undefined; do
 *					NOT use it in place of end_off if you
 *					intend to increment from the end of the
 *					last call and call iteratively.
 *
 * Notes:	see ubc_msync_internal() for more detailed information.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
/*
 * ubc_msync_internal
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			MUST contain at least one of the flags
 *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
 *					UBC_SYNC may also be specified to cause
 *					this function to block until the
 *					operation is complete.  The behavior
 *					of UBC_SYNC is otherwise undefined.
 *		io_errno		The address of an int to contain the
 *					errno from a failed I/O operation, if
 *					one occurs; may be set to NULL to
 *					ignore
 *
 * Returns:	1			Success
 *		0			Failure
 *
 * Implicit Returns:
 *		*resid_off, modified	The contents of this offset MAY be
 *					modified; in case of an I/O error, the
 *					difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.
 *		*io_errno, modified	The contents of this offset are set to
 *					an errno, if an error occurs; if the
 *					caller supplies an io_errno parameter,
 *					they should be careful to initialize it
 *					to 0 before calling this function to
 *					enable them to distinguish an error
 *					with a valid *resid_off from an invalid
 *					one, and to avoid potentially falsely
 *					reporting an error, depending on use.
 *
 * Notes:	If there is no ubc_info associated with the vnode supplied,
 *		this function immediately returns success.
 *
 *		If the value of end_off is less than or equal to beg_off, this
 *		function immediately returns success; that is, end_off is NOT
 *		inclusive.
 *
 *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
 *		attempt to block on in-progress I/O by calling this function
 *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
 *		in order to block pending on the I/O already in progress.
 *
 *		The start offset is truncated to the page boundary and the
 *		size is adjusted to include the last page in the range; that
 *		is, end_off on exactly a page boundary will not change if it
 *		is rounded, and the range of bytes written will be from the
 *		truncated beg_off to the rounded (end_off - 1).
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);
	if (end_off <= beg_off)
		return (1);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize,
					  (memory_object_offset_t *)resid_off,
					  io_errno, flush_flags, request_flags,
					  VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * ubc_map
 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
 * to it for the ubc system, if there isn't one already, so it will not be
 * recycled while it's in use, and set flags on the ubc_info to indicate that
 * we have done this
 *
 * Parameters:	vp			The vnode to map
 *		flags			The mapping flags for the vnode; this
 *					will be a combination of one or more of
 *					PROT_READ, PROT_WRITE, and PROT_EXEC
 *
 * Returns:	0			Success
 *		EPERM			Permission was denied
 *
 * Notes:	An I/O reference on the vnode must already be held on entry
 *
 *		If there is no ubc_info associated with the vnode, this function
 *		will return success.
 *
 *		If a permission error occurs, this function will return
 *		failure; all other failures will cause this function to return
 *		success.
 *
 *		IMPORTANT: This is an internal use function, and its symbols
 *		are not exported, hence its error checking is not very robust.
 *		It is primarily used by:
 *
 *		o	mmap(), when mapping a file
 *		o	When mapping a shared file (a shared library in the
 *			shared segment region)
 *		o	When loading a program image during the exec process
 *
 *		...all of these uses ignore the return code, and any fault that
 *		results later because of a failure is handled in the fix-up path
 *		of the fault handler.  The interface exists primarily as a
 *		performance hint.
 *
 *		Given that third party implementation of the type of interfaces
 *		that would use this function, such as alternative executable
 *		formats, etc., are unsupported, this function is not exported
 *		for general use.
 *
 *		The extra reference is held until the VM system unmaps the
 *		vnode from its own context to maintain a vnode reference in
 *		cases like open()/mmap()/close(), which leave the backing
 *		object referenced by a mapped memory region in a process
 *		address space.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
	struct ubc_info *uip;
	int error = 0;
	int need_ref = 0;
	int need_wakeup = 0;

	if (UBCINFOEXISTS(vp)) {

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_map", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);
		vnode_unlock(vp);

		error = VNOP_MMAP(vp, flags, vfs_context_current());

		if (error != EPERM)
			error = 0;

		vnode_lock_spin(vp);

		if (error == 0) {
			if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
				need_ref = 1;
			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
			if (flags & PROT_WRITE) {
				SET(uip->ui_flags, UI_MAPPEDWRITE);
			}
		}
		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

		if (need_ref)
			vnode_ref(vp);
	}
	return (error);
}
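/*
 * Pairing note (illustrative sketch, not a caller in this file): ubc_map()
 * and ubc_unmap() bracket the lifetime of a VM mapping of the vnode:
 *
 *	error = ubc_map(vp, PROT_READ | PROT_WRITE);  // may take the extra ref
 *	...			// region lives in the process address space
 *	ubc_unmap(vp);		// called later by the VM; drops the ref
 *
 * The real callers are in the VM and exec paths; this only shows the
 * intended ordering.
 */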
/*
 * ubc_destroy_named
 *
 * Destroy the named memory object associated with the ubc_info control object
 * associated with the designated vnode, if there is a ubc_info associated
 * with the vnode, and a control object is associated with it
 *
 * Parameters:	vp			The designated vnode
 *
 * Returns:	(void)
 *
 * Notes:	This function is called on vnode termination for all vnodes,
 *		and must therefore not assume that there is a ubc_info that is
 *		associated with the vnode, nor that there is a control object
 *		associated with the ubc_info.
 *
 *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
 *		calling ubc_unmap() to release any vnode references that were
 *		established via ubc_map().
 *
 *		IMPORTANT: This is an internal use function that is used
 *		exclusively by the internal use function vclean().
 */
__private_extern__ void
ubc_destroy_named(vnode_t vp)
{
	memory_object_control_t control;
	struct ubc_info *uip;
	kern_return_t kret;

	if (UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		/* Terminate the memory object */
		control = ubc_getobject(vp, UBC_HOLDOBJECT);
		if (control != MEMORY_OBJECT_CONTROL_NULL) {
			kret = memory_object_destroy(control, 0);
			if (kret != KERN_SUCCESS)
				panic("ubc_destroy_named: memory_object_destroy failed");
		}
	}
}
/*
 * ubc_isinuse
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	Because the vnode is only held locked while actually asking
 *		the use count, this function only represents a snapshot of the
 *		current state of the vnode.  If more accurate information is
 *		required, an additional busycount should be held by the caller
 *		and a non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
	if ( !UBCINFOEXISTS(vp))
		return (0);
	return(ubc_isinuse_locked(vp, busycount, 0));
}
/*
 * ubc_isinuse_locked
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *		locked			True if the vnode is already locked by
 *					the caller
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	If the vnode is not locked on entry, it is locked while
 *		actually asking the use count.  If this is the case, this
 *		function only represents a snapshot of the current state of
 *		the vnode.  If more accurate information is required, the
 *		vnode lock should be held by the caller, otherwise an
 *		additional busycount should be held by the caller and a
 *		non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
	int retval = 0;

	if (!locked)
		vnode_lock_spin(vp);

	if ((vp->v_usecount - vp->v_kusecount) > busycount)
		retval = 1;

	if (!locked)
		vnode_unlock(vp);
	return (retval);
}
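/*
 * Usage sketch: a caller that already holds one iocount on the vnode biases
 * the check by that reference so it does not count itself:
 *
 *	if (ubc_isinuse(vp, 1))
 *		return EBUSY;	// someone beyond ourselves is using the file
 */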
/*
 * ubc_unmap
 *
 * Reverse the effects of a ubc_map() call for a given vnode
 *
 * Parameters:	vp			vnode to unmap from ubc
 *
 * Returns:	(void)
 *
 * Notes:	This is an internal use function used by vnode_pager_unmap().
 *		It will attempt to obtain a reference on the supplied vnode,
 *		and if it can do so, and there is an associated ubc_info, and
 *		the flags indicate that it was mapped via ubc_map(), then the
 *		flag is cleared, the mapping removed, and the reference taken
 *		by ubc_map() is released.
 *
 *		IMPORTANT: This MUST only be called by the VM
 *		to prevent race conditions.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
	struct ubc_info *uip;
	int	need_rele = 0;
	int	need_wakeup = 0;

	if (vnode_getwithref(vp))
		return;

	if (UBCINFOEXISTS(vp)) {
		bool want_fsevent = false;

		vnode_lock(vp);
		uip = vp->v_ubcinfo;

		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
			SET(uip->ui_flags, UI_MAPWAITING);
			(void) msleep(&uip->ui_flags, &vp->v_lock,
				      PRIBIO, "ubc_unmap", NULL);
		}
		SET(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
				want_fsevent = true;

			need_rele = 1;

			/*
			 * We want to clear the mapped flags after we've called
			 * VNOP_MNOMAP to avoid certain races and allow
			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
			 */
		}
		vnode_unlock(vp);

		if (need_rele) {
			vfs_context_t ctx = vfs_context_current();

			(void)VNOP_MNOMAP(vp, ctx);

			/*
			 * Why do we want an fsevent here?  Normally the
			 * content modified fsevent is posted when a file is
			 * closed and only if it's written to via conventional
			 * means.  It's perfectly legal to close a file and
			 * keep your mappings and we don't currently track
			 * whether it was written to via a mapping.
			 * Therefore, we need to post an fsevent here if the
			 * file was mapped writable.  This may result in false
			 * events, i.e. we post a notification when nothing
			 * has really changed.
			 */
			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
				add_fsevent(FSE_CONTENT_MODIFIED, ctx,
					    FSE_ARG_VNODE, vp,
					    FSE_ARG_DONE);
			}

			vnode_rele(vp);
		}

		vnode_lock_spin(vp);

		if (need_rele)
			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);

		CLR(uip->ui_flags, UI_MAPBUSY);

		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
			CLR(uip->ui_flags, UI_MAPWAITING);
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup)
			wakeup(&uip->ui_flags);

	}
	/*
	 * the drop of the vnode ref will cleanup
	 */
	vnode_put(vp);
}
/*
 * ubc_page_op
 *
 * Manipulate individual page state for a vnode with an associated ubc_info
 * with an associated memory object control.
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset		A file offset interior to the page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		phys_entryp		The address of a ppnum_t; may be NULL
 *					to ignore
 *		flagsp			A pointer to an int to contain flags;
 *					may be NULL to ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
 *					not physically contiguous
 *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
 *					physically contiguous
 *		KERN_FAILURE		If the page cannot be looked up
 *
 * Implicit Returns:
 *		*phys_entryp (modified)	If phys_entryp is non-NULL and
 *					UPL_POP_PHYSICAL
 *		*flagsp (modified)	If flagsp is non-NULL and there was
 *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
 *
 * Notes:	For object boundaries, it is considerably more efficient to
 *		ensure that f_offset is in fact on a page boundary, as this
 *		will avoid internal use of the hash table to identify the
 *		page, and would therefore skip a number of early optimizations.
 *		Since this is a page operation anyway, the caller should try
 *		to pass only a page aligned offset because of this.
 *
 *		*flagsp may be modified even if this function fails.  If it is
 *		modified, it will contain the condition of the page before the
 *		requested operation was attempted; these will only include the
 *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
 *		UPL_POP_SET, or UPL_POP_CLR bits.
 *
 *		The flags field may contain a specific operation, such as
 *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
 *
 *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
 *						*phys_entryp and successful, set
 *						*phys_entryp
 *		o	UPL_POP_DUMP		Dump the specified page
 *
 *		Otherwise, it is treated as a bitmap of one or more page
 *		operations to perform on the final memory object; allowable
 *		bit values are:
 *
 *		o	UPL_POP_DIRTY		The page is dirty
 *		o	UPL_POP_PAGEOUT		The page is paged out
 *		o	UPL_POP_PRECIOUS	The page is precious
 *		o	UPL_POP_ABSENT		The page is absent
 *		o	UPL_POP_BUSY		The page is busy
 *
 *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
 *		modified, exactly ONE of the following bits should be set:
 *
 *		o	UPL_POP_SET		Set the current bitmap bits
 *		o	UPL_POP_CLR		Clear the current bitmap bits
 *
 *		Thus to effect a combination of setting and clearing, it may be
 *		necessary to call this function twice.  If this is done, the
 *		set should be used before the clear, since clearing may trigger
 *		a wakeup on the destination page, and if the page is backed by
 *		an encrypted swap file, setting will trigger the decryption
 *		needed before the wakeup occurs.
 */
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_page_op(control,
				      (memory_object_offset_t)f_offset,
				      ops,
				      phys_entryp,
				      flagsp));
}
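/*
 * Usage sketch (an assumption about the query form, per the notes above:
 * no UPL_POP_SET/UPL_POP_CLR bits means the page state is only queried):
 *
 *	int pg_flags = 0;
 *	if (ubc_page_op(vp, f_offset, 0, NULL, &pg_flags) == KERN_SUCCESS &&
 *	    (pg_flags & UPL_POP_DIRTY))
 *		;	// page containing f_offset is resident and dirty
 */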
/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset_beg		A file offset interior to the start page
 *		f_offset_end		A file offset interior to the end page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		range			The address of an int; may be NULL to
 *					ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o	UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *
 *		o	UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *		o	UPL_ROP_DUMP	Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
	memory_object_control_t	control;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	return (memory_object_range_op(control,
				       (memory_object_offset_t)f_offset_beg,
				       (memory_object_offset_t)f_offset_end,
				       ops,
				       range));
}
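/*
 * Usage sketch: finding how much of the leading part of a range is absent
 * (not resident), e.g. before deciding to issue a read (start/end are the
 * caller's page-aligned offsets):
 *
 *	int bytes = 0;
 *	if (ubc_range_op(vp, start, end, UPL_ROP_ABSENT, &bytes) == KERN_SUCCESS)
 *		;	// the first `bytes` bytes from `start` are absent
 */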
/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
 *
 * Parameters:	vp			The vnode from which to create the upl
 *		f_offset		The start offset into the backing store
 *					represented by the vnode
 *		bufsize			The size of the upl to create
 *		uplp			Pointer to the upl_t to receive the
 *					created upl; MUST NOT be NULL
 *		plp			Pointer to receive the internal page
 *					list for the created upl; MAY be NULL
 *					to ignore
 *
 * Returns:	KERN_SUCCESS		The requested upl has been created
 *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
 *					multiple of the page size
 *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
 *					the vnode, or there is no memory object
 *					control associated with the ubc_info
 *	memory_object_upl_request:KERN_INVALID_VALUE
 *					The supplied upl_flags argument is
 *					invalid
 *
 * Implicit Returns:
 *		*uplp (modified)
 *		*plp (modified)		If non-NULL, the value of *plp will be
 *					modified to point to the internal page
 *					list; this modification may occur even
 *					if this function is unsuccessful, in
 *					which case the contents may be invalid
 *
 * Note:	If successful, the returned *uplp MUST subsequently be freed
 *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
 *		ubc_upl_abort(), or ubc_upl_abort_range().
 */
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
	memory_object_control_t	control;
	kern_return_t		kr;

	if (plp != NULL)
		*plp = NULL;
	*uplp = NULL;

	if (bufsize & 0xfff)
		return KERN_INVALID_ARGUMENT;

	if (bufsize > MAX_UPL_SIZE_BYTES)
		return KERN_INVALID_ARGUMENT;

	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {

		if (uplflags & UPL_UBC_MSYNC) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

		} else if (uplflags & UPL_UBC_PAGEOUT) {
			uplflags &= UPL_RET_ONLY_DIRTY;

			if (uplflags & UPL_RET_ONLY_DIRTY)
				uplflags |= UPL_NOBLOCK;

			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
				    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
		} else {
			uplflags |= UPL_RET_ONLY_ABSENT |
				    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_SET_LITE;

			/*
			 * if the requested size == PAGE_SIZE, we don't want to set
			 * the UPL_NOBLOCK since we may be trying to recover from a
			 * previous partial pagein I/O that occurred because we were low
			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
			 * since we're only asking for a single page, we can block w/o fear
			 * of tying up pages while waiting for more to become available
			 */
			if (bufsize > PAGE_SIZE)
				uplflags |= UPL_NOBLOCK;
		}
	} else {
		uplflags &= ~UPL_FOR_PAGEOUT;

		if (uplflags & UPL_WILL_BE_DUMPED) {
			uplflags &= ~UPL_WILL_BE_DUMPED;
			uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
		} else
			uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
	}
	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
	if (kr == KERN_SUCCESS && plp != NULL)
		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
	return kr;
}
/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl( ) will take.
 *
 * Parameters:	none
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
 */
upl_size_t
ubc_upl_maxbufsize(
	void)
{
	return(MAX_UPL_SIZE_BYTES);
}
/*
 * ubc_upl_map
 *
 * Map the page list associated with the supplied upl into the kernel virtual
 * address space at the virtual address indicated by the dst_addr argument;
 * the entire upl is mapped
 *
 * Parameters:	upl			The upl to map
 *		dst_addr		The address at which to map the upl
 *
 * Returns:	KERN_SUCCESS		The upl has been mapped
 *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
 *		KERN_FAILURE		The upl is already mapped
 *	vm_map_enter:KERN_INVALID_ARGUMENT
 *					A failure code from vm_map_enter() due
 *					to an invalid argument
 */
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
	return (vm_upl_map(kernel_map, upl, dst_addr));
}
/*
 * ubc_upl_unmap
 *
 * Unmap the page list associated with the supplied upl from the kernel
 * virtual address space; the entire upl is unmapped.
 *
 * Parameters:	upl			The upl to unmap
 *
 * Returns:	KERN_SUCCESS		The upl has been unmapped
 *		KERN_FAILURE		The upl is not currently mapped
 *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
 */
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
	return(vm_upl_unmap(kernel_map, upl));
}
/*
 * ubc_upl_commit
 *
 * Commit the contents of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *
 * Returns:	KERN_SUCCESS		The upl has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	In practice, the only return value for this function should be
 *		KERN_SUCCESS, unless there has been data structure corruption;
 *		since the upl is deallocated regardless of success or failure,
 *		there's really nothing to do about this other than panic.
 *
 *		IMPORTANT: Use of this function should not be mixed with use of
 *		ubc_upl_commit_range(), due to the unconditional deallocation
 *		by this function.
 */
kern_return_t
ubc_upl_commit(
	upl_t		upl)
{
	upl_page_info_t	*pl;
	kern_return_t	kr;

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
	upl_deallocate(upl);
	return kr;
}
/*
 * ubc_upl_commit_range
 *
 * Commit the contents of the specified range of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *		offset			The offset into the upl
 *		size			The size of the region to be committed,
 *					starting at the specified offset
 *		flags			commit type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the commit is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *							both empty and has been
 *							successfully committed
 *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each page's dirty
 *							bit; will prevent a
 *							later pageout
 *		o	UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *							bit; will cause a later
 *							pageout
 *		o	UPL_COMMIT_INACTIVATE		Clear each page's
 *							reference bit; the page
 *							will not be accessed
 *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
 *							become busy when an
 *							IOMemoryDescriptor is
 *							mapped or redirected,
 *							and we have to wait for
 *							the I/O to complete
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		flags)
{
	upl_page_info_t	*pl;
	boolean_t	empty;
	kern_return_t	kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
		flags |= UPL_COMMIT_NOTIFY_EMPTY;

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	kr = upl_commit_range(upl, offset, size, flags,
			      pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);

	if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
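/*
 * Illustrative sketch (not from the original source): after a successful
 * pageout I/O, the whole upl can be committed, its dirty bits cleared, and
 * the upl freed in one call.  "io_size" is assumed to be the size the upl
 * was created with, so that UPL_COMMIT_FREE_ON_EMPTY can take effect:
 *
 *	(void) ubc_upl_commit_range(upl, 0, io_size,
 *	    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
 */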
/*
 * ubc_upl_abort_range
 *
 * Abort the contents of the specified range of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		offset			The offset into the upl
 *		size			The size of the region to be aborted,
 *					starting at the specified offset
 *		abort_flags		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_flags argument is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o	UPL_ABORT_RESTART	The operation must be restarted
 *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
 *		o	UPL_ABORT_ERROR		An I/O error occurred
 *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
 *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t		upl,
	upl_offset_t	offset,
	upl_size_t	size,
	int		abort_flags)
{
	kern_return_t	kr;
	boolean_t	empty = FALSE;

	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

	if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
		upl_deallocate(upl);

	return kr;
}
/*
 * ubc_upl_abort
 *
 * Abort the contents of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		abort_type		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_type is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o	UPL_ABORT_RESTART	The operation must be restarted
 *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
 *		o	UPL_ABORT_ERROR		An I/O error occurred
 *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
 *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort(
	upl_t	upl,
	int	abort_type)
{
	kern_return_t	kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
/*
 * ubc_upl_pageinfo
 *
 * Retrieve the internal page list for the specified upl
 *
 * Parameters:	upl			The upl to obtain the page list from
 *
 * Returns:	!NULL			The (upl_page_info_t *) for the page
 *					list internal to the upl
 *		NULL			Error/no page list associated
 *
 * Notes:	IMPORTANT: The function is only valid on internal objects
 *		where the list request was made with the UPL_INTERNAL flag.
 *
 *		This function is a utility helper function, since some callers
 *		may not have direct access to the header defining the macro,
 *		due to abstraction layering constraints.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t	upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
int
UBCINFOEXISTS(const struct vnode * vp)
{
	return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}
void
ubc_upl_range_needed(
	upl_t	upl,
	int	index,
	int	count)
{
	upl_range_needed(upl, index, count);
}
boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
		return FALSE;
	if (writable)
		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
	return TRUE;
}
boolean_t ubc_is_mapped_writable(const struct vnode *vp)
{
	boolean_t writable;
	return ubc_is_mapped(vp, &writable) && writable;
}
/*
 * CODE SIGNING
 */
#define CS_BLOB_PAGEABLE 0
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;

int cs_validation = 1;

#ifndef SECURE_KERNEL
SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
#endif
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
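/*
 * Illustrative sketch (not from the original source): the read-only
 * counters above can be inspected from userland with sysctlbyname(3):
 *
 *	int count;
 *	size_t len = sizeof (count);
 *
 *	if (sysctlbyname("vm.cs_blob_count", &count, &len, NULL, 0) == 0)
 *		printf("%d code signature blobs loaded\n", count);
 */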
kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t	*blob_addr_p,
	vm_size_t	*blob_size_p)
{
	kern_return_t	kr;

#if CS_BLOB_PAGEABLE
	*blob_size_p = round_page(*blob_size_p);
	kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p);
#else	/* CS_BLOB_PAGEABLE */
	*blob_addr_p = (vm_offset_t) kalloc(*blob_size_p);
	if (*blob_addr_p == 0) {
		kr = KERN_NO_SPACE;
	} else {
		kr = KERN_SUCCESS;
	}
#endif	/* CS_BLOB_PAGEABLE */
	return kr;
}
void
ubc_cs_blob_deallocate(
	vm_offset_t	blob_addr,
	vm_size_t	blob_size)
{
#if CS_BLOB_PAGEABLE
	kmem_free(kernel_map, blob_addr, blob_size);
#else	/* CS_BLOB_PAGEABLE */
	kfree((void *) blob_addr, blob_size);
#endif	/* CS_BLOB_PAGEABLE */
}
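/*
 * Illustrative sketch (not from the original source): the two helpers above
 * are used as a pair.  A load path allocates, copies the signature in, and
 * hands the buffer off; on any failure before the hand-off it must free the
 * buffer itself.  "sig_size" is a hypothetical caller-supplied size:
 *
 *	vm_offset_t addr;
 *	vm_size_t blob_size = sig_size;
 *
 *	if (ubc_cs_blob_allocate(&addr, &blob_size) != KERN_SUCCESS)
 *		return ENOMEM;
 *	// ... copy the code signature to (void *)addr ...
 *	// on error before the buffer is consumed:
 *	ubc_cs_blob_deallocate(addr, blob_size);
 */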
int
ubc_cs_sigpup_add(
	struct vnode	*vp,
	vm_address_t	address,
	vm_size_t	size)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob;
	memory_object_control_t	control;
	int			error;
	const CS_CodeDirectory	*cd;

	error = 0;

	control = ubc_getobject(vp, UBC_FLAGS_NONE);
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	if (memory_object_is_signed(control))
		return 0;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL)
		return ENOMEM;

	/* fill in the new blob */
	blob->csb_cpu_type = CPU_TYPE_ANY;
	blob->csb_base_offset = 0;
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_handle = IPC_PORT_NULL;
	blob->csb_mem_kaddr = address;
	blob->csb_sigpup = 1;
	blob->csb_platform_binary = 0;
	blob->csb_teamid = NULL;

	/*
	 * Validate the blob's contents
	 */
	cd = findCodeDirectory(
		(const CS_SuperBlob *) address,
		(char *) address,
		(char *) address + blob->csb_mem_size);
	if (cd == NULL) {
		/* no code directory => useless blob ! */
		error = EINVAL;
		goto out;
	}

	blob->csb_flags = ntohl(cd->flags) | CS_VALID;
	blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
	} else {
		blob->csb_start_offset = (blob->csb_end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE_4K));
	}

	/*
	 * We don't need to check with the policy module, since the input data is supposed to be already checked
	 */

	vnode_lock(vp);
	if (! UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		if (cs_debug)
			printf("out ubc object\n");
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* someone raced us to adding the code directory */
	if (uip->cs_blobs != NULL) {
		if (cs_debug)
			printf("sigpup: vnode already have CD ?\n");
		vnode_unlock(vp);
		error = EEXIST;
		goto out;
	}

	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		if (cs_debug)
			printf("sigpup: not signable ?\n");
		error = ENOENT;
		goto out;
	}

	vnode_unlock(vp);

	error = 0;
out:
	if (error) {
		if (cs_debug)
			printf("sigpup: not signable ?\n");
		/* we failed; release what we allocated */
		if (blob) {
			kfree(blob, sizeof (*blob));
			blob = NULL;
		}
	}

	return error;
}
int
ubc_cs_blob_add(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		base_offset,
	vm_address_t	addr,
	vm_size_t	size,
	int		flags)
{
	kern_return_t		kr;
	struct ubc_info		*uip;
	struct cs_blob		*blob, *oblob;
	int			error;
	ipc_port_t		blob_handle;
	memory_object_size_t	blob_size;
	const CS_CodeDirectory	*cd;
	off_t			blob_start_offset, blob_end_offset;
	SHA1_CTX		sha1ctxt;
	boolean_t		record_mtime;
	int			is_platform_binary;

	record_mtime = FALSE;
	is_platform_binary = 0;

	blob_handle = IPC_PORT_NULL;

	blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
	if (blob == NULL) {
		return ENOMEM;
	}

#if CS_BLOB_PAGEABLE
	/* get a memory entry on the blob */
	blob_size = (memory_object_size_t) size;
	kr = mach_make_memory_entry_64(kernel_map,
				       &blob_size,
				       addr,
				       VM_PROT_READ,
				       &blob_handle,
				       IPC_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		error = ENOMEM;
		goto out;
	}
	if (memory_object_round_page(blob_size) !=
	    (memory_object_size_t) round_page(size)) {
		printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
		       blob_size, (size_t)size);
		panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
		error = EINVAL;
		goto out;
	}
#else	/* CS_BLOB_PAGEABLE */
	blob_size = (memory_object_size_t) size;
	blob_handle = IPC_PORT_NULL;
#endif	/* CS_BLOB_PAGEABLE */

	/* fill in the new blob */
	blob->csb_cpu_type = cputype;
	blob->csb_sigpup = 0;
	blob->csb_base_offset = base_offset;
	blob->csb_mem_size = size;
	blob->csb_mem_offset = 0;
	blob->csb_mem_handle = blob_handle;
	blob->csb_mem_kaddr = addr;
	blob->csb_flags = 0;
	blob->csb_platform_binary = 0;
	blob->csb_teamid = NULL;

	/*
	 * Validate the blob's contents
	 */

	error = cs_validate_csblob((const uint8_t *)addr, size, &cd);
	if (error) {
		if (cs_debug)
			printf("CODESIGNING: csblob invalid: %d\n", error);
		blob->csb_flags = 0;
		blob->csb_start_offset = 0;
		blob->csb_end_offset = 0;
		memset(blob->csb_sha1, 0, SHA1_RESULTLEN);
		/* let the vnode checker determine if the signature is valid or not */
	} else {
		const unsigned char *sha1_base;
		unsigned int sha1_size;

		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
		blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
		if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
			const SC_Scatter *scatter = (const SC_Scatter*)
				((const char*)cd + ntohl(cd->scatterOffset));
			blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
		} else {
			blob->csb_start_offset = (blob->csb_end_offset -
						  (ntohl(cd->nCodeSlots) * PAGE_SIZE_4K));
		}
		/* compute the blob's SHA1 hash */
		sha1_base = (const unsigned char *) cd;
		sha1_size = ntohl(cd->length);
		SHA1Init(&sha1ctxt);
		SHA1Update(&sha1ctxt, sha1_base, sha1_size);
		SHA1Final(blob->csb_sha1, &sha1ctxt);
	}

	/*
	 * Let policy module check whether the blob's signature is accepted.
	 */
#if CONFIG_MACF
	error = mac_vnode_check_signature(vp,
					  base_offset,
					  blob->csb_sha1,
					  (const void *)cd,
					  blob->csb_cpu_type,
					  flags,
					  &is_platform_binary);
	if (error) {
		if (cs_debug)
			printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
		goto out;
	}

	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !is_platform_binary) {
		if (cs_debug)
			printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
		error = EPERM;
		goto out;
	}
#endif

	if (is_platform_binary) {
		if (cs_debug > 1)
			printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
		blob->csb_platform_binary = 1;
	} else {
		blob->csb_platform_binary = 0;
		blob->csb_teamid = csblob_get_teamid(blob);
		if (cs_debug > 1) {
			if (blob->csb_teamid)
				printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
			else
				printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
		}
	}

	/*
	 * Validate the blob's coverage
	 */
	blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
	blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

	if (blob_start_offset >= blob_end_offset ||
	    blob_start_offset < 0 ||
	    blob_end_offset <= 0) {
		/* reject empty or backwards blob */
		error = EINVAL;
		goto out;
	}

	vnode_lock(vp);
	if (! UBCINFOEXISTS(vp)) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}
	uip = vp->v_ubcinfo;

	/* check if this new blob overlaps with an existing blob */
	for (oblob = uip->cs_blobs;
	     oblob != NULL;
	     oblob = oblob->csb_next) {
		off_t oblob_start_offset, oblob_end_offset;

		/* check for conflicting teamid */
		if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
			if (!oblob->csb_platform_binary) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid == NULL ||
			    strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		} else { // non teamid binary needs to be the same for app slices
			if (oblob->csb_platform_binary ||
			    oblob->csb_teamid != NULL) {
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}

		oblob_start_offset = (oblob->csb_base_offset +
				      oblob->csb_start_offset);
		oblob_end_offset = (oblob->csb_base_offset +
				    oblob->csb_end_offset);
		if (blob_start_offset >= oblob_end_offset ||
		    blob_end_offset <= oblob_start_offset) {
			/* no conflict with this existing blob */
		} else {
			/* conflict ! */
			if (blob_start_offset == oblob_start_offset &&
			    blob_end_offset == oblob_end_offset &&
			    blob->csb_mem_size == oblob->csb_mem_size &&
			    blob->csb_flags == oblob->csb_flags &&
			    (blob->csb_cpu_type == CPU_TYPE_ANY ||
			     oblob->csb_cpu_type == CPU_TYPE_ANY ||
			     blob->csb_cpu_type == oblob->csb_cpu_type) &&
			    !bcmp(blob->csb_sha1,
				  oblob->csb_sha1,
				  SHA1_RESULTLEN)) {
				/*
				 * We already have this blob:
				 * we'll return success but
				 * throw away the new blob.
				 */
				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
					/*
					 * The old blob matches this one
					 * but doesn't have any CPU type.
					 * Update it with whatever the caller
					 * provided this time.
					 */
					oblob->csb_cpu_type = cputype;
				}
				vnode_unlock(vp);
				error = EAGAIN;
				goto out;
			} else {
				/* different blob: reject the new one */
				vnode_unlock(vp);
				error = EALREADY;
				goto out;
			}
		}
	}

	/* mark this vnode's VM object as having "signed pages" */
	kr = memory_object_signed(uip->ui_control, TRUE);
	if (kr != KERN_SUCCESS) {
		vnode_unlock(vp);
		error = ENOENT;
		goto out;
	}

	if (uip->cs_blobs == NULL) {
		/* loading 1st blob: record the file's current "modify time" */
		record_mtime = TRUE;
	}

	/* set the generation count for cs_blobs */
	uip->cs_add_gen = cs_blob_generation_count;

	/*
	 * Add this blob to the list of blobs for this vnode.
	 * We always add at the front of the list and we never remove a
	 * blob from the list, so ubc_cs_get_blobs() can return whatever
	 * the top of the list was and that list will remain valid
	 * while we validate a page, even after we release the vnode's lock.
	 */
	blob->csb_next = uip->cs_blobs;
	uip->cs_blobs = blob;

	OSAddAtomic(+1, &cs_blob_count);
	if (cs_blob_count > cs_blob_count_peak) {
		cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
	}
	OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
	if ((SInt32) cs_blob_size > cs_blob_size_peak) {
		cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
	}
	if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
		cs_blob_size_max = (UInt32) blob->csb_mem_size;
	}

	if (cs_debug > 1) {
		proc_t p;
		const char *name = vnode_getname_printable(vp);
		p = current_proc();
		printf("CODE SIGNING: proc %d(%s) "
		       "loaded %s signatures for file (%s) "
		       "range 0x%llx:0x%llx flags 0x%x\n",
		       p->p_pid, p->p_comm,
		       blob->csb_cpu_type == -1 ? "detached" : "embedded",
		       name,
		       blob->csb_base_offset + blob->csb_start_offset,
		       blob->csb_base_offset + blob->csb_end_offset,
		       blob->csb_flags);
		vnode_putname_printable(name);
	}

	vnode_unlock(vp);

	if (record_mtime) {
		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
	}

	error = 0;	/* success ! */

out:
	if (error) {
		if (cs_debug)
			printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);

		/* we failed; release what we allocated */
		if (blob) {
			kfree(blob, sizeof (*blob));
			blob = NULL;
		}
		if (blob_handle != IPC_PORT_NULL) {
			mach_memory_entry_port_release(blob_handle);
			blob_handle = IPC_PORT_NULL;
		}
	}

	if (error == EAGAIN) {
		/*
		 * See above: error is EAGAIN if we were asked
		 * to add an existing blob again.  We cleaned the new
		 * blob and we want to return success.
		 */
		error = 0;
		/*
		 * Since we're not failing, consume the data we received.
		 */
		ubc_cs_blob_deallocate(addr, size);
	}

	return error;
}
struct cs_blob *
ubc_cs_blob_get(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		offset)
{
	struct ubc_info	*uip;
	struct cs_blob	*blob;
	off_t		offset_in_blob;

	vnode_lock_spin(vp);

	if (! UBCINFOEXISTS(vp)) {
		blob = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		if (cputype != -1 && blob->csb_cpu_type == cputype) {
			break;
		}
		if (offset != -1) {
			offset_in_blob = offset - blob->csb_base_offset;
			if (offset_in_blob >= blob->csb_start_offset &&
			    offset_in_blob < blob->csb_end_offset) {
				/* our offset is covered by this blob */
				break;
			}
		}
	}

	if (cs_debug && blob != NULL && blob->csb_sigpup) {
		printf("found sig pup blob\n");
	}

out:
	vnode_unlock(vp);

	return blob;
}
static void
ubc_cs_free(
	struct ubc_info	*uip)
{
	struct cs_blob	*blob, *next_blob;

	for (blob = uip->cs_blobs;
	     blob != NULL;
	     blob = next_blob) {
		next_blob = blob->csb_next;
		if (blob->csb_mem_kaddr != 0 && !blob->csb_sigpup) {
			ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
					       blob->csb_mem_size);
			blob->csb_mem_kaddr = 0;
		}
		if (blob->csb_mem_handle != IPC_PORT_NULL) {
			mach_memory_entry_port_release(blob->csb_mem_handle);
		}
		blob->csb_mem_handle = IPC_PORT_NULL;
		OSAddAtomic(-1, &cs_blob_count);
		OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
		kfree(blob, sizeof (*blob));
	}
#if CHECK_CS_VALIDATION_BITMAP
	ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
	uip->cs_blobs = NULL;
}
/* check cs blob generation on vnode
 * returns:
 *    0         : Success, the cs_blob attached is current
 *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
 */
int
ubc_cs_generation_check(
	struct vnode	*vp)
{
	int retval = ENEEDAUTH;

	vnode_lock_spin(vp);

	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
		retval = 0;
	}

	vnode_unlock(vp);
	return retval;
}
int
ubc_cs_blob_revalidate(
	struct vnode	*vp,
	struct cs_blob	*blob,
	int		flags)
{
	int error = 0;
	int is_platform_binary = 0;
	const CS_CodeDirectory *cd = NULL;

	assert(vp != NULL);
	assert(blob != NULL);

	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &cd);
	if (error) {
		if (cs_debug) {
			printf("CODESIGNING: csblob invalid: %d\n", error);
		}
		goto out;
	}

	/* callout to mac_vnode_check_signature */
	error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_sha1, (const void *)cd, blob->csb_cpu_type, flags, &is_platform_binary);
	if (cs_debug && error) {
		printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
	}

	/* update generation number if success */
	vnode_lock_spin(vp);
	if (UBCINFOEXISTS(vp)) {
		if (error == 0)
			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
		else
			vp->v_ubcinfo->cs_add_gen = 0;
	}
	vnode_unlock(vp);

out:
	return error;
}
void
cs_blob_reset_cache()
{
	/* incrementing an odd number by 2 makes sure '0' is never reached. */
	OSAddAtomic(+2, &cs_blob_generation_count);
	printf("Resetting cs_blob cache from all vnodes.\n");
}
struct cs_blob *
ubc_get_cs_blobs(
	struct vnode	*vp)
{
	struct ubc_info	*uip;
	struct cs_blob	*blobs;

	/*
	 * No need to take the vnode lock here.  The caller must be holding
	 * a reference on the vnode (via a VM mapping or open file descriptor),
	 * so the vnode will not go away.  The ubc_info stays until the vnode
	 * goes away.  And we only modify "blobs" by adding to the head of the
	 * list.
	 * The ubc_info could go away entirely if the vnode gets reclaimed as
	 * part of a forced unmount.  In the case of a code-signature validation
	 * during a page fault, the "paging_in_progress" reference on the VM
	 * object guarantees that the vnode pager (and the ubc_info) won't go
	 * away during the fault.
	 * Other callers need to protect against vnode reclaim by holding the
	 * vnode lock, for example.
	 */

	if (! UBCINFOEXISTS(vp)) {
		blobs = NULL;
		goto out;
	}

	uip = vp->v_ubcinfo;
	blobs = uip->cs_blobs;

out:
	return blobs;
}
void
ubc_get_cs_mtime(
	struct vnode	*vp,
	struct timespec	*cs_mtime)
{
	struct ubc_info	*uip;

	if (! UBCINFOEXISTS(vp)) {
		cs_mtime->tv_sec = 0;
		cs_mtime->tv_nsec = 0;
		return;
	}

	uip = vp->v_ubcinfo;
	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;
boolean_t
cs_validate_page(
	void			*_blobs,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	unsigned		*tainted)
{
	SHA1_CTX		sha1ctxt;
	unsigned char		actual_hash[SHA1_RESULTLEN];
	unsigned char		expected_hash[SHA1_RESULTLEN];
	boolean_t		found_hash;
	struct cs_blob		*blobs, *blob;
	const CS_CodeDirectory	*cd;
	const CS_SuperBlob	*embedded;
	const unsigned char	*hash;
	boolean_t		validated;
	off_t			offset;	/* page offset in the file */
	size_t			size;
	off_t			codeLimit = 0;
	char			*lower_bound, *upper_bound;
	vm_offset_t		kaddr, blob_addr;
	vm_size_t		ksize;
	kern_return_t		kr;

	offset = page_offset;

	/* retrieve the expected hash */
	found_hash = FALSE;
	blobs = (struct cs_blob *) _blobs;

	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* map the blob in the kernel address space */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			ksize = (vm_size_t) (blob->csb_mem_size +
					     blob->csb_mem_offset);
			kr = vm_map(kernel_map,
				    &kaddr,
				    ksize,
				    0,
				    VM_FLAGS_ANYWHERE,
				    blob->csb_mem_handle,
				    0,
				    TRUE,
				    VM_PROT_READ,
				    VM_PROT_READ,
				    VM_INHERIT_NONE);
			if (kr != KERN_SUCCESS) {
				/* XXX FBDP what to do !? */
				printf("cs_validate_page: failed to map blob, "
				       "size=0x%lx kr=0x%x\n",
				       (size_t)blob->csb_mem_size, kr);
				break;
			}
		}
		if (blob->csb_sigpup && cs_debug)
			printf("checking for a sigpup CD\n");

		blob_addr = kaddr + blob->csb_mem_offset;

		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		embedded = (const CS_SuperBlob *) blob_addr;
		cd = findCodeDirectory(embedded, lower_bound, upper_bound);
		if (cd != NULL) {
			if (cd->pageSize != PAGE_SHIFT_4K ||
			    cd->hashType != CS_HASHTYPE_SHA1 ||
			    cd->hashSize != SHA1_RESULTLEN) {
				/* bogus blob ? */
				if (blob->csb_sigpup && cs_debug)
					printf("page foo bogus sigpup CD\n");
				continue;
			}

			offset = page_offset - blob->csb_base_offset;
			if (offset < blob->csb_start_offset ||
			    offset >= blob->csb_end_offset) {
				/* our page is not covered by this blob */
				if (blob->csb_sigpup && cs_debug)
					printf("OOB sigpup CD\n");
				continue;
			}

			codeLimit = ntohl(cd->codeLimit);
			if (blob->csb_sigpup && cs_debug)
				printf("sigpup codesize %d\n", (int)codeLimit);

			hash = hashes(cd, (unsigned)(offset>>PAGE_SHIFT_4K),
				      lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash,
				      sizeof (expected_hash));
				found_hash = TRUE;
				if (blob->csb_sigpup && cs_debug)
					printf("sigpup hash\n");
			}

			break;
		} else {
			if (blob->csb_sigpup && cs_debug)
				printf("sig pup had no valid CD\n");
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			       "mobj %p off 0x%llx: no hash to validate !?\n",
			       pager, page_offset);
		}
		validated = FALSE;
		*tainted = 0;
	} else {

		*tainted = 0;

		size = PAGE_SIZE_4K;
		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & PAGE_MASK_4K);
			*tainted |= CS_VALIDATE_NX;
		}
		/* compute the actual page's SHA1 hash */
		SHA1Init(&sha1ctxt);
		SHA1UpdateUsePhysicalAddress(&sha1ctxt, data, size);
		SHA1Final(actual_hash, &sha1ctxt);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				       "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				       pager, page_offset, size,
				       asha1[0], asha1[1], asha1[2],
				       asha1[3], asha1[4],
				       esha1[0], esha1[1], esha1[2],
				       esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted |= CS_VALIDATE_TAINTED;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "SHA1 OK\n",
				       pager, page_offset, size);
			}
		}
		validated = TRUE;
	}

	return validated;
}
int
ubc_cs_getcdhash(
	vnode_t		vp,
	off_t		offset,
	unsigned char	*cdhash)
{
	struct cs_blob	*blobs, *blob;
	off_t		rel_offset;
	int		ret;

	vnode_lock(vp);

	blobs = ubc_get_cs_blobs(vp);
	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		/* compute offset relative to this blob */
		rel_offset = offset - blob->csb_base_offset;
		if (rel_offset >= blob->csb_start_offset &&
		    rel_offset < blob->csb_end_offset) {
			/* this blob does cover our "offset" ! */
			break;
		}
	}

	if (blob == NULL) {
		/* we didn't find a blob covering "offset" */
		ret = EBADEXEC; /* XXX any better error ? */
	} else {
		/* get the SHA1 hash of that blob */
		bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
		ret = 0;
	}

	vnode_unlock(vp);

	return ret;
}
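/*
 * Illustrative sketch (not from the original source): fetching the code
 * directory hash for the slice covering a given file offset.
 * "slice_offset" is a hypothetical caller-supplied offset:
 *
 *	unsigned char cdhash[SHA1_RESULTLEN];
 *
 *	if (ubc_cs_getcdhash(vp, slice_offset, cdhash) == 0) {
 *		// cdhash now identifies the covering code signature
 *	}
 */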
#if CHECK_CS_VALIDATION_BITMAP
#define stob(s)	((atop_64((s)) + 07) >> 3)
extern	boolean_t	root_fs_upgrade_try;

/*
 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
 * Depends:
 * a) Is the target vnode on the root filesystem?
 * b) Has someone tried to mount the root filesystem read-write?
 * If answers are (a) yes AND (b) no, then we can use the bitmap.
 */
#define USE_CODE_SIGN_BITMAP(vp)	( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
kern_return_t
ubc_cs_validation_bitmap_allocate(
	vnode_t		vp)
{
	kern_return_t	kr = KERN_SUCCESS;
	struct ubc_info	*uip;
	char		*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		uip = vp->v_ubcinfo;

		if ( uip->cs_valid_bitmap == NULL ) {
			bitmap_size = stob(uip->ui_size);
			target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
			if (target_bitmap == 0) {
				kr = KERN_NO_SPACE;
			} else {
				kr = KERN_SUCCESS;
			}
			if( kr == KERN_SUCCESS ) {
				memset( target_bitmap, 0, (size_t)bitmap_size);
				uip->cs_valid_bitmap = (void*)target_bitmap;
				uip->cs_valid_bitmap_size = bitmap_size;
			}
		}
	}
	return kr;
}
kern_return_t
ubc_cs_check_validation_bitmap (
	vnode_t			vp,
	memory_object_offset_t	offset,
	int			optype)
{
	kern_return_t	kr = KERN_SUCCESS;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		struct ubc_info	*uip = vp->v_ubcinfo;
		char		*target_bitmap = uip->cs_valid_bitmap;

		if ( target_bitmap == NULL ) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			uint64_t	bit, byte;
			bit = atop_64( offset );
			byte = bit >> 3;

			if ( byte > uip->cs_valid_bitmap_size ) {
				kr = KERN_INVALID_ARGUMENT;
			} else {

				if (optype == CS_BITMAP_SET) {
					target_bitmap[byte] |= (1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CLEAR) {
					target_bitmap[byte] &= ~(1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CHECK) {
					if ( target_bitmap[byte] & (1 << (bit & 07))) {
						kr = KERN_SUCCESS;
					} else {
						kr = KERN_FAILURE;
					}
				}
			}
		}
	}
	return kr;
}
void
ubc_cs_validation_bitmap_deallocate(
	vnode_t		vp)
{
	struct ubc_info	*uip;
	void		*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
			bitmap_size = uip->cs_valid_bitmap_size;
			kfree( target_bitmap, (vm_size_t) bitmap_size );
			uip->cs_valid_bitmap = NULL;
		}
	}
}
#else /* CHECK_CS_VALIDATION_BITMAP */
kern_return_t	ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){
	return KERN_INVALID_ARGUMENT;
}

kern_return_t ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype){

	return KERN_INVALID_ARGUMENT;
}

void	ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){
	return;
}
#endif /* CHECK_CS_VALIDATION_BITMAP */