/*
 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Author:	Umesh Vaishampayan [umeshv@apple.com]
 *		05-Aug-1999	umeshv	Created.
 *
 *	Functions related to Unified Buffer cache.
 *
 *	Caller of UBC functions MUST have a valid reference on the vnode.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/ubc_internal.h>
#include <sys/ucred.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/codesign.h>
#include <sys/codedir_internal.h>
#include <sys/fsevents.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */

#include <libkern/crypto/sha1.h>
#include <libkern/libkern.h>

#include <security/mac_framework.h>
/* XXX These should be in a BSD accessible Mach header, but aren't. */
extern kern_return_t	memory_object_pages_resident(memory_object_control_t,
						     boolean_t *);
extern kern_return_t	memory_object_signed(memory_object_control_t control,
					     boolean_t is_signed);
extern boolean_t	memory_object_is_slid(memory_object_control_t	control);
extern boolean_t	memory_object_is_signed(memory_object_control_t);

extern void Debugger(const char *message);
/* XXX no one uses this interface! */
kern_return_t ubc_page_op_with_control(
	memory_object_control_t	 control,
	off_t			 f_offset,
	int			 ops,
	ppnum_t			 *phys_entryp,
	int			 *flagsp);
#if DIAGNOSTIC
#define assert(cond)    \
    ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
#else
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
static int ubc_umcallback(vnode_t, void *);
static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
static void ubc_cs_free(struct ubc_info *uip);

struct zone	*ubc_info_zone;
static uint32_t	cs_blob_generation_count = 1;
/*
 * Routines to navigate code signing data structures in the kernel...
 */

#define PAGE_SHIFT_4K		(12)
#define PAGE_SIZE_4K		((1<<PAGE_SHIFT_4K))
#define PAGE_MASK_4K		((PAGE_SIZE_4K-1))
#define round_page_4K(x)	(((vm_offset_t)(x) + PAGE_MASK_4K) & ~((vm_offset_t)PAGE_MASK_4K))
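
/*
 * Illustrative sketch (not part of the original source): the 4K page
 * macros implement the usual round-up-to-a-power-of-two idiom, adding
 * PAGE_MASK_4K and then clearing the low bits.  Exact multiples of the
 * page size are left unchanged.
 */
#if 0	/* example only */
static void
round_page_4K_example(void)
{
	assert(round_page_4K(0x0000) == 0x0000);	/* already aligned */
	assert(round_page_4K(0x0001) == 0x1000);	/* rounds up */
	assert(round_page_4K(0x1fff) == 0x2000);	/* rounds up */
	assert(round_page_4K(0x2000) == 0x2000);	/* already aligned */
}
#endif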
static boolean_t
cs_valid_range(
	const void *start,
	const void *end,
	const void *lower_bound,
	const void *upper_bound)
{
	if (upper_bound < lower_bound ||
	    end < start) {
		return FALSE;
	}

	if (start < lower_bound ||
	    end > upper_bound) {
		return FALSE;
	}

	return TRUE;
}
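
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller validates that a candidate structure lies entirely within the
 * mapped signature blob before dereferencing any of its fields, first
 * for the fixed-size header and then for the self-described length.
 */
#if 0	/* example only */
static boolean_t
blob_header_in_bounds(const CS_GenericBlob *blob,
		      const char *lower_bound, const char *upper_bound)
{
	return cs_valid_range(blob, blob + 1, lower_bound, upper_bound) &&
	       cs_valid_range(blob, (const char *)blob + ntohl(blob->length),
			      lower_bound, upper_bound);
}
#endif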
/*
 * Locate the CodeDirectory from an embedded signature blob
 */
static const
CS_CodeDirectory *findCodeDirectory(
	const CS_SuperBlob *embedded,
	const char *lower_bound,
	const char *upper_bound)
{
	const CS_CodeDirectory *cd = NULL;

	if (embedded &&
	    cs_valid_range(embedded, embedded + 1, lower_bound, upper_bound) &&
	    ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_BlobIndex *limit;
		const CS_BlobIndex *p;

		limit = &embedded->index[ntohl(embedded->count)];
		if (!cs_valid_range(&embedded->index[0], limit,
				    lower_bound, upper_bound)) {
			return NULL;
		}
		for (p = embedded->index; p < limit; ++p) {
			if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) {
				const unsigned char *base;

				base = (const unsigned char *)embedded;
				cd = (const CS_CodeDirectory *)(base + ntohl(p->offset));
				break;
			}
		}
	} else {
		/*
		 * Detached signatures come as a bare CS_CodeDirectory,
		 * without a blob.
		 */
		cd = (const CS_CodeDirectory *) embedded;
	}

	if (cd &&
	    cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
			   lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd + ntohl(cd->hashOffset),
			   lower_bound, upper_bound) &&
	    cs_valid_range(cd, (const char *) cd +
			   ntohl(cd->hashOffset) +
			   (ntohl(cd->nCodeSlots) * SHA1_RESULTLEN),
			   lower_bound, upper_bound) &&
	    ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
		return cd;
	}

	// not found or not a valid code directory
	return NULL;
}
/*
 * Locating a page hash
 */
static const unsigned char *
hashes(
	const CS_CodeDirectory *cd,
	unsigned page,
	const char *lower_bound,
	const char *upper_bound)
{
	const unsigned char *base, *top, *hash;
	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);

	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));

	if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
		/* Get first scatter struct */
		const SC_Scatter *scatter = (const SC_Scatter*)
			((const char*)cd + ntohl(cd->scatterOffset));
		uint32_t hashindex=0, scount, sbase=0;
		/* iterate all scatter structs */
		do {
			if((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
				if(cs_debug) {
					printf("CODE SIGNING: Scatter extends past Code Directory\n");
				}
				return NULL;
			}

			scount = ntohl(scatter->count);
			uint32_t new_base = ntohl(scatter->base);

			/* last scatter? */
			if (scount == 0) {
				return NULL;
			}

			if((hashindex > 0) && (new_base <= sbase)) {
				if(cs_debug) {
					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
					       sbase, new_base);
				}
				return NULL;	/* unordered scatter array */
			}
			sbase = new_base;

			/* this scatter beyond page we're looking for? */
			if (sbase > page) {
				return NULL;
			}

			if (sbase+scount >= page) {
				/* Found the scatter struct that is
				 * referencing our page */

				/* base = address of first hash covered by scatter */
				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
					hashindex * SHA1_RESULTLEN;
				/* top = address of first hash after this scatter */
				top = base + scount * SHA1_RESULTLEN;
				if (!cs_valid_range(base, top, lower_bound,
						    upper_bound) ||
				    hashindex > nCodeSlots) {
					return NULL;
				}

				break;
			}

			/* this scatter struct is before the page we're looking
			 * for. Iterate. */
			hashindex += scount;
			scatter++;
		} while(1);

		hash = base + (page - sbase) * SHA1_RESULTLEN;
	} else {
		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
		top = base + nCodeSlots * SHA1_RESULTLEN;
		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
		    page > nCodeSlots) {
			return NULL;
		}
		assert(page < nCodeSlots);

		hash = base + page * SHA1_RESULTLEN;
	}

	if (!cs_valid_range(hash, hash + SHA1_RESULTLEN,
			    lower_bound, upper_bound)) {
		hash = NULL;
	}

	return hash;
}
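
/*
 * Illustrative sketch (not part of the original source): callers of
 * hashes() derive the page index from a byte offset using the 4K page
 * shift that the code directory declares via cd->pageSize.
 */
#if 0	/* example only */
static const unsigned char *
hash_for_offset(const CS_CodeDirectory *cd, off_t offset,
		const char *lower_bound, const char *upper_bound)
{
	unsigned page = (unsigned)(offset >> PAGE_SHIFT_4K);

	return hashes(cd, page, lower_bound, upper_bound);
}
#endif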
/*
 * cs_validate_codedirectory
 *
 * Validate the pointers inside the code directory to make sure that
 * all offsets and lengths are constrained within the buffer.
 *
 * Parameters:	cd			Pointer to code directory buffer
 *		length			Length of buffer
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{

	if (length < sizeof(*cd))
		return EBADEXEC;
	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY)
		return EBADEXEC;
	if (cd->hashSize != SHA1_RESULTLEN)
		return EBADEXEC;
	if (cd->pageSize != PAGE_SHIFT_4K)
		return EBADEXEC;
	if (cd->hashType != CS_HASHTYPE_SHA1)
		return EBADEXEC;

	if (length < ntohl(cd->hashOffset))
		return EBADEXEC;

	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
	if (ntohl(cd->hashOffset) / SHA1_RESULTLEN < ntohl(cd->nSpecialSlots))
		return EBADEXEC;

	/* check that codeslots fits in the buffer */
	if ((length - ntohl(cd->hashOffset)) / SHA1_RESULTLEN <  ntohl(cd->nCodeSlots))
		return EBADEXEC;

	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {

		if (length < ntohl(cd->scatterOffset))
			return EBADEXEC;

		SC_Scatter *scatter = (SC_Scatter *)
			(((uint8_t *)cd) + ntohl(cd->scatterOffset));
		uint32_t nPages = 0;

		/*
		 * Check each scatter buffer; since we don't know the
		 * length of the scatter buffer array, we have to
		 * check each entry.
		 */
		while(1) {
			/* check that the end of each scatter buffer is within the length */
			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length)
				return EBADEXEC;
			uint32_t scount = ntohl(scatter->count);
			if (scount == 0)
				break;
			if (nPages + scount < nPages)
				return EBADEXEC;
			nPages += scount;
			scatter++;

			/* XXX check that bases don't overlap */
			/* XXX check that targetOffset doesn't overlap */
		}
#if 0 /* rdar://12579439 */
		if (nPages != ntohl(cd->nCodeSlots))
			return EBADEXEC;
#endif
	}

	if (length < ntohl(cd->identOffset))
		return EBADEXEC;

	/* identifier is NUL terminated string */
	if (cd->identOffset) {
		uint8_t *ptr = (uint8_t *)cd + ntohl(cd->identOffset);
		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL)
			return EBADEXEC;
	}

	/* team identifier is NUL terminated string */
	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
		if (length < ntohl(cd->teamOffset))
			return EBADEXEC;

		uint8_t *ptr = (uint8_t *)cd + ntohl(cd->teamOffset);
		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL)
			return EBADEXEC;
	}

	return 0;
}
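
/*
 * Illustrative note (not part of the original source): the slot checks
 * above divide the available space by SHA1_RESULTLEN rather than
 * multiplying the untrusted slot counts, so a hostile nCodeSlots or
 * nSpecialSlots cannot overflow the arithmetic and bypass the bounds
 * check.
 */
#if 0	/* example only: the overflow-prone form these checks avoid */
static int
unsafe_slot_check_example(const CS_CodeDirectory *cd, size_t length)
{
	/* BAD: ntohl(cd->nCodeSlots) * SHA1_RESULTLEN can wrap around */
	if (length - ntohl(cd->hashOffset) < ntohl(cd->nCodeSlots) * SHA1_RESULTLEN)
		return EBADEXEC;
	return 0;
}
#endif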
/*
 * cs_validate_blob
 *
 * Validate that the length of the blob is sane.
 */
static int
cs_validate_blob(const CS_GenericBlob *blob, size_t length)
{
	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length))
		return EBADEXEC;
	return 0;
}
/*
 * cs_validate_csblob
 *
 * Validate the superblob/embedded code directory to make sure that
 * all internal pointers are valid.
 *
 * Will validate both a superblob csblob and a "raw" code directory.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		rcd			returns pointer to code directory
 *
 * Returns:	0			Success
 *		EBADEXEC		Invalid code signature
 */
static int
cs_validate_csblob(const uint8_t *addr, size_t length,
		   const CS_CodeDirectory **rcd)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr;
	int error;

	*rcd = NULL;

	error = cs_validate_blob(blob, length);
	if (error)
		return error;

	length = ntohl(blob->length);

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		uint32_t n, count = ntohl(sb->count);

		if (length < sizeof(CS_SuperBlob))
			return EBADEXEC;

		/* check that the array of BlobIndex fits in the rest of the data */
		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count)
			return EBADEXEC;

		/* now check each BlobIndex */
		for (n = 0; n < count; n++) {
			const CS_BlobIndex *blobIndex = &sb->index[n];
			if (length < ntohl(blobIndex->offset))
				return EBADEXEC;

			const CS_GenericBlob *subBlob =
				(const CS_GenericBlob *)(void *)(addr + ntohl(blobIndex->offset));

			size_t subLength = length - ntohl(blobIndex->offset);

			if ((error = cs_validate_blob(subBlob, subLength)) != 0)
				return error;
			subLength = ntohl(subBlob->length);

			/* extra validation for CDs, that is also returned */
			if (ntohl(blobIndex->type) == CSSLOT_CODEDIRECTORY) {
				const CS_CodeDirectory *cd = (const CS_CodeDirectory *)subBlob;
				if ((error = cs_validate_codedirectory(cd, subLength)) != 0)
					return error;
				*rcd = cd;
			}
		}

	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {

		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(void *)addr, length)) != 0)
			return error;
		*rcd = (const CS_CodeDirectory *)blob;
	} else {
		return EBADEXEC;
	}

	if (*rcd == NULL)
		return EBADEXEC;

	return 0;
}
/*
 * cs_find_blob_bytes
 *
 * Find a blob from the superblob/code directory. The blob must have
 * been validated by cs_validate_csblob() before calling
 * this. Use cs_find_blob() instead.
 *
 * Will also find a "raw" code directory if it's stored as well as
 * searching the superblob.
 *
 * Parameters:	buffer			Pointer to code signature
 *		length			Length of buffer
 *		type			type of blob to find
 *		magic			the magic number for that blob
 *
 * Returns:	pointer			Success
 *		NULL			Buffer not found
 */
static const CS_GenericBlob *
cs_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
{
	const CS_GenericBlob *blob = (const CS_GenericBlob *)(void *)addr;

	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
		size_t n, count = ntohl(sb->count);

		for (n = 0; n < count; n++) {
			if (ntohl(sb->index[n].type) != type)
				continue;
			uint32_t offset = ntohl(sb->index[n].offset);
			if (length - sizeof(const CS_GenericBlob) < offset)
				return NULL;
			blob = (const CS_GenericBlob *)(void *)(addr + offset);
			if (ntohl(blob->magic) != magic)
				continue;
			return blob;
		}
	} else if (type == CSSLOT_CODEDIRECTORY
		   && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
		   && magic == CSMAGIC_CODEDIRECTORY)
		return blob;
	return NULL;
}


const CS_GenericBlob *
cs_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
{
	if ((csblob->csb_flags & CS_VALID) == 0)
		return NULL;
	return cs_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
}

static const uint8_t *
cs_find_special_slot(const CS_CodeDirectory *cd, uint32_t slot)
{
	/* there is no zero special slot since that is the first code slot */
	if (ntohl(cd->nSpecialSlots) < slot || slot == 0)
		return NULL;

	return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (SHA1_RESULTLEN * slot));
}
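
/*
 * Illustrative note (not part of the original source): special slots
 * are stored immediately *before* the code slot hash array and are
 * indexed backwards, so slot N lives at hashOffset - N * SHA1_RESULTLEN;
 * that is why the helper above subtracts from hashOffset.
 */
#if 0	/* example only */
static const uint8_t *
entitlements_hash_example(const CS_CodeDirectory *cd)
{
	/* fetch the hash stored in the entitlements special slot */
	return cs_find_special_slot(cd, CSSLOT_ENTITLEMENTS);
}
#endif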
/*
 * End of routines to navigate code signing data structures in the kernel.
 */

/*
 * Routines to navigate entitlements in the kernel.
 */

/* Retrieve the entitlements blob for a process.
 * Returns:
 *   EINVAL	no text vnode associated with the process
 *   EBADEXEC	invalid code signing data
 *   0		no error occurred
 *
 * On success, out_start and out_length will point to the
 * entitlements blob if found; or will be set to NULL/zero
 * if there were no entitlements.
 */

static uint8_t sha1_zero[SHA1_RESULTLEN] = { 0 };

int
cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length)
{
	uint8_t computed_hash[SHA1_RESULTLEN];
	const CS_GenericBlob *entitlements;
	const CS_CodeDirectory *code_dir;
	struct cs_blob *csblob;
	const uint8_t *embedded_hash;
	SHA1_CTX context;

	*out_start = NULL;
	*out_length = 0;

	if (NULL == p->p_textvp)
		return EINVAL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return 0;

	if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL)
		return 0;

	entitlements = cs_find_blob(csblob, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
	embedded_hash = cs_find_special_slot(code_dir, CSSLOT_ENTITLEMENTS);

	if (embedded_hash == NULL) {
		if (entitlements)
			return EBADEXEC;
		return 0;
	} else if (entitlements == NULL) {
		if (memcmp(embedded_hash, sha1_zero, SHA1_RESULTLEN) != 0)
			return EBADEXEC;
		return 0;
	}

	SHA1Init(&context);
	SHA1Update(&context, entitlements, ntohl(entitlements->length));
	SHA1Final(computed_hash, &context);
	if (memcmp(computed_hash, embedded_hash, SHA1_RESULTLEN) != 0)
		return EBADEXEC;

	*out_start = (void *)entitlements;
	*out_length = ntohl(entitlements->length);

	return 0;
}
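
/*
 * Illustrative caller (not part of the original source): fetch and size
 * a process's entitlements blob, treating an empty result as "no
 * entitlements" rather than an error.
 */
#if 0	/* example only */
static void
entitlements_example(proc_t p)
{
	void *blob = NULL;
	size_t blob_len = 0;

	if (cs_entitlements_blob_get(p, &blob, &blob_len) == 0 && blob != NULL)
		printf("entitlements blob: %lu bytes\n", (unsigned long)blob_len);
}
#endif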
/* Retrieve the codesign identity for a process.
 * Returns:
 *   NULL	an error occurred
 *   string	the cs_identity
 */
const char *
cs_identity_get(proc_t p)
{
	const CS_CodeDirectory *code_dir;
	struct cs_blob *csblob;

	if (NULL == p->p_textvp)
		return NULL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return NULL;

	if ((code_dir = (const CS_CodeDirectory *)cs_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY)) == NULL)
		return NULL;

	if (code_dir->identOffset == 0)
		return NULL;

	return ((const char *)code_dir) + ntohl(code_dir->identOffset);
}
/* Retrieve the codesign blob for a process.
 * Returns:
 *   EINVAL	no text vnode associated with the process
 *   0		no error occurred
 *
 * On success, out_start and out_length will point to the
 * cms blob if found; or will be set to NULL/zero
 * if there was no blob.
 */
int
cs_blob_get(proc_t p, void **out_start, size_t *out_length)
{
	struct cs_blob *csblob;

	*out_start = NULL;
	*out_length = 0;

	if (NULL == p->p_textvp)
		return EINVAL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return 0;

	*out_start = (void *)csblob->csb_mem_kaddr;
	*out_length = csblob->csb_mem_size;

	return 0;
}
unsigned char *
cs_get_cdhash(struct proc *p)
{
	struct cs_blob *csblob;

	if (NULL == p->p_textvp)
		return NULL;

	if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL)
		return NULL;

	return csblob->csb_sha1;
}
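
/*
 * Illustrative sketch (not part of the original source): the returned
 * cdhash is a raw SHA1_RESULTLEN-byte digest; a caller that wants a
 * printable form must hex-encode it itself.
 */
#if 0	/* example only */
static void
print_cdhash(struct proc *p)
{
	unsigned char *cdhash = cs_get_cdhash(p);
	int i;

	if (cdhash == NULL)
		return;
	for (i = 0; i < SHA1_RESULTLEN; i++)
		printf("%02x", cdhash[i]);
	printf("\n");
}
#endif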
/*
 * End of routines to navigate entitlements in the kernel.
 */


/*
 * ubc_init
 *
 * Initialization of the zone for Unified Buffer Cache.
 *
 * Implicit returns:
 *		ubc_info_zone(global)	initialized for subsequent allocations
 */
__private_extern__ void
ubc_init(void)
{
	vm_size_t	i;

	i = (vm_size_t) sizeof (struct ubc_info);

	ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");

	zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
/*
 * ubc_info_init
 *
 * Allocate and attach an empty ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 */
int
ubc_info_init(struct vnode *vp)
{
	return(ubc_info_init_internal(vp, 0, 0));
}


/*
 * ubc_info_init_withsize
 *
 * Allocate and attach a sized ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		filesize		The size of the file
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 */
int
ubc_info_init_withsize(struct vnode *vp, off_t filesize)
{
	return(ubc_info_init_internal(vp, 1, filesize));
}


/*
 * ubc_info_init_internal
 *
 * Allocate and attach a ubc_info structure to a vnode
 *
 * Parameters:	vp			Pointer to the vnode
 *		withfsize{0,1}		Zero if the size should be obtained
 *					from the vnode; otherwise, use filesize
 *		filesize		The size of the file, if withfsize == 1
 *
 * Returns:	0			Success
 *	vnode_size:ENOMEM		Not enough space
 *	vnode_size:???			Other error from vnode_getattr
 *
 * Notes:	We call a blocking zalloc(), and the zone was created as an
 *		expandable and collectable zone, so if no memory is available,
 *		it is possible for zalloc() to block indefinitely.  zalloc()
 *		may also panic if the zone of zones is exhausted, since it's
 *		NOT expandable.
 *
 *		We unconditionally call vnode_pager_setup(), even if this is
 *		a reuse of a ubc_info; in that case, we should probably assert
 *		that it does not already have a pager association, but do not.
 *
 *		Since memory_object_create_named() can only fail from receiving
 *		an invalid pager argument, the explicit check and panic is
 *		merely precautionary.
 */
static int
ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
{
	register struct ubc_info	*uip;
	void *  pager;
	int error = 0;
	kern_return_t kret;
	memory_object_control_t control;

	uip = vp->v_ubcinfo;

	/*
	 * If there is not already a ubc_info attached to the vnode, we
	 * attach one; otherwise, we will reuse the one that's there.
	 */
	if (uip == UBC_INFO_NULL) {

		uip = (struct ubc_info *) zalloc(ubc_info_zone);
		bzero((char *)uip, sizeof(struct ubc_info));

		uip->ui_vnode = vp;
		uip->ui_flags = UI_INITED;
		uip->ui_ucred = NOCRED;
	}
	assert(uip->ui_flags != UI_NONE);
	assert(uip->ui_vnode == vp);

	/* now set this ubc_info in the vnode */
	vp->v_ubcinfo = uip;

	/*
	 * Allocate a pager object for this vnode
	 *
	 * XXX The value of the pager parameter is currently ignored.
	 * XXX Presumably, this API changed to avoid the race between
	 * XXX setting the pager and the UI_HASPAGER flag.
	 */
	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
	assert(pager);

	/*
	 * Explicitly set the pager into the ubc_info, after setting the
	 * UI_HASPAGER flag.
	 */
	SET(uip->ui_flags, UI_HASPAGER);
	uip->ui_pager = pager;

	/*
	 * Note: We can not use VNOP_GETATTR() to get accurate
	 * value of ui_size because this may be an NFS vnode, and
	 * nfs_getattr() can call vinvalbuf(); if this happens,
	 * ubc_info is not set up to deal with that event.
	 */

	/*
	 * create a vnode - vm_object association
	 * memory_object_create_named() creates a "named" reference on the
	 * memory object; we hold this reference as long as the vnode is
	 * "alive."  Since memory_object_create_named() took its own reference
	 * on the vnode pager we passed it, we can drop the reference
	 * vnode_pager_setup() returned here.
	 */
	kret = memory_object_create_named(pager,
		(memory_object_size_t)uip->ui_size, &control);
	vnode_pager_deallocate(pager);
	if (kret != KERN_SUCCESS)
		panic("ubc_info_init: memory_object_create_named returned %d", kret);

	assert(control);
	uip->ui_control = control;	/* cache the value of the mo control */
	SET(uip->ui_flags, UI_HASOBJREF);	/* with a named reference */

	if (withfsize == 0) {
		/* initialize the size */
		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
		if (error)
			uip->ui_size = 0;
	} else {
		uip->ui_size = filesize;
	}
	vp->v_lflag |= VNAMED_UBC;	/* vnode has a named ubc reference */

	return (error);
}
/*
 * ubc_info_free
 *
 * Free a ubc_info structure
 *
 * Parameters:	uip			A pointer to the ubc_info to free
 *
 * Returns:	(void)
 *
 * Notes:	If there is a credential that has subsequently been associated
 *		with the ubc_info via a call to ubc_setcred(), the reference
 *		to the credential is dropped.
 *
 *		It's actually impossible for a ubc_info.ui_control to take the
 *		value MEMORY_OBJECT_CONTROL_NULL.
 */
static void
ubc_info_free(struct ubc_info *uip)
{
	if (IS_VALID_CRED(uip->ui_ucred)) {
		kauth_cred_unref(&uip->ui_ucred);
	}

	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL)
		memory_object_control_deallocate(uip->ui_control);

	cluster_release(uip);
	ubc_cs_free(uip);

	zfree(ubc_info_zone, uip);
	return;
}


void
ubc_info_deallocate(struct ubc_info *uip)
{
	ubc_info_free(uip);
}
/*
 * This should be public but currently it is only used below so we
 * defer making that change.
 */
static errno_t mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_PROTECTION_FAILURE:
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_FAILURE:
		return EIO;

	case KERN_MEMORY_FAILURE:
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
		return EPERM;

	case KERN_MEMORY_ERROR:
		return EBUSY;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO;
	}
}
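
/*
 * Illustrative usage (not part of the original source): kern_return_t
 * results from Mach VM calls are folded into errno space before being
 * returned to BSD callers, as ubc_setsize_ex() does below.
 */
#if 0	/* example only */
static errno_t
errno_mapping_example(memory_object_control_t control)
{
	/* e.g. KERN_PROTECTION_FAILURE maps to EACCES, KERN_SUCCESS to 0 */
	kern_return_t kr = memory_object_lock_request(control, 0, 0, NULL, NULL,
						      MEMORY_OBJECT_RETURN_NONE, 0,
						      VM_PROT_NO_CHANGE);
	return mach_to_bsd_errno(kr);
}
#endif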
/*
 * ubc_setsize_ex
 *
 * Tell the VM that the size of the file represented by the vnode has
 * changed
 *
 * Parameters:	vp	The vp whose backing file size is
 *			being changed
 *		nsize	The new size of the backing file
 *		opts	Options
 *
 * Returns:	EINVAL for new size < 0
 *		ENOENT if no UBC info exists
 *		EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
 *		Other errors (mapped to errno_t) returned by VM functions
 *
 * Notes:	This function will indicate success if the new size is the
 *		same or larger than the old size (in this case, the
 *		remainder of the file will require modification or use of
 *		an existing upl to access successfully).
 *
 *		This function will fail if the new file size is smaller,
 *		and the memory region being invalidated was unable to
 *		actually be invalidated and/or the last page could not be
 *		flushed, if the new size is not aligned to a page
 *		boundary.  This is usually indicative of an I/O error.
 */
errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
{
	off_t osize;	/* ui_size before change */
	off_t lastpg, olastpgend, lastoff;
	struct ubc_info *uip;
	memory_object_control_t control;
	kern_return_t kret = KERN_SUCCESS;

	if (nsize < (off_t)0)
		return EINVAL;

	if (!UBCINFOEXISTS(vp))
		return ENOENT;

	uip = vp->v_ubcinfo;
	osize = uip->ui_size;

	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize)
		return EAGAIN;

	/*
	 * Update the size before flushing the VM
	 */
	uip->ui_size = nsize;

	if (nsize >= osize) {	/* Nothing more to do */
		if (nsize > osize) {
			lock_vnode_and_post(vp, NOTE_EXTEND);
		}

		return 0;
	}

	/*
	 * When the file shrinks, invalidate the pages beyond the
	 * new size. Also get rid of garbage beyond nsize on the
	 * last page. The ui_size already has the nsize, so any
	 * subsequent page-in will zero-fill the tail properly
	 */
	lastpg = trunc_page_64(nsize);
	olastpgend = round_page_64(osize);
	control = uip->ui_control;
	assert(control);
	lastoff = (nsize & PAGE_MASK_64);

	if (lastoff) {
		upl_t		upl;
		upl_page_info_t	*pl;

		/*
		 * new EOF ends up in the middle of a page
		 * zero the tail of this page if it's currently
		 * present in the cache
		 */
		kret = ubc_create_upl(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE);

		if (kret != KERN_SUCCESS)
			panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);

		if (upl_valid_page(pl, 0))
			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);

		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);

		lastpg += PAGE_SIZE_64;
	}
	if (olastpgend > lastpg) {
		int flags;

		if (lastpg == 0)
			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
		else
			flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * invalidate the pages beyond the new EOF page
		 */
		kret = memory_object_lock_request(control,
						  (memory_object_offset_t)lastpg,
						  (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
						  MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
		if (kret != KERN_SUCCESS)
			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
	}
	return mach_to_bsd_errno(kret);
}

// Returns true for success
int ubc_setsize(vnode_t vp, off_t nsize)
{
	return ubc_setsize_ex(vp, nsize, 0) == 0;
}
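
/*
 * Illustrative caller (not part of the original source): a filesystem
 * truncate path typically updates its notion of the file size first and
 * then calls ubc_setsize() so cached pages beyond the new EOF are
 * zeroed or invalidated; note the inverted convention, where a nonzero
 * return means success.
 */
#if 0	/* example only */
static void
truncate_example(vnode_t vp, off_t new_size)
{
	if (!ubc_setsize(vp, new_size))
		printf("truncate_example: ubc_setsize failed\n");
}
#endif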
/*
 * ubc_getsize
 *
 * Get the size of the file associated with the specified vnode
 *
 * Parameters:	vp			The vnode whose size is of interest
 *
 * Returns:	0			There is no ubc_info associated with
 *					this vnode, or the size is zero
 *		!0			The size of the file
 *
 * Notes:	Using this routine, it is not possible for a caller to
 *		successfully distinguish between a vnode associated with a zero
 *		length file, and a vnode with no associated ubc_info.  The
 *		caller therefore needs to not care, or needs to ensure that
 *		they have previously successfully called ubc_info_init() or
 *		ubc_info_init_withsize().
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* people depend on the side effect of this working this way
	 * as they call this for directory
	 */
	if (!UBCINFOEXISTS(vp))
		return ((off_t)0);
	return (vp->v_ubcinfo->ui_size);
}
/*
 * ubc_umount
 *
 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
 * mount point
 *
 * Parameters:	mp			The mount point
 *
 * Returns:	0			Success
 *
 * Notes:	There is no failure indication for this function.
 *
 *		This function is used in the unmount path; since it may block
 *		I/O indefinitely, it should not be used in the forced unmount
 *		path, since a device unavailability could also block that
 *		unmount attempt indefinitely.
 *
 *		Because there is no device ejection interlock on USB, FireWire,
 *		or similar devices, it's possible that an ejection that begins
 *		subsequent to the vnode_iterate() completing, either on one of
 *		those devices, or a network mount for which the server quits
 *		responding, etc., may cause the caller to block indefinitely.
 */
__private_extern__ int
ubc_umount(struct mount *mp)
{
	vnode_iterate(mp, 0, ubc_umcallback, 0);
	return(0);
}


/*
 * ubc_umcallback
 *
 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
 * and vnode_iterate() for details of implementation.
 */
static int
ubc_umcallback(vnode_t vp, __unused void * args)
{

	if (UBCINFOEXISTS(vp)) {

		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
	}
	return (VNODE_RETURNED);
}
/*
 * ubc_getcred
 *
 * Get the credentials currently active for the ubc_info associated with the
 * vnode.
 *
 * Parameters:	vp			The vnode whose ubc_info credentials
 *					are to be retrieved
 *
 * Returns:	!NOCRED			The credentials
 *		NOCRED			If there is no ubc_info for the vnode,
 *					or if there is one, but it has not had
 *					any credentials associated with it via
 *					a call to ubc_setcred()
 */
kauth_cred_t
ubc_getcred(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_ucred);

	return (NOCRED);
}
/*
 * ubc_setthreadcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the supplied thread; otherwise leave
 * them alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used, if not running on an assumed
 *					credential
 *		thread			The thread whose credentials are to
 *					be used
 *
 * Returns:	1			This vnode has no associated ubc_info
 *		0			Success
 *
 * Notes:	This function takes a proc parameter to account for bootstrap
 *		issues where a task or thread may call this routine, either
 *		before credentials have been initialized by bsd_init(), or if
 *		there is no BSD info associated with a mach thread yet.  This
 *		is known to happen in both the initial swap and memory mapping
 *		paths.
 *
 *		This function is generally used only in the following cases:
 *
 *		o	a memory mapped file via the mmap() system call
 *		o	a swap store backing file
 *		o	subsequent to a successful write via vn_write()
 *
 *		The information is then used by the NFS client in order to
 *		cons up a wire message in either the page-in or page-out path.
 *
 *		There are two potential problems with the use of this API:
 *
 *		o	Because the write path only sets it on a successful
 *			write, there is a race window between setting the
 *			credential and its use to evict the pages to the
 *			remote file server
 *
 *		o	Because a page-in may occur prior to a write, the
 *			credential may not be set at this time, if the page-in
 *			is not the result of a mapping established via mmap().
 *
 *		In both these cases, this will be triggered from the paging
 *		path, which will instead use the credential of the current
 *		process, which in this case is either the dynamic_pager or
 *		the kernel task, both of which utilize "root" credentials.
 *
 *		This may potentially permit operations to occur which should
 *		be denied, or it may cause to be denied operations which
 *		should be permitted, depending on the configuration of the NFS
 *		server.
 */
int
ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
{
	struct ubc_info *uip;
	kauth_cred_t credp;
	struct uthread	*uthread = get_bsdthread_info(thread);

	if (!UBCINFOEXISTS(vp))
		return (1);

	vnode_lock(vp);

	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;

	if (!IS_VALID_CRED(credp)) {
		/* use per-thread cred, if assumed identity, else proc cred */
		if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
			uip->ui_ucred = kauth_cred_proc_ref(p);
		} else {
			uip->ui_ucred = uthread->uu_ucred;
			kauth_cred_ref(uip->ui_ucred);
		}
	}
	vnode_unlock(vp);

	return (0);
}
/*
 * ubc_setcred
 *
 * If they are not already set, set the credentials of the ubc_info structure
 * associated with the vnode to those of the process; otherwise leave them
 * alone.
 *
 * Parameters:	vp			The vnode whose ubc_info creds are to
 *					be set
 *		p			The process whose credentials are to
 *					be used
 *
 * Returns:	0			This vnode has no associated ubc_info
 *		1			Success
 *
 * Notes:	The return values for this function are inverted from nearly
 *		all other uses in the kernel.
 *
 *		See also ubc_setthreadcred(), above.
 *
 *		This function is considered deprecated, and generally should
 *		not be used, as it is incompatible with per-thread credentials;
 *		it exists for legacy KPI reasons.
 *
 * DEPRECATION:	ubc_setcred() is being deprecated. Please use
 *		ubc_setthreadcred() instead.
 */
int
ubc_setcred(struct vnode *vp, proc_t p)
{
	struct ubc_info *uip;
	kauth_cred_t credp;

	/* If there is no ubc_info, deny the operation */
	if ( !UBCINFOEXISTS(vp))
		return (0);

	/*
	 * Check to see if there is already a credential reference in the
	 * ubc_info; if there is not, take one on the supplied credential.
	 */
	vnode_lock(vp);
	uip = vp->v_ubcinfo;
	credp = uip->ui_ucred;
	if (!IS_VALID_CRED(credp)) {
		uip->ui_ucred = kauth_cred_proc_ref(p);
	}
	vnode_unlock(vp);

	return (1);
}
/*
 * ubc_getpager
 *
 * Get the pager associated with the ubc_info associated with the vnode.
 *
 * Parameters:	vp			The vnode to obtain the pager from
 *
 * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
 *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
 *
 * Notes:	For each vnode that has a ubc_info associated with it, that
 *		ubc_info SHALL have a pager associated with it, so in the
 *		normal case, it's impossible to return VNODE_PAGER_NULL for
 *		a vnode with an associated ubc_info.
 */
__private_extern__ memory_object_t
ubc_getpager(struct vnode *vp)
{
	if (UBCINFOEXISTS(vp))
		return (vp->v_ubcinfo->ui_pager);

	return (0);
}
/*
 * ubc_getobject
 *
 * Get the memory object control associated with the ubc_info associated with
 * the vnode
 *
 * Parameters:	vp			The vnode to obtain the memory object
 *					from
 *		flags			DEPRECATED
 *
 * Returns:	!MEMORY_OBJECT_CONTROL_NULL	Success
 *		MEMORY_OBJECT_CONTROL_NULL	There is no ubc_info for the vnode
 *
 * Notes:	Historically, if the flags were not "do not reactivate", this
 *		function would look up the memory object using the pager if
 *		it did not exist (this could be the case if the vnode had
 *		been previously reactivated).  The flags would also permit a
 *		hold to be requested, which would have created an object
 *		reference, if one had not already existed.  This usage is
 *		deprecated, as it would permit a race between finding and
 *		taking the reference vs. a single reference being dropped in
 *		another thread.
 */
memory_object_control_t
ubc_getobject(struct vnode *vp, __unused int flags)
{
	if (UBCINFOEXISTS(vp))
		return((vp->v_ubcinfo->ui_control));

	return (MEMORY_OBJECT_CONTROL_NULL);
}
boolean_t
ubc_strict_uncached_IO(struct vnode *vp)
{
	boolean_t result = FALSE;

	if (UBCINFOEXISTS(vp)) {
		result = memory_object_is_slid(vp->v_ubcinfo->ui_control);
	}
	return result;
}
/*
 * ubc_blktooff
 *
 * Convert a given block number to a memory backing object (file) offset for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the block is located
 *		blkno			The block number to convert
 *
 * Returns:	!-1			The offset into the backing object
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the block to an
 *					offset; the most likely cause is that
 *					the caller specified a block past the
 *					end of the file, but this could also be
 *					any other error from VNOP_BLKTOOFF().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid offset, since an off_t of -1 is normally
 *		used to represent EOF.  If we had a more reliable constant in
 *		our header files for it (i.e. explicitly cast to an off_t), we
 *		would use it here instead.
 */
off_t
ubc_blktooff(vnode_t vp, daddr64_t blkno)
{
	off_t file_offset = -1;
	int error;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
		if (error)
			file_offset = -1;
	}

	return (file_offset);
}
/*
 * ubc_offtoblk
 *
 * Convert a given offset in a memory backing object into a block number for a
 * given vnode
 *
 * Parameters:	vp			The vnode in which the offset is
 *					located
 *		offset			The offset into the backing object
 *
 * Returns:	!-1			The returned block number
 *		-1			There is no ubc_info associated with
 *					the vnode
 *		-1			An error occurred in the underlying VFS
 *					while translating the offset to a
 *					block number; the most likely cause is
 *					that the caller specified an offset
 *					past the end of the file, but this
 *					could also be any other error from
 *					VNOP_OFFTOBLK().
 *
 * Note:	Representing the error in band loses some information, but does
 *		not occlude a valid block number, since block numbers exceed
 *		the valid range for offsets, due to their relative sizes.  If
 *		we had a more reliable constant than -1 in our header files
 *		for it (i.e. explicitly cast to a daddr64_t), we would use it
 *		here instead.
 */
daddr64_t
ubc_offtoblk(vnode_t vp, off_t offset)
{
	daddr64_t blkno = -1;
	int error = 0;

	if (UBCINFOEXISTS(vp)) {
		error = VNOP_OFFTOBLK(vp, offset, &blkno);
		if (error)
			blkno = -1;
	}

	return (blkno);
}
/*
 * ubc_pages_resident
 *
 * Determine whether or not a given vnode has pages resident via the memory
 * object control associated with the ubc_info associated with the vnode
 *
 * Parameters:	vp			The vnode we want to know about
 *
 * Returns:	1			Yes
 *		0			No
 */
int
ubc_pages_resident(vnode_t vp)
{
	kern_return_t		kret;
	boolean_t		has_pages_resident;

	if (!UBCINFOEXISTS(vp))
		return (0);

	/*
	 * The following call may fail if an invalid ui_control is specified,
	 * or if there is no VM object associated with the control object.  In
	 * either case, reacting to it as if there were no pages resident will
	 * result in correct behavior.
	 */
	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);

	if (kret != KERN_SUCCESS)
		return (0);

	if (has_pages_resident == TRUE)
		return (1);

	return (0);
}
/*
 * ubc_msync
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			See ubc_msync_internal()
 *
 * Returns:	0			Success
 *		!0			Failure; an errno is returned
 *
 * Implicit Returns:
 *		*resid_off, modified	If non-NULL, the contents are ALWAYS
 *					modified; they are initialized to the
 *					beg_off, and in case of an I/O error,
 *					the difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.  If no error is returned, the
 *					value of the resid_off is undefined; do
 *					NOT use it in place of end_off if you
 *					intend to increment from the end of the
 *					last call and call iteratively.
 *
 * Notes:	see ubc_msync_internal() for more detailed information.
 */
errno_t
ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
{
	int retval;
	int io_errno = 0;

	if (resid_off)
		*resid_off = beg_off;

	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);

	if (retval == 0 && io_errno == 0)
		return (EINVAL);
	return (io_errno);
}
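
/*
 * Illustrative caller (not part of the original source): push all dirty
 * pages of the whole file and wait for the I/O, the way fsync-style
 * paths drive this interface.
 */
#if 0	/* example only */
static int
flush_whole_file_example(vnode_t vp)
{
	return ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL,
			 UBC_PUSHDIRTY | UBC_SYNC);
}
#endif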
/*
 * ubc_msync_internal
 *
 * Clean and/or invalidate a range in the memory object that backs this vnode
 *
 * Parameters:	vp			The vnode whose associated ubc_info's
 *					associated memory object is to have a
 *					range invalidated within it
 *		beg_off			The start of the range, as an offset
 *		end_off			The end of the range, as an offset
 *		resid_off		The address of an off_t supplied by the
 *					caller; may be set to NULL to ignore
 *		flags			MUST contain at least one of the flags
 *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
 *					UBC_SYNC may also be specified to cause
 *					this function to block until the
 *					operation is complete.  The behavior
 *					of UBC_SYNC is otherwise undefined.
 *		io_errno		The address of an int to contain the
 *					errno from a failed I/O operation, if
 *					one occurs; may be set to NULL to
 *					ignore
 *
 * Returns:	1			Success
 *		0			Failure
 *
 * Implicit Returns:
 *		*resid_off, modified	The contents of this offset MAY be
 *					modified; in case of an I/O error, the
 *					difference between beg_off and the
 *					current value will reflect what was
 *					able to be written before the error
 *					occurred.
 *		*io_errno, modified	The contents of this offset are set to
 *					an errno, if an error occurs; if the
 *					caller supplies an io_errno parameter,
 *					they should be careful to initialize it
 *					to 0 before calling this function to
 *					enable them to distinguish an error
 *					with a valid *resid_off from an invalid
 *					one, and to avoid potentially falsely
 *					reporting an error, depending on use.
 *
 * Notes:	If there is no ubc_info associated with the vnode supplied,
 *		this function immediately returns success.
 *
 *		If the value of end_off is less than or equal to beg_off, this
 *		function immediately returns success; that is, end_off is NOT
 *		inclusive.
 *
 *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
 *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
 *		attempt to block on in-progress I/O by calling this function
 *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
 *		in order to block pending on the I/O already in progress.
 *
 *		The start offset is truncated to the page boundary and the
 *		size is adjusted to include the last page in the range; that
 *		is, end_off on exactly a page boundary will not change if it
 *		is rounded, and the range of bytes written will be from the
 *		truncated beg_off to the rounded (end_off - 1).
 */
static int
ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
{
	memory_object_size_t	tsize;
	kern_return_t		kret;
	int request_flags = 0;
	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;

	if ( !UBCINFOEXISTS(vp))
		return (0);
	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0)
		return (0);
	if (end_off <= beg_off)
		return (1);

	if (flags & UBC_INVALIDATE)
		/*
		 * discard the resident pages
		 */
		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);

	if (flags & UBC_SYNC)
		/*
		 * wait for all the I/O to complete before returning
		 */
		request_flags |= MEMORY_OBJECT_IO_SYNC;

	if (flags & UBC_PUSHDIRTY)
		/*
		 * we only return the dirty pages in the range
		 */
		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;

	if (flags & UBC_PUSHALL)
		/*
		 * then return all the interesting pages in the range (both
		 * dirty and precious) to the pager
		 */
		flush_flags = MEMORY_OBJECT_RETURN_ALL;

	beg_off = trunc_page_64(beg_off);
	end_off = round_page_64(end_off);
	tsize   = (memory_object_size_t)end_off - beg_off;

	/* flush and/or invalidate pages in the range requested */
	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
					  beg_off, tsize,
					  (memory_object_offset_t *)resid_off,
					  io_errno, flush_flags, request_flags,
					  VM_PROT_NO_CHANGE);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
/*
 * ubc_map
 *
 * Explicitly map a vnode that has an associated ubc_info, and add a reference
 * to it for the ubc system, if there isn't one already, so it will not be
 * recycled while it's in use, and set flags on the ubc_info to indicate that
 * we have done this
 *
 * Parameters:	vp			The vnode to map
 *		flags			The mapping flags for the vnode; this
 *					will be a combination of one or more of
 *					PROT_READ, PROT_WRITE, and PROT_EXEC
 *
 * Returns:	0			Success
 *		EPERM			Permission was denied
 *
 * Notes:	An I/O reference on the vnode must already be held on entry
 *
 *		If there is no ubc_info associated with the vnode, this function
 *		will return success.
 *
 *		If a permission error occurs, this function will return
 *		failure; all other failures will cause this function to return
 *		success.
 *
 *		IMPORTANT: This is an internal use function, and its symbols
 *		are not exported, hence its error checking is not very robust.
 *		It is primarily used by:
 *
 *		o	mmap(), when mapping a file
 *		o	When mapping a shared file (a shared library in the
 *			shared segment region)
 *		o	When loading a program image during the exec process
 *
 *		...all of these uses ignore the return code, and any fault that
 *		results later because of a failure is handled in the fix-up path
 *		of the fault handler.  The interface exists primarily as a
 *		performance hint.
 *
 *		Given that third party implementation of the type of interfaces
 *		that would use this function, such as alternative executable
 *		formats, etc., are unsupported, this function is not exported
 *		for general use.
 *
 *		The extra reference is held until the VM system unmaps the
 *		vnode from its own context to maintain a vnode reference in
 *		cases like open()/mmap()/close(), which leave the backing
 *		object referenced by a mapped memory region in a process
 *		address space.
 */
__private_extern__ int
ubc_map(vnode_t vp, int flags)
{
        struct ubc_info *uip;
        int error = 0;
        int need_ref = 0;
        int need_wakeup = 0;

        if (UBCINFOEXISTS(vp)) {

                vnode_lock(vp);
                uip = vp->v_ubcinfo;

                while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
                        SET(uip->ui_flags, UI_MAPWAITING);
                        (void) msleep(&uip->ui_flags, &vp->v_lock,
                                      PRIBIO, "ubc_map", NULL);
                }
                SET(uip->ui_flags, UI_MAPBUSY);
                vnode_unlock(vp);

                error = VNOP_MMAP(vp, flags, vfs_context_current());

                if (error != EPERM)
                        error = 0;

                vnode_lock_spin(vp);

                if (error == 0) {
                        if ( !ISSET(uip->ui_flags, UI_ISMAPPED))
                                need_ref = 1;
                        SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
                        if (flags & PROT_WRITE) {
                                SET(uip->ui_flags, UI_MAPPEDWRITE);
                        }
                }
                CLR(uip->ui_flags, UI_MAPBUSY);

                if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
                        CLR(uip->ui_flags, UI_MAPWAITING);
                        need_wakeup = 1;
                }
                vnode_unlock(vp);

                if (need_wakeup)
                        wakeup(&uip->ui_flags);

                if (need_ref)
                        vnode_ref(vp);
        }
        return (error);
}
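/*
 * Illustrative sketch (not part of the kernel sources): the UI_MAPBUSY /
 * UI_MAPWAITING handshake above is a general mutual-exclusion pattern
 * built on msleep()/wakeup().  A hypothetical serializer around any
 * per-object flag word, with hypothetical names "obj", "FLAG_BUSY" and
 * "FLAG_WAITING", would look like this:
 */
#if 0	/* illustrative only */
        /* enter: wait until no one else is in the critical section */
        while (ISSET(obj->flags, FLAG_BUSY)) {
                SET(obj->flags, FLAG_WAITING);
                (void) msleep(&obj->flags, &obj->lock, PRIBIO, "example", NULL);
        }
        SET(obj->flags, FLAG_BUSY);

        /* ... critical section, possibly dropping the object lock ... */

        /* exit: clear busy and wake anyone who queued behind us */
        CLR(obj->flags, FLAG_BUSY);
        if (ISSET(obj->flags, FLAG_WAITING)) {
                CLR(obj->flags, FLAG_WAITING);
                wakeup(&obj->flags);
        }
#endif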
/*
 * ubc_destroy_named
 *
 * Destroy the named memory object associated with the ubc_info control object
 * associated with the designated vnode, if there is a ubc_info associated
 * with the vnode, and a control object is associated with it
 *
 * Parameters:	vp			The designated vnode
 *
 * Returns:	(void)
 *
 * Notes:	This function is called on vnode termination for all vnodes,
 *		and must therefore not assume that there is a ubc_info that is
 *		associated with the vnode, nor that there is a control object
 *		associated with the ubc_info.
 *
 *		If all the conditions necessary are present, this function
 *		calls memory_object_destroy(), which will in turn end up
 *		calling ubc_unmap() to release any vnode references that were
 *		established via ubc_map().
 *
 *		IMPORTANT: This is an internal use function that is used
 *		exclusively by the internal use function vclean().
 */
__private_extern__ void
ubc_destroy_named(vnode_t vp)
{
        memory_object_control_t control;
        struct ubc_info *uip;
        kern_return_t kret;

        if (UBCINFOEXISTS(vp)) {
                uip = vp->v_ubcinfo;

                /* Terminate the memory object  */
                control = ubc_getobject(vp, UBC_HOLDOBJECT);
                if (control != MEMORY_OBJECT_CONTROL_NULL) {
                        kret = memory_object_destroy(control, 0);
                        if (kret != KERN_SUCCESS)
                                panic("ubc_destroy_named: memory_object_destroy failed");
                }
        }
}
/*
 * ubc_isinuse
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	Because the vnode is only held locked while actually asking
 *		the use count, this function only represents a snapshot of the
 *		current state of the vnode.  If more accurate information is
 *		required, an additional busycount should be held by the caller
 *		and a non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse(struct vnode *vp, int busycount)
{
        if ( !UBCINFOEXISTS(vp))
                return (0);
        return(ubc_isinuse_locked(vp, busycount, 0));
}
/*
 * ubc_isinuse_locked
 *
 * Determine whether or not a vnode is currently in use by ubc at a level in
 * excess of the requested busycount
 *
 * Parameters:	vp			The vnode to check
 *		busycount		The threshold busy count, used to bias
 *					the count usually already held by the
 *					caller to avoid races
 *		locked			True if the vnode is already locked by
 *					the caller
 *
 * Returns:	1			The vnode is in use over the threshold
 *		0			The vnode is not in use over the
 *					threshold
 *
 * Notes:	If the vnode is not locked on entry, it is locked while
 *		actually asking the use count.  If this is the case, this
 *		function only represents a snapshot of the current state of
 *		the vnode.  If more accurate information is required, the
 *		vnode lock should be held by the caller, otherwise an
 *		additional busycount should be held by the caller and a
 *		non-zero busycount used.
 *
 *		If there is no ubc_info associated with the vnode, this
 *		function will report that the vnode is not in use by ubc.
 */
int
ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
{
        int retval = 0;

        if (!locked)
                vnode_lock_spin(vp);

        if ((vp->v_usecount - vp->v_kusecount) > busycount)
                retval = 1;

        if (!locked)
                vnode_unlock(vp);
        return (retval);
}
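/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * filesystem that wants to refuse an operation while anyone else has the
 * file open or mapped can bias the threshold by the one use count the
 * caller itself holds:
 */
#if 0	/* illustrative only */
static int
example_check_exclusive(vnode_t vp)
{
        /* "1" discounts the caller's own use count on the vnode */
        if (ubc_isinuse(vp, 1))
                return (EBUSY);
        return (0);
}
#endif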
/*
 * ubc_unmap
 *
 * Reverse the effects of a ubc_map() call for a given vnode
 *
 * Parameters:	vp			vnode to unmap from ubc
 *
 * Returns:	(void)
 *
 * Notes:	This is an internal use function used by vnode_pager_unmap().
 *		It will attempt to obtain a reference on the supplied vnode,
 *		and if it can do so, and there is an associated ubc_info, and
 *		the flags indicate that it was mapped via ubc_map(), then the
 *		flag is cleared, the mapping removed, and the reference taken
 *		by ubc_map() is released.
 *
 *		IMPORTANT: This MUST only be called by the VM
 *		to prevent race conditions.
 */
__private_extern__ void
ubc_unmap(struct vnode *vp)
{
        struct ubc_info *uip;
        int     need_rele = 0;
        int     need_wakeup = 0;

        if (vnode_getwithref(vp))
                return;

        if (UBCINFOEXISTS(vp)) {
                bool want_fsevent = false;

                vnode_lock(vp);
                uip = vp->v_ubcinfo;

                while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
                        SET(uip->ui_flags, UI_MAPWAITING);
                        (void) msleep(&uip->ui_flags, &vp->v_lock,
                                      PRIBIO, "ubc_unmap", NULL);
                }
                SET(uip->ui_flags, UI_MAPBUSY);

                if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
                        if (ISSET(uip->ui_flags, UI_MAPPEDWRITE))
                                want_fsevent = true;

                        need_rele = 1;

                        /*
                         * We want to clear the mapped flags after we've called
                         * VNOP_MNOMAP to avoid certain races and allow
                         * VNOP_MNOMAP to call ubc_is_mapped_writable.
                         */
                }
                vnode_unlock(vp);

                if (need_rele) {
                        vfs_context_t ctx = vfs_context_current();

                        (void)VNOP_MNOMAP(vp, ctx);

#if CONFIG_FSE
                        /*
                         * Why do we want an fsevent here?  Normally the
                         * content modified fsevent is posted when a file is
                         * closed and only if it's written to via conventional
                         * means.  It's perfectly legal to close a file and
                         * keep your mappings and we don't currently track
                         * whether it was written to via a mapping.
                         * Therefore, we need to post an fsevent here if the
                         * file was mapped writable.  This may result in false
                         * events, i.e. we post a notification when nothing
                         * has really changed.
                         */
                        if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
                                add_fsevent(FSE_CONTENT_MODIFIED, ctx,
                                            FSE_ARG_VNODE, vp,
                                            FSE_ARG_DONE);
                        }
#endif

                        vnode_rele(vp);
                }

                vnode_lock_spin(vp);

                if (need_rele)
                        CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);

                CLR(uip->ui_flags, UI_MAPBUSY);

                if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
                        CLR(uip->ui_flags, UI_MAPWAITING);
                        need_wakeup = 1;
                }
                vnode_unlock(vp);

                if (need_wakeup)
                        wakeup(&uip->ui_flags);

        }
        /*
         * the drop of the vnode ref will cleanup
         */
        vnode_put(vp);
}
/*
 * ubc_page_op
 *
 * Manipulate individual page state for a vnode with an associated ubc_info
 * with an associated memory object control.
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset		A file offset interior to the page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		phys_entryp		The address of a ppnum_t; may be NULL
 *					to ignore
 *		flagsp			A pointer to an int to contain flags;
 *					may be NULL to ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
 *					not physically contiguous
 *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
 *					physically contiguous
 *		KERN_FAILURE		If the page cannot be looked up
 *
 * Implicit Returns:
 *		*phys_entryp (modified)	If phys_entryp is non-NULL and
 *					UPL_POP_PHYSICAL
 *		*flagsp (modified)	If flagsp is non-NULL and there was
 *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
 *
 * Notes:	For object boundaries, it is considerably more efficient to
 *		ensure that f_offset is in fact on a page boundary, as this
 *		will avoid internal use of the hash table to identify the
 *		page, and would therefore skip a number of early optimizations.
 *		Since this is a page operation anyway, the caller should try
 *		to pass only a page aligned offset because of this.
 *
 *		*flagsp may be modified even if this function fails.  If it is
 *		modified, it will contain the condition of the page before the
 *		requested operation was attempted; these will only include the
 *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
 *		UPL_POP_SET, or UPL_POP_CLR bits.
 *
 *		The flags field may contain a specific operation, such as
 *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
 *
 *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
 *						*phys_entryp and successful, set
 *						*phys_entryp
 *		o	UPL_POP_DUMP		Dump the specified page
 *
 *		Otherwise, it is treated as a bitmap of one or more page
 *		operations to perform on the final memory object; allowable
 *		bit values are:
 *
 *		o	UPL_POP_DIRTY		The page is dirty
 *		o	UPL_POP_PAGEOUT		The page is paged out
 *		o	UPL_POP_PRECIOUS	The page is precious
 *		o	UPL_POP_ABSENT		The page is absent
 *		o	UPL_POP_BUSY		The page is busy
 *
 *		If the page status is only being queried and not modified, then
 *		no other bits should be specified.  However, if it is being
 *		modified, exactly ONE of the following bits should be set:
 *
 *		o	UPL_POP_SET		Set the current bitmap bits
 *		o	UPL_POP_CLR		Clear the current bitmap bits
 *
 *		Thus to effect a combination of setting and clearing, it may be
 *		necessary to call this function twice.  If this is done, the
 *		set should be used before the clear, since clearing may trigger
 *		a wakeup on the destination page, and if the page is backed by
 *		an encrypted swap file, setting will trigger the decryption
 *		needed before the wakeup occurs.
 */
kern_return_t
ubc_page_op(
	struct vnode	*vp,
	off_t		f_offset,
	int		ops,
	ppnum_t		*phys_entryp,
	int		*flagsp)
{
        memory_object_control_t		control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_page_op(control,
                                      (memory_object_offset_t)f_offset,
                                      ops,
                                      phys_entryp,
                                      flagsp));
}
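/*
 * Illustrative sketch (hypothetical, not part of this file): a pure query
 * of the page containing f_offset; with neither UPL_POP_SET nor
 * UPL_POP_CLR in "ops", nothing is modified and *flagsp reports the
 * page's current state:
 */
#if 0	/* illustrative only */
static boolean_t
example_page_is_dirty(vnode_t vp, off_t f_offset)
{
        int     page_flags = 0;

        if (ubc_page_op(vp, f_offset, 0, NULL, &page_flags) != KERN_SUCCESS)
                return FALSE;           /* page could not be looked up */
        return (page_flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}
#endif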
/*
 * ubc_range_op
 *
 * Manipulate page state for a range of memory for a vnode with an associated
 * ubc_info with an associated memory object control, when page level state is
 * not required to be returned from the call (i.e. there are no phys_entryp or
 * flagsp parameters to this call, and it takes a range which may contain
 * multiple pages, rather than an offset interior to a single page).
 *
 * Parameters:	vp			The vnode backing the page
 *		f_offset_beg		A file offset interior to the start page
 *		f_offset_end		A file offset interior to the end page
 *		ops			The operations to perform, as a bitmap
 *					(see below for more information)
 *		range			The address of an int; may be NULL to
 *					ignore
 *
 * Returns:	KERN_SUCCESS		Success
 *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
 *					object associated
 *		KERN_INVALID_OBJECT	If the object is physically contiguous
 *
 * Implicit Returns:
 *		*range (modified)	If range is non-NULL, its contents will
 *					be modified to contain the number of
 *					bytes successfully operated upon.
 *
 * Notes:	IMPORTANT: This function cannot be used on a range that
 *		consists of physically contiguous pages.
 *
 *		For object boundaries, it is considerably more efficient to
 *		ensure that f_offset_beg and f_offset_end are in fact on page
 *		boundaries, as this will avoid internal use of the hash table
 *		to identify the page, and would therefore skip a number of
 *		early optimizations.  Since this is an operation on a set of
 *		pages anyway, the caller should try to pass only page aligned
 *		offsets because of this.
 *
 *		*range will be modified only if this function succeeds.
 *
 *		The flags field MUST contain a specific operation; allowable
 *		values are:
 *
 *		o	UPL_ROP_ABSENT	Returns the extent of the range
 *					presented which is absent, starting
 *					with the start address presented
 *
 *		o	UPL_ROP_PRESENT	Returns the extent of the range
 *					presented which is present (resident),
 *					starting with the start address
 *					presented
 *
 *		o	UPL_ROP_DUMP	Dump the pages which are found in the
 *					target object for the target range.
 *
 *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
 *		multiple regions in the range, only the first matching region
 *		is returned.
 */
kern_return_t
ubc_range_op(
	struct vnode	*vp,
	off_t		f_offset_beg,
	off_t		f_offset_end,
	int		ops,
	int		*range)
{
        memory_object_control_t		control;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        return (memory_object_range_op(control,
                                       (memory_object_offset_t)f_offset_beg,
                                       (memory_object_offset_t)f_offset_end,
                                       ops,
                                       range));
}
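/*
 * Illustrative sketch (hypothetical, not part of this file): asking how
 * many bytes at the start of a range are already resident, e.g. to decide
 * how much of a read can be satisfied without issuing I/O:
 */
#if 0	/* illustrative only */
static int
example_resident_prefix(vnode_t vp, off_t start, off_t end)
{
        int     bytes = 0;

        if (ubc_range_op(vp, start, end, UPL_ROP_PRESENT, &bytes) != KERN_SUCCESS)
                return (0);
        return (bytes);         /* first "bytes" of the range are cached */
}
#endif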
/*
 * ubc_create_upl
 *
 * Given a vnode, cause the population of a portion of the vm_object; based on
 * the nature of the request, the pages returned may contain valid data, or
 * they may be uninitialized.
 *
 * Parameters:	vp			The vnode from which to create the upl
 *		f_offset		The start offset into the backing store
 *					represented by the vnode
 *		bufsize			The size of the upl to create
 *		uplp			Pointer to the upl_t to receive the
 *					created upl; MUST NOT be NULL
 *		plp			Pointer to receive the internal page
 *					list for the created upl; MAY be NULL
 *					to ignore
 *
 * Returns:	KERN_SUCCESS		The requested upl has been created
 *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
 *					multiple of the page size
 *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
 *					the vnode, or there is no memory object
 *					control associated with the ubc_info
 *	memory_object_upl_request:KERN_INVALID_VALUE
 *					The supplied upl_flags argument is
 *					invalid
 *
 * Implicit Returns:
 *		*uplp (modified)
 *		*plp (modified)		If non-NULL, the value of *plp will be
 *					modified to point to the internal page
 *					list; this modification may occur even
 *					if this function is unsuccessful, in
 *					which case the contents may be invalid
 *
 * Note:	If successful, the returned *uplp MUST subsequently be freed
 *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
 *		ubc_upl_abort(), or ubc_upl_abort_range().
 */
kern_return_t
ubc_create_upl(
	struct vnode	*vp,
	off_t		f_offset,
	int		bufsize,
	upl_t		*uplp,
	upl_page_info_t	**plp,
	int		uplflags)
{
        memory_object_control_t		control;
        kern_return_t			kr;

        if (plp != NULL)
                *plp = NULL;
        *uplp = NULL;

        if (bufsize & 0xfff)
                return KERN_INVALID_ARGUMENT;

        if (bufsize > MAX_UPL_SIZE_BYTES)
                return KERN_INVALID_ARGUMENT;

        if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {

                if (uplflags & UPL_UBC_MSYNC) {
                        uplflags &= UPL_RET_ONLY_DIRTY;

                        uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
                                    UPL_SET_INTERNAL | UPL_SET_LITE;

                } else if (uplflags & UPL_UBC_PAGEOUT) {
                        uplflags &= UPL_RET_ONLY_DIRTY;

                        if (uplflags & UPL_RET_ONLY_DIRTY)
                                uplflags |= UPL_NOBLOCK;

                        uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
                                    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
                } else {
                        uplflags |= UPL_RET_ONLY_ABSENT |
                                    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
                                    UPL_SET_INTERNAL | UPL_SET_LITE;

                        /*
                         * if the requested size == PAGE_SIZE, we don't want to set
                         * the UPL_NOBLOCK since we may be trying to recover from a
                         * previous partial pagein I/O that occurred because we were low
                         * on memory and bailed early in order to honor the UPL_NOBLOCK...
                         * since we're only asking for a single page, we can block w/o fear
                         * of tying up pages while waiting for more to become available
                         */
                        if (bufsize > PAGE_SIZE)
                                uplflags |= UPL_NOBLOCK;
                }
        } else {
                uplflags &= ~UPL_FOR_PAGEOUT;

                if (uplflags & UPL_WILL_BE_DUMPED) {
                        uplflags &= ~UPL_WILL_BE_DUMPED;
                        uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL);
                } else
                        uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL);
        }
        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags);
        if (kr == KERN_SUCCESS && plp != NULL)
                *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
        return kr;
}
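/*
 * Illustrative sketch (hypothetical pagein-style caller, not part of this
 * file): the usual life cycle of a upl created here is create -> map ->
 * do I/O through the kernel mapping -> unmap -> commit or abort:
 */
#if 0	/* illustrative only */
static int
example_pagein_one_page(vnode_t vp, off_t f_offset)
{
        upl_t           upl;
        vm_offset_t     kva;

        if (ubc_create_upl(vp, f_offset, PAGE_SIZE, &upl, NULL,
                           UPL_UBC_PAGEIN) != KERN_SUCCESS)
                return (EIO);

        if (ubc_upl_map(upl, &kva) != KERN_SUCCESS) {
                (void) ubc_upl_abort_range(upl, 0, PAGE_SIZE,
                                           UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
                return (EIO);
        }
        /* ... fill the page at "kva" from the backing store ... */
        (void) ubc_upl_unmap(upl);
        (void) ubc_upl_commit_range(upl, 0, PAGE_SIZE,
                                    UPL_COMMIT_FREE_ON_EMPTY);
        return (0);
}
#endif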
/*
 * ubc_upl_maxbufsize
 *
 * Return the maximum bufsize ubc_create_upl( ) will take.
 *
 * Parameters:	(void)
 *
 * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
 */
upl_size_t
ubc_upl_maxbufsize(
	void)
{
        return(MAX_UPL_SIZE_BYTES);
}
/*
 * ubc_upl_map
 *
 * Map the page list associated with the supplied upl into the kernel virtual
 * address space at the virtual address indicated by the dst_addr argument;
 * the entire upl is mapped
 *
 * Parameters:	upl			The upl to map
 *		dst_addr		The address at which to map the upl
 *
 * Returns:	KERN_SUCCESS		The upl has been mapped
 *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
 *		KERN_FAILURE		The upl is already mapped
 *	vm_map_enter:KERN_INVALID_ARGUMENT
 *					A failure code from vm_map_enter() due
 *					to an invalid argument
 */
kern_return_t
ubc_upl_map(
	upl_t		upl,
	vm_offset_t	*dst_addr)
{
        return (vm_upl_map(kernel_map, upl, dst_addr));
}
/*
 * ubc_upl_unmap
 *
 * Unmap the page list associated with the supplied upl from the kernel virtual
 * address space; the entire upl is unmapped.
 *
 * Parameters:	upl			The upl to unmap
 *
 * Returns:	KERN_SUCCESS		The upl has been unmapped
 *		KERN_FAILURE		The upl is not currently mapped
 *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
 */
kern_return_t
ubc_upl_unmap(
	upl_t	upl)
{
        return(vm_upl_unmap(kernel_map, upl));
}
/*
 * ubc_upl_commit
 *
 * Commit the contents of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *
 * Returns:	KERN_SUCCESS		The upl has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	In practice, the only return value for this function should be
 *		KERN_SUCCESS, unless there has been data structure corruption;
 *		since the upl is deallocated regardless of success or failure,
 *		there's really nothing to do about this other than panic.
 *
 *		IMPORTANT: Use of this function should not be mixed with use of
 *		ubc_upl_commit_range(), due to the unconditional deallocation
 *		by this function.
 */
kern_return_t
ubc_upl_commit(
	upl_t			upl)
{
        upl_page_info_t	*pl;
        kern_return_t	kr;

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
        kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
        upl_deallocate(upl);
        return kr;
}
/*
 * ubc_upl_commit_range
 *
 * Commit the contents of the specified range of the upl to the backing store
 *
 * Parameters:	upl			The upl to commit
 *		offset			The offset into the upl
 *		size			The size of the region to be committed,
 *					starting at the specified offset
 *		flags			commit type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been committed
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the commit is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The flags argument is a bitmap of flags on the range of pages in
 *		the upl to be committed; allowable flags are:
 *
 *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
 *							both empty and has been
 *							successfully committed
 *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each page's dirty
 *							bit; will prevent a
 *							later pageout
 *		o	UPL_COMMIT_SET_DIRTY		Set each page's dirty
 *							bit; will cause a later
 *							pageout
 *		o	UPL_COMMIT_INACTIVATE		Clear each page's
 *							reference bit; the page
 *							will not be accessed
 *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
 *							become busy when an
 *							IOMemoryDescriptor is
 *							mapped or redirected,
 *							and we have to wait for
 *							the I/O to complete
 *
 *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
 *		not be specified by the caller.
 *
 *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
 *		mutually exclusive, and should not be combined.
 */
kern_return_t
ubc_upl_commit_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			flags)
{
        upl_page_info_t	*pl;
        boolean_t	empty;
        kern_return_t	kr;

        if (flags & UPL_COMMIT_FREE_ON_EMPTY)
                flags |= UPL_COMMIT_NOTIFY_EMPTY;

        if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
                return KERN_INVALID_ARGUMENT;
        }

        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

        kr = upl_commit_range(upl, offset, size, flags,
                              pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);

        if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);

        return kr;
}
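/*
 * Illustrative sketch (hypothetical, not part of this file): a caller
 * that filled pages and wants them written back later, rather than
 * cleaned now, commits with UPL_COMMIT_SET_DIRTY; adding
 * UPL_COMMIT_FREE_ON_EMPTY here assumes offset 0 and the full upl size,
 * per the note above ("upl" and "upl_size" are assumed caller state):
 */
#if 0	/* illustrative only */
        (void) ubc_upl_commit_range(upl, 0, upl_size,
                                    UPL_COMMIT_SET_DIRTY |
                                    UPL_COMMIT_FREE_ON_EMPTY);
#endif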
/*
 * ubc_upl_abort_range
 *
 * Abort the contents of the specified range of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		offset			The offset into the upl
 *		size			The size of the region to be aborted,
 *					starting at the specified offset
 *		abort_flags		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_flags argument is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o	UPL_ABORT_RESTART	The operation must be restarted
 *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
 *		o	UPL_ABORT_ERROR		An I/O error occurred
 *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
 *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort_range(
	upl_t			upl,
	upl_offset_t		offset,
	upl_size_t		size,
	int			abort_flags)
{
        kern_return_t	kr;
        boolean_t	empty = FALSE;

        if (abort_flags & UPL_ABORT_FREE_ON_EMPTY)
                abort_flags |= UPL_ABORT_NOTIFY_EMPTY;

        kr = upl_abort_range(upl, offset, size, abort_flags, &empty);

        if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty)
                upl_deallocate(upl);

        return kr;
}
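/*
 * Illustrative sketch (hypothetical, not part of this file): a partially
 * failed read typically commits the pages it managed to fill and aborts
 * the remainder with UPL_ABORT_ERROR ("upl", "upl_size" and a page aligned
 * "valid_bytes" are assumed caller state in this sketch):
 */
#if 0	/* illustrative only */
        (void) ubc_upl_commit_range(upl, 0, valid_bytes,
                                    UPL_COMMIT_FREE_ON_EMPTY);
        (void) ubc_upl_abort_range(upl, valid_bytes, upl_size - valid_bytes,
                                   UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
#endif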
/*
 * ubc_upl_abort
 *
 * Abort the contents of the specified upl
 *
 * Parameters:	upl			The upl to abort
 *		abort_type		abort type (see below)
 *
 * Returns:	KERN_SUCCESS		The range has been aborted
 *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
 *		KERN_FAILURE		The supplied upl does not represent
 *					device memory, and the offset plus the
 *					size would exceed the actual size of
 *					the upl
 *
 * Notes:	IMPORTANT: If the abort is successful, and the object is now
 *		empty, the upl will be deallocated.  Since the caller cannot
 *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
 *		should generally only be used when the offset is 0 and the size
 *		is equal to the upl size.
 *
 *		The abort_type is a bitmap of flags on the range of
 *		pages in the upl to be aborted; allowable flags are:
 *
 *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
 *						empty and has been successfully
 *						aborted
 *		o	UPL_ABORT_RESTART	The operation must be restarted
 *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
 *		o	UPL_ABORT_ERROR		An I/O error occurred
 *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
 *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
 *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
 *
 *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
 *		not be specified by the caller.  It is intended to fulfill the
 *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
 *		ubc_upl_commit_range(), but is never referenced internally.
 *
 *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
 *		referenced; do not use it.
 */
kern_return_t
ubc_upl_abort(
	upl_t			upl,
	int			abort_type)
{
        kern_return_t	kr;

        kr = upl_abort(upl, abort_type);
        upl_deallocate(upl);
        return kr;
}
/*
 * ubc_upl_pageinfo
 *
 *  Retrieve the internal page list for the specified upl
 *
 * Parameters:	upl			The upl to obtain the page list from
 *
 * Returns:	!NULL			The (upl_page_info_t *) for the page
 *					list internal to the upl
 *		NULL			Error/no page list associated
 *
 * Notes:	IMPORTANT: The function is only valid on internal objects
 *		where the list request was made with the UPL_INTERNAL flag.
 *
 *		This function is a utility helper function, since some callers
 *		may not have direct access to the header defining the macro,
 *		due to abstraction layering constraints.
 */
upl_page_info_t *
ubc_upl_pageinfo(
	upl_t		upl)
{
        return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}
int
UBCINFOEXISTS(const struct vnode * vp)
{
        return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL));
}


void
ubc_upl_range_needed(
	upl_t		upl,
	int		index,
	int		count)
{
        upl_range_needed(upl, index, count);
}

boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
{
        if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
                return FALSE;
        if (writable)
                *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
        return TRUE;
}

boolean_t ubc_is_mapped_writable(const struct vnode *vp)
{
        boolean_t writable;
        return ubc_is_mapped(vp, &writable) && writable;
}
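/*
 * Illustrative sketch (hypothetical, not part of this file): per the
 * comment in ubc_unmap() above, a filesystem's VNOP_MNOMAP handler can
 * still see the mapped flags and may use this to decide whether the file
 * contents should be treated as modified:
 */
#if 0	/* illustrative only */
        if (ubc_is_mapped_writable(vp)) {
                /* the file was mapped writable: assume it may have changed */
        }
#endif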
/*
 * CODE SIGNING
 */
#define CS_BLOB_PAGEABLE 0
static volatile SInt32 cs_blob_size = 0;
static volatile SInt32 cs_blob_count = 0;
static SInt32 cs_blob_size_peak = 0;
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;
static volatile SInt32 cs_blob_generation_count = 1;

int cs_validation = 1;

#ifndef SECURE_KERNEL
SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
#endif
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
kern_return_t
ubc_cs_blob_allocate(
	vm_offset_t	*blob_addr_p,
	vm_size_t	*blob_size_p)
{
        kern_return_t	kr;

#if CS_BLOB_PAGEABLE
        *blob_size_p = round_page(*blob_size_p);
        kr = kmem_alloc(kernel_map, blob_addr_p, *blob_size_p);
#else	/* CS_BLOB_PAGEABLE */
        *blob_addr_p = (vm_offset_t) kalloc(*blob_size_p);
        if (*blob_addr_p == 0) {
                kr = KERN_NO_SPACE;
        } else {
                kr = KERN_SUCCESS;
        }
#endif	/* CS_BLOB_PAGEABLE */
        return kr;
}

void
ubc_cs_blob_deallocate(
	vm_offset_t	blob_addr,
	vm_size_t	blob_size)
{
#if CS_BLOB_PAGEABLE
        kmem_free(kernel_map, blob_addr, blob_size);
#else	/* CS_BLOB_PAGEABLE */
        kfree((void *) blob_addr, blob_size);
#endif	/* CS_BLOB_PAGEABLE */
}
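/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * allocate/deallocate pair above must be used together, since they agree
 * on whether the blob lives in pageable kmem or kalloc memory.  A loader
 * handing a signature to ubc_cs_blob_add(), defined below, might do the
 * following ("signature_size", "cputype" and "base_offset" are assumed
 * caller state):
 */
#if 0	/* illustrative only */
        vm_offset_t     blob_addr;
        vm_size_t       blob_size = signature_size;

        if (ubc_cs_blob_allocate(&blob_addr, &blob_size) != KERN_SUCCESS)
                return (ENOMEM);
        /* ... copy the signature bytes into blob_addr ... */
        if (ubc_cs_blob_add(vp, cputype, base_offset, blob_addr, blob_size)) {
                /* on failure the caller still owns the memory: free it */
                ubc_cs_blob_deallocate(blob_addr, blob_size);
        }
#endif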
int
ubc_cs_sigpup_add(
	struct vnode	*vp,
	vm_address_t	address,
	vm_size_t	size)
{
        kern_return_t		kr;
        struct ubc_info		*uip;
        struct cs_blob		*blob;
        memory_object_control_t	control;
        const CS_CodeDirectory	*cd;
        int			error;

        control = ubc_getobject(vp, UBC_FLAGS_NONE);
        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        if (memory_object_is_signed(control))
                return 0;

        blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
        if (blob == NULL)
                return ENOMEM;

        /* fill in the new blob */
        blob->csb_cpu_type = CPU_TYPE_ANY;
        blob->csb_base_offset = 0;
        blob->csb_mem_size = size;
        blob->csb_mem_offset = 0;
        blob->csb_mem_handle = IPC_PORT_NULL;
        blob->csb_mem_kaddr = address;
        blob->csb_sigpup = 1;
        blob->csb_platform_binary = 0;
        blob->csb_teamid = NULL;

        /*
         * Validate the blob's contents
         */
        cd = findCodeDirectory(
                (const CS_SuperBlob *) address,
                (char *) address,
                (char *) address + blob->csb_mem_size);
        if (cd == NULL) {
                /* no code directory => useless blob ! */
                error = EINVAL;
                goto out;
        }

        blob->csb_flags = ntohl(cd->flags) | CS_VALID;
        blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
        if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
                const SC_Scatter *scatter = (const SC_Scatter*)
                    ((const char*)cd + ntohl(cd->scatterOffset));
                blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
        } else {
                blob->csb_start_offset = (blob->csb_end_offset - (ntohl(cd->nCodeSlots) * PAGE_SIZE_4K));
        }

        /*
         * We don't need to check with the policy module, since the input data is supposed to be already checked
         */

        vnode_lock(vp);
        if (! UBCINFOEXISTS(vp)) {
                vnode_unlock(vp);
                if (cs_debug)
                        printf("out ubc object\n");
                error = ENOENT;
                goto out;
        }
        uip = vp->v_ubcinfo;

        /* someone raced us to adding the code directory */
        if (uip->cs_blobs != NULL) {
                if (cs_debug)
                        printf("sigpup: vnode already have CD ?\n");
                vnode_unlock(vp);
                error = EEXIST;
                goto out;
        }

        blob->csb_next = uip->cs_blobs;
        uip->cs_blobs = blob;

        OSAddAtomic(+1, &cs_blob_count);
        OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);

        /* mark this vnode's VM object as having "signed pages" */
        kr = memory_object_signed(uip->ui_control, TRUE);
        if (kr != KERN_SUCCESS) {
                vnode_unlock(vp);
                if (cs_debug)
                        printf("sigpup: not signable ?\n");
                error = ENOENT;
                goto out;
        }

        vnode_unlock(vp);

        error = 0;
out:
        if (error) {
                if (cs_debug)
                        printf("sigpup: not signable ?\n");
                /* we failed; release what we allocated */
                if (blob) {
                        kfree(blob, sizeof (*blob));
                        blob = NULL;
                }
        }

        return error;
}
int
ubc_cs_blob_add(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		base_offset,
	vm_address_t	addr,
	vm_size_t	size)
{
        kern_return_t		kr;
        struct ubc_info		*uip;
        struct cs_blob		*blob, *oblob;
        int			error;
        ipc_port_t		blob_handle;
        memory_object_size_t	blob_size;
        const CS_CodeDirectory	*cd;
        off_t			blob_start_offset, blob_end_offset;
        SHA1_CTX		sha1ctxt;
        boolean_t		record_mtime;
        int			is_platform_binary;

        record_mtime = FALSE;
        is_platform_binary = 0;

        blob_handle = IPC_PORT_NULL;

        blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob));
        if (blob == NULL) {
                return ENOMEM;
        }

#if CS_BLOB_PAGEABLE
        /* get a memory entry on the blob */
        blob_size = (memory_object_size_t) size;
        kr = mach_make_memory_entry_64(kernel_map,
                                       &blob_size,
                                       addr,
                                       VM_PROT_READ,
                                       &blob_handle,
                                       IPC_PORT_NULL);
        if (kr != KERN_SUCCESS) {
                error = ENOMEM;
                goto out;
        }
        if (memory_object_round_page(blob_size) !=
            (memory_object_size_t) round_page(size)) {
                printf("ubc_cs_blob_add: size mismatch 0x%llx 0x%lx !?\n",
                       blob_size, (size_t)size);
                panic("XXX FBDP size mismatch 0x%llx 0x%lx\n", blob_size, (size_t)size);
                error = EINVAL;
                goto out;
        }
#else
        blob_size = (memory_object_size_t) size;
        blob_handle = IPC_PORT_NULL;
#endif
        /* fill in the new blob */
        blob->csb_cpu_type = cputype;
        blob->csb_sigpup = 0;
        blob->csb_base_offset = base_offset;
        blob->csb_mem_size = size;
        blob->csb_mem_offset = 0;
        blob->csb_mem_handle = blob_handle;
        blob->csb_mem_kaddr = addr;
        blob->csb_flags = 0;
        blob->csb_platform_binary = 0;
        blob->csb_teamid = NULL;

        /*
         * Validate the blob's contents
         */

        error = cs_validate_csblob((const uint8_t *)addr, size, &cd);
        if (error) {
                if (cs_debug)
                        printf("CODESIGNING: csblob invalid: %d\n", error);
                blob->csb_flags = 0;
                blob->csb_start_offset = 0;
                blob->csb_end_offset = 0;
                memset(blob->csb_sha1, 0, SHA1_RESULTLEN);
                /* let the vnode checker determine if the signature is valid or not */
        } else {
                const unsigned char *sha1_base;
                int sha1_size;

                blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
                blob->csb_end_offset = round_page_4K(ntohl(cd->codeLimit));
                if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
                        const SC_Scatter *scatter = (const SC_Scatter*)
                                ((const char*)cd + ntohl(cd->scatterOffset));
                        blob->csb_start_offset = ntohl(scatter->base) * PAGE_SIZE_4K;
                } else {
                        blob->csb_start_offset = (blob->csb_end_offset -
                                                  (ntohl(cd->nCodeSlots) * PAGE_SIZE_4K));
                }
                /* compute the blob's SHA1 hash */
                sha1_base = (const unsigned char *) cd;
                sha1_size = ntohl(cd->length);
                SHA1Init(&sha1ctxt);
                SHA1Update(&sha1ctxt, sha1_base, sha1_size);
                SHA1Final(blob->csb_sha1, &sha1ctxt);
        }

        /*
         * Let policy module check whether the blob's signature is accepted.
         */
#if CONFIG_MACF
        error = mac_vnode_check_signature(vp, base_offset, blob->csb_sha1, (const void*)cd, size, &is_platform_binary);
        if (error) {
                if (cs_debug)
                        printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
                goto out;
        }
#endif

        if (is_platform_binary) {
                if (cs_debug > 1)
                        printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
                blob->csb_platform_binary = 1;
        } else {
                blob->csb_platform_binary = 0;
                blob->csb_teamid = csblob_get_teamid(blob);
                if (cs_debug > 1) {
                        if (blob->csb_teamid)
                                printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
                        else
                                printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
                }
        }
        /*
         * Validate the blob's coverage
         */
        blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
        blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;

        if (blob_start_offset >= blob_end_offset ||
            blob_start_offset < 0 ||
            blob_end_offset <= 0) {
                /* reject empty or backwards blob */
                error = EINVAL;
                goto out;
        }

        vnode_lock(vp);
        if (! UBCINFOEXISTS(vp)) {
                vnode_unlock(vp);
                error = ENOENT;
                goto out;
        }
        uip = vp->v_ubcinfo;

        /* check if this new blob overlaps with an existing blob */
        for (oblob = uip->cs_blobs;
             oblob != NULL;
             oblob = oblob->csb_next) {
                 off_t oblob_start_offset, oblob_end_offset;

                 /* check for conflicting teamid */
                 if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
                         if (!oblob->csb_platform_binary) {
                                 vnode_unlock(vp);
                                 error = EALREADY;
                                 goto out;
                         }
                 } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
                        if (oblob->csb_platform_binary ||
                            oblob->csb_teamid == NULL ||
                            strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
                                vnode_unlock(vp);
                                error = EALREADY;
                                goto out;
                        }
                 } else { // non teamid binary needs to be the same for app slices
                        if (oblob->csb_platform_binary ||
                                oblob->csb_teamid != NULL) {
                                vnode_unlock(vp);
                                error = EALREADY;
                                goto out;
                        }
                 }

                 oblob_start_offset = (oblob->csb_base_offset +
                                       oblob->csb_start_offset);
                 oblob_end_offset = (oblob->csb_base_offset +
                                     oblob->csb_end_offset);
                 if (blob_start_offset >= oblob_end_offset ||
                     blob_end_offset <= oblob_start_offset) {
                         /* no conflict with this existing blob */
                 } else {
                         /* conflict ! */
                         if (blob_start_offset == oblob_start_offset &&
                             blob_end_offset == oblob_end_offset &&
                             blob->csb_mem_size == oblob->csb_mem_size &&
                             blob->csb_flags == oblob->csb_flags &&
                             (blob->csb_cpu_type == CPU_TYPE_ANY ||
                              oblob->csb_cpu_type == CPU_TYPE_ANY ||
                              blob->csb_cpu_type == oblob->csb_cpu_type) &&
                             !bcmp(blob->csb_sha1,
                                   oblob->csb_sha1,
                                   SHA1_RESULTLEN)) {
                                 /*
                                  * We already have this blob:
                                  * we'll return success but
                                  * throw away the new blob.
                                  */
                                 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
                                         /*
                                          * The old blob matches this one
                                          * but doesn't have any CPU type.
                                          * Update it with whatever the caller
                                          * provided this time.
                                          */
                                         oblob->csb_cpu_type = cputype;
                                 }
                                 vnode_unlock(vp);
                                 error = EAGAIN;
                                 goto out;
                         } else {
                                 /* different blob: reject the new one */
                                 vnode_unlock(vp);
                                 error = EALREADY;
                                 goto out;
                         }
                 }
        }
        /* mark this vnode's VM object as having "signed pages" */
        kr = memory_object_signed(uip->ui_control, TRUE);
        if (kr != KERN_SUCCESS) {
                vnode_unlock(vp);
                error = ENOENT;
                goto out;
        }

        if (uip->cs_blobs == NULL) {
                /* loading 1st blob: record the file's current "modify time" */
                record_mtime = TRUE;
        }

        /* set the generation count for cs_blobs */
        uip->cs_add_gen = cs_blob_generation_count;

        /*
         * Add this blob to the list of blobs for this vnode.
         * We always add at the front of the list and we never remove a
         * blob from the list, so ubc_get_cs_blobs() can return whatever
         * the top of the list was and that list will remain valid
         * while we validate a page, even after we release the vnode's lock.
         */
        blob->csb_next = uip->cs_blobs;
        uip->cs_blobs = blob;

        OSAddAtomic(+1, &cs_blob_count);
        if (cs_blob_count > cs_blob_count_peak) {
                cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
        }
        OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size);
        if ((SInt32) cs_blob_size > cs_blob_size_peak) {
                cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
        }
        if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
                cs_blob_size_max = (UInt32) blob->csb_mem_size;
        }

        if (cs_debug > 1) {
                proc_t p;
                const char *name = vnode_getname_printable(vp);
                p = current_proc();
                printf("CODE SIGNING: proc %d(%s) "
                       "loaded %s signatures for file (%s) "
                       "range 0x%llx:0x%llx flags 0x%x\n",
                       p->p_pid, p->p_comm,
                       blob->csb_cpu_type == -1 ? "detached" : "embedded",
                       name,
                       blob->csb_base_offset + blob->csb_start_offset,
                       blob->csb_base_offset + blob->csb_end_offset,
                       blob->csb_flags);
                vnode_putname_printable(name);
        }

        vnode_unlock(vp);

        if (record_mtime) {
                vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
        }

        error = 0;	/* success ! */

out:
        if (error) {
                if (cs_debug)
                        printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);

                /* we failed; release what we allocated */
                if (blob) {
                        kfree(blob, sizeof (*blob));
                        blob = NULL;
                }
                if (blob_handle != IPC_PORT_NULL) {
                        mach_memory_entry_port_release(blob_handle);
                        blob_handle = IPC_PORT_NULL;
                }
        }

        if (error == EAGAIN) {
                /*
                 * See above:  error is EAGAIN if we were asked
                 * to add an existing blob again.  We cleaned the new
                 * blob and we want to return success.
                 */
                error = 0;
                /*
                 * Since we're not failing, consume the data we received.
                 */
                ubc_cs_blob_deallocate(addr, size);
        }

        return error;
}
struct cs_blob *
ubc_cs_blob_get(
	struct vnode	*vp,
	cpu_type_t	cputype,
	off_t		offset)
{
        struct ubc_info	*uip;
        struct cs_blob	*blob;
        off_t offset_in_blob;

        vnode_lock_spin(vp);

        if (! UBCINFOEXISTS(vp)) {
                blob = NULL;
                goto out;
        }

        uip = vp->v_ubcinfo;
        for (blob = uip->cs_blobs;
             blob != NULL;
             blob = blob->csb_next) {
                if (cputype != -1 && blob->csb_cpu_type == cputype) {
                        break;
                }
                if (offset != -1) {
                        offset_in_blob = offset - blob->csb_base_offset;
                        if (offset_in_blob >= blob->csb_start_offset &&
                            offset_in_blob < blob->csb_end_offset) {
                                /* our offset is covered by this blob */
                                break;
                        }
                }
        }

        if (cs_debug && blob != NULL && blob->csb_sigpup) {
                printf("found sig pup blob\n");
        }

out:
        vnode_unlock(vp);

        return blob;
}
static void
ubc_cs_free(
	struct ubc_info	*uip)
{
        struct cs_blob	*blob, *next_blob;

        for (blob = uip->cs_blobs;
             blob != NULL;
             blob = next_blob) {
                next_blob = blob->csb_next;
                if (blob->csb_mem_kaddr != 0 && !blob->csb_sigpup) {
                        ubc_cs_blob_deallocate(blob->csb_mem_kaddr,
                                               blob->csb_mem_size);
                        blob->csb_mem_kaddr = 0;
                }
                if (blob->csb_mem_handle != IPC_PORT_NULL) {
                        mach_memory_entry_port_release(blob->csb_mem_handle);
                }
                blob->csb_mem_handle = IPC_PORT_NULL;
                OSAddAtomic(-1, &cs_blob_count);
                OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size);
                kfree(blob, sizeof (*blob));
        }
#if CHECK_CS_VALIDATION_BITMAP
        ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
#endif
        uip->cs_blobs = NULL;
}
/* check cs blob generation on vnode
 * returns:
 *    0         : Success, the cs_blob attached is current
 *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
 */
int
ubc_cs_generation_check(
	struct vnode	*vp)
{
        int retval = ENEEDAUTH;

        vnode_lock_spin(vp);

        if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
                retval = 0;
        }

        vnode_unlock(vp);
        return retval;
}
int
ubc_cs_blob_revalidate(
	struct vnode	*vp,
	struct cs_blob *blob
	)
{
        int error = 0;
#if CONFIG_MACF
        int is_platform_binary = 0;
#endif
        const CS_CodeDirectory *cd = NULL;

        assert(vp != NULL);
        assert(blob != NULL);

        error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, &cd);
        if (error) {
                if (cs_debug) {
                        printf("CODESIGNING: csblob invalid: %d\n", error);
                }
                goto out;
        }

        /* callout to mac_vnode_check_signature */
#if CONFIG_MACF
        error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_sha1, (const void*)cd, blob->csb_cpu_type, &is_platform_binary);
        if (cs_debug && error) {
                        printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
        }
#endif

        /* update generation number if success */
        vnode_lock_spin(vp);
        if (UBCINFOEXISTS(vp)) {
                if (error == 0)
                        vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
                else
                        vp->v_ubcinfo->cs_add_gen = 0;
        }

        vnode_unlock(vp);

out:
        return error;
}
void
cs_blob_reset_cache()
{
        /* incrementing odd no by 2 makes sure '0' is never reached. */
        OSAddAtomic(+2, &cs_blob_generation_count);
        printf("Resetting cs_blob cache from all vnodes. \n");
}
struct cs_blob *
ubc_get_cs_blobs(
	struct vnode	*vp)
{
        struct ubc_info	*uip;
        struct cs_blob	*blobs;

        /*
         * No need to take the vnode lock here.  The caller must be holding
         * a reference on the vnode (via a VM mapping or open file descriptor),
         * so the vnode will not go away.  The ubc_info stays until the vnode
         * goes away.  And we only modify "blobs" by adding to the head of the
         * list.
         * The ubc_info could go away entirely if the vnode gets reclaimed as
         * part of a forced unmount.  In the case of a code-signature validation
         * during a page fault, the "paging_in_progress" reference on the VM
         * object guarantees that the vnode pager (and the ubc_info) won't go
         * away during the fault.
         * Other callers need to protect against vnode reclaim by holding the
         * vnode lock, for example.
         */

        if (! UBCINFOEXISTS(vp)) {
                blobs = NULL;
                goto out;
        }

        uip = vp->v_ubcinfo;
        blobs = uip->cs_blobs;

out:
        return blobs;
}
void
ubc_get_cs_mtime(
	struct vnode	*vp,
	struct timespec	*cs_mtime)
{
        struct ubc_info	*uip;

        if (! UBCINFOEXISTS(vp)) {
                cs_mtime->tv_sec = 0;
                cs_mtime->tv_nsec = 0;
                return;
        }

        uip = vp->v_ubcinfo;
        cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
        cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
}
unsigned long cs_validate_page_no_hash = 0;
unsigned long cs_validate_page_bad_hash = 0;

boolean_t
cs_validate_page(
	void			*_blobs,
	memory_object_t		pager,
	memory_object_offset_t	page_offset,
	const void		*data,
	boolean_t		*tainted)
{
	SHA1_CTX		sha1ctxt;
	unsigned char		actual_hash[SHA1_RESULTLEN];
	unsigned char		expected_hash[SHA1_RESULTLEN];
	boolean_t		found_hash;
	struct cs_blob		*blobs, *blob;
	const CS_CodeDirectory	*cd;
	const CS_SuperBlob	*embedded;
	const unsigned char	*hash;
	boolean_t		validated;
	off_t			offset;	/* page offset in the file */
	size_t			size;
	off_t			codeLimit = 0;
	char			*lower_bound, *upper_bound;
	vm_offset_t		kaddr, blob_addr;
	vm_size_t		ksize;
	kern_return_t		kr;

	offset = page_offset;

	/* retrieve the expected hash */
	found_hash = FALSE;
	blobs = (struct cs_blob *) _blobs;

	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		offset = page_offset - blob->csb_base_offset;
		if (offset < blob->csb_start_offset ||
		    offset >= blob->csb_end_offset) {
			/* our page is not covered by this blob */
			continue;
		}

		/* map the blob in the kernel address space */
		kaddr = blob->csb_mem_kaddr;
		if (kaddr == 0) {
			ksize = (vm_size_t) (blob->csb_mem_size +
					     blob->csb_mem_offset);
			kr = vm_map(kernel_map,
				    &kaddr,
				    ksize,
				    0,
				    VM_FLAGS_ANYWHERE,
				    blob->csb_mem_handle,
				    0,
				    TRUE,
				    VM_PROT_READ,
				    VM_PROT_READ,
				    VM_INHERIT_NONE);
			if (kr != KERN_SUCCESS) {
				/* XXX FBDP what to do !? */
				printf("cs_validate_page: failed to map blob, "
				       "size=0x%lx kr=0x%x\n",
				       (size_t)blob->csb_mem_size, kr);
				break;
			}
		}
		if (blob->csb_sigpup && cs_debug)
			printf("checking for a sigpup CD\n");

		blob_addr = kaddr + blob->csb_mem_offset;
		lower_bound = CAST_DOWN(char *, blob_addr);
		upper_bound = lower_bound + blob->csb_mem_size;

		embedded = (const CS_SuperBlob *) blob_addr;
		cd = findCodeDirectory(embedded, lower_bound, upper_bound);
		if (cd != NULL) {
			if (cd->pageSize != PAGE_SHIFT_4K ||
			    cd->hashType != CS_HASHTYPE_SHA1 ||
			    cd->hashSize != SHA1_RESULTLEN) {
				/* bogus blob ? */
				if (blob->csb_sigpup && cs_debug)
					printf("page foo bogus sigpup CD\n");
				continue;
			}

			offset = page_offset - blob->csb_base_offset;
			if (offset < blob->csb_start_offset ||
			    offset >= blob->csb_end_offset) {
				/* our page is not covered by this blob */
				if (blob->csb_sigpup && cs_debug)
					printf("OOB sigpup CD\n");
				continue;
			}

			codeLimit = ntohl(cd->codeLimit);
			if (blob->csb_sigpup && cs_debug)
				printf("sigpup codesize %d\n", (int)codeLimit);

			hash = hashes(cd, (unsigned)(offset>>PAGE_SHIFT_4K),
				      lower_bound, upper_bound);
			if (hash != NULL) {
				bcopy(hash, expected_hash,
				      sizeof (expected_hash));
				found_hash = TRUE;
				if (blob->csb_sigpup && cs_debug)
					printf("sigpup hash\n");
			}

			break;
		} else {
			if (blob->csb_sigpup && cs_debug)
				printf("sig pup had no valid CD\n");
		}
	}

	if (found_hash == FALSE) {
		/*
		 * We can't verify this page because there is no signature
		 * for it (yet).  It's possible that this part of the object
		 * is not signed, or that signatures for that part have not
		 * been loaded yet.
		 * Report that the page has not been validated and let the
		 * caller decide if it wants to accept it or not.
		 */
		cs_validate_page_no_hash++;
		if (cs_debug > 1) {
			printf("CODE SIGNING: cs_validate_page: "
			       "mobj %p off 0x%llx: no hash to validate !?\n",
			       pager, page_offset);
		}
		validated = FALSE;
		*tainted = FALSE;
	} else {
		size = PAGE_SIZE_4K;
		const uint32_t *asha1, *esha1;
		if ((off_t)(offset + size) > codeLimit) {
			/* partial page at end of segment */
			assert(offset < codeLimit);
			size = (size_t) (codeLimit & PAGE_MASK_4K);
		}
		/* compute the actual page's SHA1 hash */
		SHA1Init(&sha1ctxt);
		SHA1UpdateUsePhysicalAddress(&sha1ctxt, data, size);
		SHA1Final(actual_hash, &sha1ctxt);

		asha1 = (const uint32_t *) actual_hash;
		esha1 = (const uint32_t *) expected_hash;

		if (bcmp(expected_hash, actual_hash, SHA1_RESULTLEN) != 0) {
			if (cs_debug) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
				       "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
				       pager, page_offset, size,
				       asha1[0], asha1[1], asha1[2],
				       asha1[3], asha1[4],
				       esha1[0], esha1[1], esha1[2],
				       esha1[3], esha1[4]);
			}
			cs_validate_page_bad_hash++;
			*tainted = TRUE;
		} else {
			if (cs_debug > 10) {
				printf("CODE SIGNING: cs_validate_page: "
				       "mobj %p off 0x%llx size 0x%lx: "
				       "SHA1 OK\n",
				       pager, page_offset, size);
			}
			*tainted = FALSE;
		}
		validated = TRUE;
	}

	return validated;
}
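/*
 * Worked example (sketch, not part of the original source) of the offset
 * arithmetic above: for a 4K page at file offset 0x5000 in a blob based
 * at 0, the hash slot is 0x5000 >> PAGE_SHIFT_4K == 5; if codeLimit is
 * 0x5800, the final page only hashes codeLimit & PAGE_MASK_4K == 0x800
 * bytes.
 */
#if 0
static void
example_page_hash_math(void)
{
	off_t	page_offset = 0x5000;
	off_t	codeLimit   = 0x5800;
	size_t	size        = PAGE_SIZE_4K;

	unsigned slot = (unsigned)(page_offset >> PAGE_SHIFT_4K);	/* 5 */
	if ((off_t)(page_offset + size) > codeLimit)
		size = (size_t)(codeLimit & PAGE_MASK_4K);		/* 0x800 */
	assert(slot == 5 && size == 0x800);
}
#endif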
int
ubc_cs_getcdhash(
	vnode_t		vp,
	off_t		offset,
	unsigned char	*cdhash)
{
	struct cs_blob	*blobs, *blob;
	off_t		rel_offset;
	int		ret;

	vnode_lock(vp);

	blobs = ubc_get_cs_blobs(vp);
	for (blob = blobs;
	     blob != NULL;
	     blob = blob->csb_next) {
		/* compute offset relative to this blob */
		rel_offset = offset - blob->csb_base_offset;
		if (rel_offset >= blob->csb_start_offset &&
		    rel_offset < blob->csb_end_offset) {
			/* this blob does cover our "offset" ! */
			break;
		}
	}

	if (blob == NULL) {
		/* we didn't find a blob covering "offset" */
		ret = EBADEXEC;		/* XXX any better error ? */
	} else {
		/* get the SHA1 hash of that blob */
		bcopy(blob->csb_sha1, cdhash, sizeof (blob->csb_sha1));
		ret = 0;
	}

	vnode_unlock(vp);

	return ret;
}
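/*
 * Example (sketch, not part of the original source): fetching the CDHash
 * that covers a given file offset.  SHA1_RESULTLEN matches
 * sizeof(blob->csb_sha1) per the bcopy above.
 */
#if 0
static void
example_print_cdhash(vnode_t vp, off_t offset)
{
	unsigned char cdhash[SHA1_RESULTLEN];

	if (ubc_cs_getcdhash(vp, offset, cdhash) == 0) {
		printf("cdhash[0..3]: %02x %02x %02x %02x\n",
		       cdhash[0], cdhash[1], cdhash[2], cdhash[3]);
	}
}
#endif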
#if CHECK_CS_VALIDATION_BITMAP
#define stob(s)	((atop_64((s)) + 07) >> 3)
extern	boolean_t	root_fs_upgrade_try;

/*
 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
 * Depends:
 * a) Is the target vnode on the root filesystem?
 * b) Has someone tried to mount the root filesystem read-write?
 * If answers are (a) yes AND (b) no, then we can use the bitmap.
 */
#define USE_CODE_SIGN_BITMAP(vp)	( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
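/*
 * stob() ("size to bitmap bytes") rounds the page count up to a whole
 * number of bytes, one bit per page.  Sketch of the arithmetic, assuming
 * a 4 KB kernel page size (illustrative only, not part of the original
 * source): a 1 MB file is 256 pages -> (256 + 7) >> 3 == 32 bitmap bytes.
 */
#if 0
static void
example_stob_math(void)
{
	vm_object_size_t bitmap_size = stob(1024 * 1024);
	assert(bitmap_size == 32);	/* holds when atop_64() uses 4K pages */
}
#endif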
kern_return_t
ubc_cs_validation_bitmap_allocate(
	vnode_t		vp)
{
	kern_return_t		kr = KERN_SUCCESS;
	struct ubc_info		*uip;
	char			*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		uip = vp->v_ubcinfo;

		if ( uip->cs_valid_bitmap == NULL ) {
			bitmap_size = stob(uip->ui_size);
			target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size );
			if (target_bitmap == 0) {
				kr = KERN_NO_SPACE;
			} else {
				kr = KERN_SUCCESS;
			}
			if( kr == KERN_SUCCESS ) {
				memset( target_bitmap, 0, (size_t)bitmap_size);
				uip->cs_valid_bitmap = (void*)target_bitmap;
				uip->cs_valid_bitmap_size = bitmap_size;
			}
		}
	}
	return kr;
}
kern_return_t
ubc_cs_check_validation_bitmap (
	vnode_t			vp,
	memory_object_offset_t	offset,
	int			optype)
{
	kern_return_t	kr = KERN_SUCCESS;

	if ( ! USE_CODE_SIGN_BITMAP(vp) || ! UBCINFOEXISTS(vp)) {
		kr = KERN_INVALID_ARGUMENT;
	} else {
		struct ubc_info	*uip = vp->v_ubcinfo;
		char		*target_bitmap = uip->cs_valid_bitmap;

		if ( target_bitmap == NULL ) {
			kr = KERN_INVALID_ARGUMENT;
		} else {
			uint64_t	bit, byte;
			bit = atop_64( offset );
			byte = bit >> 3;

			if ( byte > uip->cs_valid_bitmap_size ) {
				kr = KERN_INVALID_ARGUMENT;
			} else {
				if (optype == CS_BITMAP_SET) {
					target_bitmap[byte] |= (1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CLEAR) {
					target_bitmap[byte] &= ~(1 << (bit & 07));
					kr = KERN_SUCCESS;
				} else if (optype == CS_BITMAP_CHECK) {
					if ( target_bitmap[byte] & (1 << (bit & 07))) {
						kr = KERN_SUCCESS;
					} else {
						kr = KERN_FAILURE;
					}
				}
			}
		}
	}
	return kr;
}
void
ubc_cs_validation_bitmap_deallocate(
	vnode_t		vp)
{
	struct ubc_info		*uip;
	void			*target_bitmap;
	vm_object_size_t	bitmap_size;

	if ( UBCINFOEXISTS(vp)) {
		uip = vp->v_ubcinfo;

		if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) {
			bitmap_size = uip->cs_valid_bitmap_size;
			kfree( target_bitmap, (vm_size_t) bitmap_size );
			uip->cs_valid_bitmap = NULL;
		}
	}
}
#else /* CHECK_CS_VALIDATION_BITMAP */

kern_return_t
ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
{
	return KERN_INVALID_ARGUMENT;
}

kern_return_t
ubc_cs_check_validation_bitmap(
	__unused struct vnode *vp,
	__unused memory_object_offset_t offset,
	__unused int optype)
{
	return KERN_INVALID_ARGUMENT;
}

void
ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
{
	return;
}
#endif /* CHECK_CS_VALIDATION_BITMAP */