apple/xnu xnu-6153.41.3 - bsd/kern/ubc_subr.c
1 /*
2 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * File: ubc_subr.c
30 * Author: Umesh Vaishampayan [umeshv@apple.com]
31 * 05-Aug-1999 umeshv Created.
32 *
33 * Functions related to Unified Buffer cache.
34 *
35 * Caller of UBC functions MUST have a valid reference on the vnode.
36 *
37 */
38
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56
57 #include <mach/mach_types.h>
58 #include <mach/memory_object_types.h>
59 #include <mach/memory_object_control.h>
60 #include <mach/vm_map.h>
61 #include <mach/mach_vm.h>
62 #include <mach/upl.h>
63
64 #include <kern/kern_types.h>
65 #include <kern/kalloc.h>
66 #include <kern/zalloc.h>
67 #include <kern/thread.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_protos.h> /* last */
71
72 #include <libkern/crypto/sha1.h>
73 #include <libkern/crypto/sha2.h>
74 #include <libkern/libkern.h>
75
76 #include <security/mac_framework.h>
77 #include <stdbool.h>
78
79 /* XXX These should be in a BSD accessible Mach header, but aren't. */
80 extern kern_return_t memory_object_pages_resident(memory_object_control_t,
81 boolean_t *);
82 extern kern_return_t memory_object_signed(memory_object_control_t control,
83 boolean_t is_signed);
84 extern boolean_t memory_object_is_signed(memory_object_control_t);
85 extern void memory_object_mark_trusted(
86 memory_object_control_t control);
87
88 /* XXX Same for those. */
89
90 extern void Debugger(const char *message);
91
92
93 /* XXX no one uses this interface! */
94 kern_return_t ubc_page_op_with_control(
95 memory_object_control_t control,
96 off_t f_offset,
97 int ops,
98 ppnum_t *phys_entryp,
99 int *flagsp);
100
101
102 #if DIAGNOSTIC
103 #if defined(assert)
104 #undef assert
105 #endif
106 #define assert(cond) \
107 ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
108 #else
109 #include <kern/assert.h>
110 #endif /* DIAGNOSTIC */
111
112 static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
113 static int ubc_umcallback(vnode_t, void *);
114 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
115 static void ubc_cs_free(struct ubc_info *uip);
116
117 static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
118 static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
119
120 struct zone *ubc_info_zone;
121 static uint32_t cs_blob_generation_count = 1;
122
123 /*
124 * CODESIGNING
125 * Routines to navigate code signing data structures in the kernel...
126 */
127
128 extern int cs_debug;
129
130 #define PAGE_SHIFT_4K (12)
131
132 static boolean_t
133 cs_valid_range(
134 const void *start,
135 const void *end,
136 const void *lower_bound,
137 const void *upper_bound)
138 {
139 if (upper_bound < lower_bound ||
140 end < start) {
141 return FALSE;
142 }
143
144 if (start < lower_bound ||
145 end > upper_bound) {
146 return FALSE;
147 }
148
149 return TRUE;
150 }
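/*
 * A minimal sketch (not compiled) of the intended cs_valid_range() call
 * pattern: bound a candidate structure against the mapped blob before any
 * of its fields are read.  The helper and parameter names are illustrative.
 */
#if 0
static const CS_CodeDirectory *
example_cd_from_blob(const char *blob_base, const char *blob_end)
{
	const CS_CodeDirectory *cd = (const CS_CodeDirectory *)(const void *)blob_base;

	/* the fixed-size header must lie entirely inside the blob ... */
	if (!cs_valid_range(cd, cd + 1, blob_base, blob_end)) {
		return NULL;
	}
	/* ... before any byte-swapped field of *cd is trusted */
	return cd;
}
#endif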
151
152 typedef void (*cs_md_init)(void *ctx);
153 typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
154 typedef void (*cs_md_final)(void *hash, void *ctx);
155
156 struct cs_hash {
157 uint8_t cs_type; /* type code as per code signing */
158 size_t cs_size; /* size of effective hash (may be truncated) */
159 size_t cs_digest_size;/* size of native hash */
160 cs_md_init cs_init;
161 cs_md_update cs_update;
162 cs_md_final cs_final;
163 };
164
165 uint8_t
166 cs_hash_type(
167 struct cs_hash const * const cs_hash)
168 {
169 return cs_hash->cs_type;
170 }
171
172 static const struct cs_hash cs_hash_sha1 = {
173 .cs_type = CS_HASHTYPE_SHA1,
174 .cs_size = CS_SHA1_LEN,
175 .cs_digest_size = SHA_DIGEST_LENGTH,
176 .cs_init = (cs_md_init)SHA1Init,
177 .cs_update = (cs_md_update)SHA1Update,
178 .cs_final = (cs_md_final)SHA1Final,
179 };
180 #if CRYPTO_SHA2
181 static const struct cs_hash cs_hash_sha256 = {
182 .cs_type = CS_HASHTYPE_SHA256,
183 .cs_size = SHA256_DIGEST_LENGTH,
184 .cs_digest_size = SHA256_DIGEST_LENGTH,
185 .cs_init = (cs_md_init)SHA256_Init,
186 .cs_update = (cs_md_update)SHA256_Update,
187 .cs_final = (cs_md_final)SHA256_Final,
188 };
189 static const struct cs_hash cs_hash_sha256_truncate = {
190 .cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
191 .cs_size = CS_SHA256_TRUNCATED_LEN,
192 .cs_digest_size = SHA256_DIGEST_LENGTH,
193 .cs_init = (cs_md_init)SHA256_Init,
194 .cs_update = (cs_md_update)SHA256_Update,
195 .cs_final = (cs_md_final)SHA256_Final,
196 };
197 static const struct cs_hash cs_hash_sha384 = {
198 .cs_type = CS_HASHTYPE_SHA384,
199 .cs_size = SHA384_DIGEST_LENGTH,
200 .cs_digest_size = SHA384_DIGEST_LENGTH,
201 .cs_init = (cs_md_init)SHA384_Init,
202 .cs_update = (cs_md_update)SHA384_Update,
203 .cs_final = (cs_md_final)SHA384_Final,
204 };
205 #endif
206
207 static struct cs_hash const *
208 cs_find_md(uint8_t type)
209 {
210 if (type == CS_HASHTYPE_SHA1) {
211 return &cs_hash_sha1;
212 #if CRYPTO_SHA2
213 } else if (type == CS_HASHTYPE_SHA256) {
214 return &cs_hash_sha256;
215 } else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
216 return &cs_hash_sha256_truncate;
217 } else if (type == CS_HASHTYPE_SHA384) {
218 return &cs_hash_sha384;
219 #endif
220 }
221 return NULL;
222 }
223
224 union cs_hash_union {
225 SHA1_CTX sha1ctxt;
226 SHA256_CTX sha256ctx;
227 SHA384_CTX sha384ctx;
228 };
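/*
 * A minimal sketch (not compiled) of how the cs_hash function table and the
 * cs_hash_union context are meant to be used together: the caller picks a
 * hashtype once and then drives init/update/final without knowing which
 * digest is underneath.  The helper name is illustrative.
 */
#if 0
static void
example_hash_buffer(struct cs_hash const *hashtype, const void *data,
    size_t size, uint8_t digest[CS_HASH_MAX_SIZE])
{
	union cs_hash_union ctx;

	assert(hashtype->cs_digest_size <= CS_HASH_MAX_SIZE);

	hashtype->cs_init(&ctx);
	hashtype->cs_update(&ctx, data, size);
	hashtype->cs_final(digest, &ctx);
	/* only the first hashtype->cs_size bytes are compared for truncated types */
}
#endif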
229
230
231 /*
232 * Choose among different hash algorithms.
233 * Higher is better, 0 => don't use at all.
234 */
235 static const uint32_t hashPriorities[] = {
236 CS_HASHTYPE_SHA1,
237 CS_HASHTYPE_SHA256_TRUNCATED,
238 CS_HASHTYPE_SHA256,
239 CS_HASHTYPE_SHA384,
240 };
241
242 static unsigned int
243 hash_rank(const CS_CodeDirectory *cd)
244 {
245 uint32_t type = cd->hashType;
246 unsigned int n;
247
248 for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
249 if (hashPriorities[n] == type) {
250 return n + 1;
251 }
252 }
253 return 0; /* not supported */
254 }
255
256
257 /*
258 * Locating a page hash
259 */
260 static const unsigned char *
261 hashes(
262 const CS_CodeDirectory *cd,
263 uint32_t page,
264 size_t hash_len,
265 const char *lower_bound,
266 const char *upper_bound)
267 {
268 const unsigned char *base, *top, *hash;
269 uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
270
271 assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
272
273 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
274 /* Get first scatter struct */
275 const SC_Scatter *scatter = (const SC_Scatter*)
276 ((const char*)cd + ntohl(cd->scatterOffset));
277 uint32_t hashindex = 0, scount, sbase = 0;
278 /* iterate all scatter structs */
279 do {
280 if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
281 if (cs_debug) {
282 printf("CODE SIGNING: Scatter extends past Code Directory\n");
283 }
284 return NULL;
285 }
286
287 scount = ntohl(scatter->count);
288 uint32_t new_base = ntohl(scatter->base);
289
290 /* last scatter? */
291 if (scount == 0) {
292 return NULL;
293 }
294
295 if ((hashindex > 0) && (new_base <= sbase)) {
296 if (cs_debug) {
297 printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
298 sbase, new_base);
299 }
300 return NULL; /* unordered scatter array */
301 }
302 sbase = new_base;
303
304 /* this scatter beyond page we're looking for? */
305 if (sbase > page) {
306 return NULL;
307 }
308
309 if (sbase + scount >= page) {
310 /* Found the scatter struct that is
311 * referencing our page */
312
313 /* base = address of first hash covered by scatter */
314 base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
315 hashindex * hash_len;
316 /* top = address of first hash after this scatter */
317 top = base + scount * hash_len;
318 if (!cs_valid_range(base, top, lower_bound,
319 upper_bound) ||
320 hashindex > nCodeSlots) {
321 return NULL;
322 }
323
324 break;
325 }
326
327 /* this scatter struct is before the page we're looking
328 * for. Iterate. */
329 hashindex += scount;
330 scatter++;
331 } while (1);
332
333 hash = base + (page - sbase) * hash_len;
334 } else {
335 base = (const unsigned char *)cd + ntohl(cd->hashOffset);
336 top = base + nCodeSlots * hash_len;
337 if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
338 page > nCodeSlots) {
339 return NULL;
340 }
341 assert(page < nCodeSlots);
342
343 hash = base + page * hash_len;
344 }
345
346 if (!cs_valid_range(hash, hash + hash_len,
347 lower_bound, upper_bound)) {
348 hash = NULL;
349 }
350
351 return hash;
352 }
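/*
 * A minimal sketch (not compiled) of a hashes() lookup: translating a file
 * offset into a code-slot index is the caller's job, using the code
 * directory's pageSize (a log2 value).  Assumes cd and hashtype have already
 * been validated; the helper name is illustrative.
 */
#if 0
static const unsigned char *
example_hash_for_offset(const CS_CodeDirectory *cd,
    struct cs_hash const *hashtype, off_t offset,
    const char *lower_bound, const char *upper_bound)
{
	/* each code slot covers (1 << cd->pageSize) bytes of the file */
	uint32_t slot = (uint32_t)(offset >> cd->pageSize);

	return hashes(cd, slot, hashtype->cs_size, lower_bound, upper_bound);
}
#endif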
353
354 /*
355 * cs_validate_codedirectory
356 *
357 * Validate the pointers inside the code directory to make sure that
358 * all offsets and lengths are constrained within the buffer.
359 *
360 * Parameters: cd Pointer to code directory buffer
361 * length Length of buffer
362 *
363 * Returns: 0 Success
364 * EBADEXEC Invalid code signature
365 */
366
367 static int
368 cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
369 {
370 struct cs_hash const *hashtype;
371
372 if (length < sizeof(*cd)) {
373 return EBADEXEC;
374 }
375 if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
376 return EBADEXEC;
377 }
378 if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) {
379 return EBADEXEC;
380 }
381 hashtype = cs_find_md(cd->hashType);
382 if (hashtype == NULL) {
383 return EBADEXEC;
384 }
385
386 if (cd->hashSize != hashtype->cs_size) {
387 return EBADEXEC;
388 }
389
390 if (length < ntohl(cd->hashOffset)) {
391 return EBADEXEC;
392 }
393
394 /* check that nSpecialSlots fits in the buffer in front of hashOffset */
395 if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
396 return EBADEXEC;
397 }
398
399 /* check that codeslots fits in the buffer */
400 if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
401 return EBADEXEC;
402 }
403
404 if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
405 if (length < ntohl(cd->scatterOffset)) {
406 return EBADEXEC;
407 }
408
409 const SC_Scatter *scatter = (const SC_Scatter *)
410 (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
411 uint32_t nPages = 0;
412
413 /*
414 * Check each scatter buffer; since we don't know the
415 * length of the scatter buffer array, we have to
416 * check each entry.
417 */
418 while (1) {
419 /* check that the end of each scatter buffer is within the length */
420 if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
421 return EBADEXEC;
422 }
423 uint32_t scount = ntohl(scatter->count);
424 if (scount == 0) {
425 break;
426 }
427 if (nPages + scount < nPages) {
428 return EBADEXEC;
429 }
430 nPages += scount;
431 scatter++;
432
433 /* XXX check that bases don't overlap */
434 /* XXX check that targetOffset doesn't overlap */
435 }
436 #if 0 /* rdar://12579439 */
437 if (nPages != ntohl(cd->nCodeSlots)) {
438 return EBADEXEC;
439 }
440 #endif
441 }
442
443 if (length < ntohl(cd->identOffset)) {
444 return EBADEXEC;
445 }
446
447 /* identifier is a NUL-terminated string */
448 if (cd->identOffset) {
449 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
450 if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
451 return EBADEXEC;
452 }
453 }
454
455 /* team identifier is a NUL-terminated string */
456 if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
457 if (length < ntohl(cd->teamOffset)) {
458 return EBADEXEC;
459 }
460
461 const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
462 if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
463 return EBADEXEC;
464 }
465 }
466
467 return 0;
468 }
469
470 /*
471 * cs_validate_blob: check that the buffer is large enough to hold the blob's declared length.
472 */
473
474 static int
475 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
476 {
477 if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
478 return EBADEXEC;
479 }
480 return 0;
481 }
482
483 /*
484 * cs_validate_csblob
485 *
486 * Validate the superblob/embedded code directory to make sure that
487 * all internal pointers are valid.
488 *
489 * Will validate both a superblob csblob and a "raw" code directory.
490 *
491 *
492 * Parameters: buffer Pointer to code signature
493 * length Length of buffer
494 * rcd returns pointer to code directory
495 *
496 * Returns: 0 Success
497 * EBADEXEC Invalid code signature
498 */
499
500 static int
501 cs_validate_csblob(
502 const uint8_t *addr,
503 const size_t blob_size,
504 const CS_CodeDirectory **rcd,
505 const CS_GenericBlob **rentitlements)
506 {
507 const CS_GenericBlob *blob;
508 int error;
509 size_t length;
510
511 *rcd = NULL;
512 *rentitlements = NULL;
513
514 blob = (const CS_GenericBlob *)(const void *)addr;
515
516 length = blob_size;
517 error = cs_validate_blob(blob, length);
518 if (error) {
519 return error;
520 }
521 length = ntohl(blob->length);
522
523 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
524 const CS_SuperBlob *sb;
525 uint32_t n, count;
526 const CS_CodeDirectory *best_cd = NULL;
527 unsigned int best_rank = 0;
528 #if PLATFORM_WatchOS
529 const CS_CodeDirectory *sha1_cd = NULL;
530 #endif
531
532 if (length < sizeof(CS_SuperBlob)) {
533 return EBADEXEC;
534 }
535
536 sb = (const CS_SuperBlob *)blob;
537 count = ntohl(sb->count);
538
539 /* check that the array of BlobIndex fits in the rest of the data */
540 if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
541 return EBADEXEC;
542 }
543
544 /* now check each BlobIndex */
545 for (n = 0; n < count; n++) {
546 const CS_BlobIndex *blobIndex = &sb->index[n];
547 uint32_t type = ntohl(blobIndex->type);
548 uint32_t offset = ntohl(blobIndex->offset);
549 if (length < offset) {
550 return EBADEXEC;
551 }
552
553 const CS_GenericBlob *subBlob =
554 (const CS_GenericBlob *)(const void *)(addr + offset);
555
556 size_t subLength = length - offset;
557
558 if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
559 return error;
560 }
561 subLength = ntohl(subBlob->length);
562
563 /* extra validation for CDs, which are also returned */
564 if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
565 const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
566 if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
567 return error;
568 }
569 unsigned int rank = hash_rank(candidate);
570 if (cs_debug > 3) {
571 printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
572 }
573 if (best_cd == NULL || rank > best_rank) {
574 best_cd = candidate;
575 best_rank = rank;
576
577 if (cs_debug > 2) {
578 printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
579 }
580 *rcd = best_cd;
581 } else if (best_cd != NULL && rank == best_rank) {
582 /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
583 printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
584 return EBADEXEC;
585 }
586 #if PLATFORM_WatchOS
587 if (candidate->hashType == CS_HASHTYPE_SHA1) {
588 if (sha1_cd != NULL) {
589 printf("multiple sha1 CodeDirectories in signature; rejecting\n");
590 return EBADEXEC;
591 }
592 sha1_cd = candidate;
593 }
594 #endif
595 } else if (type == CSSLOT_ENTITLEMENTS) {
596 if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
597 return EBADEXEC;
598 }
599 if (*rentitlements != NULL) {
600 printf("multiple entitlements blobs\n");
601 return EBADEXEC;
602 }
603 *rentitlements = subBlob;
604 }
605 }
606
607 #if PLATFORM_WatchOS
608 /* To keep watchOS fast enough, we have to resort to sha1 for
609 * some code.
610 *
611 * At the time of writing this comment, known sha1 attacks are
612 * collision attacks (not preimage or second preimage
613 * attacks), which do not apply to platform binaries since
614 * they have a fixed hash in the trust cache. Given this
615 * property, we only prefer sha1 code directories for adhoc
616 * signatures, which always have to be in a trust cache to be
617 * valid (can-load-cdhash does not exist for watchOS). Those
618 * are, incidentally, also the platform binaries, for which we
619 * care about the performance hit that sha256 would bring us.
620 *
621 * Platform binaries may still contain a (not chosen) sha256
622 * code directory, which keeps software updates that switch to
623 * sha256-only small.
624 */
625
626 if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
627 if (sha1_cd->flags != (*rcd)->flags) {
628 printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
629 (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
630 *rcd = NULL;
631 return EBADEXEC;
632 }
633
634 *rcd = sha1_cd;
635 }
636 #endif
637 } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
638 if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
639 return error;
640 }
641 *rcd = (const CS_CodeDirectory *)blob;
642 } else {
643 return EBADEXEC;
644 }
645
646 if (*rcd == NULL) {
647 return EBADEXEC;
648 }
649
650 return 0;
651 }
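/*
 * A minimal sketch (not compiled) of the expected calling sequence: validate
 * the signature buffer once, then hold on to the returned code directory and
 * entitlements pointers.  The helper name is illustrative.
 */
#if 0
static int
example_validate_signature(const uint8_t *addr, size_t blob_size)
{
	const CS_CodeDirectory *cd = NULL;
	const CS_GenericBlob *entitlements = NULL;
	int error;

	error = cs_validate_csblob(addr, blob_size, &cd, &entitlements);
	if (error) {
		return error;   /* EBADEXEC: reject the signature outright */
	}
	/* cd now points at the highest-ranked, fully validated CodeDirectory */
	return 0;
}
#endif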
652
653 /*
654 * cs_find_blob_bytes
655 *
656 * Find a blob from the superblob/code directory. The blob must have
657 * been validated by cs_validate_csblob() before calling
658 * this. Use csblob_find_blob() instead.
659 *
660 * Will also find a "raw" code directory if one is stored, in
661 * addition to searching the superblob.
662 *
663 * Parameters: buffer Pointer to code signature
664 * length Length of buffer
665 * type type of blob to find
666 * magic the magic number for that blob
667 *
668 * Returns: pointer Success
669 * NULL Buffer not found
670 */
671
672 const CS_GenericBlob *
673 csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
674 {
675 const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
676
677 if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
678 const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
679 size_t n, count = ntohl(sb->count);
680
681 for (n = 0; n < count; n++) {
682 if (ntohl(sb->index[n].type) != type) {
683 continue;
684 }
685 uint32_t offset = ntohl(sb->index[n].offset);
686 if (length - sizeof(const CS_GenericBlob) < offset) {
687 return NULL;
688 }
689 blob = (const CS_GenericBlob *)(const void *)(addr + offset);
690 if (ntohl(blob->magic) != magic) {
691 continue;
692 }
693 return blob;
694 }
695 } else if (type == CSSLOT_CODEDIRECTORY
696 && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
697 && magic == CSMAGIC_CODEDIRECTORY) {
698 return blob;
699 }
700 return NULL;
701 }
702
703
704 const CS_GenericBlob *
705 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
706 {
707 if ((csblob->csb_flags & CS_VALID) == 0) {
708 return NULL;
709 }
710 return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
711 }
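/*
 * A minimal sketch (not compiled) of a typical csblob_find_blob() lookup,
 * here for the entitlements blob; the blob must already carry CS_VALID.
 * The helper name is illustrative.
 */
#if 0
static const CS_GenericBlob *
example_entitlements_blob(struct cs_blob *csblob)
{
	/* returns NULL if the slot is absent or its magic does not match */
	return csblob_find_blob(csblob, CSSLOT_ENTITLEMENTS,
	    CSMAGIC_EMBEDDED_ENTITLEMENTS);
}
#endif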
712
713 static const uint8_t *
714 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
715 {
716 /* there is no zero special slot since that is the first code slot */
717 if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
718 return NULL;
719 }
720
721 return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
722 }
723
724 static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
725
726 int
727 csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
728 {
729 uint8_t computed_hash[CS_HASH_MAX_SIZE];
730 const CS_GenericBlob *entitlements;
731 const CS_CodeDirectory *code_dir;
732 const uint8_t *embedded_hash;
733 union cs_hash_union context;
734
735 *out_start = NULL;
736 *out_length = 0;
737
738 if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
739 return EBADEXEC;
740 }
741
742 code_dir = csblob->csb_cd;
743
744 if ((csblob->csb_flags & CS_VALID) == 0) {
745 entitlements = NULL;
746 } else {
747 entitlements = csblob->csb_entitlements_blob;
748 }
749 embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
750
751 if (embedded_hash == NULL) {
752 if (entitlements) {
753 return EBADEXEC;
754 }
755 return 0;
756 } else if (entitlements == NULL) {
757 if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
758 return EBADEXEC;
759 } else {
760 return 0;
761 }
762 }
763
764 csblob->csb_hashtype->cs_init(&context);
765 csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
766 csblob->csb_hashtype->cs_final(computed_hash, &context);
767
768 if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
769 return EBADEXEC;
770 }
771
772 *out_start = __DECONST(void *, entitlements);
773 *out_length = ntohl(entitlements->length);
774
775 return 0;
776 }
777
778 /*
779 * CODESIGNING
780 * End of routines to navigate code signing data structures in the kernel.
781 */
782
783
784
785 /*
786 * ubc_init
787 *
788 * Initialization of the zone for Unified Buffer Cache.
789 *
790 * Parameters: (void)
791 *
792 * Returns: (void)
793 *
794 * Implicit returns:
795 * ubc_info_zone(global) initialized for subsequent allocations
796 */
797 __private_extern__ void
798 ubc_init(void)
799 {
800 int i;
801
802 i = (vm_size_t) sizeof(struct ubc_info);
803
804 ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone");
805
806 zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
807 }
808
809
810 /*
811 * ubc_info_init
812 *
813 * Allocate and attach an empty ubc_info structure to a vnode
814 *
815 * Parameters: vp Pointer to the vnode
816 *
817 * Returns: 0 Success
818 * vnode_size:ENOMEM Not enough space
819 * vnode_size:??? Other error from vnode_getattr
820 *
821 */
822 int
823 ubc_info_init(struct vnode *vp)
824 {
825 return ubc_info_init_internal(vp, 0, 0);
826 }
827
828
829 /*
830 * ubc_info_init_withsize
831 *
832 * Allocate and attach a sized ubc_info structure to a vnode
833 *
834 * Parameters: vp Pointer to the vnode
835 * filesize The size of the file
836 *
837 * Returns: 0 Success
838 * vnode_size:ENOMEM Not enough space
839 * vnode_size:??? Other error from vnode_getattr
840 */
841 int
842 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
843 {
844 return ubc_info_init_internal(vp, 1, filesize);
845 }
846
847
848 /*
849 * ubc_info_init_internal
850 *
851 * Allocate and attach a ubc_info structure to a vnode
852 *
853 * Parameters: vp Pointer to the vnode
854 * withfsize{0,1} Zero if the size should be obtained
855 * from the vnode; otherwise, use filesize
856 * filesize The size of the file, if withfsize == 1
857 *
858 * Returns: 0 Success
859 * vnode_size:ENOMEM Not enough space
860 * vnode_size:??? Other error from vnode_getattr
861 *
862 * Notes: We call a blocking zalloc(), and the zone was created as an
863 * expandable and collectable zone, so if no memory is available,
864 * it is possible for zalloc() to block indefinitely. zalloc()
865 * may also panic if the zone of zones is exhausted, since it's
866 * NOT expandable.
867 *
868 * We unconditionally call vnode_pager_setup(), even if this is
869 * a reuse of a ubc_info; in that case, we should probably assert
870 * that it does not already have a pager association, but do not.
871 *
872 * Since memory_object_create_named() can only fail from receiving
873 * an invalid pager argument, the explicit check and panic is
874 * merely precautionary.
875 */
876 static int
877 ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
878 {
879 struct ubc_info *uip;
880 void * pager;
881 int error = 0;
882 kern_return_t kret;
883 memory_object_control_t control;
884
885 uip = vp->v_ubcinfo;
886
887 /*
888 * If there is not already a ubc_info attached to the vnode, we
889 * attach one; otherwise, we will reuse the one that's there.
890 */
891 if (uip == UBC_INFO_NULL) {
892 uip = (struct ubc_info *) zalloc(ubc_info_zone);
893 bzero((char *)uip, sizeof(struct ubc_info));
894
895 uip->ui_vnode = vp;
896 uip->ui_flags = UI_INITED;
897 uip->ui_ucred = NOCRED;
898 }
899 assert(uip->ui_flags != UI_NONE);
900 assert(uip->ui_vnode == vp);
901
902 /* now set this ubc_info in the vnode */
903 vp->v_ubcinfo = uip;
904
905 /*
906 * Allocate a pager object for this vnode
907 *
908 * XXX The value of the pager parameter is currently ignored.
909 * XXX Presumably, this API changed to avoid the race between
910 * XXX setting the pager and the UI_HASPAGER flag.
911 */
912 pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
913 assert(pager);
914
915 /*
916 * Explicitly set the pager into the ubc_info, after setting the
917 * UI_HASPAGER flag.
918 */
919 SET(uip->ui_flags, UI_HASPAGER);
920 uip->ui_pager = pager;
921
922 /*
923 * Note: We cannot use VNOP_GETATTR() to get an accurate
924 * value of ui_size because this may be an NFS vnode, and
925 * nfs_getattr() can call vinvalbuf(); if this happens,
926 * ubc_info is not set up to deal with that event.
927 * So use bogus size.
928 */
929
930 /*
931 * Create a vnode - vm_object association.
932 * memory_object_create_named() creates a "named" reference on the
933 * memory object; we hold this reference as long as the vnode is
934 * "alive." Since memory_object_create_named() took its own reference
935 * on the vnode pager we passed it, we can drop the reference
936 * vnode_pager_setup() returned here.
937 */
938 kret = memory_object_create_named(pager,
939 (memory_object_size_t)uip->ui_size, &control);
940 vnode_pager_deallocate(pager);
941 if (kret != KERN_SUCCESS) {
942 panic("ubc_info_init: memory_object_create_named returned %d", kret);
943 }
944
945 assert(control);
946 uip->ui_control = control; /* cache the value of the mo control */
947 SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */
948
949 if (withfsize == 0) {
950 /* initialize the size */
951 error = vnode_size(vp, &uip->ui_size, vfs_context_current());
952 if (error) {
953 uip->ui_size = 0;
954 }
955 } else {
956 uip->ui_size = filesize;
957 }
958 vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */
959
960 return error;
961 }
962
963
964 /*
965 * ubc_info_free
966 *
967 * Free a ubc_info structure
968 *
969 * Parameters: uip A pointer to the ubc_info to free
970 *
971 * Returns: (void)
972 *
973 * Notes: If there is a credential that has subsequently been associated
974 * with the ubc_info via a call to ubc_setcred(), the reference
975 * to the credential is dropped.
976 *
977 * It's actually impossible for a ubc_info.ui_control to take the
978 * value MEMORY_OBJECT_CONTROL_NULL.
979 */
980 static void
981 ubc_info_free(struct ubc_info *uip)
982 {
983 if (IS_VALID_CRED(uip->ui_ucred)) {
984 kauth_cred_unref(&uip->ui_ucred);
985 }
986
987 if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
988 memory_object_control_deallocate(uip->ui_control);
989 }
990
991 cluster_release(uip);
992 ubc_cs_free(uip);
993
994 zfree(ubc_info_zone, uip);
995 return;
996 }
997
998
999 void
1000 ubc_info_deallocate(struct ubc_info *uip)
1001 {
1002 ubc_info_free(uip);
1003 }
1004
1005 errno_t
1006 mach_to_bsd_errno(kern_return_t mach_err)
1007 {
1008 switch (mach_err) {
1009 case KERN_SUCCESS:
1010 return 0;
1011
1012 case KERN_INVALID_ADDRESS:
1013 case KERN_INVALID_ARGUMENT:
1014 case KERN_NOT_IN_SET:
1015 case KERN_INVALID_NAME:
1016 case KERN_INVALID_TASK:
1017 case KERN_INVALID_RIGHT:
1018 case KERN_INVALID_VALUE:
1019 case KERN_INVALID_CAPABILITY:
1020 case KERN_INVALID_HOST:
1021 case KERN_MEMORY_PRESENT:
1022 case KERN_INVALID_PROCESSOR_SET:
1023 case KERN_INVALID_POLICY:
1024 case KERN_ALREADY_WAITING:
1025 case KERN_DEFAULT_SET:
1026 case KERN_EXCEPTION_PROTECTED:
1027 case KERN_INVALID_LEDGER:
1028 case KERN_INVALID_MEMORY_CONTROL:
1029 case KERN_INVALID_SECURITY:
1030 case KERN_NOT_DEPRESSED:
1031 case KERN_LOCK_OWNED:
1032 case KERN_LOCK_OWNED_SELF:
1033 return EINVAL;
1034
1035 case KERN_PROTECTION_FAILURE:
1036 case KERN_NOT_RECEIVER:
1037 case KERN_NO_ACCESS:
1038 case KERN_POLICY_STATIC:
1039 return EACCES;
1040
1041 case KERN_NO_SPACE:
1042 case KERN_RESOURCE_SHORTAGE:
1043 case KERN_UREFS_OVERFLOW:
1044 case KERN_INVALID_OBJECT:
1045 return ENOMEM;
1046
1047 case KERN_FAILURE:
1048 return EIO;
1049
1050 case KERN_MEMORY_FAILURE:
1051 case KERN_POLICY_LIMIT:
1052 case KERN_CODESIGN_ERROR:
1053 return EPERM;
1054
1055 case KERN_MEMORY_ERROR:
1056 return EBUSY;
1057
1058 case KERN_ALREADY_IN_SET:
1059 case KERN_NAME_EXISTS:
1060 case KERN_RIGHT_EXISTS:
1061 return EEXIST;
1062
1063 case KERN_ABORTED:
1064 return EINTR;
1065
1066 case KERN_TERMINATED:
1067 case KERN_LOCK_SET_DESTROYED:
1068 case KERN_LOCK_UNSTABLE:
1069 case KERN_SEMAPHORE_DESTROYED:
1070 return ENOENT;
1071
1072 case KERN_RPC_SERVER_TERMINATED:
1073 return ECONNRESET;
1074
1075 case KERN_NOT_SUPPORTED:
1076 return ENOTSUP;
1077
1078 case KERN_NODE_DOWN:
1079 return ENETDOWN;
1080
1081 case KERN_NOT_WAITING:
1082 return ENOENT;
1083
1084 case KERN_OPERATION_TIMED_OUT:
1085 return ETIMEDOUT;
1086
1087 default:
1088 return EIO;
1089 }
1090 }
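/*
 * A minimal sketch (not compiled) of the usual translation pattern: make a
 * Mach VM call and hand BSD callers an errno_t via mach_to_bsd_errno().
 * The helper name is illustrative.
 */
#if 0
static errno_t
example_flush_object(memory_object_control_t control,
    memory_object_offset_t offset, memory_object_size_t size)
{
	kern_return_t kr;

	kr = memory_object_lock_request(control, offset, size, NULL, NULL,
	    MEMORY_OBJECT_RETURN_DIRTY, MEMORY_OBJECT_DATA_FLUSH,
	    VM_PROT_NO_CHANGE);

	return mach_to_bsd_errno(kr);
}
#endif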
1091
1092 /*
1093 * ubc_setsize_ex
1094 *
1095 * Tell the VM that the size of the file represented by the vnode has
1096 * changed
1097 *
1098 * Parameters: vp The vp whose backing file size is
1099 * being changed
1100 * nsize The new size of the backing file
1101 * opts Options
1102 *
1103 * Returns: EINVAL for new size < 0
1104 * ENOENT if no UBC info exists
1105 * EAGAIN if UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1106 * Other errors (mapped to errno_t) returned by VM functions
1107 *
1108 * Notes: This function will indicate success if the new size is the
1109 * same or larger than the old size (in this case, the
1110 * remainder of the file will require modification or use of
1111 * an existing upl to access successfully).
1112 *
1113 * This function will fail if the new file size is smaller
1114 * and the memory region being invalidated could not actually
1115 * be invalidated, and/or the last page could not be flushed
1116 * when the new size is not aligned to a page boundary. This
1117 * is usually indicative of an I/O error.
1118 */
1119 errno_t
1120 ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1121 {
1122 off_t osize; /* ui_size before change */
1123 off_t lastpg, olastpgend, lastoff;
1124 struct ubc_info *uip;
1125 memory_object_control_t control;
1126 kern_return_t kret = KERN_SUCCESS;
1127
1128 if (nsize < (off_t)0) {
1129 return EINVAL;
1130 }
1131
1132 if (!UBCINFOEXISTS(vp)) {
1133 return ENOENT;
1134 }
1135
1136 uip = vp->v_ubcinfo;
1137 osize = uip->ui_size;
1138
1139 if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
1140 return EAGAIN;
1141 }
1142
1143 /*
1144 * Update the size before flushing the VM
1145 */
1146 uip->ui_size = nsize;
1147
1148 if (nsize >= osize) { /* Nothing more to do */
1149 if (nsize > osize) {
1150 lock_vnode_and_post(vp, NOTE_EXTEND);
1151 }
1152
1153 return 0;
1154 }
1155
1156 /*
1157 * When the file shrinks, invalidate the pages beyond the
1158 * new size. Also get rid of garbage beyond nsize on the
1159 * last page. The ui_size already has the nsize, so any
1160 * subsequent page-in will zero-fill the tail properly
1161 */
1162 lastpg = trunc_page_64(nsize);
1163 olastpgend = round_page_64(osize);
1164 control = uip->ui_control;
1165 assert(control);
1166 lastoff = (nsize & PAGE_MASK_64);
1167
1168 if (lastoff) {
1169 upl_t upl;
1170 upl_page_info_t *pl;
1171
1172 /*
1173 * new EOF ends up in the middle of a page
1174 * zero the tail of this page if it's currently
1175 * present in the cache
1176 */
1177 kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
1178
1179 if (kret != KERN_SUCCESS) {
1180 panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret);
1181 }
1182
1183 if (upl_valid_page(pl, 0)) {
1184 cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1185 }
1186
1187 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1188
1189 lastpg += PAGE_SIZE_64;
1190 }
1191 if (olastpgend > lastpg) {
1192 int flags;
1193
1194 if (lastpg == 0) {
1195 flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
1196 } else {
1197 flags = MEMORY_OBJECT_DATA_FLUSH;
1198 }
1199 /*
1200 * invalidate the pages beyond the new EOF page
1201 *
1202 */
1203 kret = memory_object_lock_request(control,
1204 (memory_object_offset_t)lastpg,
1205 (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1206 MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1207 if (kret != KERN_SUCCESS) {
1208 printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1209 }
1210 }
1211 return mach_to_bsd_errno(kret);
1212 }
1213
1214 // Returns true for success
1215 int
1216 ubc_setsize(vnode_t vp, off_t nsize)
1217 {
1218 return ubc_setsize_ex(vp, nsize, 0) == 0;
1219 }
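/*
 * A minimal sketch (not compiled) of how a filesystem truncate path might
 * drive ubc_setsize_ex(): with UBC_SETSIZE_NO_FS_REENTRY a shrink is refused
 * with EAGAIN rather than flushing pages (which could re-enter the
 * filesystem), so the caller retries without the flag once it is safe.
 * The helper name is illustrative.
 */
#if 0
static errno_t
example_truncate_notify(vnode_t vp, off_t new_size)
{
	errno_t error = ubc_setsize_ex(vp, new_size, UBC_SETSIZE_NO_FS_REENTRY);

	if (error == EAGAIN) {
		/* retry outside the critical section, allowing FS re-entry */
		error = ubc_setsize_ex(vp, new_size, 0);
	}
	return error;
}
#endif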
1220
1221 /*
1222 * ubc_getsize
1223 *
1224 * Get the size of the file associated with the specified vnode
1225 *
1226 * Parameters: vp The vnode whose size is of interest
1227 *
1228 * Returns: 0 There is no ubc_info associated with
1229 * this vnode, or the size is zero
1230 * !0 The size of the file
1231 *
1232 * Notes: Using this routine, it is not possible for a caller to
1233 * successfully distinguish between a vnode associated with a zero
1234 * length file, and a vnode with no associated ubc_info. The
1235 * caller therefore needs to not care, or needs to ensure that
1236 * they have previously successfully called ubc_info_init() or
1237 * ubc_info_init_withsize().
1238 */
1239 off_t
1240 ubc_getsize(struct vnode *vp)
1241 {
1242 /* people depend on the side effect of this working this way
1243 * as they call this for directories
1244 */
1245 if (!UBCINFOEXISTS(vp)) {
1246 return (off_t)0;
1247 }
1248 return vp->v_ubcinfo->ui_size;
1249 }
1250
1251
1252 /*
1253 * ubc_umount
1254 *
1255 * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1256 * mount point
1257 *
1258 * Parameters: mp The mount point
1259 *
1260 * Returns: 0 Success
1261 *
1262 * Notes: There is no failure indication for this function.
1263 *
1264 * This function is used in the unmount path; since it may block
1265 * I/O indefinitely, it should not be used in the forced unmount
1266 * path, since a device unavailability could also block that
1267 * indefinitely.
1268 *
1269 * Because there is no device ejection interlock on USB, FireWire,
1270 * or similar devices, it's possible that an ejection that begins
1271 * subsequent to the vnode_iterate() completing, either on one of
1272 * those devices, or a network mount for which the server quits
1273 * responding, etc., may cause the caller to block indefinitely.
1274 */
1275 __private_extern__ int
1276 ubc_umount(struct mount *mp)
1277 {
1278 vnode_iterate(mp, 0, ubc_umcallback, 0);
1279 return 0;
1280 }
1281
1282
1283 /*
1284 * ubc_umcallback
1285 *
1286 * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1287 * and vnode_iterate() for details of implementation.
1288 */
1289 static int
1290 ubc_umcallback(vnode_t vp, __unused void * args)
1291 {
1292 if (UBCINFOEXISTS(vp)) {
1293 (void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1294 }
1295 return VNODE_RETURNED;
1296 }
1297
1298
1299 /*
1300 * ubc_getcred
1301 *
1302 * Get the credentials currently active for the ubc_info associated with the
1303 * vnode.
1304 *
1305 * Parameters: vp The vnode whose ubc_info credentials
1306 * are to be retrieved
1307 *
1308 * Returns: !NOCRED The credentials
1309 * NOCRED If there is no ubc_info for the vnode,
1310 * or if there is one, but it has not had
1311 * any credentials associated with it via
1312 * a call to ubc_setcred()
1313 */
1314 kauth_cred_t
1315 ubc_getcred(struct vnode *vp)
1316 {
1317 if (UBCINFOEXISTS(vp)) {
1318 return vp->v_ubcinfo->ui_ucred;
1319 }
1320
1321 return NOCRED;
1322 }
1323
1324
1325 /*
1326 * ubc_setthreadcred
1327 *
1328 * If they are not already set, set the credentials of the ubc_info structure
1329 * associated with the vnode to those of the supplied thread; otherwise leave
1330 * them alone.
1331 *
1332 * Parameters: vp The vnode whose ubc_info creds are to
1333 * be set
1334 * p The process whose credentials are to
1335 * be used, if not running on an assumed
1336 * credential
1337 * thread The thread whose credentials are to
1338 * be used
1339 *
1340 * Returns: 1 This vnode has no associated ubc_info
1341 * 0 Success
1342 *
1343 * Notes: This function takes a proc parameter to account for bootstrap
1344 * issues where a task or thread may call this routine, either
1345 * before credentials have been initialized by bsd_init(), or if
1346 * there is no BSD info associated with a mach thread yet. This
1347 * is known to happen in both the initial swap and memory mapping
1348 * calls.
1349 *
1350 * This function is generally used only in the following cases:
1351 *
1352 * o a memory mapped file via the mmap() system call
1353 * o a swap store backing file
1354 * o subsequent to a successful write via vn_write()
1355 *
1356 * The information is then used by the NFS client in order to
1357 * cons up a wire message in either the page-in or page-out path.
1358 *
1359 * There are two potential problems with the use of this API:
1360 *
1361 * o Because the write path only sets it on a successful
1362 * write, there is a race window between setting the
1363 * credential and its use to evict the pages to the
1364 * remote file server
1365 *
1366 * o Because a page-in may occur prior to a write, the
1367 * credential may not be set at this time, if the page-in
1368 * is not the result of a mapping established via mmap().
1369 *
1370 * In both these cases, this will be triggered from the paging
1371 * path, which will instead use the credential of the current
1372 * process, which in this case is either the dynamic_pager or
1373 * the kernel task, both of which utilize "root" credentials.
1374 *
1375 * This may potentially permit operations to occur which should
1376 * be denied, or it may deny operations which
1377 * should be permitted, depending on the configuration of the NFS
1378 * server.
1379 */
1380 int
1381 ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
1382 {
1383 struct ubc_info *uip;
1384 kauth_cred_t credp;
1385 struct uthread *uthread = get_bsdthread_info(thread);
1386
1387 if (!UBCINFOEXISTS(vp)) {
1388 return 1;
1389 }
1390
1391 vnode_lock(vp);
1392
1393 uip = vp->v_ubcinfo;
1394 credp = uip->ui_ucred;
1395
1396 if (!IS_VALID_CRED(credp)) {
1397 /* use per-thread cred, if assumed identity, else proc cred */
1398 if (uthread == NULL || (uthread->uu_flag & UT_SETUID) == 0) {
1399 uip->ui_ucred = kauth_cred_proc_ref(p);
1400 } else {
1401 uip->ui_ucred = uthread->uu_ucred;
1402 kauth_cred_ref(uip->ui_ucred);
1403 }
1404 }
1405 vnode_unlock(vp);
1406
1407 return 0;
1408 }
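/*
 * A minimal sketch (not compiled) of the post-write use described above: the
 * write path stamps the ubc_info with the writer's credential so a later
 * page-out (e.g. by the NFS client) can be issued with it.  The helper name
 * is illustrative; vfs_context_proc()/vfs_context_thread() are the standard
 * accessors assumed to be available here.
 */
#if 0
static void
example_after_successful_write(vnode_t vp, vfs_context_t ctx)
{
	/*
	 * Idempotent: only the first caller attaches a credential; a return
	 * of 1 just means the vnode has no ubc_info.
	 */
	(void) ubc_setthreadcred(vp, vfs_context_proc(ctx),
	    vfs_context_thread(ctx));
}
#endif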
1409
1410
1411 /*
1412 * ubc_setcred
1413 *
1414 * If they are not already set, set the credentials of the ubc_info structure
1415 * associated with the vnode to those of the process; otherwise leave them
1416 * alone.
1417 *
1418 * Parameters: vp The vnode whose ubc_info creds are to
1419 * be set
1420 * p The process whose credentials are to
1421 * be used
1422 *
1423 * Returns: 0 This vnode has no associated ubc_info
1424 * 1 Success
1425 *
1426 * Notes: The return values for this function are inverted from nearly
1427 * all other uses in the kernel.
1428 *
1429 * See also ubc_setthreadcred(), above.
1430 *
1431 * This function is considered deprecated, and generally should
1432 * not be used, as it is incompatible with per-thread credentials;
1433 * it exists for legacy KPI reasons.
1434 *
1435 * DEPRECATION: ubc_setcred() is being deprecated. Please use
1436 * ubc_setthreadcred() instead.
1437 */
1438 int
1439 ubc_setcred(struct vnode *vp, proc_t p)
1440 {
1441 struct ubc_info *uip;
1442 kauth_cred_t credp;
1443
1444 /* If there is no ubc_info, deny the operation */
1445 if (!UBCINFOEXISTS(vp)) {
1446 return 0;
1447 }
1448
1449 /*
1450 * Check to see if there is already a credential reference in the
1451 * ubc_info; if there is not, take one on the supplied credential.
1452 */
1453 vnode_lock(vp);
1454 uip = vp->v_ubcinfo;
1455 credp = uip->ui_ucred;
1456 if (!IS_VALID_CRED(credp)) {
1457 uip->ui_ucred = kauth_cred_proc_ref(p);
1458 }
1459 vnode_unlock(vp);
1460
1461 return 1;
1462 }
1463
1464 /*
1465 * ubc_getpager
1466 *
1467 * Get the pager associated with the ubc_info associated with the vnode.
1468 *
1469 * Parameters: vp The vnode to obtain the pager from
1470 *
1471 * Returns: !VNODE_PAGER_NULL The memory_object_t for the pager
1472 * VNODE_PAGER_NULL There is no ubc_info for this vnode
1473 *
1474 * Notes: For each vnode that has a ubc_info associated with it, that
1475 * ubc_info SHALL have a pager associated with it, so in the
1476 * normal case, it's impossible to return VNODE_PAGER_NULL for
1477 * a vnode with an associated ubc_info.
1478 */
1479 __private_extern__ memory_object_t
1480 ubc_getpager(struct vnode *vp)
1481 {
1482 if (UBCINFOEXISTS(vp)) {
1483 return vp->v_ubcinfo->ui_pager;
1484 }
1485
1486 return 0;
1487 }
1488
1489
1490 /*
1491 * ubc_getobject
1492 *
1493 * Get the memory object control associated with the ubc_info associated with
1494 * the vnode
1495 *
1496 * Parameters: vp The vnode to obtain the memory object
1497 * from
1498 * flags DEPRECATED
1499 *
1500 * Returns: !MEMORY_OBJECT_CONTROL_NULL
1501 * MEMORY_OBJECT_CONTROL_NULL
1502 *
1503 * Notes: Historically, if the flags were not "do not reactivate", this
1504 * function would look up the memory object using the pager if
1505 * it did not exist (this could be the case if the vnode had
1506 * been previously reactivated). The flags would also permit a
1507 * hold to be requested, which would have created an object
1508 * reference, if one had not already existed. This usage is
1509 * deprecated, as it would permit a race between finding and
1510 * taking the reference vs. a single reference being dropped in
1511 * another thread.
1512 */
1513 memory_object_control_t
1514 ubc_getobject(struct vnode *vp, __unused int flags)
1515 {
1516 if (UBCINFOEXISTS(vp)) {
1517 return vp->v_ubcinfo->ui_control;
1518 }
1519
1520 return MEMORY_OBJECT_CONTROL_NULL;
1521 }
1522
1523 /*
1524 * ubc_blktooff
1525 *
1526 * Convert a given block number to a memory backing object (file) offset for a
1527 * given vnode
1528 *
1529 * Parameters: vp The vnode in which the block is located
1530 * blkno The block number to convert
1531 *
1532 * Returns: !-1 The offset into the backing object
1533 * -1 There is no ubc_info associated with
1534 * the vnode
1535 * -1 An error occurred in the underlying VFS
1536 * while translating the block to an
1537 * offset; the most likely cause is that
1538 * the caller specified a block past the
1539 * end of the file, but this could also be
1540 * any other error from VNOP_BLKTOOFF().
1541 *
1542 * Note: Representing the error in band loses some information, but does
1543 * not occlude a valid offset, since an off_t of -1 is normally
1544 * used to represent EOF. If we had a more reliable constant in
1545 * our header files for it (i.e. explicitly cast to an off_t), we
1546 * would use it here instead.
1547 */
1548 off_t
1549 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1550 {
1551 off_t file_offset = -1;
1552 int error;
1553
1554 if (UBCINFOEXISTS(vp)) {
1555 error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1556 if (error) {
1557 file_offset = -1;
1558 }
1559 }
1560
1561 return file_offset;
1562 }
1563
1564
1565 /*
1566 * ubc_offtoblk
1567 *
1568 * Convert a given offset in a memory backing object into a block number for a
1569 * given vnode
1570 *
1571 * Parameters: vp The vnode in which the offset is
1572 * located
1573 * offset The offset into the backing object
1574 *
1575 * Returns: !-1 The returned block number
1576 * -1 There is no ubc_info associated with
1577 * the vnode
1578 * -1 An error occurred in the underlying VFS
1579 * while translating the block to an
1580 * offset; the most likely cause is that
1581 * the caller specified a block past the
1582 * end of the file, but this could also be
1583 * any other error from VNOP_OFFTOBLK().
1584 *
1585 * Note: Representing the error in band loses some information, but does
1586 * not occlude a valid block number, since block numbers exceed
1587 * the valid range for offsets, due to their relative sizes. If
1588 * we had a more reliable constant than -1 in our header files
1589 * for it (i.e. explicitly cast to an daddr64_t), we would use it
1590 * for it (i.e. explicitly cast to a daddr64_t), we would use it
1591 */
1592 daddr64_t
1593 ubc_offtoblk(vnode_t vp, off_t offset)
1594 {
1595 daddr64_t blkno = -1;
1596 int error = 0;
1597
1598 if (UBCINFOEXISTS(vp)) {
1599 error = VNOP_OFFTOBLK(vp, offset, &blkno);
1600 if (error) {
1601 blkno = -1;
1602 }
1603 }
1604
1605 return blkno;
1606 }
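/*
 * A minimal sketch (not compiled) of a block/offset round trip; both helpers
 * report errors in band as -1, so each result is checked before use.  The
 * helper name is illustrative.
 */
#if 0
static int
example_offset_roundtrip(vnode_t vp, off_t offset)
{
	daddr64_t blkno = ubc_offtoblk(vp, offset);
	off_t blk_start;

	if (blkno == -1) {
		return ERANGE;  /* no ubc_info, or VNOP_OFFTOBLK() failed */
	}
	blk_start = ubc_blktooff(vp, blkno);
	if (blk_start == -1) {
		return ERANGE;
	}
	/* blk_start is the file offset at which block blkno begins */
	return 0;
}
#endif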
1607
1608
1609 /*
1610 * ubc_pages_resident
1611 *
1612 * Determine whether or not a given vnode has pages resident via the memory
1613 * object control associated with the ubc_info associated with the vnode
1614 *
1615 * Parameters: vp The vnode we want to know about
1616 *
1617 * Returns: 1 Yes
1618 * 0 No
1619 */
1620 int
1621 ubc_pages_resident(vnode_t vp)
1622 {
1623 kern_return_t kret;
1624 boolean_t has_pages_resident;
1625
1626 if (!UBCINFOEXISTS(vp)) {
1627 return 0;
1628 }
1629
1630 /*
1631 * The following call may fail if an invalid ui_control is specified,
1632 * or if there is no VM object associated with the control object. In
1633 * either case, reacting to it as if there were no pages resident will
1634 * result in correct behavior.
1635 */
1636 kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1637
1638 if (kret != KERN_SUCCESS) {
1639 return 0;
1640 }
1641
1642 if (has_pages_resident == TRUE) {
1643 return 1;
1644 }
1645
1646 return 0;
1647 }
1648
1649 /*
1650 * ubc_msync
1651 *
1652 * Clean and/or invalidate a range in the memory object that backs this vnode
1653 *
1654 * Parameters: vp The vnode whose associated ubc_info's
1655 * associated memory object is to have a
1656 * range invalidated within it
1657 * beg_off The start of the range, as an offset
1658 * end_off The end of the range, as an offset
1659 * resid_off The address of an off_t supplied by the
1660 * caller; may be set to NULL to ignore
1661 * flags See ubc_msync_internal()
1662 *
1663 * Returns: 0 Success
1664 * !0 Failure; an errno is returned
1665 *
1666 * Implicit Returns:
1667 * *resid_off, modified If non-NULL, the contents are ALWAYS
1668 * modified; they are initialized to the
1669 * beg_off, and in case of an I/O error,
1670 * the difference between beg_off and the
1671 * current value will reflect what was
1672 * able to be written before the error
1673 * occurred. If no error is returned, the
1674 * value of the resid_off is undefined; do
1675 * NOT use it in place of end_off if you
1676 * intend to increment from the end of the
1677 * last call and call iteratively.
1678 *
1679 * Notes: see ubc_msync_internal() for more detailed information.
1680 *
1681 */
1682 errno_t
1683 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1684 {
1685 int retval;
1686 int io_errno = 0;
1687
1688 if (resid_off) {
1689 *resid_off = beg_off;
1690 }
1691
1692 retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1693
1694 if (retval == 0 && io_errno == 0) {
1695 return EINVAL;
1696 }
1697 return io_errno;
1698 }
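/*
 * A minimal sketch (not compiled) of two common flag combinations: push
 * dirty pages synchronously, then drop the now-clean pages from the cache.
 * The helper name is illustrative.
 */
#if 0
static errno_t
example_push_and_invalidate(vnode_t vp, off_t start, off_t end)
{
	off_t resid = 0;
	errno_t error;

	/* write back dirty pages in [start, end) and wait for the I/O */
	error = ubc_msync(vp, start, end, &resid, UBC_PUSHDIRTY | UBC_SYNC);
	if (error) {
		return error;
	}
	/* discard the cached pages for the same range */
	return ubc_msync(vp, start, end, NULL, UBC_INVALIDATE);
}
#endif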
1699
1700
1701 /*
1702 * ubc_msync_internal
1703 *
1704 * Clean and/or invalidate a range in the memory object that backs this vnode
1705 *
1706 * Parameters: vp The vnode whose associated ubc_info's
1707 * associated memory object is to have a
1708 * range invalidated within it
1709 * beg_off The start of the range, as an offset
1710 * end_off The end of the range, as an offset
1711 * resid_off The address of an off_t supplied by the
1712 * caller; may be set to NULL to ignore
1713 * flags MUST contain at least one of the flags
1714 * UBC_INVALIDATE, UBC_PUSHDIRTY, or
1715 * UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1716 * UBC_SYNC may also be specified to cause
1717 * this function to block until the
1718 * operation is complete. The behavior
1719 * of UBC_SYNC is otherwise undefined.
1720 * io_errno The address of an int to contain the
1721 * errno from a failed I/O operation, if
1722 * one occurs; may be set to NULL to
1723 * ignore
1724 *
1725 * Returns: 1 Success
1726 * 0 Failure
1727 *
1728 * Implicit Returns:
1729 * *resid_off, modified The contents of this offset MAY be
1730 * modified; in case of an I/O error, the
1731 * difference between beg_off and the
1732 * current value will reflect what was
1733 * able to be written before the error
1734 * occurred.
1735 * *io_errno, modified The contents of this offset are set to
1736 * an errno, if an error occurs; if the
1737 * caller supplies an io_errno parameter,
1738 * they should be careful to initialize it
1739 * to 0 before calling this function to
1740 * enable them to distinguish an error
1741 * with a valid *resid_off from an invalid
1742 * one, and to avoid potentially falsely
1743 * reporting an error, depending on use.
1744 *
1745 * Notes: If there is no ubc_info associated with the vnode supplied,
1746 * this function immediately returns success.
1747 *
1748 * If the value of end_off is less than or equal to beg_off, this
1749 * function immediately returns success; that is, end_off is NOT
1750 * inclusive.
1751 *
1752 * IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1753 * UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1754 * attempt to block on in-progress I/O by calling this function
1755 * with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1756 * in order to block pending on the I/O already in progress.
1757 *
1758 * The start offset is truncated to the page boundary and the
1759 * size is adjusted to include the last page in the range; that
1760 * is, end_off on exactly a page boundary will not change if it
1761 * is rounded, and the range of bytes written will be from the
1762 * truncated beg_off to the rounded (end_off - 1).
1763 */
1764 static int
1765 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1766 {
1767 memory_object_size_t tsize;
1768 kern_return_t kret;
1769 int request_flags = 0;
1770 int flush_flags = MEMORY_OBJECT_RETURN_NONE;
1771
1772 if (!UBCINFOEXISTS(vp)) {
1773 return 0;
1774 }
1775 if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1776 return 0;
1777 }
1778 if (end_off <= beg_off) {
1779 return 1;
1780 }
1781
1782 if (flags & UBC_INVALIDATE) {
1783 /*
1784 * discard the resident pages
1785 */
1786 request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1787 }
1788
1789 if (flags & UBC_SYNC) {
1790 /*
1791 * wait for all the I/O to complete before returning
1792 */
1793 request_flags |= MEMORY_OBJECT_IO_SYNC;
1794 }
1795
1796 if (flags & UBC_PUSHDIRTY) {
1797 /*
1798 * we only return the dirty pages in the range
1799 */
1800 flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1801 }
1802
1803 if (flags & UBC_PUSHALL) {
1804 /*
1805 * then return all the interesting pages in the range (both
1806 * dirty and precious) to the pager
1807 */
1808 flush_flags = MEMORY_OBJECT_RETURN_ALL;
1809 }
1810
1811 beg_off = trunc_page_64(beg_off);
1812 end_off = round_page_64(end_off);
1813 tsize = (memory_object_size_t)end_off - beg_off;
1814
1815 /* flush and/or invalidate pages in the range requested */
1816 kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1817 beg_off, tsize,
1818 (memory_object_offset_t *)resid_off,
1819 io_errno, flush_flags, request_flags,
1820 VM_PROT_NO_CHANGE);
1821
1822 return (kret == KERN_SUCCESS) ? 1 : 0;
1823 }
1824
1825
1826 /*
1827 * ubc_map
1828 *
1829 * Explicitly map a vnode that has an associated ubc_info, and add a reference
1830 * to it for the ubc system, if there isn't one already, so it will not be
1831 * recycled while it's in use, and set flags on the ubc_info to indicate that
1832 * we have done this
1833 *
1834 * Parameters: vp The vnode to map
1835 * flags The mapping flags for the vnode; this
1836 * will be a combination of one or more of
1837 * PROT_READ, PROT_WRITE, and PROT_EXEC
1838 *
1839 * Returns: 0 Success
1840 * EPERM Permission was denied
1841 *
1842 * Notes: An I/O reference on the vnode must already be held on entry
1843 *
1844 * If there is no ubc_info associated with the vnode, this function
1845 * will return success.
1846 *
1847 * If a permission error occurs, this function will return
1848 * failure; all other failures will cause this function to return
1849 * success.
1850 *
1851 * IMPORTANT: This is an internal use function, and its symbols
1852 * are not exported, hence its error checking is not very robust.
1853 * It is primarily used by:
1854 *
1855 * o mmap(), when mapping a file
1856 * o When mapping a shared file (a shared library in the
1857 * shared segment region)
1858 * o When loading a program image during the exec process
1859 *
1860 * ...all of these uses ignore the return code, and any fault that
1861 * results later because of a failure is handled in the fix-up path
1862 * of the fault handler. The interface exists primarily as a
1863 * performance hint.
1864 *
1865 * Given that third party implementations of the types of interfaces
1866 * that would use this function, such as alternative executable
1867 * formats, etc., are unsupported, this function is not exported
1868 * for general use.
1869 *
1870 * The extra reference is held until the VM system unmaps the
1871 * vnode from its own context to maintain a vnode reference in
1872 * cases like open()/mmap()/close(), which leave the backing
1873 * object referenced by a mapped memory region in a process
1874 * address space.
1875 */
1876 __private_extern__ int
1877 ubc_map(vnode_t vp, int flags)
1878 {
1879 struct ubc_info *uip;
1880 int error = 0;
1881 int need_ref = 0;
1882 int need_wakeup = 0;
1883
1884 if (UBCINFOEXISTS(vp)) {
1885 vnode_lock(vp);
1886 uip = vp->v_ubcinfo;
1887
1888 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
1889 SET(uip->ui_flags, UI_MAPWAITING);
1890 (void) msleep(&uip->ui_flags, &vp->v_lock,
1891 PRIBIO, "ubc_map", NULL);
1892 }
1893 SET(uip->ui_flags, UI_MAPBUSY);
1894 vnode_unlock(vp);
1895
1896 error = VNOP_MMAP(vp, flags, vfs_context_current());
1897
1898 /*
1899 * rdar://problem/22587101 required that we stop propagating
1900 * EPERM up the stack. Otherwise, we would have to funnel up
1901 * the error at all the call sites for memory_object_map().
1902 * The risk is in having to undo the map/object/entry state at
1903 * all these call sites. It would also affect more than just mmap()
1904 * e.g. vm_remap().
1905 *
1906 * if (error != EPERM)
1907 * error = 0;
1908 */
1909
1910 error = 0;
1911
1912 vnode_lock_spin(vp);
1913
1914 if (error == 0) {
1915 if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
1916 need_ref = 1;
1917 }
1918 SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
1919 if (flags & PROT_WRITE) {
1920 SET(uip->ui_flags, UI_MAPPEDWRITE);
1921 }
1922 }
1923 CLR(uip->ui_flags, UI_MAPBUSY);
1924
1925 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
1926 CLR(uip->ui_flags, UI_MAPWAITING);
1927 need_wakeup = 1;
1928 }
1929 vnode_unlock(vp);
1930
1931 if (need_wakeup) {
1932 wakeup(&uip->ui_flags);
1933 }
1934
1935 if (need_ref) {
1936 /*
1937 * Make sure we get a ref as we can't unwind from here
1938 */
1939 if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
1940 panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__);
1941 }
1942 /*
1943 * Vnodes that are on "unreliable" media (like disk
1944 * images, network filesystems, 3rd-party filesystems,
1945 * and possibly external devices) could see their
1946 * contents be changed via the backing store without
1947 * triggering copy-on-write, so we can't fully rely
1948 * on copy-on-write and might have to resort to
1949 * copy-on-read to protect "privileged" processes and
1950 * prevent privilege escalation.
1951 *
1952 * The root filesystem is considered "reliable" because
1953 * there's not much point in trying to protect
1954 * ourselves from such a vulnerability and the extra
1955 * cost of copy-on-read (CPU time and memory pressure)
1956 * could result in some serious regressions.
1957 */
1958 if (vp->v_mount != NULL &&
1959 ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
1960 vnode_on_reliable_media(vp))) {
1961 /*
1962 * This vnode is deemed "reliable" so mark
1963 * its VM object as "trusted".
1964 */
1965 memory_object_mark_trusted(uip->ui_control);
1966 } else {
1967 // printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
1968 }
1969 }
1970 }
1971 return error;
1972 }
1973
1974
1975 /*
1976 * ubc_destroy_named
1977 *
1978 * Destroy the named memory object associated with the ubc_info control object
1979 * associated with the designated vnode, if there is a ubc_info associated
1980 * with the vnode, and a control object is associated with it
1981 *
1982 * Parameters: vp The designated vnode
1983 *
1984 * Returns: (void)
1985 *
1986 * Notes: This function is called on vnode termination for all vnodes,
1987 * and must therefore not assume that there is a ubc_info that is
1988 * associated with the vnode, nor that there is a control object
1989 * associated with the ubc_info.
1990 *
1991 * If all the conditions necessary are present, this function
1992 * calls memory_object_destroy(), which will in turn end up
1993 * calling ubc_unmap() to release any vnode references that were
1994 * established via ubc_map().
1995 *
1996 * IMPORTANT: This is an internal use function that is used
1997 * exclusively by the internal use function vclean().
1998 */
1999 __private_extern__ void
2000 ubc_destroy_named(vnode_t vp)
2001 {
2002 memory_object_control_t control;
2003 struct ubc_info *uip;
2004 kern_return_t kret;
2005
2006 if (UBCINFOEXISTS(vp)) {
2007 uip = vp->v_ubcinfo;
2008
2009 /* Terminate the memory object */
2010 control = ubc_getobject(vp, UBC_HOLDOBJECT);
2011 if (control != MEMORY_OBJECT_CONTROL_NULL) {
2012 kret = memory_object_destroy(control, 0);
2013 if (kret != KERN_SUCCESS) {
2014 panic("ubc_destroy_named: memory_object_destroy failed");
2015 }
2016 }
2017 }
2018 }
2019
2020
2021 /*
2022 * ubc_isinuse
2023 *
2024 * Determine whether or not a vnode is currently in use by ubc at a level in
2025 * excess of the requested busycount
2026 *
2027 * Parameters: vp The vnode to check
2028 * busycount The threshold busy count, used to bias
2029 * the count usually already held by the
2030 * caller to avoid races
2031 *
2032 * Returns: 1 The vnode is in use over the threshold
2033 * 0 The vnode is not in use over the
2034 * threshold
2035 *
2036 * Notes: Because the vnode is only held locked while actually asking
2037 * the use count, this function only represents a snapshot of the
2038 * current state of the vnode. If more accurate information is
2039 * required, an additional busycount should be held by the caller
2040 * and a non-zero busycount used.
2041 *
2042 * If there is no ubc_info associated with the vnode, this
2043 * function will report that the vnode is not in use by ubc.
2044 */
2045 int
2046 ubc_isinuse(struct vnode *vp, int busycount)
2047 {
2048 if (!UBCINFOEXISTS(vp)) {
2049 return 0;
2050 }
2051 return ubc_isinuse_locked(vp, busycount, 0);
2052 }
2053
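/*
 * Illustrative sketch (not part of the original source): a typical caller
 * already holds one use count on "vp" (e.g. from vnode_getwithref()), so it
 * biases by 1 to ask whether anyone *else* is using the vnode through ubc.
 */
#if 0	/* example only -- not compiled */
	if (ubc_isinuse(vp, 1)) {
		/* the file is mapped or otherwise busy beyond our own reference */
	}
#endif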
2054
2055 /*
2056 * ubc_isinuse_locked
2057 *
2058 * Determine whether or not a vnode is currently in use by ubc at a level in
2059 * excess of the requested busycount
2060 *
2061 * Parameters: vp The vnode to check
2062 * busycount The threshold busy count, used to bias
2063 * the count usually already held by the
2064 * caller to avoid races
2065 * locked True if the vnode is already locked by
2066 * the caller
2067 *
2068 * Returns: 1 The vnode is in use over the threshold
2069 * 0 The vnode is not in use over the
2070 * threshold
2071 *
2072 * Notes: If the vnode is not locked on entry, it is locked while
2073 * actually asking the use count. If this is the case, this
2074 * function only represents a snapshot of the current state of
2075 * the vnode. If more accurate information is required, the
2076 * vnode lock should be held by the caller, otherwise an
2077 * additional busycount should be held by the caller and a
2078 * non-zero busycount used.
2079 *
2080 * If there is no ubc_info associated with the vnode, this
2081 * function will report that the vnode is not in use by ubc.
2082 */
2083 int
2084 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2085 {
2086 int retval = 0;
2087
2088
2089 if (!locked) {
2090 vnode_lock_spin(vp);
2091 }
2092
2093 if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2094 retval = 1;
2095 }
2096
2097 if (!locked) {
2098 vnode_unlock(vp);
2099 }
2100 return retval;
2101 }
2102
2103
2104 /*
2105 * ubc_unmap
2106 *
2107 * Reverse the effects of a ubc_map() call for a given vnode
2108 *
2109 * Parameters: vp vnode to unmap from ubc
2110 *
2111 * Returns: (void)
2112 *
2113 * Notes: This is an internal use function used by vnode_pager_unmap().
2114 * It will attempt to obtain a reference on the supplied vnode,
2115 * and if it can do so, and there is an associated ubc_info, and
2116 * the flags indicate that it was mapped via ubc_map(), then the
2117 * flag is cleared, the mapping removed, and the reference taken
2118 * by ubc_map() is released.
2119 *
2120 * IMPORTANT: This MUST only be called by the VM
2121 * to prevent race conditions.
2122 */
2123 __private_extern__ void
2124 ubc_unmap(struct vnode *vp)
2125 {
2126 struct ubc_info *uip;
2127 int need_rele = 0;
2128 int need_wakeup = 0;
2129
2130 if (vnode_getwithref(vp)) {
2131 return;
2132 }
2133
2134 if (UBCINFOEXISTS(vp)) {
2135 bool want_fsevent = false;
2136
2137 vnode_lock(vp);
2138 uip = vp->v_ubcinfo;
2139
2140 while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2141 SET(uip->ui_flags, UI_MAPWAITING);
2142 (void) msleep(&uip->ui_flags, &vp->v_lock,
2143 PRIBIO, "ubc_unmap", NULL);
2144 }
2145 SET(uip->ui_flags, UI_MAPBUSY);
2146
2147 if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
2148 if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
2149 want_fsevent = true;
2150 }
2151
2152 need_rele = 1;
2153
2154 /*
2155 * We want to clear the mapped flags after we've called
2156 * VNOP_MNOMAP to avoid certain races and allow
2157 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2158 */
2159 }
2160 vnode_unlock(vp);
2161
2162 if (need_rele) {
2163 vfs_context_t ctx = vfs_context_current();
2164
2165 (void)VNOP_MNOMAP(vp, ctx);
2166
2167 #if CONFIG_FSE
2168 /*
2169 * Why do we want an fsevent here? Normally the
2170 * content modified fsevent is posted when a file is
2171 * closed and only if it's written to via conventional
2172 * means. It's perfectly legal to close a file and
2173 * keep your mappings and we don't currently track
2174 * whether it was written to via a mapping.
2175 * Therefore, we need to post an fsevent here if the
2176 * file was mapped writable. This may result in false
2177 * events, i.e. we post a notification when nothing
2178 * has really changed.
2179 */
2180 if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2181 add_fsevent(FSE_CONTENT_MODIFIED, ctx,
2182 FSE_ARG_VNODE, vp,
2183 FSE_ARG_DONE);
2184 }
2185 #endif
2186
2187 vnode_rele(vp);
2188 }
2189
2190 vnode_lock_spin(vp);
2191
2192 if (need_rele) {
2193 CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
2194 }
2195
2196 CLR(uip->ui_flags, UI_MAPBUSY);
2197
2198 if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2199 CLR(uip->ui_flags, UI_MAPWAITING);
2200 need_wakeup = 1;
2201 }
2202 vnode_unlock(vp);
2203
2204 if (need_wakeup) {
2205 wakeup(&uip->ui_flags);
2206 }
2207 }
2208 /*
2209 * the drop of the vnode ref will cleanup
2210 */
2211 vnode_put(vp);
2212 }
2213
2214
2215 /*
2216 * ubc_page_op
2217 *
2218 * Manipulate individual page state for a vnode with an associated ubc_info
2219 * with an associated memory object control.
2220 *
2221 * Parameters: vp The vnode backing the page
2222 * f_offset A file offset interior to the page
2223 * ops The operations to perform, as a bitmap
2224 * (see below for more information)
2225 * phys_entryp The address of a ppnum_t; may be NULL
2226 * to ignore
2227 * flagsp A pointer to an int to contain flags;
2228 * may be NULL to ignore
2229 *
2230 * Returns: KERN_SUCCESS Success
2231 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2232 * object associated
2233 * KERN_INVALID_OBJECT If UPL_POP_PHYSICAL and the object is
2234 * not physically contiguous
2235 * KERN_INVALID_OBJECT If !UPL_POP_PHYSICAL and the object is
2236 * physically contiguous
2237 * KERN_FAILURE If the page cannot be looked up
2238 *
2239 * Implicit Returns:
2240 * *phys_entryp (modified) If phys_entryp is non-NULL and
2241 * UPL_POP_PHYSICAL
2242 * *flagsp (modified) If flagsp is non-NULL, UPL_POP_PHYSICAL
2243 * was not requested, and the call returned KERN_SUCCESS
2244 *
2245 * Notes: For object boundaries, it is considerably more efficient to
2246 * ensure that f_offset is in fact on a page boundary, as this
2247 * will avoid internal use of the hash table to identify the
2248 * page, and would therefore skip a number of early optimizations.
2249 * Since this is a page operation anyway, the caller should try
2250 * to pass only a page aligned offset because of this.
2251 *
2252 * *flagsp may be modified even if this function fails. If it is
2253 * modified, it will contain the condition of the page before the
2254 * requested operation was attempted; these will only include the
2255 * bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2256 * UPL_POP_SET, or UPL_POP_CLR bits.
2257 *
2258 * The flags field may contain a specific operation, such as
2259 * UPL_POP_PHYSICAL or UPL_POP_DUMP:
2260 *
2261 * o UPL_POP_PHYSICAL Fail if not contiguous; if
2262 * *phys_entryp and successful, set
2263 * *phys_entryp
2264 * o UPL_POP_DUMP Dump the specified page
2265 *
2266 * Otherwise, it is treated as a bitmap of one or more page
2267 * operations to perform on the final memory object; allowable
2268 * bit values are:
2269 *
2270 * o UPL_POP_DIRTY The page is dirty
2271 * o UPL_POP_PAGEOUT The page is paged out
2272 * o UPL_POP_PRECIOUS The page is precious
2273 * o UPL_POP_ABSENT The page is absent
2274 * o UPL_POP_BUSY The page is busy
2275 *
2276 * If the page status is only being queried and not modified, then
2277 * no other bits should be specified. However, if it is being
2278 * modified, exactly ONE of the following bits should be set:
2279 *
2280 * o UPL_POP_SET Set the current bitmap bits
2281 * o UPL_POP_CLR Clear the current bitmap bits
2282 *
2283 * Thus to effect a combination of setting and clearing, it may be
2284 * necessary to call this function twice. If this is done, the
2285 * set should be used before the clear, since clearing may trigger
2286 * a wakeup on the destination page, and if the page is backed by
2287 * an encrypted swap file, setting will trigger the decryption
2288 * needed before the wakeup occurs.
2289 */
2290 kern_return_t
2291 ubc_page_op(
2292 struct vnode *vp,
2293 off_t f_offset,
2294 int ops,
2295 ppnum_t *phys_entryp,
2296 int *flagsp)
2297 {
2298 memory_object_control_t control;
2299
2300 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2301 if (control == MEMORY_OBJECT_CONTROL_NULL) {
2302 return KERN_INVALID_ARGUMENT;
2303 }
2304
2305 return memory_object_page_op(control,
2306 (memory_object_offset_t)f_offset,
2307 ops,
2308 phys_entryp,
2309 flagsp);
2310 }
2311
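/*
 * Illustrative sketch (not part of the original source): query, then modify,
 * the state of the page covering "f_offset" on "vp"; both variables are
 * assumed to come from the caller's context.
 */
#if 0	/* example only -- not compiled */
	int page_flags = 0;

	/* pure query: no UPL_POP_SET/UPL_POP_CLR, just report the state bits */
	if (ubc_page_op(vp, f_offset, 0, NULL, &page_flags) == KERN_SUCCESS &&
	    (page_flags & UPL_POP_DIRTY)) {
		/* the resident page at f_offset holds modified data */
	}

	/* modify: exactly one of UPL_POP_SET or UPL_POP_CLR with the state bits */
	(void) ubc_page_op(vp, f_offset, UPL_POP_SET | UPL_POP_BUSY, NULL, NULL);
#endif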
2312
2313 /*
2314 * ubc_range_op
2315 *
2316 * Manipulate page state for a range of memory for a vnode with an associated
2317 * ubc_info with an associated memory object control, when page level state is
2318 * not required to be returned from the call (i.e. there are no phys_entryp or
2319 * flagsp parameters to this call, and it takes a range which may contain
2320 * multiple pages, rather than an offset interior to a single page).
2321 *
2322 * Parameters: vp The vnode backing the page
2323 * f_offset_beg A file offset interior to the start page
2324 * f_offset_end A file offset interior to the end page
2325 * ops The operations to perform, as a bitmap
2326 * (see below for more information)
2327 * range The address of an int; may be NULL to
2328 * ignore
2329 *
2330 * Returns: KERN_SUCCESS Success
2331 * KERN_INVALID_ARGUMENT If the memory object control has no VM
2332 * object associated
2333 * KERN_INVALID_OBJECT If the object is physically contiguous
2334 *
2335 * Implicit Returns:
2336 * *range (modified) If range is non-NULL, its contents will
2337 * be modified to contain the number of
2338 * bytes successfully operated upon.
2339 *
2340 * Notes: IMPORTANT: This function cannot be used on a range that
2341 * consists of physically contiguous pages.
2342 *
2343 * For object boundaries, it is considerably more efficient to
2344 * ensure that f_offset_beg and f_offset_end are in fact on page
2345 * boundaries, as this will avoid internal use of the hash table
2346 * to identify the page, and would therefore skip a number of
2347 * early optimizations. Since this is an operation on a set of
2348 * pages anyway, the caller should try to pass only page aligned
2349 * offsets because of this.
2350 *
2351 * *range will be modified only if this function succeeds.
2352 *
2353 * The flags field MUST contain a specific operation; allowable
2354 * values are:
2355 *
2356 * o UPL_ROP_ABSENT Returns the extent of the range
2357 * presented which is absent, starting
2358 * with the start address presented
2359 *
2360 * o UPL_ROP_PRESENT Returns the extent of the range
2361 * presented which is present (resident),
2362 * starting with the start address
2363 * presented
2364 * o UPL_ROP_DUMP Dump the pages which are found in the
2365 * target object for the target range.
2366 *
2367 * IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2368 * multiple regions in the range, only the first matching region
2369 * is returned.
2370 */
2371 kern_return_t
2372 ubc_range_op(
2373 struct vnode *vp,
2374 off_t f_offset_beg,
2375 off_t f_offset_end,
2376 int ops,
2377 int *range)
2378 {
2379 memory_object_control_t control;
2380
2381 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2382 if (control == MEMORY_OBJECT_CONTROL_NULL) {
2383 return KERN_INVALID_ARGUMENT;
2384 }
2385
2386 return memory_object_range_op(control,
2387 (memory_object_offset_t)f_offset_beg,
2388 (memory_object_offset_t)f_offset_end,
2389 ops,
2390 range);
2391 }
2392
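/*
 * Illustrative sketch (not part of the original source): ask how much of the
 * range starting at "foff" is already resident; "vp", "foff" and "len" are
 * assumed to come from the caller's context.
 */
#if 0	/* example only -- not compiled */
	int resident_bytes = 0;

	if (ubc_range_op(vp, foff, foff + len, UPL_ROP_PRESENT,
	    &resident_bytes) == KERN_SUCCESS) {
		/*
		 * only the first matching region is reported, so this is the
		 * length of the resident run that begins at foff
		 */
	}
#endif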
2393
2394 /*
2395 * ubc_create_upl
2396 *
2397 * Given a vnode, cause the population of a portion of the vm_object; based on
2398 * the nature of the request, the pages returned may contain valid data, or
2399 * they may be uninitialized.
2400 *
2401 * Parameters: vp The vnode from which to create the upl
2402 * f_offset The start offset into the backing store
2403 * represented by the vnode
2404 * bufsize The size of the upl to create
2405 * uplp Pointer to the upl_t to receive the
2406 * created upl; MUST NOT be NULL
2407 * plp Pointer to receive the internal page
2408 * list for the created upl; MAY be NULL
2409 * to ignore
2410 *
2411 * Returns: KERN_SUCCESS The requested upl has been created
2412 * KERN_INVALID_ARGUMENT The bufsize argument is not an even
2413 * multiple of the page size
2414 * KERN_INVALID_ARGUMENT There is no ubc_info associated with
2415 * the vnode, or there is no memory object
2416 * control associated with the ubc_info
2417 * memory_object_upl_request:KERN_INVALID_VALUE
2418 * The supplied upl_flags argument is
2419 * invalid
2420 * Implicit Returns:
2421 * *uplp (modified)
2422 * *plp (modified) If non-NULL, the value of *plp will be
2423 * modified to point to the internal page
2424 * list; this modification may occur even
2425 * if this function is unsuccessful, in
2426 * which case the contents may be invalid
2427 *
2428 * Note: If successful, the returned *uplp MUST subsequently be freed
2429 * via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2430 * ubc_upl_abort(), or ubc_upl_abort_range().
2431 */
2432 kern_return_t
2433 ubc_create_upl_external(
2434 struct vnode *vp,
2435 off_t f_offset,
2436 int bufsize,
2437 upl_t *uplp,
2438 upl_page_info_t **plp,
2439 int uplflags)
2440 {
2441 return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2442 }
2443
2444 kern_return_t
2445 ubc_create_upl_kernel(
2446 struct vnode *vp,
2447 off_t f_offset,
2448 int bufsize,
2449 upl_t *uplp,
2450 upl_page_info_t **plp,
2451 int uplflags,
2452 vm_tag_t tag)
2453 {
2454 memory_object_control_t control;
2455 kern_return_t kr;
2456
2457 if (plp != NULL) {
2458 *plp = NULL;
2459 }
2460 *uplp = NULL;
2461
2462 if (bufsize & 0xfff) {
2463 return KERN_INVALID_ARGUMENT;
2464 }
2465
2466 if (bufsize > MAX_UPL_SIZE_BYTES) {
2467 return KERN_INVALID_ARGUMENT;
2468 }
2469
2470 if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
2471 if (uplflags & UPL_UBC_MSYNC) {
2472 uplflags &= UPL_RET_ONLY_DIRTY;
2473
2474 uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
2475 UPL_SET_INTERNAL | UPL_SET_LITE;
2476 } else if (uplflags & UPL_UBC_PAGEOUT) {
2477 uplflags &= UPL_RET_ONLY_DIRTY;
2478
2479 if (uplflags & UPL_RET_ONLY_DIRTY) {
2480 uplflags |= UPL_NOBLOCK;
2481 }
2482
2483 uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
2484 UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
2485 } else {
2486 uplflags |= UPL_RET_ONLY_ABSENT |
2487 UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2488 UPL_SET_INTERNAL | UPL_SET_LITE;
2489
2490 /*
2491 * if the requested size == PAGE_SIZE, we don't want to set
2492 * the UPL_NOBLOCK since we may be trying to recover from a
2493 * previous partial pagein I/O that occurred because we were low
2494 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2495 * since we're only asking for a single page, we can block w/o fear
2496 * of tying up pages while waiting for more to become available
2497 */
2498 if (bufsize > PAGE_SIZE) {
2499 uplflags |= UPL_NOBLOCK;
2500 }
2501 }
2502 } else {
2503 uplflags &= ~UPL_FOR_PAGEOUT;
2504
2505 if (uplflags & UPL_WILL_BE_DUMPED) {
2506 uplflags &= ~UPL_WILL_BE_DUMPED;
2507 uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2508 } else {
2509 uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2510 }
2511 }
2512 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2513 if (control == MEMORY_OBJECT_CONTROL_NULL) {
2514 return KERN_INVALID_ARGUMENT;
2515 }
2516
2517 kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
2518 if (kr == KERN_SUCCESS && plp != NULL) {
2519 *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
2520 }
2521 return kr;
2522 }
2523
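/*
 * Illustrative sketch (not part of the original source): a pagein-style use
 * of the UPL interfaces -- create a UPL for one page, map it, fill it, then
 * commit (or abort on error). "vp" and "f_offset" are assumed to come from
 * the caller; the VM tag is chosen only for the example.
 */
#if 0	/* example only -- not compiled */
	upl_t upl = NULL;
	upl_page_info_t *pl = NULL;
	vm_offset_t kaddr = 0;
	kern_return_t kr;

	kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE, &upl, &pl,
	    UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = ubc_upl_map(upl, &kaddr);
	if (kr == KERN_SUCCESS) {
		/* ... copy file data into (void *)kaddr ... */
		(void) ubc_upl_unmap(upl);
		(void) ubc_upl_commit_range(upl, 0, PAGE_SIZE,
		    UPL_COMMIT_FREE_ON_EMPTY);
	} else {
		(void) ubc_upl_abort_range(upl, 0, PAGE_SIZE,
		    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}
#endif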
2524
2525 /*
2526 * ubc_upl_maxbufsize
2527 *
2528 * Return the maximum bufsize ubc_create_upl( ) will take.
2529 *
2530 * Parameters: none
2531 *
2532 * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take.
2533 */
2534 upl_size_t
2535 ubc_upl_maxbufsize(
2536 void)
2537 {
2538 return MAX_UPL_SIZE_BYTES;
2539 }
2540
2541 /*
2542 * ubc_upl_map
2543 *
2544 * Map the page list associated with the supplied upl into the kernel virtual
2545 * address space at the virtual address indicated by the dst_addr argument;
2546 * the entire upl is mapped
2547 *
2548 * Parameters: upl The upl to map
2549 * dst_addr The address at which to map the upl
2550 *
2551 * Returns: KERN_SUCCESS The upl has been mapped
2552 * KERN_INVALID_ARGUMENT The upl is UPL_NULL
2553 * KERN_FAILURE The upl is already mapped
2554 * vm_map_enter:KERN_INVALID_ARGUMENT
2555 * A failure code from vm_map_enter() due
2556 * to an invalid argument
2557 */
2558 kern_return_t
2559 ubc_upl_map(
2560 upl_t upl,
2561 vm_offset_t *dst_addr)
2562 {
2563 return vm_upl_map(kernel_map, upl, dst_addr);
2564 }
2565
2566
2567 /*
2568 * ubc_upl_unmap
2569 *
2570 * Unmap the page list associated with the supplied upl from the kernel virtual
2571 * address space; the entire upl is unmapped.
2572 *
2573 * Parameters: upl The upl to unmap
2574 *
2575 * Returns: KERN_SUCCESS The upl has been unmapped
2576 * KERN_FAILURE The upl is not currently mapped
2577 * KERN_INVALID_ARGUMENT If the upl is UPL_NULL
2578 */
2579 kern_return_t
2580 ubc_upl_unmap(
2581 upl_t upl)
2582 {
2583 return vm_upl_unmap(kernel_map, upl);
2584 }
2585
2586
2587 /*
2588 * ubc_upl_commit
2589 *
2590 * Commit the contents of the upl to the backing store
2591 *
2592 * Parameters: upl The upl to commit
2593 *
2594 * Returns: KERN_SUCCESS The upl has been committed
2595 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2596 * KERN_FAILURE The supplied upl does not represent
2597 * device memory, and the offset plus the
2598 * size would exceed the actual size of
2599 * the upl
2600 *
2601 * Notes: In practice, the only return value for this function should be
2602 * KERN_SUCCESS, unless there has been data structure corruption;
2603 * since the upl is deallocated regardless of success or failure,
2604 * there's really nothing to do about this other than panic.
2605 *
2606 * IMPORTANT: Use of this function should not be mixed with use of
2607 * ubc_upl_commit_range(), due to the unconditional deallocation
2608 * by this function.
2609 */
2610 kern_return_t
2611 ubc_upl_commit(
2612 upl_t upl)
2613 {
2614 upl_page_info_t *pl;
2615 kern_return_t kr;
2616
2617 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2618 kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2619 upl_deallocate(upl);
2620 return kr;
2621 }
2622
2623
2624 /*
2625 * ubc_upl_commit_range
2626 *
2627 * Commit the contents of the specified range of the upl to the backing store
2628 *
2629 * Parameters: upl The upl to commit
2630 * offset The offset into the upl
2631 * size The size of the region to be committed,
2632 * starting at the specified offset
2633 * flags commit type (see below)
2634 *
2635 * Returns: KERN_SUCCESS The range has been committed
2636 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2637 * KERN_FAILURE The supplied upl does not represent
2638 * device memory, and the offset plus the
2639 * size would exceed the actual size of
2640 * the upl
2641 *
2642 * Notes: IMPORTANT: If the commit is successful, and the object is now
2643 * empty, the upl will be deallocated. Since the caller cannot
2644 * check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2645 * should generally only be used when the offset is 0 and the size
2646 * is equal to the upl size.
2647 *
2648 * The flags argument is a bitmap of flags on the range of pages in
2649 * the upl to be committed; allowable flags are:
2650 *
2651 * o UPL_COMMIT_FREE_ON_EMPTY Free the upl when it is
2652 * both empty and has been
2653 * successfully committed
2654 * o UPL_COMMIT_CLEAR_DIRTY Clear each page's dirty
2655 * bit; will prevent a
2656 * later pageout
2657 * o UPL_COMMIT_SET_DIRTY Set each page's dirty
2658 * bit; will cause a later
2659 * pageout
2660 * o UPL_COMMIT_INACTIVATE Clear each page's
2661 * reference bit; the page
2662 * will not be accessed
2663 * o UPL_COMMIT_ALLOW_ACCESS Unbusy each page; pages
2664 * become busy when an
2665 * IOMemoryDescriptor is
2666 * mapped or redirected,
2667 * and we have to wait for
2668 * an IOKit driver
2669 *
2670 * The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2671 * not be specified by the caller.
2672 *
2673 * The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2674 * mutually exclusive, and should not be combined.
2675 */
2676 kern_return_t
2677 ubc_upl_commit_range(
2678 upl_t upl,
2679 upl_offset_t offset,
2680 upl_size_t size,
2681 int flags)
2682 {
2683 upl_page_info_t *pl;
2684 boolean_t empty;
2685 kern_return_t kr;
2686
2687 if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2688 flags |= UPL_COMMIT_NOTIFY_EMPTY;
2689 }
2690
2691 if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2692 return KERN_INVALID_ARGUMENT;
2693 }
2694
2695 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2696
2697 kr = upl_commit_range(upl, offset, size, flags,
2698 pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2699
2700 if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2701 upl_deallocate(upl);
2702 }
2703
2704 return kr;
2705 }
2706
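/*
 * Illustrative sketch (not part of the original source): a pageout-style
 * commit that clears the dirty bits of the pages just written and frees the
 * upl once it is empty; "upl" and "io_size" are assumed from the caller.
 */
#if 0	/* example only -- not compiled */
	(void) ubc_upl_commit_range(upl, 0, io_size,
	    UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
#endif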
2707
2708 /*
2709 * ubc_upl_abort_range
2710 *
2711 * Abort the contents of the specified range of the specified upl
2712 *
2713 * Parameters: upl The upl to abort
2714 * offset The offset into the upl
2715 * size The size of the region to be aborted,
2716 * starting at the specified offset
2717 * abort_flags abort type (see below)
2718 *
2719 * Returns: KERN_SUCCESS The range has been aborted
2720 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2721 * KERN_FAILURE The supplied upl does not represent
2722 * device memory, and the offset plus the
2723 * size would exceed the actual size of
2724 * the upl
2725 *
2726 * Notes: IMPORTANT: If the abort is successful, and the object is now
2727 * empty, the upl will be deallocated. Since the caller cannot
2728 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2729 * should generally only be used when the offset is 0 and the size
2730 * is equal to the upl size.
2731 *
2732 * The abort_flags argument is a bitmap of flags on the range of
2733 * pages in the upl to be aborted; allowable flags are:
2734 *
2735 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2736 * empty and has been successfully
2737 * aborted
2738 * o UPL_ABORT_RESTART The operation must be restarted
2739 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2740 * o UPL_ABORT_ERROR An I/O error occurred
2741 * o UPL_ABORT_DUMP_PAGES Just free the pages
2742 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2743 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2744 *
2745 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2746 * not be specified by the caller. It is intended to fulfill the
2747 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2748 * ubc_upl_commit_range(), but is never referenced internally.
2749 *
2750 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2751 * referenced; do not use it.
2752 */
2753 kern_return_t
2754 ubc_upl_abort_range(
2755 upl_t upl,
2756 upl_offset_t offset,
2757 upl_size_t size,
2758 int abort_flags)
2759 {
2760 kern_return_t kr;
2761 boolean_t empty = FALSE;
2762
2763 if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2764 abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2765 }
2766
2767 kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2768
2769 if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2770 upl_deallocate(upl);
2771 }
2772
2773 return kr;
2774 }
2775
2776
2777 /*
2778 * ubc_upl_abort
2779 *
2780 * Abort the contents of the specified upl
2781 *
2782 * Parameters: upl The upl to abort
2783 * abort_type abort type (see below)
2784 *
2785 * Returns: KERN_SUCCESS The range has been aborted
2786 * KERN_INVALID_ARGUMENT The supplied upl was UPL_NULL
2787 * KERN_FAILURE The supplied upl does not represent
2788 * device memory, and the offset plus the
2789 * size would exceed the actual size of
2790 * the upl
2791 *
2792 * Notes: IMPORTANT: If the abort is successful, and the object is now
2793 * empty, the upl will be deallocated. Since the caller cannot
2794 * check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2795 * should generally only be used when the offset is 0 and the size
2796 * is equal to the upl size.
2797 *
2798 * The abort_type is a bitmap of flags on the range of
2799 * pages in the upl to be aborted; allowable flags are:
2800 *
2801 * o UPL_ABORT_FREE_ON_EMPTY Free the upl when it is both
2802 * empty and has been successfully
2803 * aborted
2804 * o UPL_ABORT_RESTART The operation must be restarted
2805 * o UPL_ABORT_UNAVAILABLE The pages are unavailable
2806 * o UPL_ABORT_ERROR An I/O error occurred
2807 * o UPL_ABORT_DUMP_PAGES Just free the pages
2808 * o UPL_ABORT_NOTIFY_EMPTY RESERVED
2809 * o UPL_ABORT_ALLOW_ACCESS RESERVED
2810 *
2811 * The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2812 * not be specified by the caller. It is intended to fulfill the
2813 * same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2814 * ubc_upl_commit_range(), but is never referenced internally.
2815 *
2816 * The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2817 * referenced; do not use it.
2818 */
2819 kern_return_t
2820 ubc_upl_abort(
2821 upl_t upl,
2822 int abort_type)
2823 {
2824 kern_return_t kr;
2825
2826 kr = upl_abort(upl, abort_type);
2827 upl_deallocate(upl);
2828 return kr;
2829 }
2830
2831
2832 /*
2833 * ubc_upl_pageinfo
2834 *
2835 * Retrieve the internal page list for the specified upl
2836 *
2837 * Parameters: upl The upl to obtain the page list from
2838 *
2839 * Returns: !NULL The (upl_page_info_t *) for the page
2840 * list internal to the upl
2841 * NULL Error/no page list associated
2842 *
2843 * Notes: IMPORTANT: The function is only valid on internal objects
2844 * where the list request was made with the UPL_INTERNAL flag.
2845 *
2846 * This function is a utility helper function, since some callers
2847 * may not have direct access to the header defining the macro,
2848 * due to abstraction layering constraints.
2849 */
2850 upl_page_info_t *
2851 ubc_upl_pageinfo(
2852 upl_t upl)
2853 {
2854 return UPL_GET_INTERNAL_PAGE_LIST(upl);
2855 }
2856
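/*
 * Illustrative sketch (not part of the original source): walk the internal
 * page list of a UPL created with UPL_SET_INTERNAL; "upl" and "io_size" are
 * assumed to come from the caller's context.
 */
#if 0	/* example only -- not compiled */
	upl_page_info_t *pl = ubc_upl_pageinfo(upl);
	int i;

	for (i = 0; i < (int)(io_size / PAGE_SIZE); i++) {
		if (!upl_page_present(pl, i)) {
			/* page i was not populated */
		} else if (upl_dirty_page(pl, i)) {
			/* page i holds modified data */
		}
	}
#endif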
2857
2858 int
2859 UBCINFOEXISTS(const struct vnode * vp)
2860 {
2861 return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
2862 }
2863
2864
2865 void
2866 ubc_upl_range_needed(
2867 upl_t upl,
2868 int index,
2869 int count)
2870 {
2871 upl_range_needed(upl, index, count);
2872 }
2873
2874 boolean_t
2875 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
2876 {
2877 if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
2878 return FALSE;
2879 }
2880 if (writable) {
2881 *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
2882 }
2883 return TRUE;
2884 }
2885
2886 boolean_t
2887 ubc_is_mapped_writable(const struct vnode *vp)
2888 {
2889 boolean_t writable;
2890 return ubc_is_mapped(vp, &writable) && writable;
2891 }
2892
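/*
 * Illustrative sketch (not part of the original source): a filesystem that
 * wants a stable view of a file's contents might refuse to proceed while a
 * writable mapping is still outstanding.
 */
#if 0	/* example only -- not compiled */
	if (ubc_is_mapped_writable(vp)) {
		return EBUSY;
	}
#endif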
2893
2894 /*
2895 * CODE SIGNING
2896 */
2897 static volatile SInt32 cs_blob_size = 0;
2898 static volatile SInt32 cs_blob_count = 0;
2899 static SInt32 cs_blob_size_peak = 0;
2900 static UInt32 cs_blob_size_max = 0;
2901 static SInt32 cs_blob_count_peak = 0;
2902
2903 SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
2904 SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
2905 SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
2906 SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs");
2907 SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob");
2908
2909 /*
2910 * Function: csblob_parse_teamid
2911 *
2912 * Description: This function returns a pointer to the team id
2913 * stored within the codedirectory of the csblob.
2914 * If the codedirectory predates team-ids, it returns
2915 * NULL.
2916 * This does not copy the name but returns a pointer to
2917 * it within the CD. Subsequently, the CD must be
2918 * available when this is used.
2919 */
2920
2921 static const char *
2922 csblob_parse_teamid(struct cs_blob *csblob)
2923 {
2924 const CS_CodeDirectory *cd;
2925
2926 cd = csblob->csb_cd;
2927
2928 if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
2929 return NULL;
2930 }
2931
2932 if (cd->teamOffset == 0) {
2933 return NULL;
2934 }
2935
2936 const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
2937 if (cs_debug > 1) {
2938 printf("found team-id %s in cdblob\n", name);
2939 }
2940
2941 return name;
2942 }
2943
2944
2945 kern_return_t
2946 ubc_cs_blob_allocate(
2947 vm_offset_t *blob_addr_p,
2948 vm_size_t *blob_size_p)
2949 {
2950 kern_return_t kr = KERN_FAILURE;
2951
2952 {
2953 *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY);
2954
2955 if (*blob_addr_p == 0) {
2956 kr = KERN_NO_SPACE;
2957 } else {
2958 kr = KERN_SUCCESS;
2959 }
2960 }
2961
2962 return kr;
2963 }
2964
2965 void
2966 ubc_cs_blob_deallocate(
2967 vm_offset_t blob_addr,
2968 vm_size_t blob_size)
2969 {
2970 #if PMAP_CS
2971 if (blob_size > pmap_cs_blob_limit) {
2972 kmem_free(kernel_map, blob_addr, blob_size);
2973 } else
2974 #endif
2975 {
2976 kfree(blob_addr, blob_size);
2977 }
2978 }
2979
2980 /*
2981 * Some codesigned files use a lowest common denominator page size of
2982 * 4KiB, but can be used on systems that have a runtime page size of
2983 * 16KiB. Since faults will only occur on 16KiB ranges in
2984 * cs_validate_range(), we can convert the original Code Directory to
2985 * a multi-level scheme where groups of 4 hashes are combined to form
2986 * a new hash, which represents 16KiB in the on-disk file. This can
2987 * reduce the wired memory requirement for the Code Directory by
2988 * 75%. Care must be taken for binaries that use the "fourk" VM pager
2989 * for unaligned access, which may still attempt to validate on
2990 * non-16KiB multiples for compatibility with 3rd party binaries.
2991 */
2992 static boolean_t
2993 ubc_cs_supports_multilevel_hash(struct cs_blob *blob)
2994 {
2995 const CS_CodeDirectory *cd;
2996
2997
2998 /*
2999 * Only applies to binaries that ship as part of the OS,
3000 * primarily the shared cache.
3001 */
3002 if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3003 return FALSE;
3004 }
3005
3006 /*
3007 * If the runtime page size matches the code signing page
3008 * size, there is no work to do.
3009 */
3010 if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3011 return FALSE;
3012 }
3013
3014 cd = blob->csb_cd;
3015
3016 /*
3017 * The number of hashes must be an integral multiple of the number of hashes per runtime page
3018 */
3019 if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3020 return FALSE;
3021 }
3022
3023 /*
3024 * Scatter lists must also have ranges that have an integral number of hashes
3025 */
3026 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3027 const SC_Scatter *scatter = (const SC_Scatter*)
3028 ((const char*)cd + ntohl(cd->scatterOffset));
3029 /* iterate all scatter structs to make sure they are all aligned */
3030 do {
3031 uint32_t sbase = ntohl(scatter->base);
3032 uint32_t scount = ntohl(scatter->count);
3033
3034 /* last scatter? */
3035 if (scount == 0) {
3036 break;
3037 }
3038
3039 if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3040 return FALSE;
3041 }
3042
3043 if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3044 return FALSE;
3045 }
3046
3047 scatter++;
3048 } while (1);
3049 }
3050
3051 /* Covered range must be a multiple of the new page size */
3052 if (ntohl(cd->codeLimit) & PAGE_MASK) {
3053 return FALSE;
3054 }
3055
3056 /* All checks pass */
3057 return TRUE;
3058 }
3059
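/*
 * Worked example (illustrative, not from the original source): with a 16KiB
 * runtime page (PAGE_SHIFT == 14) and 4KiB code-signing pages
 * (csb_hash_pageshift == 12), the grouping factor is
 *
 *	1 << (PAGE_SHIFT - csb_hash_pageshift) = 1 << 2 = 4
 *
 * so four original hashes collapse into each new hash and the code
 * directory's hash array shrinks to a quarter of its size -- the ~75%
 * wired-memory saving described above.
 */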
3060 /*
3061 * Given a cs_blob with an already chosen best code directory, this
3062 * function allocates memory and copies into it only the blobs that
3063 * will be needed by the kernel, namely the single chosen code
3064 * directory (and not any of its alternatives) and the entitlement
3065 * blob.
3066 *
3067 * This saves significant memory with agile signatures, and additional
3068 * memory for 3rd Party Code because we also omit the CMS blob.
3069 *
3070 * To support multilevel and other potential code directory rewriting,
3071 * the size of a new code directory can be specified. Since that code
3072 * directory will replace the existing code directory,
3073 * ubc_cs_reconstitute_code_signature does not copy the original code
3074 * directory when a size is given, and the caller must fill it in.
3075 */
3076 static int
3077 ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size,
3078 vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p,
3079 CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p)
3080 {
3081 const CS_CodeDirectory *old_cd, *cd;
3082 CS_CodeDirectory *new_cd;
3083 const CS_GenericBlob *entitlements;
3084 vm_offset_t new_blob_addr;
3085 vm_size_t new_blob_size;
3086 vm_size_t new_cdsize;
3087 kern_return_t kr;
3088 int error;
3089
3090 old_cd = blob->csb_cd;
3091
3092 new_cdsize = optional_new_cd_size != 0 ? optional_new_cd_size : ntohl(old_cd->length);
3093
3094 new_blob_size = sizeof(CS_SuperBlob);
3095 new_blob_size += sizeof(CS_BlobIndex);
3096 new_blob_size += new_cdsize;
3097
3098 if (blob->csb_entitlements_blob) {
3099 /* We need to add a slot for the entitlements */
3100 new_blob_size += sizeof(CS_BlobIndex);
3101 new_blob_size += ntohl(blob->csb_entitlements_blob->length);
3102 }
3103
3104 kr = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_size);
3105 if (kr != KERN_SUCCESS) {
3106 if (cs_debug > 1) {
3107 printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n",
3108 kr);
3109 }
3110 return ENOMEM;
3111 }
3112
3113 CS_SuperBlob *new_superblob;
3114
3115 new_superblob = (CS_SuperBlob *)new_blob_addr;
3116 new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3117 new_superblob->length = htonl((uint32_t)new_blob_size);
3118 if (blob->csb_entitlements_blob) {
3119 vm_size_t ent_offset, cd_offset;
3120
3121 cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex);
3122 ent_offset = cd_offset + new_cdsize;
3123
3124 new_superblob->count = htonl(2);
3125 new_superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3126 new_superblob->index[0].offset = htonl((uint32_t)cd_offset);
3127 new_superblob->index[1].type = htonl(CSSLOT_ENTITLEMENTS);
3128 new_superblob->index[1].offset = htonl((uint32_t)ent_offset);
3129
3130 memcpy((void *)(new_blob_addr + ent_offset), blob->csb_entitlements_blob, ntohl(blob->csb_entitlements_blob->length));
3131
3132 new_cd = (CS_CodeDirectory *)(new_blob_addr + cd_offset);
3133 } else {
3134 // Blob is the code directory, directly.
3135 new_cd = (CS_CodeDirectory *)new_blob_addr;
3136 }
3137
3138 if (optional_new_cd_size == 0) {
3139 // Copy code directory, and revalidate.
3140 memcpy(new_cd, old_cd, new_cdsize);
3141
3142 vm_size_t length = new_blob_size;
3143
3144 error = cs_validate_csblob((const uint8_t *)new_blob_addr, length, &cd, &entitlements);
3145
3146 if (error) {
3147 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3148 error);
3149
3150 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3151 return error;
3152 }
3153 *new_entitlements_p = entitlements;
3154 } else {
3155 // Caller will fill out and validate code directory.
3156 memset(new_cd, 0, new_cdsize);
3157 *new_entitlements_p = NULL;
3158 }
3159
3160 *new_blob_addr_p = new_blob_addr;
3161 *new_blob_size_p = new_blob_size;
3162 *new_cd_p = new_cd;
3163
3164 return 0;
3165 }
3166
3167 static int
3168 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3169 {
3170 const CS_CodeDirectory *old_cd, *cd;
3171 CS_CodeDirectory *new_cd;
3172 const CS_GenericBlob *entitlements;
3173 vm_offset_t new_blob_addr;
3174 vm_size_t new_blob_size;
3175 vm_size_t new_cdsize;
3176 int error;
3177
3178 uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3179
3180 if (cs_debug > 1) {
3181 printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3182 (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3183 }
3184
3185 old_cd = blob->csb_cd;
3186
3187 /* Up to the hashes, we can copy all data */
3188 new_cdsize = ntohl(old_cd->hashOffset);
3189 new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3190
3191 error = ubc_cs_reconstitute_code_signature(blob, new_cdsize,
3192 &new_blob_addr, &new_blob_size, &new_cd,
3193 &entitlements);
3194 if (error != 0) {
3195 printf("CODE SIGNING: Failed to reconstitute code signature: %d\n", error);
3196 return error;
3197 }
3198
3199 memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3200
3201 /* Update fields in the Code Directory structure */
3202 new_cd->length = htonl((uint32_t)new_cdsize);
3203
3204 uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3205 nCodeSlots >>= hashes_per_new_hash_shift;
3206 new_cd->nCodeSlots = htonl(nCodeSlots);
3207
3208 new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */
3209
3210 if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3211 SC_Scatter *scatter = (SC_Scatter*)
3212 ((char *)new_cd + ntohl(new_cd->scatterOffset));
3213 /* iterate all scatter structs to scale their counts */
3214 do {
3215 uint32_t scount = ntohl(scatter->count);
3216 uint32_t sbase = ntohl(scatter->base);
3217
3218 /* last scatter? */
3219 if (scount == 0) {
3220 break;
3221 }
3222
3223 scount >>= hashes_per_new_hash_shift;
3224 scatter->count = htonl(scount);
3225
3226 sbase >>= hashes_per_new_hash_shift;
3227 scatter->base = htonl(sbase);
3228
3229 scatter++;
3230 } while (1);
3231 }
3232
3233 /* For each group of hashes, hash them together */
3234 const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3235 unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3236
3237 uint32_t hash_index;
3238 for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3239 union cs_hash_union mdctx;
3240
3241 uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3242 const unsigned char *src = src_base + hash_index * source_hash_len;
3243 unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3244
3245 blob->csb_hashtype->cs_init(&mdctx);
3246 blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3247 blob->csb_hashtype->cs_final(dst, &mdctx);
3248 }
3249
3250 error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements);
3251 if (error != 0) {
3252 printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3253 error);
3254
3255 ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3256 return error;
3257 }
3258
3259 /* New Code Directory is ready for use, swap it out in the blob structure */
3260 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3261
3262 blob->csb_mem_size = new_blob_size;
3263 blob->csb_mem_kaddr = new_blob_addr;
3264 blob->csb_cd = cd;
3265 blob->csb_entitlements_blob = entitlements;
3266
3267 /* The blob has some cached attributes of the Code Directory, so update those */
3268
3269 blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */
3270
3271 blob->csb_hash_pagesize = PAGE_SIZE;
3272 blob->csb_hash_pagemask = PAGE_MASK;
3273 blob->csb_hash_pageshift = PAGE_SHIFT;
3274 blob->csb_end_offset = ntohl(cd->codeLimit);
3275 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3276 const SC_Scatter *scatter = (const SC_Scatter*)
3277 ((const char*)cd + ntohl(cd->scatterOffset));
3278 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3279 } else {
3280 blob->csb_start_offset = 0;
3281 }
3282
3283 return 0;
3284 }
3285
3286 /*
3287 * Validate the code signature blob, create a struct cs_blob wrapper
3288 * and return it together with a pointer to the chosen code directory
3289 * and entitlements blob.
3290 *
3291 * Note that this takes ownership of the memory at addr, mainly because
3292 * this function can actually replace the passed in blob with another
3293 * one, e.g. when performing multilevel hashing optimization.
3294 */
3295 int
3296 cs_blob_create_validated(
3297 vm_address_t * const addr,
3298 vm_size_t size,
3299 struct cs_blob ** const ret_blob,
3300 CS_CodeDirectory const ** const ret_cd)
3301 {
3302 struct cs_blob *blob;
3303 int error = EINVAL;
3304 const CS_CodeDirectory *cd;
3305 const CS_GenericBlob *entitlements;
3306 union cs_hash_union mdctx;
3307 size_t length;
3308
3309 if (ret_blob) {
3310 *ret_blob = NULL;
3311 }
3312
3313 blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob));
3314 if (blob == NULL) {
3315 return ENOMEM;
3316 }
3317
3318 /* fill in the new blob */
3319 blob->csb_mem_size = size;
3320 blob->csb_mem_offset = 0;
3321 blob->csb_mem_kaddr = *addr;
3322 blob->csb_flags = 0;
3323 blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
3324 blob->csb_platform_binary = 0;
3325 blob->csb_platform_path = 0;
3326 blob->csb_teamid = NULL;
3327 blob->csb_entitlements_blob = NULL;
3328 blob->csb_entitlements = NULL;
3329 blob->csb_reconstituted = false;
3330
3331 /* Transfer ownership. Even on error, this function will deallocate */
3332 *addr = 0;
3333
3334 /*
3335 * Validate the blob's contents
3336 */
3337 length = (size_t) size;
3338 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
3339 length, &cd, &entitlements);
3340 if (error) {
3341 if (cs_debug) {
3342 printf("CODESIGNING: csblob invalid: %d\n", error);
3343 }
3344 /*
3345 * The vnode checker can't make the rest of this function
3346 * succeed if csblob validation failed, so bail */
3347 goto out;
3348 } else {
3349 const unsigned char *md_base;
3350 uint8_t hash[CS_HASH_MAX_SIZE];
3351 int md_size;
3352
3353 blob->csb_cd = cd;
3354 blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
3355 blob->csb_hashtype = cs_find_md(cd->hashType);
3356 if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
3357 panic("validated CodeDirectory but unsupported type");
3358 }
3359
3360 blob->csb_hash_pageshift = cd->pageSize;
3361 blob->csb_hash_pagesize = (1U << cd->pageSize);
3362 blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1;
3363 blob->csb_hash_firstlevel_pagesize = 0;
3364 blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
3365 blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask));
3366 if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3367 const SC_Scatter *scatter = (const SC_Scatter*)
3368 ((const char*)cd + ntohl(cd->scatterOffset));
3369 blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize;
3370 } else {
3371 blob->csb_start_offset = 0;
3372 }
3373 /* compute the blob's cdhash */
3374 md_base = (const unsigned char *) cd;
3375 md_size = ntohl(cd->length);
3376
3377 blob->csb_hashtype->cs_init(&mdctx);
3378 blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
3379 blob->csb_hashtype->cs_final(hash, &mdctx);
3380
3381 memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
3382 }
3383
3384 error = 0;
3385
3386 out:
3387 if (error != 0) {
3388 cs_blob_free(blob);
3389 blob = NULL;
3390 cd = NULL;
3391 }
3392
3393 if (ret_blob != NULL) {
3394 *ret_blob = blob;
3395 }
3396 if (ret_cd != NULL) {
3397 *ret_cd = cd;
3398 }
3399
3400 return error;
3401 }
3402
3403 /*
3404 * Free a cs_blob previously created by cs_blob_create_validated.
3405 */
3406 void
3407 cs_blob_free(
3408 struct cs_blob * const blob)
3409 {
3410 if (blob != NULL) {
3411 if (blob->csb_mem_kaddr) {
3412 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3413 blob->csb_mem_kaddr = 0;
3414 }
3415 if (blob->csb_entitlements != NULL) {
3416 osobject_release(blob->csb_entitlements);
3417 blob->csb_entitlements = NULL;
3418 }
3419 (kfree)(blob, sizeof(*blob));
3420 }
3421 }
3422
3423 int
3424 ubc_cs_blob_add(
3425 struct vnode *vp,
3426 cpu_type_t cputype,
3427 off_t base_offset,
3428 vm_address_t *addr,
3429 vm_size_t size,
3430 struct image_params *imgp,
3431 __unused int flags,
3432 struct cs_blob **ret_blob)
3433 {
3434 kern_return_t kr;
3435 struct ubc_info *uip;
3436 struct cs_blob *blob, *oblob;
3437 int error;
3438 CS_CodeDirectory const *cd;
3439 off_t blob_start_offset, blob_end_offset;
3440 boolean_t record_mtime;
3441
3442 record_mtime = FALSE;
3443 if (ret_blob) {
3444 *ret_blob = NULL;
3445 }
3446
3447 /* Create the struct cs_blob wrapper that will be attached to the vnode.
3448 * Validates the passed in blob in the process. */
3449 error = cs_blob_create_validated(addr, size, &blob, &cd);
3450
3451 if (error != 0) {
3452 printf("malformed code signature blob: %d\n", error);
3453 return error;
3454 }
3455
3456 blob->csb_cpu_type = cputype;
3457 blob->csb_base_offset = base_offset;
3458
3459 /*
3460 * Let policy module check whether the blob's signature is accepted.
3461 */
3462 #if CONFIG_MACF
3463 unsigned int cs_flags = blob->csb_flags;
3464 unsigned int signer_type = blob->csb_signer_type;
3465 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
3466 blob->csb_flags = cs_flags;
3467 blob->csb_signer_type = signer_type;
3468
3469 if (error) {
3470 if (cs_debug) {
3471 printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
3472 }
3473 goto out;
3474 }
3475 if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) {
3476 if (cs_debug) {
3477 printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid);
3478 }
3479 error = EPERM;
3480 goto out;
3481 }
3482 #endif
3483
3484 #if CONFIG_ENFORCE_SIGNED_CODE
3485 /*
3486 * Reconstitute code signature
3487 */
3488 {
3489 vm_address_t new_mem_kaddr = 0;
3490 vm_size_t new_mem_size = 0;
3491
3492 CS_CodeDirectory *new_cd = NULL;
3493 CS_GenericBlob const *new_entitlements = NULL;
3494
3495 error = ubc_cs_reconstitute_code_signature(blob, 0,
3496 &new_mem_kaddr, &new_mem_size,
3497 &new_cd, &new_entitlements);
3498
3499 if (error != 0) {
3500 printf("failed code signature reconstitution: %d\n", error);
3501 goto out;
3502 }
3503
3504 ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
3505
3506 blob->csb_mem_kaddr = new_mem_kaddr;
3507 blob->csb_mem_size = new_mem_size;
3508 blob->csb_cd = new_cd;
3509 blob->csb_entitlements_blob = new_entitlements;
3510 blob->csb_reconstituted = true;
3511 }
3512
3513 #endif
3514
3515
3516 if (blob->csb_flags & CS_PLATFORM_BINARY) {
3517 if (cs_debug > 1) {
3518 printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid);
3519 }
3520 blob->csb_platform_binary = 1;
3521 blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH);
3522 } else {
3523 blob->csb_platform_binary = 0;
3524 blob->csb_platform_path = 0;
3525 blob->csb_teamid = csblob_parse_teamid(blob);
3526 if (cs_debug > 1) {
3527 if (blob->csb_teamid) {
3528 printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid);
3529 } else {
3530 printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid);
3531 }
3532 }
3533 }
3534
3535 /*
3536 * Validate the blob's coverage
3537 */
3538 blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
3539 blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
3540
3541 if (blob_start_offset >= blob_end_offset ||
3542 blob_start_offset < 0 ||
3543 blob_end_offset <= 0) {
3544 /* reject empty or backwards blob */
3545 error = EINVAL;
3546 goto out;
3547 }
3548
3549 if (ubc_cs_supports_multilevel_hash(blob)) {
3550 error = ubc_cs_convert_to_multilevel_hash(blob);
3551 if (error != 0) {
3552 printf("failed multilevel hash conversion: %d\n", error);
3553 goto out;
3554 }
3555 blob->csb_reconstituted = true;
3556 }
3557
3558 vnode_lock(vp);
3559 if (!UBCINFOEXISTS(vp)) {
3560 vnode_unlock(vp);
3561 error = ENOENT;
3562 goto out;
3563 }
3564 uip = vp->v_ubcinfo;
3565
3566 /* check if this new blob overlaps with an existing blob */
3567 for (oblob = uip->cs_blobs;
3568 oblob != NULL;
3569 oblob = oblob->csb_next) {
3570 off_t oblob_start_offset, oblob_end_offset;
3571
3572 if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices
3573 vnode_unlock(vp);
3574 error = EALREADY;
3575 goto out;
3576 } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices
3577 if (!oblob->csb_platform_binary) {
3578 vnode_unlock(vp);
3579 error = EALREADY;
3580 goto out;
3581 }
3582 } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices
3583 if (oblob->csb_platform_binary ||
3584 oblob->csb_teamid == NULL ||
3585 strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) {
3586 vnode_unlock(vp);
3587 error = EALREADY;
3588 goto out;
3589 }
3590 } else { // non teamid binary needs to be the same for app slices
3591 if (oblob->csb_platform_binary ||
3592 oblob->csb_teamid != NULL) {
3593 vnode_unlock(vp);
3594 error = EALREADY;
3595 goto out;
3596 }
3597 }
3598
3599 oblob_start_offset = (oblob->csb_base_offset +
3600 oblob->csb_start_offset);
3601 oblob_end_offset = (oblob->csb_base_offset +
3602 oblob->csb_end_offset);
3603 if (blob_start_offset >= oblob_end_offset ||
3604 blob_end_offset <= oblob_start_offset) {
3605 /* no conflict with this existing blob */
3606 } else {
3607 /* conflict ! */
3608 if (blob_start_offset == oblob_start_offset &&
3609 blob_end_offset == oblob_end_offset &&
3610 blob->csb_mem_size == oblob->csb_mem_size &&
3611 blob->csb_flags == oblob->csb_flags &&
3612 (blob->csb_cpu_type == CPU_TYPE_ANY ||
3613 oblob->csb_cpu_type == CPU_TYPE_ANY ||
3614 blob->csb_cpu_type == oblob->csb_cpu_type) &&
3615 !bcmp(blob->csb_cdhash,
3616 oblob->csb_cdhash,
3617 CS_CDHASH_LEN)) {
3618 /*
3619 * We already have this blob:
3620 * we'll return success but
3621 * throw away the new blob.
3622 */
3623 if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
3624 /*
3625 * The old blob matches this one
3626 * but doesn't have any CPU type.
3627 * Update it with whatever the caller
3628 * provided this time.
3629 */
3630 oblob->csb_cpu_type = cputype;
3631 }
3632
3633 /* The signature is still accepted, so update the
3634 * generation count. */
3635 uip->cs_add_gen = cs_blob_generation_count;
3636
3637 vnode_unlock(vp);
3638 if (ret_blob) {
3639 *ret_blob = oblob;
3640 }
3641 error = EAGAIN;
3642 goto out;
3643 } else {
3644 /* different blob: reject the new one */
3645 vnode_unlock(vp);
3646 error = EALREADY;
3647 goto out;
3648 }
3649 }
3650 }
3651
3652
3653 /* mark this vnode's VM object as having "signed pages" */
3654 kr = memory_object_signed(uip->ui_control, TRUE);
3655 if (kr != KERN_SUCCESS) {
3656 vnode_unlock(vp);
3657 error = ENOENT;
3658 goto out;
3659 }
3660
3661 if (uip->cs_blobs == NULL) {
3662 /* loading 1st blob: record the file's current "modify time" */
3663 record_mtime = TRUE;
3664 }
3665
3666 /* set the generation count for cs_blobs */
3667 uip->cs_add_gen = cs_blob_generation_count;
3668
3669 /*
3670 * Add this blob to the list of blobs for this vnode.
3671 * We always add at the front of the list and we never remove a
3672 * blob from the list, so ubc_cs_get_blobs() can return whatever
3673 * the top of the list was and that list will remain valid
3674 * while we validate a page, even after we release the vnode's lock.
3675 */
3676 blob->csb_next = uip->cs_blobs;
3677 uip->cs_blobs = blob;
3678
3679 OSAddAtomic(+1, &cs_blob_count);
3680 if (cs_blob_count > cs_blob_count_peak) {
3681 cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */
3682 }
3683 OSAddAtomic((SInt32) + blob->csb_mem_size, &cs_blob_size);
3684 if ((SInt32) cs_blob_size > cs_blob_size_peak) {
3685 cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */
3686 }
3687 if ((UInt32) blob->csb_mem_size > cs_blob_size_max) {
3688 cs_blob_size_max = (UInt32) blob->csb_mem_size;
3689 }
3690
3691 if (cs_debug > 1) {
3692 proc_t p;
3693 const char *name = vnode_getname_printable(vp);
3694 p = current_proc();
3695 printf("CODE SIGNING: proc %d(%s) "
3696 "loaded %s signatures for file (%s) "
3697 "range 0x%llx:0x%llx flags 0x%x\n",
3698 p->p_pid, p->p_comm,
3699 blob->csb_cpu_type == -1 ? "detached" : "embedded",
3700 name,
3701 blob->csb_base_offset + blob->csb_start_offset,
3702 blob->csb_base_offset + blob->csb_end_offset,
3703 blob->csb_flags);
3704 vnode_putname_printable(name);
3705 }
3706
3707 vnode_unlock(vp);
3708
3709 if (record_mtime) {
3710 vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
3711 }
3712
3713 if (ret_blob) {
3714 *ret_blob = blob;
3715 }
3716
3717 error = 0; /* success ! */
3718
3719 out:
3720 if (error) {
3721 if (cs_debug) {
3722 printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error);
3723 }
3724
3725 cs_blob_free(blob);
3726 }
3727
3728 if (error == EAGAIN) {
3729 /*
3730 * See above: error is EAGAIN if we were asked
3731 * to add an existing blob again. We cleaned the new
3732 * blob and we want to return success.
3733 */
3734 error = 0;
3735 }
3736
3737 return error;
3738 }
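
/*
 * A minimal sketch of the prepend-only list discipline described in the
 * comment above: because blobs are only ever added at the head and never
 * unlinked while the ubc_info is alive, a reader may take a snapshot of
 * the head under the vnode lock and keep walking it after unlocking.
 * The example_* names below are hypothetical and exist only to show the
 * shape of the pattern; they are not xnu interfaces.
 */
#if 0 /* illustrative sketch, not compiled */
struct example_blob {
	struct example_blob *next;
	int payload;
};

/* writer side: runs with the owning lock held (the vnode lock here) */
static void
example_prepend(struct example_blob **head, struct example_blob *nb)
{
	nb->next = *head;       /* new node points at the current head */
	*head = nb;             /* publish; older snapshots remain valid */
}

/* reader side: a snapshot taken while locked stays a well-formed,
 * immutable chain even after the lock is dropped */
static int
example_sum(struct example_blob *snapshot)
{
	int total = 0;
	for (struct example_blob *b = snapshot; b != NULL; b = b->next) {
		total += b->payload;
	}
	return total;
}
#endif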
3739
3740 void
3741 csvnode_print_debug(struct vnode *vp)
3742 {
3743 const char *name = NULL;
3744 struct ubc_info *uip;
3745 struct cs_blob *blob;
3746
3747 name = vnode_getname_printable(vp);
3748 if (name) {
3749 printf("csvnode: name: %s\n", name);
3750 vnode_putname_printable(name);
3751 }
3752
3753 vnode_lock_spin(vp);
3754
3755 if (!UBCINFOEXISTS(vp)) {
3756 blob = NULL;
3757 goto out;
3758 }
3759
3760 uip = vp->v_ubcinfo;
3761 for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
3762 printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
3763 (unsigned long)blob->csb_start_offset,
3764 (unsigned long)blob->csb_end_offset,
3765 blob->csb_flags,
3766 blob->csb_platform_binary ? "yes" : "no",
3767 blob->csb_platform_path ? "yes" : "no",
3768 blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
3769 }
3770
3771 out:
3772 vnode_unlock(vp);
3773 }
3774
3775 struct cs_blob *
3776 ubc_cs_blob_get(
3777 struct vnode *vp,
3778 cpu_type_t cputype,
3779 off_t offset)
3780 {
3781 struct ubc_info *uip;
3782 struct cs_blob *blob;
3783 off_t offset_in_blob;
3784
3785 vnode_lock_spin(vp);
3786
3787 if (!UBCINFOEXISTS(vp)) {
3788 blob = NULL;
3789 goto out;
3790 }
3791
3792 uip = vp->v_ubcinfo;
3793 for (blob = uip->cs_blobs;
3794 blob != NULL;
3795 blob = blob->csb_next) {
3796 if (cputype != -1 && blob->csb_cpu_type == cputype) {
3797 break;
3798 }
3799 if (offset != -1) {
3800 offset_in_blob = offset - blob->csb_base_offset;
3801 if (offset_in_blob >= blob->csb_start_offset &&
3802 offset_in_blob < blob->csb_end_offset) {
3803 /* our offset is covered by this blob */
3804 break;
3805 }
3806 }
3807 }
3808
3809 out:
3810 vnode_unlock(vp);
3811
3812 return blob;
3813 }
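
/*
 * A minimal usage sketch for ubc_cs_blob_get(), using a hypothetical
 * helper name: passing -1 for cputype (or for offset) turns that argument
 * into a wildcard, so a caller that only cares whether some blob covers a
 * file offset can do the following (this mirrors the call made from
 * ubc_cs_is_range_codesigned() below).
 */
#if 0 /* illustrative sketch, not compiled */
static boolean_t
example_offset_has_blob(vnode_t vp, off_t offset)
{
	/* -1: any CPU type; match purely on the covered offset range */
	struct cs_blob *blob = ubc_cs_blob_get(vp, -1, offset);

	return blob != NULL ? TRUE : FALSE;
}
#endif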
3814
3815 static void
3816 ubc_cs_free(
3817 struct ubc_info *uip)
3818 {
3819 struct cs_blob *blob, *next_blob;
3820
3821 for (blob = uip->cs_blobs;
3822 blob != NULL;
3823 blob = next_blob) {
3824 next_blob = blob->csb_next;
3825 OSAddAtomic(-1, &cs_blob_count);
3826 OSAddAtomic((SInt32) - blob->csb_mem_size, &cs_blob_size);
3827 cs_blob_free(blob);
3828 }
3829 #if CHECK_CS_VALIDATION_BITMAP
3830 ubc_cs_validation_bitmap_deallocate( uip->ui_vnode );
3831 #endif
3832 uip->cs_blobs = NULL;
3833 }
3834
3835 /* check cs blob generation on vnode
3836 * returns:
3837 * 0 : Success, the cs_blob attached is current
3838 * ENEEDAUTH : Generation count mismatch. Needs authentication again.
3839 */
3840 int
3841 ubc_cs_generation_check(
3842 struct vnode *vp)
3843 {
3844 int retval = ENEEDAUTH;
3845
3846 vnode_lock_spin(vp);
3847
3848 if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
3849 retval = 0;
3850 }
3851
3852 vnode_unlock(vp);
3853 return retval;
3854 }
3855
3856 int
3857 ubc_cs_blob_revalidate(
3858 struct vnode *vp,
3859 struct cs_blob *blob,
3860 struct image_params *imgp,
3861 int flags
3862 )
3863 {
3864 int error = 0;
3865 const CS_CodeDirectory *cd = NULL;
3866 const CS_GenericBlob *entitlements = NULL;
3867 size_t size;
3868 assert(vp != NULL);
3869 assert(blob != NULL);
3870
3871 size = blob->csb_mem_size;
3872 error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
3873 size, &cd, &entitlements);
3874 if (error) {
3875 if (cs_debug) {
3876 printf("CODESIGNING: csblob invalid: %d\n", error);
3877 }
3878 goto out;
3879 }
3880
3881 unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
3882 unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
3883
3884 if (blob->csb_reconstituted) {
3885 /*
3886 * Code signatures that have been modified after validation
3887 * cannot be revalidated inline from their in-memory blob.
3888 *
3889 * That's okay, though, because the only path left that relies
3890 * on revalidation of existing in-memory blobs is the legacy
3891 * detached signature database path, which only exists on macOS,
3892 * which does not do reconstitution of any kind.
3893 */
3894 if (cs_debug) {
3895 printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
3896 }
3897
3898 /*
3899 * EAGAIN tells the caller that they may reread the code
3900 * signature and try attaching it again, which is the same
3901 * thing they would do if there was no cs_blob yet in the
3902 * first place.
3903 *
3904 * Conveniently, after ubc_cs_blob_add did a successful
3905 * validation, it will detect that a matching cs_blob (cdhash,
3906 * offset, arch etc.) already exists, and return success
3907 * without re-adding a cs_blob to the vnode.
3908 */
3909 return EAGAIN;
3910 }
3911
3912 /* callout to mac_vnode_check_signature */
3913 #if CONFIG_MACF
3914 error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags);
3915 if (cs_debug && error) {
3916 printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error);
3917 }
3918 #else
3919 (void)flags;
3920 (void)signer_type;
3921 #endif
3922
3923 /* update generation number if success */
3924 vnode_lock_spin(vp);
3925 blob->csb_flags = cs_flags;
3926 blob->csb_signer_type = signer_type;
3927 if (UBCINFOEXISTS(vp)) {
3928 if (error == 0) {
3929 vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
3930 } else {
3931 vp->v_ubcinfo->cs_add_gen = 0;
3932 }
3933 }
3934
3935 vnode_unlock(vp);
3936
3937 out:
3938 return error;
3939 }
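
/*
 * A sketch of how the generation counter and revalidation are meant to
 * compose, assuming a hypothetical caller: once cs_blob_reset_cache()
 * bumps cs_blob_generation_count, a vnode's cs_add_gen no longer matches,
 * ubc_cs_generation_check() returns ENEEDAUTH, and each attached blob is
 * pushed through ubc_cs_blob_revalidate().  EAGAIN from a reconstituted
 * blob means "re-read the signature from the file and add it again" (see
 * the comment above); that reload step is not shown.
 * example_recheck_signatures is hypothetical, not an xnu function.
 */
#if 0 /* illustrative sketch, not compiled */
static int
example_recheck_signatures(vnode_t vp, struct image_params *imgp)
{
	if (ubc_cs_generation_check(vp) == 0) {
		return 0;       /* generation still current, nothing to do */
	}

	for (struct cs_blob *blob = ubc_get_cs_blobs(vp);
	    blob != NULL;
	    blob = blob->csb_next) {
		int error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
		if (error == EAGAIN) {
			/* reconstituted signature: the caller would re-read
			 * it from disk and attach it again (not shown) */
			continue;
		}
		if (error) {
			return error;
		}
	}
	return 0;
}
#endif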
3940
3941 void
3942 cs_blob_reset_cache(void)
3943 {
3944 /* incrementing an odd number by 2 ensures the counter can never wrap around to '0'. */
3945 OSAddAtomic(+2, &cs_blob_generation_count);
3946 printf("Reseting cs_blob cache from all vnodes. \n");
3947 }
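
/*
 * Worked example for the comment above: the generation counter starts at
 * an odd value and is only ever incremented by 2, so it stays odd forever,
 * even across 32-bit wraparound (1 -> 3 -> ... -> 0xFFFFFFFF -> 1).  Since
 * 0 is even, the counter can never equal 0, which lets 0 serve as the
 * "force revalidation" value that ubc_cs_blob_revalidate() stores into
 * cs_add_gen when a blob fails its checks.
 */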
3948
3949 struct cs_blob *
3950 ubc_get_cs_blobs(
3951 struct vnode *vp)
3952 {
3953 struct ubc_info *uip;
3954 struct cs_blob *blobs;
3955
3956 /*
3957 * No need to take the vnode lock here. The caller must be holding
3958 * a reference on the vnode (via a VM mapping or open file descriptor),
3959 * so the vnode will not go away. The ubc_info stays until the vnode
3960 * goes away. And we only modify "blobs" by adding to the head of the
3961 * list.
3962 * The ubc_info could go away entirely if the vnode gets reclaimed as
3963 * part of a forced unmount. In the case of a code-signature validation
3964 * during a page fault, the "paging_in_progress" reference on the VM
3965 * object guarantees that the vnode pager (and the ubc_info) won't go
3966 * away during the fault.
3967 * Other callers need to protect against vnode reclaim by holding the
3968 * vnode lock, for example.
3969 */
3970
3971 if (!UBCINFOEXISTS(vp)) {
3972 blobs = NULL;
3973 goto out;
3974 }
3975
3976 uip = vp->v_ubcinfo;
3977 blobs = uip->cs_blobs;
3978
3979 out:
3980 return blobs;
3981 }
3982
3983 void
3984 ubc_get_cs_mtime(
3985 struct vnode *vp,
3986 struct timespec *cs_mtime)
3987 {
3988 struct ubc_info *uip;
3989
3990 if (!UBCINFOEXISTS(vp)) {
3991 cs_mtime->tv_sec = 0;
3992 cs_mtime->tv_nsec = 0;
3993 return;
3994 }
3995
3996 uip = vp->v_ubcinfo;
3997 cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
3998 cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
3999 }
4000
4001 unsigned long cs_validate_page_no_hash = 0;
4002 unsigned long cs_validate_page_bad_hash = 0;
4003 static boolean_t
4004 cs_validate_hash(
4005 struct cs_blob *blobs,
4006 memory_object_t pager,
4007 memory_object_offset_t page_offset,
4008 const void *data,
4009 vm_size_t *bytes_processed,
4010 unsigned *tainted)
4011 {
4012 union cs_hash_union mdctx;
4013 struct cs_hash const *hashtype = NULL;
4014 unsigned char actual_hash[CS_HASH_MAX_SIZE];
4015 unsigned char expected_hash[CS_HASH_MAX_SIZE];
4016 boolean_t found_hash;
4017 struct cs_blob *blob;
4018 const CS_CodeDirectory *cd;
4019 const unsigned char *hash;
4020 boolean_t validated;
4021 off_t offset; /* page offset in the file */
4022 size_t size;
4023 off_t codeLimit = 0;
4024 const char *lower_bound, *upper_bound;
4025 vm_offset_t kaddr, blob_addr;
4026
4027 /* retrieve the expected hash */
4028 found_hash = FALSE;
4029
4030 for (blob = blobs;
4031 blob != NULL;
4032 blob = blob->csb_next) {
4033 offset = page_offset - blob->csb_base_offset;
4034 if (offset < blob->csb_start_offset ||
4035 offset >= blob->csb_end_offset) {
4036 /* our page is not covered by this blob */
4037 continue;
4038 }
4039
4040 /* skip this blob if its backing data has been released */
4041 kaddr = blob->csb_mem_kaddr;
4042 if (kaddr == 0) {
4043 continue;
4044 }
4045
4046 blob_addr = kaddr + blob->csb_mem_offset;
4047 lower_bound = CAST_DOWN(char *, blob_addr);
4048 upper_bound = lower_bound + blob->csb_mem_size;
4049
4050 cd = blob->csb_cd;
4051 if (cd != NULL) {
4052 /* all CDs that have been injected have already been validated */
4053
4054 hashtype = blob->csb_hashtype;
4055 if (hashtype == NULL) {
4056 panic("unknown hash type ?");
4057 }
4058 if (hashtype->cs_digest_size > sizeof(actual_hash)) {
4059 panic("hash size too large");
4060 }
4061 if (offset & blob->csb_hash_pagemask) {
4062 panic("offset not aligned to cshash boundary");
4063 }
4064
4065 codeLimit = ntohl(cd->codeLimit);
4066
4067 hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
4068 hashtype->cs_size,
4069 lower_bound, upper_bound);
4070 if (hash != NULL) {
4071 bcopy(hash, expected_hash, hashtype->cs_size);
4072 found_hash = TRUE;
4073 }
4074
4075 break;
4076 }
4077 }
4078
4079 if (found_hash == FALSE) {
4080 /*
4081 * We can't verify this page because there is no signature
4082 * for it (yet). It's possible that this part of the object
4083 * is not signed, or that signatures for that part have not
4084 * been loaded yet.
4085 * Report that the page has not been validated and let the
4086 * caller decide if it wants to accept it or not.
4087 */
4088 cs_validate_page_no_hash++;
4089 if (cs_debug > 1) {
4090 printf("CODE SIGNING: cs_validate_page: "
4091 "mobj %p off 0x%llx: no hash to validate !?\n",
4092 pager, page_offset);
4093 }
4094 validated = FALSE;
4095 *tainted = 0;
4096 } else {
4097 *tainted = 0;
4098
4099 size = blob->csb_hash_pagesize;
4100 *bytes_processed = size;
4101
4102 const uint32_t *asha1, *esha1;
4103 if ((off_t)(offset + size) > codeLimit) {
4104 /* partial page at end of segment */
4105 assert(offset < codeLimit);
4106 size = (size_t) (codeLimit & blob->csb_hash_pagemask);
4107 *tainted |= CS_VALIDATE_NX;
4108 }
4109
4110 hashtype->cs_init(&mdctx);
4111
4112 if (blob->csb_hash_firstlevel_pagesize) {
4113 const unsigned char *partial_data = (const unsigned char *)data;
4114 size_t i;
4115 for (i = 0; i < size;) {
4116 union cs_hash_union partialctx;
4117 unsigned char partial_digest[CS_HASH_MAX_SIZE];
4118 size_t partial_size = MIN(size - i, blob->csb_hash_firstlevel_pagesize);
4119
4120 hashtype->cs_init(&partialctx);
4121 hashtype->cs_update(&partialctx, partial_data, partial_size);
4122 hashtype->cs_final(partial_digest, &partialctx);
4123
4124 /* Update cumulative multi-level hash */
4125 hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
4126 partial_data = partial_data + partial_size;
4127 i += partial_size;
4128 }
4129 } else {
4130 hashtype->cs_update(&mdctx, data, size);
4131 }
4132 hashtype->cs_final(actual_hash, &mdctx);
4133
4134 asha1 = (const uint32_t *) actual_hash;
4135 esha1 = (const uint32_t *) expected_hash;
4136
4137 if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
4138 if (cs_debug) {
4139 printf("CODE SIGNING: cs_validate_page: "
4140 "mobj %p off 0x%llx size 0x%lx: "
4141 "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
4142 "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
4143 pager, page_offset, size,
4144 asha1[0], asha1[1], asha1[2],
4145 asha1[3], asha1[4],
4146 esha1[0], esha1[1], esha1[2],
4147 esha1[3], esha1[4]);
4148 }
4149 cs_validate_page_bad_hash++;
4150 *tainted |= CS_VALIDATE_TAINTED;
4151 } else {
4152 if (cs_debug > 10) {
4153 printf("CODE SIGNING: cs_validate_page: "
4154 "mobj %p off 0x%llx size 0x%lx: "
4155 "SHA1 OK\n",
4156 pager, page_offset, size);
4157 }
4158 }
4159 validated = TRUE;
4160 }
4161
4162 return validated;
4163 }
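
/*
 * A self-contained sketch of the two-level hashing loop above, for the
 * case where csb_hash_firstlevel_pagesize is non-zero: each first-level
 * chunk of the page is hashed on its own, and the page-level digest is
 * the hash of the concatenated chunk digests.  The example_* names stand
 * in for the hashtype->cs_init/cs_update/cs_final ops and are hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
#define EXAMPLE_DIGEST_SIZE 32  /* e.g. a SHA-256 sized digest */

struct example_ctx { unsigned char opaque[128]; };
extern void example_init(struct example_ctx *ctx);
extern void example_update(struct example_ctx *ctx, const void *data, size_t len);
extern void example_final(unsigned char out[EXAMPLE_DIGEST_SIZE], struct example_ctx *ctx);

static void
example_two_level_page_hash(const unsigned char *page, size_t page_size,
    size_t firstlevel_size,     /* assumed non-zero */
    unsigned char out[EXAMPLE_DIGEST_SIZE])
{
	struct example_ctx outer;
	example_init(&outer);

	for (size_t i = 0; i < page_size;) {
		struct example_ctx inner;
		unsigned char sub[EXAMPLE_DIGEST_SIZE];
		size_t n = page_size - i;
		if (n > firstlevel_size) {
			n = firstlevel_size;
		}

		/* hash one first-level chunk on its own ... */
		example_init(&inner);
		example_update(&inner, page + i, n);
		example_final(sub, &inner);

		/* ... and fold its digest into the page-level hash */
		example_update(&outer, sub, sizeof(sub));
		i += n;
	}

	example_final(out, &outer);
}
#endif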
4164
4165 boolean_t
4166 cs_validate_range(
4167 struct vnode *vp,
4168 memory_object_t pager,
4169 memory_object_offset_t page_offset,
4170 const void *data,
4171 vm_size_t dsize,
4172 unsigned *tainted)
4173 {
4174 vm_size_t offset_in_range;
4175 boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
4176
4177 struct cs_blob *blobs = ubc_get_cs_blobs(vp);
4178
4179 *tainted = 0;
4180
4181 for (offset_in_range = 0;
4182 offset_in_range < dsize;
4183 /* offset_in_range updated based on bytes processed */) {
4184 unsigned subrange_tainted = 0;
4185 boolean_t subrange_validated;
4186 vm_size_t bytes_processed = 0;
4187
4188 subrange_validated = cs_validate_hash(blobs,
4189 pager,
4190 page_offset + offset_in_range,
4191 (const void *)((const char *)data + offset_in_range),
4192 &bytes_processed,
4193 &subrange_tainted);
4194
4195 *tainted |= subrange_tainted;
4196
4197 if (bytes_processed == 0) {
4198 /* Cannot make forward progress, so return an error */
4199 all_subranges_validated = FALSE;
4200 break;
4201 } else if (subrange_validated == FALSE) {
4202 all_subranges_validated = FALSE;
4203 /* Keep going to detect other types of failures in subranges */
4204 }
4205
4206 offset_in_range += bytes_processed;
4207 }
4208
4209 return all_subranges_validated;
4210 }
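
/*
 * A hypothetical caller of cs_validate_range(): the boolean result says
 * whether every sub-range had a matching hash, while *tainted accumulates
 * CS_VALIDATE_TAINTED / CS_VALIDATE_NX bits across sub-ranges.  This is a
 * sketch only; the real consumers live in the VM fault path.
 */
#if 0 /* illustrative sketch, not compiled */
static boolean_t
example_range_ok(vnode_t vp, memory_object_t pager,
    memory_object_offset_t offset, const void *data, vm_size_t size)
{
	unsigned tainted = 0;
	boolean_t valid;

	valid = cs_validate_range(vp, pager, offset, data, size, &tainted);

	if (tainted & CS_VALIDATE_TAINTED) {
		/* at least one sub-range hashed to the wrong value */
		return FALSE;
	}
	/* CS_VALIDATE_NX only flags the partial range past codeLimit;
	 * the VM layer typically maps such pages non-executable rather
	 * than rejecting them. */
	return valid;
}
#endif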
4211
4212 int
4213 ubc_cs_getcdhash(
4214 vnode_t vp,
4215 off_t offset,
4216 unsigned char *cdhash)
4217 {
4218 struct cs_blob *blobs, *blob;
4219 off_t rel_offset;
4220 int ret;
4221
4222 vnode_lock(vp);
4223
4224 blobs = ubc_get_cs_blobs(vp);
4225 for (blob = blobs;
4226 blob != NULL;
4227 blob = blob->csb_next) {
4228 /* compute offset relative to this blob */
4229 rel_offset = offset - blob->csb_base_offset;
4230 if (rel_offset >= blob->csb_start_offset &&
4231 rel_offset < blob->csb_end_offset) {
4232 /* this blob does cover our "offset" ! */
4233 break;
4234 }
4235 }
4236
4237 if (blob == NULL) {
4238 /* we didn't find a blob covering "offset" */
4239 ret = EBADEXEC; /* XXX any better error ? */
4240 } else {
4241 /* get the cdhash of that blob */
4242 bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
4243 ret = 0;
4244 }
4245
4246 vnode_unlock(vp);
4247
4248 return ret;
4249 }
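
/*
 * A minimal usage sketch for ubc_cs_getcdhash(), with a hypothetical
 * helper name: the hash copied out is CS_CDHASH_LEN bytes (the size of
 * csb_cdhash used in the bcopy above), so the caller's buffer must be at
 * least that large.
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_print_cdhash(vnode_t vp, off_t slice_offset)
{
	unsigned char cdhash[CS_CDHASH_LEN];

	if (ubc_cs_getcdhash(vp, slice_offset, cdhash) != 0) {
		printf("no code signature covers offset 0x%llx\n",
		    (uint64_t)slice_offset);
		return;
	}
	for (int i = 0; i < CS_CDHASH_LEN; i++) {
		printf("%02x", cdhash[i]);
	}
	printf("\n");
}
#endif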
4250
4251 boolean_t
4252 ubc_cs_is_range_codesigned(
4253 vnode_t vp,
4254 mach_vm_offset_t start,
4255 mach_vm_size_t size)
4256 {
4257 struct cs_blob *csblob;
4258 mach_vm_offset_t blob_start;
4259 mach_vm_offset_t blob_end;
4260
4261 if (vp == NULL) {
4262 /* no file: no code signature */
4263 return FALSE;
4264 }
4265 if (size == 0) {
4266 /* no range: no code signature */
4267 return FALSE;
4268 }
4269 if (start + size < start) {
4270 /* overflow */
4271 return FALSE;
4272 }
4273
4274 csblob = ubc_cs_blob_get(vp, -1, start);
4275 if (csblob == NULL) {
4276 return FALSE;
4277 }
4278
4279 /*
4280 * We currently check if the range is covered by a single blob,
4281 * which should always be the case for the dyld shared cache.
4282 * If we ever want to make this routine handle other cases, we
4283 * would have to iterate if the blob does not cover the full range.
4284 */
4285 blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
4286 csblob->csb_start_offset);
4287 blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
4288 csblob->csb_end_offset);
4289 if (blob_start > start || blob_end < (start + size)) {
4290 /* range not fully covered by this code-signing blob */
4291 return FALSE;
4292 }
4293
4294 return TRUE;
4295 }
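
/*
 * Note on the overflow guard above: "start + size < start" is the usual
 * unsigned wraparound test.  For example, start = 0xFFFFFFFFFFFFF000 with
 * size = 0x2000 wraps to 0x1000, which is smaller than start, so the range
 * is rejected instead of looking as if it were covered by a blob.
 */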
4296
4297 #if CHECK_CS_VALIDATION_BITMAP
4298 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
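/*
 * stob ("size to bitmap bytes") rounds the file size up to whole pages,
 * counts the pages, and divides by 8 rounding up, i.e. one bit per page.
 * With 4 KiB pages, for example, a 5-page (20 KiB) file needs
 * (5 + 7) >> 3 = 1 bitmap byte, and a 9-page file needs (9 + 7) >> 3 = 2.
 */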
4299 extern boolean_t root_fs_upgrade_try;
4300
4301 /*
4302 * Should we use the code-sign bitmap to avoid repeated code-sign validation?
4303 * Depends:
4304 * a) Is the target vnode on the root filesystem?
4305 * b) Has someone tried to mount the root filesystem read-write?
4306 * If answers are (a) yes AND (b) no, then we can use the bitmap.
4307 */
4308 #define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
4309 kern_return_t
4310 ubc_cs_validation_bitmap_allocate(
4311 vnode_t vp)
4312 {
4313 kern_return_t kr = KERN_SUCCESS;
4314 struct ubc_info *uip;
4315 char *target_bitmap;
4316 vm_object_size_t bitmap_size;
4317
4318 if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
4319 kr = KERN_INVALID_ARGUMENT;
4320 } else {
4321 uip = vp->v_ubcinfo;
4322
4323 if (uip->cs_valid_bitmap == NULL) {
4324 bitmap_size = stob(uip->ui_size);
4325 target_bitmap = (char*) kalloc((vm_size_t)bitmap_size );
4326 if (target_bitmap == 0) {
4327 kr = KERN_NO_SPACE;
4328 } else {
4329 kr = KERN_SUCCESS;
4330 }
4331 if (kr == KERN_SUCCESS) {
4332 memset( target_bitmap, 0, (size_t)bitmap_size);
4333 uip->cs_valid_bitmap = (void*)target_bitmap;
4334 uip->cs_valid_bitmap_size = bitmap_size;
4335 }
4336 }
4337 }
4338 return kr;
4339 }
4340
4341 kern_return_t
4342 ubc_cs_check_validation_bitmap(
4343 vnode_t vp,
4344 memory_object_offset_t offset,
4345 int optype)
4346 {
4347 kern_return_t kr = KERN_SUCCESS;
4348
4349 if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
4350 kr = KERN_INVALID_ARGUMENT;
4351 } else {
4352 struct ubc_info *uip = vp->v_ubcinfo;
4353 char *target_bitmap = uip->cs_valid_bitmap;
4354
4355 if (target_bitmap == NULL) {
4356 kr = KERN_INVALID_ARGUMENT;
4357 } else {
4358 uint64_t bit, byte;
4359 bit = atop_64( offset );
4360 byte = bit >> 3;
4361
4362 if (byte >= uip->cs_valid_bitmap_size) {
4363 kr = KERN_INVALID_ARGUMENT;
4364 } else {
4365 if (optype == CS_BITMAP_SET) {
4366 target_bitmap[byte] |= (1 << (bit & 07));
4367 kr = KERN_SUCCESS;
4368 } else if (optype == CS_BITMAP_CLEAR) {
4369 target_bitmap[byte] &= ~(1 << (bit & 07));
4370 kr = KERN_SUCCESS;
4371 } else if (optype == CS_BITMAP_CHECK) {
4372 if (target_bitmap[byte] & (1 << (bit & 07))) {
4373 kr = KERN_SUCCESS;
4374 } else {
4375 kr = KERN_FAILURE;
4376 }
4377 }
4378 }
4379 }
4380 }
4381 return kr;
4382 }
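
/*
 * Worked example for the bit arithmetic above, assuming 4 KiB pages:
 * for offset 0x9000, bit = atop_64(0x9000) = 9, byte = 9 >> 3 = 1, and the
 * mask is 1 << (9 & 07) = 0x02, so page 9's validation state lives in
 * bit 1 of bitmap byte 1.
 */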
4383
4384 void
4385 ubc_cs_validation_bitmap_deallocate(
4386 vnode_t vp)
4387 {
4388 struct ubc_info *uip;
4389 void *target_bitmap;
4390 vm_object_size_t bitmap_size;
4391
4392 if (UBCINFOEXISTS(vp)) {
4393 uip = vp->v_ubcinfo;
4394
4395 if ((target_bitmap = uip->cs_valid_bitmap) != NULL) {
4396 bitmap_size = uip->cs_valid_bitmap_size;
4397 kfree( target_bitmap, (vm_size_t) bitmap_size );
4398 uip->cs_valid_bitmap = NULL;
4399 }
4400 }
4401 }
4402 #else
4403 kern_return_t
4404 ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
4405 {
4406 return KERN_INVALID_ARGUMENT;
4407 }
4408
4409 kern_return_t
4410 ubc_cs_check_validation_bitmap(
4411 __unused struct vnode *vp,
4412 __unused memory_object_offset_t offset,
4413 __unused int optype)
4414 {
4415 return KERN_INVALID_ARGUMENT;
4416 }
4417
4418 void
4419 ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
4420 {
4421 return;
4422 }
4423 #endif /* CHECK_CS_VALIDATION_BITMAP */
4424
4425 #if PMAP_CS
4426 kern_return_t
4427 cs_associate_blob_with_mapping(
4428 void *pmap,
4429 vm_map_offset_t start,
4430 vm_map_size_t size,
4431 vm_object_offset_t offset,
4432 void *blobs_p)
4433 {
4434 off_t blob_start_offset, blob_end_offset;
4435 kern_return_t kr;
4436 struct cs_blob *blobs, *blob;
4437 vm_offset_t kaddr;
4438 struct pmap_cs_code_directory *cd_entry = NULL;
4439
4440 if (!pmap_cs) {
4441 return KERN_NOT_SUPPORTED;
4442 }
4443
4444 blobs = (struct cs_blob *)blobs_p;
4445
4446 for (blob = blobs;
4447 blob != NULL;
4448 blob = blob->csb_next) {
4449 blob_start_offset = (blob->csb_base_offset +
4450 blob->csb_start_offset);
4451 blob_end_offset = (blob->csb_base_offset +
4452 blob->csb_end_offset);
4453 if ((off_t) offset < blob_start_offset ||
4454 (off_t) offset >= blob_end_offset ||
4455 (off_t) (offset + size) <= blob_start_offset ||
4456 (off_t) (offset + size) > blob_end_offset) {
4457 continue;
4458 }
4459 kaddr = blob->csb_mem_kaddr;
4460 if (kaddr == 0) {
4461 /* blob data has been released */
4462 continue;
4463 }
4464 cd_entry = blob->csb_pmap_cs_entry;
4465 if (cd_entry == NULL) {
4466 continue;
4467 }
4468
4469 break;
4470 }
4471
4472 if (cd_entry != NULL) {
4473 kr = pmap_cs_associate(pmap,
4474 cd_entry,
4475 start,
4476 size);
4477 } else {
4478 kr = KERN_CODESIGN_ERROR;
4479 }
4480 #if 00
4481 printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
4482 kr = KERN_SUCCESS;
4483 #endif
4484 return kr;
4485 }
4486 #endif /* PMAP_CS */